Dataset schema (column, dtype, observed range or classes):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |

The records below follow this schema.
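To make the schema concrete, here is a minimal, self-contained sketch of filtering records of this shape in plain Python. The sample entry reuses metadata values from the first record below; the `keep` thresholds are illustrative assumptions, not values the dataset prescribes.

```python
# Illustrative only: `rows` stands in for records keyed by the schema's column names.
# The single sample entry copies metadata from the first record in this dump.
rows = [
    {
        "hexsha": "790cded3b9600099b43dc5f20edff876ac4d1fd1",
        "size": 597,
        "ext": "py",
        "lang": "Python",
        "avg_line_length": 37.3125,
        "max_line_length": 78,
        "alphanum_fraction": 0.747069,
    },
]

def keep(row, max_size=100_000, min_alnum=0.25, max_line=1_000):
    # Typical code-dataset quality filters; the cutoff values are assumptions.
    return (
        row["size"] <= max_size
        and row["alphanum_fraction"] >= min_alnum
        and row["max_line_length"] <= max_line
    )

kept = [r for r in rows if keep(r)]
print(len(kept))  # the sample record passes all three filters
```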
hexsha: 790cded3b9600099b43dc5f20edff876ac4d1fd1 | size: 597 | ext: py | lang: Python
max_stars_repo: mmocr/utils/__init__.py | jeffreykuang/mmocr-1 | b17304edeb493b0a4d7224c23d23b952350d0db5 | ["Apache-2.0"] | stars: 1 | 2021-04-19T02:26:57.000Z | 2021-04-19T02:26:57.000Z
max_issues_repo: mmocr/utils/__init__.py | jeffreykuang/mmocr-1 | b17304edeb493b0a4d7224c23d23b952350d0db5 | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: mmocr/utils/__init__.py | jeffreykuang/mmocr-1 | b17304edeb493b0a4d7224c23d23b952350d0db5 | ["Apache-2.0"] | forks: 1 | 2021-04-16T02:01:26.000Z | 2021-04-16T02:01:26.000Z
content:
from mmcv.utils import Registry, build_from_cfg
from .check_argument import (equal_len, is_2dlist, is_3dlist, is_ndarray_list,
is_none_or_type, is_type_list, valid_boundary)
from .collect_env import collect_env
from .img_util import drop_orientation
from .lmdb_util import lmdb_converter
from .logger import get_root_logger
__all__ = [
'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',
'is_3dlist', 'is_ndarray_list', 'is_type_list', 'is_none_or_type',
'equal_len', 'is_2dlist', 'valid_boundary', 'lmdb_converter',
'drop_orientation'
]
avg_line_length: 37.3125 | max_line_length: 78 | alphanum_fraction: 0.747069
content_no_comment:
from mmcv.utils import Registry, build_from_cfg
from .check_argument import (equal_len, is_2dlist, is_3dlist, is_ndarray_list,
is_none_or_type, is_type_list, valid_boundary)
from .collect_env import collect_env
from .img_util import drop_orientation
from .lmdb_util import lmdb_converter
from .logger import get_root_logger
__all__ = [
'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',
'is_3dlist', 'is_ndarray_list', 'is_type_list', 'is_none_or_type',
'equal_len', 'is_2dlist', 'valid_boundary', 'lmdb_converter',
'drop_orientation'
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
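Each record pairs `content` with `content_no_comment` plus the two `is_*_removed` flags. As a rough idea of how such a field could be derived (an assumption about the preprocessing, not a documented pipeline), a tokenizer-based pass can drop `#` comments without touching string literals:

```python
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    # Drop COMMENT tokens and rebuild the source. tokenize only lexes, so this
    # also handles most Python 2 files; badly indented input may still raise.
    tokens = [
        tok
        for tok in tokenize.generate_tokens(io.StringIO(source).readline)
        if tok.type != tokenize.COMMENT
    ]
    return tokenize.untokenize(tokens)

sample = "x = 1  # trailing comment\n# full-line comment\ny = 2\n"
print(strip_sharp_comments(sample))
```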
hexsha: 790cdf8429094e91808ea1c5cdc5626621fd9ba5 | size: 1,578 | ext: py | lang: Python
max_stars_repo: common/configParams.py | MistSun-Chen/py_verifier | 7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555 | ["MIT"] | stars: null | null | null
max_issues_repo: common/configParams.py | MistSun-Chen/py_verifier | 7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555 | ["MIT"] | issues: null | null | null
max_forks_repo: common/configParams.py | MistSun-Chen/py_verifier | 7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555 | ["MIT"] | forks: null | null | null
content:
import os
class ConfigParams:
def __init__(self,configPath):
self.env_dist = os.environ
        # Authorization / API-key check
self.api_key = ""
# userID = ""
# ip = "0.0.0.0"
        # Root directory for model-related files
self.modelPath = os.path.join(os.getcwd(),"model")
cpuCores = 0
threads = 2
port = 33388
batchSize = 10
        # Number of GPUs used by each algorithm
self.GPUDevices = 1
topK = 80
featureSize = 512
zmqthreads = 2
self.CPU = 0
self.zmqAddr = "tcp://{}:5560".format(self.env_dist["ZMQ_ADDR"]) if "ZMQ_ADDR" in self.env_dist else "tcp://127.0.0.1:5570"
print(str(self.zmqAddr))
self.helmet_ids = self.parseAI("HELMET") if "HELMET" in self.env_dist else []
self.pose_ids = self.parseAI("POSE") if "POSE" in self.env_dist else []
self.track_coal_ids = self.parseAI("TRACK_COAL") if "TRACK_COAL" in self.env_dist else []
self.smoke_phone_ids = self.parseAI("SMOKEPHONE") if "SMOKEPHONE" in self.env_dist else []
# self.helmet_ids = [1,1,1]
# self.pose_ids = []
# self.track_coal_ids = []
# self.smoke_phone_ids = []
def loadConfig(self,configPath):
pass
def generateDefaultConfig(self,configPath):
pass
def initEasylogging(self,logConfig):
pass
def printParams(self):
print("run configParams function printParams")
pass
def parseAI(self,key):
ai_ids = []
for i in self.env_dist[key].split(','):
ai_ids.append(int(i))
return ai_ids
avg_line_length: 26.3 | max_line_length: 131 | alphanum_fraction: 0.576046
content_no_comment:
import os
class ConfigParams:
def __init__(self,configPath):
self.env_dist = os.environ
self.api_key = ""
self.modelPath = os.path.join(os.getcwd(),"model")
cpuCores = 0
threads = 2
port = 33388
batchSize = 10
self.GPUDevices = 1
topK = 80
featureSize = 512
zmqthreads = 2
self.CPU = 0
self.zmqAddr = "tcp://{}:5560".format(self.env_dist["ZMQ_ADDR"]) if "ZMQ_ADDR" in self.env_dist else "tcp://127.0.0.1:5570"
print(str(self.zmqAddr))
self.helmet_ids = self.parseAI("HELMET") if "HELMET" in self.env_dist else []
self.pose_ids = self.parseAI("POSE") if "POSE" in self.env_dist else []
self.track_coal_ids = self.parseAI("TRACK_COAL") if "TRACK_COAL" in self.env_dist else []
self.smoke_phone_ids = self.parseAI("SMOKEPHONE") if "SMOKEPHONE" in self.env_dist else []
def loadConfig(self,configPath):
pass
def generateDefaultConfig(self,configPath):
pass
def initEasylogging(self,logConfig):
pass
def printParams(self):
print("run configParams function printParams")
pass
def parseAI(self,key):
ai_ids = []
for i in self.env_dist[key].split(','):
ai_ids.append(int(i))
return ai_ids
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790cdfb802dac570187a5ff4b18edd7acd0d9c4a | size: 2,658 | ext: py | lang: Python
max_stars_repo: udacity_deep_learning/download_data.py | fcarsten/ai_playground | ba52378a56b8a4400d594ae70ff03af2a0e36f12 | ["Apache-2.0"] | stars: null | null | null
max_issues_repo: udacity_deep_learning/download_data.py | fcarsten/ai_playground | ba52378a56b8a4400d594ae70ff03af2a0e36f12 | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: udacity_deep_learning/download_data.py | fcarsten/ai_playground | ba52378a56b8a4400d594ae70ff03af2a0e36f12 | ["Apache-2.0"] | forks: null | null | null
content:
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
avg_line_length: 35.918919 | max_line_length: 99 | alphanum_fraction: 0.674191
content_no_comment:
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.'
def download_progress_hook(count, blockSize, totalSize):
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0]
if os.path.isdir(root) and not force:
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ce08cc2acdf83deaa1a6f0546f793d2ec0e50 | size: 2,597 | ext: py | lang: Python
max_stars_repo: check_and_approve_hits.py | maxspero/ccr-amt | 11bd8ec499e263034cee52996f6ce9974cfbea10 | ["MIT"] | stars: null | null | null
max_issues_repo: check_and_approve_hits.py | maxspero/ccr-amt | 11bd8ec499e263034cee52996f6ce9974cfbea10 | ["MIT"] | issues: null | null | null
max_forks_repo: check_and_approve_hits.py | maxspero/ccr-amt | 11bd8ec499e263034cee52996f6ce9974cfbea10 | ["MIT"] | forks: null | null | null
content:
import argparse, json
import simpleamt
import MySQLdb
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
parser.add_argument('-f', action='store_true', default=False)
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
approve_ids = []
reject_ids = []
if args.hit_ids_file is None:
parser.error('Must specify --hit_ids_file.')
with open(args.hit_ids_file, 'r') as f:
hit_ids = [line.strip() for line in f]
conn = MySQLdb.connect(host='localhost', user='root', passwd='password', db='ccr_db')
cursor = conn.cursor()
for hit_id in hit_ids:
try:
assignments = mtc.get_assignments(hit_id)
except:
continue
for a in assignments:
if a.AssignmentStatus == 'Submitted':
try:
# Try to parse the output from the assignment. If it isn't
# valid JSON then we reject the assignment.
output = json.loads(a.answers[0][0].fields[0])
# Check if HIT assignment properly completed!
print("output = ", output)
cursor.execute('SELECT successful, paid FROM hashes WHERE hash=%s;', (output['hash'],))
row = cursor.fetchone();
if row is None:
reject_ids.append(a.AssignmentId)
print('none reject')
continue
successful, paid = row
if paid == 1 or successful == 0:
reject_ids.append(a.AssignmentId)
print('other reject, paid=', paid, 'successful=', successful)
else:
cursor.execute('UPDATE hashes SET paid = 1 WHERE hash=%s;', (output['hash'],))
approve_ids.append(a.AssignmentId)
print('accept')
except ValueError as e:
reject_ids.append(a.AssignmentId)
else:
print "hit %s has already been %s" % (str(hit_id), a.AssignmentStatus)
print ('This will approve %d assignments and reject %d assignments with '
'sandbox=%s' % (len(approve_ids), len(reject_ids), str(args.sandbox)))
print 'Continue?'
if not args.f:
s = raw_input('(Y/N): ')
else:
s = 'Y'
if s == 'Y' or s == 'y':
print 'Approving assignments'
for idx, assignment_id in enumerate(approve_ids):
print 'Approving assignment %d / %d' % (idx + 1, len(approve_ids))
mtc.approve_assignment(assignment_id)
for idx, assignment_id in enumerate(reject_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(reject_ids))
mtc.reject_assignment(assignment_id, feedback='Invalid results')
else:
print 'Aborting'
avg_line_length: 36.069444 | max_line_length: 97 | alphanum_fraction: 0.630343
content_no_comment:
import argparse, json
import simpleamt
import MySQLdb
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
parser.add_argument('-f', action='store_true', default=False)
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
approve_ids = []
reject_ids = []
if args.hit_ids_file is None:
parser.error('Must specify --hit_ids_file.')
with open(args.hit_ids_file, 'r') as f:
hit_ids = [line.strip() for line in f]
conn = MySQLdb.connect(host='localhost', user='root', passwd='password', db='ccr_db')
cursor = conn.cursor()
for hit_id in hit_ids:
try:
assignments = mtc.get_assignments(hit_id)
except:
continue
for a in assignments:
if a.AssignmentStatus == 'Submitted':
try:
# valid JSON then we reject the assignment.
output = json.loads(a.answers[0][0].fields[0])
# Check if HIT assignment properly completed!
print("output = ", output)
cursor.execute('SELECT successful, paid FROM hashes WHERE hash=%s;', (output['hash'],))
row = cursor.fetchone();
if row is None:
reject_ids.append(a.AssignmentId)
print('none reject')
continue
successful, paid = row
if paid == 1 or successful == 0:
reject_ids.append(a.AssignmentId)
print('other reject, paid=', paid, 'successful=', successful)
else:
cursor.execute('UPDATE hashes SET paid = 1 WHERE hash=%s;', (output['hash'],))
approve_ids.append(a.AssignmentId)
print('accept')
except ValueError as e:
reject_ids.append(a.AssignmentId)
else:
print "hit %s has already been %s" % (str(hit_id), a.AssignmentStatus)
print ('This will approve %d assignments and reject %d assignments with '
'sandbox=%s' % (len(approve_ids), len(reject_ids), str(args.sandbox)))
print 'Continue?'
if not args.f:
s = raw_input('(Y/N): ')
else:
s = 'Y'
if s == 'Y' or s == 'y':
print 'Approving assignments'
for idx, assignment_id in enumerate(approve_ids):
print 'Approving assignment %d / %d' % (idx + 1, len(approve_ids))
mtc.approve_assignment(assignment_id)
for idx, assignment_id in enumerate(reject_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(reject_ids))
mtc.reject_assignment(assignment_id, feedback='Invalid results')
else:
print 'Aborting'
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 790ce14c7c3788fda8bc517d6910cbcd5ead08b9 | size: 2,153 | ext: py | lang: Python
max_stars_repo: turk/turk/report/pending_order_detail/pending_order_detail.py | Ehtasham-Muzaffar/turk | edb064eed6dac95751f6fe7e510d3a5b3b9b5ff9 | ["MIT"] | stars: 1 | 2021-08-07T12:48:02.000Z | 2021-08-07T12:48:02.000Z
max_issues_repo: turk/turk/report/pending_order_detail/pending_order_detail.py | Ehtasham-Muzaffar/turk | edb064eed6dac95751f6fe7e510d3a5b3b9b5ff9 | ["MIT"] | issues: null | null | null
max_forks_repo: turk/turk/report/pending_order_detail/pending_order_detail.py | Ehtasham-Muzaffar/turk | edb064eed6dac95751f6fe7e510d3a5b3b9b5ff9 | ["MIT"] | forks: 4 | 2021-01-16T06:14:58.000Z | 2022-02-07T06:36:41.000Z
content:
# Copyright (c) 2013, RC and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
{
"fieldname": "po_number",
"fieldtype": "Data",
"label": "Po Number",
"width": 120
},
{
"fieldname": "ordered_qty",
"fieldtype": "Float",
"label": "Ordered Qty",
"width": 150
},
{
"fieldname": "received_qty",
"fieldtype": "Float",
"label": "Received Qty",
"width": 150
},
{
"fieldname": "pending_qty",
"fieldtype": "Float",
"label": "Pending Qty",
"width": 150
}
]
def get_data(filters):
if not filters.get('company'):
frappe.throw(_("Select Company!"))
if not filters.get('from_date'):
frappe.throw(_("Select From Date!"))
if not filters.get('to_date'):
frappe.throw(_("Select To Date!"))
query = """select po_number, sum(cust_total_box) as order_qty from `tabPurchase Order`
where company = '{0}' and transaction_date between '{1}' and '{2}'
and po_number is not null and po_number != 'PENDING'
and docstatus = 1""".format(filters.get('company'),filters.get('from_date'),filters.get('to_date'))
if filters.get('supplier'):
query += " and supplier = '{0}'".format(filters.get('supplier'))
query += " group by po_number"
po = frappe.db.sql(query, as_dict=True)
data = []
for res in po:
query1 = """select sum(boxes) from `tabPurchase Invoice` as pi
inner join `tabPurchase Invoice Item` as pii on pii.parent = pi.name
where company = '{0}' and pi.posting_date between '{1}' and '{2}'
and pi.po_number = '{3}'
and pi.docstatus = 1""".format(filters.get('company'), filters.get('from_date'),
filters.get('to_date'), res.po_number)
if filters.get('supplier'):
query1 += " and pi.supplier = '{0}'".format(filters.get('supplier'))
pi = float(frappe.db.sql(query1)[0][0] or 0)
data.append(frappe._dict({
"po_number": res.po_number,
"ordered_qty": res.order_qty,
"received_qty": pi,
"pending_qty": res.order_qty - pi
}))
return data
avg_line_length: 26.580247 | max_line_length: 103 | alphanum_fraction: 0.640502
content_no_comment:
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
{
"fieldname": "po_number",
"fieldtype": "Data",
"label": "Po Number",
"width": 120
},
{
"fieldname": "ordered_qty",
"fieldtype": "Float",
"label": "Ordered Qty",
"width": 150
},
{
"fieldname": "received_qty",
"fieldtype": "Float",
"label": "Received Qty",
"width": 150
},
{
"fieldname": "pending_qty",
"fieldtype": "Float",
"label": "Pending Qty",
"width": 150
}
]
def get_data(filters):
if not filters.get('company'):
frappe.throw(_("Select Company!"))
if not filters.get('from_date'):
frappe.throw(_("Select From Date!"))
if not filters.get('to_date'):
frappe.throw(_("Select To Date!"))
query = """select po_number, sum(cust_total_box) as order_qty from `tabPurchase Order`
where company = '{0}' and transaction_date between '{1}' and '{2}'
and po_number is not null and po_number != 'PENDING'
and docstatus = 1""".format(filters.get('company'),filters.get('from_date'),filters.get('to_date'))
if filters.get('supplier'):
query += " and supplier = '{0}'".format(filters.get('supplier'))
query += " group by po_number"
po = frappe.db.sql(query, as_dict=True)
data = []
for res in po:
query1 = """select sum(boxes) from `tabPurchase Invoice` as pi
inner join `tabPurchase Invoice Item` as pii on pii.parent = pi.name
where company = '{0}' and pi.posting_date between '{1}' and '{2}'
and pi.po_number = '{3}'
and pi.docstatus = 1""".format(filters.get('company'), filters.get('from_date'),
filters.get('to_date'), res.po_number)
if filters.get('supplier'):
query1 += " and pi.supplier = '{0}'".format(filters.get('supplier'))
pi = float(frappe.db.sql(query1)[0][0] or 0)
data.append(frappe._dict({
"po_number": res.po_number,
"ordered_qty": res.order_qty,
"received_qty": pi,
"pending_qty": res.order_qty - pi
}))
return data
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ce1a643131eb5ab1c1570f55e4988783c31d3 | size: 868 | ext: py | lang: Python
max_stars_repo: python_api/notifications/models.py | hyecheon/python_api | 150bad58c21da4c3a635454b768722958035b320 | ["MIT"] | stars: null | null | null
max_issues_repo: python_api/notifications/models.py | hyecheon/python_api | 150bad58c21da4c3a635454b768722958035b320 | ["MIT"] | issues: 20 | 2020-06-05T16:58:52.000Z | 2022-03-11T23:23:08.000Z
max_forks_repo: python_api/notifications/models.py | hyecheon/python_api | 150bad58c21da4c3a635454b768722958035b320 | ["MIT"] | forks: null | null | null
content:
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from python_api.users import models as user_models
from python_api.images import models as image_models
@python_2_unicode_compatible
class Notification(image_models.TimeStampedModel):
TYPE_CHOICES = (
('like', 'Like'),
('comment', 'Comment'),
('follow', 'Follow')
)
creator = models.ForeignKey(user_models.User, related_name='creator')
to = models.ForeignKey(user_models.User, related_name='to')
notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
image = models.ForeignKey(image_models.Image, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
class Meat:
ordering = ['-created_at']
def __str__(self):
return 'From {} {}'.format(self.creator, self.to)
avg_line_length: 34.72 | max_line_length: 77 | alphanum_fraction: 0.717742
content_no_comment:
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from python_api.users import models as user_models
from python_api.images import models as image_models
@python_2_unicode_compatible
class Notification(image_models.TimeStampedModel):
TYPE_CHOICES = (
('like', 'Like'),
('comment', 'Comment'),
('follow', 'Follow')
)
creator = models.ForeignKey(user_models.User, related_name='creator')
to = models.ForeignKey(user_models.User, related_name='to')
notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
image = models.ForeignKey(image_models.Image, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
class Meat:
ordering = ['-created_at']
def __str__(self):
return 'From {} {}'.format(self.creator, self.to)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ce1d653bc2584a6b89c61dfa40813affa5835 | size: 544 | ext: py | lang: Python
max_stars_repo: examples/multiprocessing/joinable_queue.py | otrack/lithops | 81ffe3aa16f4483881e172e8805966735cc6e850 | ["Apache-2.0"] | stars: null | null | null
max_issues_repo: examples/multiprocessing/joinable_queue.py | otrack/lithops | 81ffe3aa16f4483881e172e8805966735cc6e850 | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: examples/multiprocessing/joinable_queue.py | otrack/lithops | 81ffe3aa16f4483881e172e8805966735cc6e850 | ["Apache-2.0"] | forks: null | null | null
content:
from lithops.multiprocessing import Process, JoinableQueue
def worker(q):
working = True
while working:
x = q.get()
# Do work that may fail
assert x < 10
# Confirm task
q.task_done()
if x == -1:
working = False
if __name__ == '__main__':
q = JoinableQueue()
p = Process(target=worker, args=(q,))
p.start()
for x in range(10):
q.put(x)
# uncomment to hang on the q.join
#q.put(11)
q.join()
q.put(-1) # end loop
p.join()
avg_line_length: 16.484848 | max_line_length: 58 | alphanum_fraction: 0.527574
content_no_comment:
from lithops.multiprocessing import Process, JoinableQueue
def worker(q):
working = True
while working:
x = q.get()
assert x < 10
q.task_done()
if x == -1:
working = False
if __name__ == '__main__':
q = JoinableQueue()
p = Process(target=worker, args=(q,))
p.start()
for x in range(10):
q.put(x)
q.join()
q.put(-1)
p.join()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ce2149d0e699508616c413b6925e846338115 | size: 10,198 | ext: py | lang: Python
max_stars_repo: hyperengine/tests/spec_test.py | KOLANICH/hyper-engine | 60ba73438fdbef9320a849ee65f36da977f68eca | ["Apache-2.0"] | stars: null | null | null
max_issues_repo: hyperengine/tests/spec_test.py | KOLANICH/hyper-engine | 60ba73438fdbef9320a849ee65f36da977f68eca | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: hyperengine/tests/spec_test.py | KOLANICH/hyper-engine | 60ba73438fdbef9320a849ee65f36da977f68eca | ["Apache-2.0"] | forks: null | null | null
content:
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import six
import unittest
from hyperengine.spec import *
class SpecTest(unittest.TestCase):
def test_zero_nodes(self):
def check_zero_nodes(spec):
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(spec, parsed.instantiate([]))
check_zero_nodes(1)
check_zero_nodes([])
check_zero_nodes([1, 2, 3])
check_zero_nodes((1, 2, 3))
check_zero_nodes({})
check_zero_nodes({'a': 0, 'b': 1})
check_zero_nodes({'a': [1, 2], 'b': {'key': (1, 2)}})
def test_uniform(self):
spec = uniform()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(0.5, parsed.instantiate([0.5]))
self.assertEqual(1.0, parsed.instantiate([1.0]))
def test_uniform_rev(self):
spec = uniform(4, 0)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(2.0, parsed.instantiate([0.5]))
self.assertEqual(4.0, parsed.instantiate([1.0]))
def test_uniform_negative(self):
spec = uniform(-4, -2)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(-4.0, parsed.instantiate([0.0]))
self.assertEqual(-3.0, parsed.instantiate([0.5]))
self.assertEqual(-2.0, parsed.instantiate([1.0]))
def test_uniform_negative_rev(self):
spec = uniform(-2, -4)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(-4.0, parsed.instantiate([0.0]))
self.assertEqual(-3.0, parsed.instantiate([0.5]))
self.assertEqual(-2.0, parsed.instantiate([1.0]))
def test_normal(self):
spec = normal()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertAlmostEqual(-1.0, parsed.instantiate([0.1587]), delta=0.001)
self.assertAlmostEqual(-0.5, parsed.instantiate([0.3085]), delta=0.001)
self.assertAlmostEqual( 0.0, parsed.instantiate([0.5000]), delta=0.001)
self.assertAlmostEqual( 0.7, parsed.instantiate([0.7580]), delta=0.001)
self.assertAlmostEqual( 0.9, parsed.instantiate([0.8159]), delta=0.001)
def test_choice(self):
spec = choice([10, 20, 30])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(10, parsed.instantiate([0.0]))
self.assertEqual(20, parsed.instantiate([0.5]))
self.assertEqual(30, parsed.instantiate([1.0]))
def test_choice_str(self):
spec = choice(['foo', 'bar'])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual('foo', parsed.instantiate([0.0]))
self.assertEqual('bar', parsed.instantiate([1.0]))
def test_merge(self):
spec = merge([uniform(), uniform()], lambda x, y: x+y)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([0.0, 0.5]))
self.assertEqual(1.5, parsed.instantiate([0.5, 1.0]))
self.assertEqual(2.0, parsed.instantiate([1.0, 1.0]))
def test_transform(self):
spec = wrap(uniform(), lambda x: x*x)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(4.0, parsed.instantiate([2.0]))
def test_transform_merge(self):
spec = wrap(merge([uniform(), uniform()], lambda x, y: x+y), lambda x: x*x)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(1.0, parsed.instantiate([0.0, 1.0]))
self.assertEqual(4.0, parsed.instantiate([1.0, 1.0]))
def test_duplicate_nodes_1(self):
node = uniform()
spec = merge([node, node, node], lambda x, y, z: x+y+z)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(3.0, parsed.instantiate([1.0]))
self.assertEqual(9.0, parsed.instantiate([3.0]))
def test_duplicate_nodes_2(self):
node = uniform()
spec = [[node, node]]
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual([[1.0, 1.0]], parsed.instantiate([1.0]))
def test_duplicate_nodes_3(self):
spec = [uniform()] * 3
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual([0.0, 0.0, 0.0], parsed.instantiate([0.0]))
self.assertEqual([1.0, 1.0, 1.0], parsed.instantiate([1.0]))
def test_merge_choice(self):
spec = choice([uniform(0, 1), uniform(2, 3)])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(0.0, parsed.instantiate([0.0, 0.0, 0.0]))
self.assertEqual(1.0, parsed.instantiate([1.0, 0.0, 0.0]))
self.assertEqual(2.0, parsed.instantiate([0.0, 0.0, 0.9]))
self.assertEqual(3.0, parsed.instantiate([0.0, 1.0, 0.9]))
def test_if_condition(self):
def if_cond(switch, size, num):
if switch > 0.5:
return [size, num, num]
return [size, num]
spec = merge([uniform(0, 1), uniform(1, 2), uniform(2, 3)], if_cond)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual([1, 2], parsed.instantiate([0, 0, 0]))
self.assertEqual([2, 3], parsed.instantiate([0, 1, 1]))
self.assertEqual([1, 2, 2], parsed.instantiate([1, 0, 0]))
self.assertEqual([2, 3, 3], parsed.instantiate([1, 1, 1]))
def test_object(self):
class Dummy: pass
dummy = Dummy
dummy.value = uniform()
dummy.foo = 'bar'
dummy.ref = dummy
spec = dummy
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
instance = parsed.instantiate([0])
self.assertEqual(0, instance.value)
self.assertEqual('bar', instance.foo)
self.assertEqual(instance, instance.ref)
def test_dict(self):
spec = {1: uniform(), 2: choice(['foo', 'bar']), 3: merge(lambda x: -x, uniform())}
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual({1: 0.0, 2: 'foo', 3: 0.0}, parsed.instantiate([0, 0, 0]))
self.assertEqual({1: 1.0, 2: 'bar', 3: -1.0}, parsed.instantiate([1, 1, 1]))
def test_dict_deep_1(self):
spec = {1: {'foo': uniform() } }
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
def test_dict_deep_2(self):
spec = {'a': {'b': {'c': { 'd': uniform() } } } }
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
def test_math_operations_1(self):
spec = uniform() + 1
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(2.0, parsed.instantiate([1.0]))
def test_math_operations_2(self):
spec = uniform() * (uniform() ** 2 + 1) / uniform()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(2.0, parsed.instantiate([1.0, 1.0, 1.0]))
self.assertEqual(1.0, parsed.instantiate([0.5, 1.0, 1.0]))
self.assertEqual(1.0, parsed.instantiate([0.5, 0.0, 0.5]))
def test_math_operations_3(self):
spec = 2 / (1 + uniform()) * (3 - uniform() + 4 ** uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(6.0, parsed.instantiate([1.0, 1.0, 1.0]))
def test_math_operations_4(self):
spec = choice(['foo', 'bar']) + '-' + choice(['abc', 'def'])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual('foo-abc', parsed.instantiate([0.0, 0.0]))
self.assertEqual('bar-def', parsed.instantiate([1.0, 1.0]))
def test_min_1(self):
spec = min(uniform(), uniform(), 0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([1.0, 0.7]))
self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
self.assertEqual(0.0, parsed.instantiate([0.0, 0.5]))
def test_min_2(self):
spec = min(uniform(), 0.8, 0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.5, parsed.instantiate([1.0]))
self.assertEqual(0.5, parsed.instantiate([0.5]))
self.assertEqual(0.2, parsed.instantiate([0.2]))
def test_min_3(self):
spec = min(uniform(), uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
self.assertEqual(0.2, parsed.instantiate([0.2, 0.5]))
def test_max_1(self):
spec = max(0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(0.5, parsed.instantiate([]))
def test_max_2(self):
spec = max(0.5, 1.0)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(1.0, parsed.instantiate([]))
def test_max_3(self):
spec = max(uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(1.0, parsed.instantiate([1.0]))
self.assertEqual(0.0, parsed.instantiate([0.0]))
def test_name_1(self):
aaa = uniform()
bbb = choice(['foo'])
ccc = uniform(-1, 1)
ddd = uniform()
spec = {'aaa': aaa, 'bbb': bbb, 'ccc': ccc **2, 'ddd': [ddd, ddd]}
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 4)
self.assertTrue('aaa' in aaa.name())
self.assertTrue('uniform' in aaa.name())
self.assertTrue('bbb' in bbb.name())
self.assertTrue('choice' in bbb.name())
self.assertTrue('ccc' in ccc.name())
self.assertTrue('uniform' in ccc.name())
self.assertTrue('ddd' in ddd.name())
self.assertTrue('uniform' in ddd.name())
def test_name_2(self):
norm_node = normal()
choice_node = choice([uniform(), uniform(), uniform()])
spec = {'a': {'b': {'c': { 'd': norm_node, 0: choice_node } } } }
# stats.norm.ppf is an instance method in python 2
expected_normal_name = 'norm_gen' if six.PY2 else 'ppf'
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 5)
self.assertTrue('a-b-c-d' in norm_node.name(), 'name=%s' % norm_node.name())
self.assertTrue(expected_normal_name in norm_node.name(), 'name=%s' % norm_node.name())
self.assertTrue('a-b-c-0' in choice_node.name(), 'name=%s' % choice_node.name())
self.assertTrue('choice' in choice_node.name(), 'name=%s' % choice_node.name())
avg_line_length: 32.170347 | max_line_length: 91 | alphanum_fraction: 0.63836
content_no_comment:
__author__ = 'maxim'
import six
import unittest
from hyperengine.spec import *
class SpecTest(unittest.TestCase):
def test_zero_nodes(self):
def check_zero_nodes(spec):
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(spec, parsed.instantiate([]))
check_zero_nodes(1)
check_zero_nodes([])
check_zero_nodes([1, 2, 3])
check_zero_nodes((1, 2, 3))
check_zero_nodes({})
check_zero_nodes({'a': 0, 'b': 1})
check_zero_nodes({'a': [1, 2], 'b': {'key': (1, 2)}})
def test_uniform(self):
spec = uniform()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(0.5, parsed.instantiate([0.5]))
self.assertEqual(1.0, parsed.instantiate([1.0]))
def test_uniform_rev(self):
spec = uniform(4, 0)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(2.0, parsed.instantiate([0.5]))
self.assertEqual(4.0, parsed.instantiate([1.0]))
def test_uniform_negative(self):
spec = uniform(-4, -2)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(-4.0, parsed.instantiate([0.0]))
self.assertEqual(-3.0, parsed.instantiate([0.5]))
self.assertEqual(-2.0, parsed.instantiate([1.0]))
def test_uniform_negative_rev(self):
spec = uniform(-2, -4)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(-4.0, parsed.instantiate([0.0]))
self.assertEqual(-3.0, parsed.instantiate([0.5]))
self.assertEqual(-2.0, parsed.instantiate([1.0]))
def test_normal(self):
spec = normal()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertAlmostEqual(-1.0, parsed.instantiate([0.1587]), delta=0.001)
self.assertAlmostEqual(-0.5, parsed.instantiate([0.3085]), delta=0.001)
self.assertAlmostEqual( 0.0, parsed.instantiate([0.5000]), delta=0.001)
self.assertAlmostEqual( 0.7, parsed.instantiate([0.7580]), delta=0.001)
self.assertAlmostEqual( 0.9, parsed.instantiate([0.8159]), delta=0.001)
def test_choice(self):
spec = choice([10, 20, 30])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(10, parsed.instantiate([0.0]))
self.assertEqual(20, parsed.instantiate([0.5]))
self.assertEqual(30, parsed.instantiate([1.0]))
def test_choice_str(self):
spec = choice(['foo', 'bar'])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual('foo', parsed.instantiate([0.0]))
self.assertEqual('bar', parsed.instantiate([1.0]))
def test_merge(self):
spec = merge([uniform(), uniform()], lambda x, y: x+y)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([0.0, 0.5]))
self.assertEqual(1.5, parsed.instantiate([0.5, 1.0]))
self.assertEqual(2.0, parsed.instantiate([1.0, 1.0]))
def test_transform(self):
spec = wrap(uniform(), lambda x: x*x)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(4.0, parsed.instantiate([2.0]))
def test_transform_merge(self):
spec = wrap(merge([uniform(), uniform()], lambda x, y: x+y), lambda x: x*x)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(1.0, parsed.instantiate([0.0, 1.0]))
self.assertEqual(4.0, parsed.instantiate([1.0, 1.0]))
def test_duplicate_nodes_1(self):
node = uniform()
spec = merge([node, node, node], lambda x, y, z: x+y+z)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(3.0, parsed.instantiate([1.0]))
self.assertEqual(9.0, parsed.instantiate([3.0]))
def test_duplicate_nodes_2(self):
node = uniform()
spec = [[node, node]]
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual([[1.0, 1.0]], parsed.instantiate([1.0]))
def test_duplicate_nodes_3(self):
spec = [uniform()] * 3
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual([0.0, 0.0, 0.0], parsed.instantiate([0.0]))
self.assertEqual([1.0, 1.0, 1.0], parsed.instantiate([1.0]))
def test_merge_choice(self):
spec = choice([uniform(0, 1), uniform(2, 3)])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(0.0, parsed.instantiate([0.0, 0.0, 0.0]))
self.assertEqual(1.0, parsed.instantiate([1.0, 0.0, 0.0]))
self.assertEqual(2.0, parsed.instantiate([0.0, 0.0, 0.9]))
self.assertEqual(3.0, parsed.instantiate([0.0, 1.0, 0.9]))
def test_if_condition(self):
def if_cond(switch, size, num):
if switch > 0.5:
return [size, num, num]
return [size, num]
spec = merge([uniform(0, 1), uniform(1, 2), uniform(2, 3)], if_cond)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual([1, 2], parsed.instantiate([0, 0, 0]))
self.assertEqual([2, 3], parsed.instantiate([0, 1, 1]))
self.assertEqual([1, 2, 2], parsed.instantiate([1, 0, 0]))
self.assertEqual([2, 3, 3], parsed.instantiate([1, 1, 1]))
def test_object(self):
class Dummy: pass
dummy = Dummy
dummy.value = uniform()
dummy.foo = 'bar'
dummy.ref = dummy
spec = dummy
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
instance = parsed.instantiate([0])
self.assertEqual(0, instance.value)
self.assertEqual('bar', instance.foo)
self.assertEqual(instance, instance.ref)
def test_dict(self):
spec = {1: uniform(), 2: choice(['foo', 'bar']), 3: merge(lambda x: -x, uniform())}
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual({1: 0.0, 2: 'foo', 3: 0.0}, parsed.instantiate([0, 0, 0]))
self.assertEqual({1: 1.0, 2: 'bar', 3: -1.0}, parsed.instantiate([1, 1, 1]))
def test_dict_deep_1(self):
spec = {1: {'foo': uniform() } }
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
def test_dict_deep_2(self):
spec = {'a': {'b': {'c': { 'd': uniform() } } } }
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
def test_math_operations_1(self):
spec = uniform() + 1
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(2.0, parsed.instantiate([1.0]))
def test_math_operations_2(self):
spec = uniform() * (uniform() ** 2 + 1) / uniform()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(2.0, parsed.instantiate([1.0, 1.0, 1.0]))
self.assertEqual(1.0, parsed.instantiate([0.5, 1.0, 1.0]))
self.assertEqual(1.0, parsed.instantiate([0.5, 0.0, 0.5]))
def test_math_operations_3(self):
spec = 2 / (1 + uniform()) * (3 - uniform() + 4 ** uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(6.0, parsed.instantiate([1.0, 1.0, 1.0]))
def test_math_operations_4(self):
spec = choice(['foo', 'bar']) + '-' + choice(['abc', 'def'])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual('foo-abc', parsed.instantiate([0.0, 0.0]))
self.assertEqual('bar-def', parsed.instantiate([1.0, 1.0]))
def test_min_1(self):
spec = min(uniform(), uniform(), 0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([1.0, 0.7]))
self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
self.assertEqual(0.0, parsed.instantiate([0.0, 0.5]))
def test_min_2(self):
spec = min(uniform(), 0.8, 0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.5, parsed.instantiate([1.0]))
self.assertEqual(0.5, parsed.instantiate([0.5]))
self.assertEqual(0.2, parsed.instantiate([0.2]))
def test_min_3(self):
spec = min(uniform(), uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
self.assertEqual(0.2, parsed.instantiate([0.2, 0.5]))
def test_max_1(self):
spec = max(0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(0.5, parsed.instantiate([]))
def test_max_2(self):
spec = max(0.5, 1.0)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(1.0, parsed.instantiate([]))
def test_max_3(self):
spec = max(uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(1.0, parsed.instantiate([1.0]))
self.assertEqual(0.0, parsed.instantiate([0.0]))
def test_name_1(self):
aaa = uniform()
bbb = choice(['foo'])
ccc = uniform(-1, 1)
ddd = uniform()
spec = {'aaa': aaa, 'bbb': bbb, 'ccc': ccc **2, 'ddd': [ddd, ddd]}
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 4)
self.assertTrue('aaa' in aaa.name())
self.assertTrue('uniform' in aaa.name())
self.assertTrue('bbb' in bbb.name())
self.assertTrue('choice' in bbb.name())
self.assertTrue('ccc' in ccc.name())
self.assertTrue('uniform' in ccc.name())
self.assertTrue('ddd' in ddd.name())
self.assertTrue('uniform' in ddd.name())
def test_name_2(self):
norm_node = normal()
choice_node = choice([uniform(), uniform(), uniform()])
spec = {'a': {'b': {'c': { 'd': norm_node, 0: choice_node } } } }
expected_normal_name = 'norm_gen' if six.PY2 else 'ppf'
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 5)
self.assertTrue('a-b-c-d' in norm_node.name(), 'name=%s' % norm_node.name())
self.assertTrue(expected_normal_name in norm_node.name(), 'name=%s' % norm_node.name())
self.assertTrue('a-b-c-0' in choice_node.name(), 'name=%s' % choice_node.name())
self.assertTrue('choice' in choice_node.name(), 'name=%s' % choice_node.name())
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ce248c8c6a2c1c8d8f221b0e73b7f35a1261b | size: 5,698 | ext: py | lang: Python
max_stars_repo: tensorflow_datasets/image_classification/imagenet2012_real.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | ["Apache-2.0"] | stars: 1 | 2021-05-10T10:41:27.000Z | 2021-05-10T10:41:27.000Z
max_issues_repo: tensorflow_datasets/image_classification/imagenet2012_real.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: tensorflow_datasets/image_classification/imagenet2012_real.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | ["Apache-2.0"] | forks: 1 | 2021-07-04T11:07:35.000Z | 2021-07-04T11:07:35.000Z
content:
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imagenet val. annotated by ReaL labels (https://arxiv.org/abs/2006.07159)."""
import json
import os
import tarfile
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = '''\
This dataset contains ILSVRC-2012 (ImageNet) validation images augmented with a
new set of "Re-Assessed" (ReaL) labels from the "Are we done with ImageNet"
paper, see https://arxiv.org/abs/2006.07159. These labels are collected using
the enhanced protocol, resulting in multi-label and more accurate annotations.
Important note: about 3500 examples contain no label, these should be [excluded
from the averaging when computing the accuracy](https://github.com/google-research/reassessed-imagenet#numpy).
One possible way of doing this is with the following NumPy code:
```python
is_correct = [pred in real_labels[i] for i, pred in enumerate(predictions) if real_labels[i]]
real_accuracy = np.mean(is_correct)
```
'''
_CITATION = '''\
@article{beyer2020imagenet,
title={Are we done with ImageNet?},
author={Lucas Beyer and Olivier J. Henaff and Alexander Kolesnikov and Xiaohua Zhai and Aaron van den Oord},
journal={arXiv preprint arXiv:2002.05709},
year={2020}
}
@article{ILSVRC15,
Author={Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title={{ImageNet Large Scale Visual Recognition Challenge}},
Year={2015},
journal={International Journal of Computer Vision (IJCV)},
doi={10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}
}
'''
_VALIDATION_LABELS_FNAME = 'image_classification/imagenet2012_validation_labels.txt'
_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'
_REAL_LABELS_URL = 'https://raw.githubusercontent.com/google-research/reassessed-imagenet/master/real.json'
class Imagenet2012Real(tfds.core.GeneratorBasedBuilder):
"""ImageNet validation images with ReaL labels."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release',
}
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
manual_dir should contain `ILSVRC2012_img_val.tar` file.
You need to register on http://www.image-net.org/download-images in order
to get the link to download the dataset.
"""
def _info(self):
names_file = tfds.core.tfds_path(_LABELS_FNAME)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(encoding_format='jpeg'),
'original_label': tfds.features.ClassLabel(names_file=names_file),
'real_label': tfds.features.Sequence(
tfds.features.ClassLabel(names_file=names_file)),
'file_name': tfds.features.Text(),
}),
supervised_keys=('image', 'real_label'),
homepage='https://github.com/google-research/reassessed-imagenet',
citation=_CITATION,
)
def _get_real_labels(self, dl_manager):
with tf.io.gfile.GFile(dl_manager.download(_REAL_LABELS_URL), 'r') as f:
# ReaL labels are ordered in the lexicographical order.
return {'ILSVRC2012_val_{:08}.JPEG'.format(i + 1): labels
for i, labels in enumerate(json.load(f))}
@staticmethod
def _get_original_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME))
with tf.io.gfile.GFile(labels_path) as labels_f:
# `splitlines` to remove trailing `\r` in Windows
labels = labels_f.read().strip().splitlines()
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
def _split_generators(self, dl_manager):
val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')
if not tf.io.gfile.exists(val_path):
raise AssertionError(
'ImageNet requires manual download of the data. Please download '
'the train and val set and place them into: {}'.format(val_path))
return [
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'archive': dl_manager.iter_archive(val_path),
'original_labels': self._get_original_labels(val_path),
'real_labels': self._get_real_labels(dl_manager),
},
),
]
def _generate_examples(self, archive, original_labels, real_labels):
for fname, fobj in archive:
record = {
'file_name': fname,
'image': fobj,
'original_label': original_labels[fname],
'real_label': real_labels[fname],
}
yield fname, record
avg_line_length: 37.986667 | max_line_length: 220 | alphanum_fraction: 0.707617
content_no_comment:
import json
import os
import tarfile
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = '''\
This dataset contains ILSVRC-2012 (ImageNet) validation images augmented with a
new set of "Re-Assessed" (ReaL) labels from the "Are we done with ImageNet"
paper, see https://arxiv.org/abs/2006.07159. These labels are collected using
the enhanced protocol, resulting in multi-label and more accurate annotations.
Important note: about 3500 examples contain no label, these should be [excluded
from the averaging when computing the accuracy](https://github.com/google-research/reassessed-imagenet#numpy).
One possible way of doing this is with the following NumPy code:
```python
is_correct = [pred in real_labels[i] for i, pred in enumerate(predictions) if real_labels[i]]
real_accuracy = np.mean(is_correct)
```
'''
_CITATION = '''\
@article{beyer2020imagenet,
title={Are we done with ImageNet?},
author={Lucas Beyer and Olivier J. Henaff and Alexander Kolesnikov and Xiaohua Zhai and Aaron van den Oord},
journal={arXiv preprint arXiv:2002.05709},
year={2020}
}
@article{ILSVRC15,
Author={Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title={{ImageNet Large Scale Visual Recognition Challenge}},
Year={2015},
journal={International Journal of Computer Vision (IJCV)},
doi={10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}
}
'''
_VALIDATION_LABELS_FNAME = 'image_classification/imagenet2012_validation_labels.txt'
_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'
_REAL_LABELS_URL = 'https://raw.githubusercontent.com/google-research/reassessed-imagenet/master/real.json'
class Imagenet2012Real(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release',
}
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
manual_dir should contain `ILSVRC2012_img_val.tar` file.
You need to register on http://www.image-net.org/download-images in order
to get the link to download the dataset.
"""
def _info(self):
names_file = tfds.core.tfds_path(_LABELS_FNAME)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(encoding_format='jpeg'),
'original_label': tfds.features.ClassLabel(names_file=names_file),
'real_label': tfds.features.Sequence(
tfds.features.ClassLabel(names_file=names_file)),
'file_name': tfds.features.Text(),
}),
supervised_keys=('image', 'real_label'),
homepage='https://github.com/google-research/reassessed-imagenet',
citation=_CITATION,
)
def _get_real_labels(self, dl_manager):
with tf.io.gfile.GFile(dl_manager.download(_REAL_LABELS_URL), 'r') as f:
return {'ILSVRC2012_val_{:08}.JPEG'.format(i + 1): labels
for i, labels in enumerate(json.load(f))}
@staticmethod
def _get_original_labels(val_path):
labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME))
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().splitlines()
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
def _split_generators(self, dl_manager):
val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')
if not tf.io.gfile.exists(val_path):
raise AssertionError(
'ImageNet requires manual download of the data. Please download '
'the train and val set and place them into: {}'.format(val_path))
return [
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'archive': dl_manager.iter_archive(val_path),
'original_labels': self._get_original_labels(val_path),
'real_labels': self._get_real_labels(dl_manager),
},
),
]
def _generate_examples(self, archive, original_labels, real_labels):
for fname, fobj in archive:
record = {
'file_name': fname,
'image': fobj,
'original_label': original_labels[fname],
'real_label': real_labels[fname],
}
yield fname, record
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ce30d76cd775f97e66642375f6d284899897c | size: 7,570 | ext: py | lang: Python
max_stars_repo: preprocessing.py | enkaranfiles/predict-future-sales | 528d004b78b5c0d41720fc46daa487e3928c045e | ["MIT"] | stars: null | null | null
max_issues_repo: preprocessing.py | enkaranfiles/predict-future-sales | 528d004b78b5c0d41720fc46daa487e3928c045e | ["MIT"] | issues: null | null | null
max_forks_repo: preprocessing.py | enkaranfiles/predict-future-sales | 528d004b78b5c0d41720fc46daa487e3928c045e | ["MIT"] | forks: null | null | null
content:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import product
from sklearn.preprocessing import LabelEncoder
# =============================================================================
# The lines where we processed our data
# =============================================================================
def lag_feature(df, lags, col):
tmp = df[['date_block_num','shop_id','item_id',col]]
for i in lags:
shifted = tmp.copy()
shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]
shifted['date_block_num'] += i
df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
return df
items = pd.read_csv(r'dataset\items.csv')
shops = pd.read_csv(r'dataset\shops.csv')
cats = pd.read_csv(r'dataset\item_categories.csv')
train = pd.read_csv(r'dataset\sales_train.csv')
test = pd.read_csv(r'dataset\test.csv').set_index('ID')
train = train[train.item_price<100000]
train = train[train.item_cnt_day<1001]
median = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()
train.loc[train.item_price<0, 'item_price'] = median
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11
shops['shop_name'] = shops['shop_name'].apply(lambda x: x.lower()).str.replace('[^\w\s]', '').str.replace('\d+','').str.strip()
shops['city'] = shops['shop_name'].str.partition(' ')[0]
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops['shop_type'] = shops['shop_name'].apply(lambda x: 'мтрц' if 'мтрц' in x else 'трц' if 'трц' in x else 'трк' if 'трк' in x else 'тц' if 'тц' in x else 'тк' if 'тк' in x else 'NO_DATA')
shops['shop_type'] = LabelEncoder().fit_transform(shops['shop_type'])
shops = shops[['shop_id','city_code','shop_type']]
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
# if subtype is nan then type
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]
items.drop(['item_name'], axis=1, inplace=True)
matrix = []
cols = ['date_block_num','shop_id','item_id']
for i in range(34):
sales = train[train.date_block_num==i]
matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))
matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols,inplace=True)
train['revenue'] = train['item_price'] * train['item_cnt_day']
item_price_lag = train.groupby(['date_block_num','item_id']).agg({'item_price':['mean']})
item_price_lag.columns = ['average_item_price']
item_price_by_shop_lag = train.groupby(['date_block_num','shop_id', 'item_id']).agg({'item_price':['mean']})
item_price_by_shop_lag.columns = ['average_item_price_by_shop']
group = train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] = (matrix['item_cnt_month'].fillna(0).clip(0,20).astype(np.float16))
test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True)  # month 34 (the test month) has no item_cnt_month yet, so fill with 0
matrix = pd.merge(matrix, item_price_lag, on=['date_block_num','item_id'], how='left')
matrix['average_item_price'] = matrix['average_item_price'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price')
matrix.drop(['average_item_price'], axis=1, inplace=True)
matrix = pd.merge(matrix, item_price_by_shop_lag, on=['date_block_num','shop_id','item_id'], how='left')
matrix['average_item_price_by_shop'] = matrix['average_item_price_by_shop'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price_by_shop')
matrix.drop(['average_item_price_by_shop'], axis=1, inplace=True)
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['shop_type'] = matrix['shop_type'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
shop_mean = matrix.groupby(['shop_id']).agg({'item_cnt_month': ['mean']})
shop_mean.columns = ['shop_mean']
shop_mean.reset_index(inplace=True)
shop_item_mean = matrix.groupby(['item_id','shop_id']).agg({'item_cnt_month': ['mean']})
shop_item_mean.columns = ['shop_item_mean']
shop_item_mean.reset_index(inplace=True)
group = matrix.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, shop_mean, on=['shop_id'], how='left')
matrix = pd.merge(matrix, shop_item_mean, on=['item_id','shop_id'], how='left')
matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
matrix = lag_feature(matrix, [1,2,3], 'item_cnt_month')
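# lags reach back up to 3 months, so the first three monthly blocks (0-2) have undefined lag
# values; they are dropped below before filling the remaining missing lags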
matrix_last = matrix[matrix.date_block_num > 2]
def fill_na(df):
for col in df.columns:
if ('_lag_' in col) & (df[col].isnull().any()):
if ('item_cnt' in col):
df[col].fillna(0, inplace=True)
if ('shop_mean' in col):
df[col].fillna(0, inplace=True)
if ('average_item_price' in col):
df[col].fillna(0, inplace=True)
return df
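# note: fill_na fills in place, so matrix and matrix_last below reference the same filled frame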
matrix = fill_na(matrix_last)
matrix_last.to_pickle('dataset/traintest.pkl')
# =============================================================================
# correlation Matrix
# =============================================================================
cor_data = matrix_last[['shop_item_mean','date_block_num','date_item_avg_item_cnt_lag_1','item_category_id','average_item_price_lag_2','average_item_price_lag_1','item_cnt_month_lag_1','item_cnt_month']]
corr = cor_data.corr()
mask = np.zeros_like(corr, dtype=bool)
f,ax = plt.subplots(figsize=(15, 20))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5},annot=True)
plt.savefig('outputdata/correlation.png')
| 44.529412
| 204
| 0.660238
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import product
from sklearn.preprocessing import LabelEncoder
def lag_feature(df, lags, col):
tmp = df[['date_block_num','shop_id','item_id',col]]
for i in lags:
shifted = tmp.copy()
shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]
shifted['date_block_num'] += i
df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
return df
items = pd.read_csv(r'dataset\items.csv')
shops = pd.read_csv(r'dataset\shops.csv')
cats = pd.read_csv(r'dataset\item_categories.csv')
train = pd.read_csv(r'dataset\sales_train.csv')
test = pd.read_csv(r'dataset\test.csv').set_index('ID')
train = train[train.item_price<100000]
train = train[train.item_cnt_day<1001]
median = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()
train.loc[train.item_price<0, 'item_price'] = median
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11
shops['shop_name'] = shops['shop_name'].str.lower().str.replace(r'[^\w\s]', '', regex=True).str.replace(r'\d+', '', regex=True).str.strip()
shops['city'] = shops['shop_name'].str.partition(' ')[0]
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops['shop_type'] = shops['shop_name'].apply(lambda x: 'мтрц' if 'мтрц' in x else 'трц' if 'трц' in x else 'трк' if 'трк' in x else 'тц' if 'тц' in x else 'тк' if 'тк' in x else 'NO_DATA')
shops['shop_type'] = LabelEncoder().fit_transform(shops['shop_type'])
shops = shops[['shop_id','city_code','shop_type']]
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]
items.drop(['item_name'], axis=1, inplace=True)
matrix = []
cols = ['date_block_num','shop_id','item_id']
for i in range(34):
sales = train[train.date_block_num==i]
matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))
matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols,inplace=True)
train['revenue'] = train['item_price'] * train['item_cnt_day']
item_price_lag = train.groupby(['date_block_num','item_id']).agg({'item_price':['mean']})
item_price_lag.columns = ['average_item_price']
item_price_by_shop_lag = train.groupby(['date_block_num','shop_id', 'item_id']).agg({'item_price':['mean']})
item_price_by_shop_lag.columns = ['average_item_price_by_shop']
group = train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] = (matrix['item_cnt_month'].fillna(0).clip(0,20).astype(np.float16))
test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True)
matrix = pd.merge(matrix, item_price_lag, on=['date_block_num','item_id'], how='left')
matrix['average_item_price'] = matrix['average_item_price'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price')
matrix.drop(['average_item_price'], axis=1, inplace=True)
matrix = pd.merge(matrix, item_price_by_shop_lag, on=['date_block_num','shop_id','item_id'], how='left')
matrix['average_item_price_by_shop'] = matrix['average_item_price_by_shop'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price_by_shop')
matrix.drop(['average_item_price_by_shop'], axis=1, inplace=True)
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['shop_type'] = matrix['shop_type'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
shop_mean = matrix.groupby(['shop_id']).agg({'item_cnt_month': ['mean']})
shop_mean.columns = ['shop_mean']
shop_mean.reset_index(inplace=True)
shop_item_mean = matrix.groupby(['item_id','shop_id']).agg({'item_cnt_month': ['mean']})
shop_item_mean.columns = ['shop_item_mean']
shop_item_mean.reset_index(inplace=True)
group = matrix.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, shop_mean, on=['shop_id'], how='left')
matrix = pd.merge(matrix, shop_item_mean, on=['item_id','shop_id'], how='left')
matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
matrix = lag_feature(matrix, [1,2,3], 'item_cnt_month')
matrix_last = matrix[matrix.date_block_num > 2]
def fill_na(df):
for col in df.columns:
if ('_lag_' in col) & (df[col].isnull().any()):
if ('item_cnt' in col):
df[col].fillna(0, inplace=True)
if ('shop_mean' in col):
df[col].fillna(0, inplace=True)
if ('average_item_price' in col):
df[col].fillna(0, inplace=True)
return df
matrix = fill_na(matrix_last)
matrix_last.to_pickle('dataset/traintest.pkl')
cor_data = matrix_last[['shop_item_mean','date_block_num','date_item_avg_item_cnt_lag_1','item_category_id','average_item_price_lag_2','average_item_price_lag_1','item_cnt_month_lag_1','item_cnt_month']]
corr = cor_data.corr()
mask = np.zeros_like(corr, dtype=bool)
f,ax = plt.subplots(figsize=(15, 20))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5},annot=True)
plt.savefig('outputdata/correlation.png')
| true
| true
|
790ce4877f4cf22fe334c3b0f1ac1df422642dee
| 7,542
|
py
|
Python
|
tests/unit/drivers/test_chunk2doc_rank_drivers.py
|
musa-atlihan/jina
|
9d9cbe1dad2703e2da10761a11c66abcc76dd8b8
|
[
"Apache-2.0"
] | 2
|
2021-04-22T16:59:02.000Z
|
2021-04-22T17:14:32.000Z
|
tests/unit/drivers/test_chunk2doc_rank_drivers.py
|
musa-atlihan/jina
|
9d9cbe1dad2703e2da10761a11c66abcc76dd8b8
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/drivers/test_chunk2doc_rank_drivers.py
|
musa-atlihan/jina
|
9d9cbe1dad2703e2da10761a11c66abcc76dd8b8
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from jina.drivers.rank import Chunk2DocRankDriver
from jina.executors.rankers import Chunk2DocRanker
from jina.hub.rankers.MaxRanker import MaxRanker
from jina.hub.rankers.MinRanker import MinRanker
from jina.proto import jina_pb2
class MockLengthRanker(Chunk2DocRanker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.required_keys = {'length'}
def _get_score(self, match_idx, query_chunk_meta, match_chunk_meta, *args, **kwargs):
return match_idx[0][self.col_doc_id], match_chunk_meta[match_idx[0][self.col_chunk_id]]['length']
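# MockLengthRanker ranks each group of chunk matches by the matched chunk's 'length' meta value,
# returning (parent doc id, length) so that longer matches score higher in the tests below.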
class SimpleChunk2DocRankDriver(Chunk2DocRankDriver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def exec_fn(self):
return self._exec_fn
def create_document_to_score():
# doc: 1
# |- chunk: 2
# | |- matches: (id: 4, parent_id: 40, score.value: 4),
# | |- matches: (id: 5, parent_id: 50, score.value: 5),
# |
# |- chunk: 3
# |- matches: (id: 6, parent_id: 60, score.value: 6),
# |- matches: (id: 7, parent_id: 70, score.value: 7)
doc = jina_pb2.Document()
doc.id = 1
for c in range(2):
chunk = doc.chunks.add()
chunk.id = doc.id + c + 1
for m in range(2):
match = chunk.matches.add()
match.id = 2 * chunk.id + m
match.parent_id = 10 * match.id
match.length = match.id
# to be used by MaxRanker and MinRanker
match.score.ref_id = chunk.id
match.score.value = match.id
return doc
def create_chunk_matches_to_score():
# doc: (id: 100, granularity=0)
# |- chunks: (id: 10)
# | |- matches: (id: 11, parent_id: 1, score.value: 2),
# | |- matches: (id: 12, parent_id: 1, score.value: 3),
# |- chunks: (id: 20)
# |- matches: (id: 21, parent_id: 2, score.value: 4),
# |- matches: (id: 22, parent_id: 2, score.value: 5)
doc = jina_pb2.Document()
doc.id = 100
doc.granularity = 0
num_matches = 2
for parent_id in range(1, 3):
chunk = doc.chunks.add()
chunk.id = parent_id * 10
chunk.granularity = doc.granularity + 1
for score_value in range(parent_id * 2, parent_id * 2 + num_matches):
match = chunk.matches.add()
match.granularity = chunk.granularity
match.parent_id = parent_id
match.score.value = score_value
match.score.ref_id = chunk.id
match.id = 10 * parent_id + score_value
match.length = 4
return doc
def create_chunk_chunk_matches_to_score():
# doc: (id: 100, granularity=0)
# |- chunk: (id: 101, granularity=1)
# |- chunks: (id: 10)
# | |- matches: (id: 11, parent_id: 1, score.value: 2),
# | |- matches: (id: 12, parent_id: 1, score.value: 3),
# |- chunks: (id: 20)
# |- matches: (id: 21, parent_id: 2, score.value: 4),
# |- matches: (id: 22, parent_id: 2, score.value: 5)
doc = jina_pb2.Document()
doc.id = 100
doc.granularity = 0
chunk = doc.chunks.add()
chunk.id = 101
chunk.granularity = doc.granularity + 1
num_matches = 2
for parent_id in range(1, 3):
chunk_chunk = chunk.chunks.add()
chunk_chunk.id = parent_id * 10
chunk_chunk.granularity = chunk.granularity + 1
for score_value in range(parent_id * 2, parent_id * 2 + num_matches):
match = chunk_chunk.matches.add()
match.parent_id = parent_id
match.score.value = score_value
match.score.ref_id = chunk_chunk.id
match.id = 10 * parent_id + score_value
match.length = 4
return doc
def test_chunk2doc_ranker_driver_mock_exec():
doc = create_document_to_score()
driver = SimpleChunk2DocRankDriver()
executor = MockLengthRanker()
driver.attach(executor=executor, pea=None)
driver._apply_all(doc.chunks, doc)
assert len(doc.matches) == 4
assert doc.matches[0].id == 70
assert doc.matches[0].score.value == 7
assert doc.matches[1].id == 60
assert doc.matches[1].score.value == 6
assert doc.matches[2].id == 50
assert doc.matches[2].score.value == 5
assert doc.matches[3].id == 40
assert doc.matches[3].score.value == 4
for match in doc.matches:
        # match score is computed w.r.t. doc.id
assert match.score.ref_id == doc.id
def test_chunk2doc_ranker_driver_max_ranker():
doc = create_document_to_score()
driver = SimpleChunk2DocRankDriver()
executor = MaxRanker()
driver.attach(executor=executor, pea=None)
driver._apply_all(doc.chunks, doc)
assert len(doc.matches) == 4
assert doc.matches[0].id == 70
assert doc.matches[0].score.value == 7
assert doc.matches[1].id == 60
assert doc.matches[1].score.value == 6
assert doc.matches[2].id == 50
assert doc.matches[2].score.value == 5
assert doc.matches[3].id == 40
assert doc.matches[3].score.value == 4
for match in doc.matches:
        # match score is computed w.r.t. doc.id
assert match.score.ref_id == doc.id
def test_chunk2doc_ranker_driver_min_ranker():
doc = create_document_to_score()
driver = SimpleChunk2DocRankDriver()
executor = MinRanker()
driver.attach(executor=executor, pea=None)
driver._apply_all(doc.chunks, doc)
assert len(doc.matches) == 4
assert doc.matches[0].id == 40
assert doc.matches[0].score.value == pytest.approx(1 / (1 + 4), 0.0001)
assert doc.matches[1].id == 50
assert doc.matches[1].score.value == pytest.approx(1 / (1 + 5), 0.0001)
assert doc.matches[2].id == 60
assert doc.matches[2].score.value == pytest.approx(1 / (1 + 6), 0.0001)
assert doc.matches[3].id == 70
assert doc.matches[3].score.value == pytest.approx(1 / (1 + 7), 0.0001)
for match in doc.matches:
        # match score is computed w.r.t. doc.id
assert match.score.ref_id == doc.id
def test_chunk2doc_ranker_driver_traverse_apply():
docs = [create_chunk_matches_to_score(), ]
driver = SimpleChunk2DocRankDriver(recur_range=(0, 1))
executor = MinRanker()
driver.attach(executor=executor, pea=None)
driver._traverse_apply(docs)
for doc in docs:
assert len(doc.matches) == 2
for idx, m in enumerate(doc.matches):
# the score should be 1 / (1 + id * 2)
assert m.score.value == pytest.approx(1. / (1 + m.id * 2.), 0.0001)
def test_chunk2doc_ranker_driver_traverse_apply_larger_range():
docs = [create_chunk_chunk_matches_to_score(), ]
driver = SimpleChunk2DocRankDriver(granularity_range=(0, 2))
executor = MinRanker()
driver.attach(executor=executor, pea=None)
driver._traverse_apply(docs)
for doc in docs:
assert len(doc.matches) == 1
assert len(doc.chunks) == 1
chunk = doc.chunks[0]
assert len(chunk.matches) == 2
min_granularity_2 = chunk.matches[0].score.value
for idx, m in enumerate(chunk.matches):
# the score should be 1 / (1 + id * 2)
if m.score.value < min_granularity_2:
min_granularity_2 = m.score.value
assert m.score.value == pytest.approx(1. / (1 + m.id * 2.), 0.0001)
assert m.score.ref_id == 101
match = doc.matches[0]
assert match.score.ref_id == 100
assert match.score.value == pytest.approx(1. / (1 + min_granularity_2), 0.0001)
| 36.790244
| 105
| 0.620525
|
import pytest
from jina.drivers.rank import Chunk2DocRankDriver
from jina.executors.rankers import Chunk2DocRanker
from jina.hub.rankers.MaxRanker import MaxRanker
from jina.hub.rankers.MinRanker import MinRanker
from jina.proto import jina_pb2
class MockLengthRanker(Chunk2DocRanker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.required_keys = {'length'}
def _get_score(self, match_idx, query_chunk_meta, match_chunk_meta, *args, **kwargs):
return match_idx[0][self.col_doc_id], match_chunk_meta[match_idx[0][self.col_chunk_id]]['length']
class SimpleChunk2DocRankDriver(Chunk2DocRankDriver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def exec_fn(self):
return self._exec_fn
def create_document_to_score():
doc = jina_pb2.Document()
doc.id = 1
for c in range(2):
chunk = doc.chunks.add()
chunk.id = doc.id + c + 1
for m in range(2):
match = chunk.matches.add()
match.id = 2 * chunk.id + m
match.parent_id = 10 * match.id
match.length = match.id
match.score.ref_id = chunk.id
match.score.value = match.id
return doc
def create_chunk_matches_to_score():
doc = jina_pb2.Document()
doc.id = 100
doc.granularity = 0
num_matches = 2
for parent_id in range(1, 3):
chunk = doc.chunks.add()
chunk.id = parent_id * 10
chunk.granularity = doc.granularity + 1
for score_value in range(parent_id * 2, parent_id * 2 + num_matches):
match = chunk.matches.add()
match.granularity = chunk.granularity
match.parent_id = parent_id
match.score.value = score_value
match.score.ref_id = chunk.id
match.id = 10 * parent_id + score_value
match.length = 4
return doc
def create_chunk_chunk_matches_to_score():
doc = jina_pb2.Document()
doc.id = 100
doc.granularity = 0
chunk = doc.chunks.add()
chunk.id = 101
chunk.granularity = doc.granularity + 1
num_matches = 2
for parent_id in range(1, 3):
chunk_chunk = chunk.chunks.add()
chunk_chunk.id = parent_id * 10
chunk_chunk.granularity = chunk.granularity + 1
for score_value in range(parent_id * 2, parent_id * 2 + num_matches):
match = chunk_chunk.matches.add()
match.parent_id = parent_id
match.score.value = score_value
match.score.ref_id = chunk_chunk.id
match.id = 10 * parent_id + score_value
match.length = 4
return doc
def test_chunk2doc_ranker_driver_mock_exec():
doc = create_document_to_score()
driver = SimpleChunk2DocRankDriver()
executor = MockLengthRanker()
driver.attach(executor=executor, pea=None)
driver._apply_all(doc.chunks, doc)
assert len(doc.matches) == 4
assert doc.matches[0].id == 70
assert doc.matches[0].score.value == 7
assert doc.matches[1].id == 60
assert doc.matches[1].score.value == 6
assert doc.matches[2].id == 50
assert doc.matches[2].score.value == 5
assert doc.matches[3].id == 40
assert doc.matches[3].score.value == 4
for match in doc.matches:
assert match.score.ref_id == doc.id
def test_chunk2doc_ranker_driver_max_ranker():
doc = create_document_to_score()
driver = SimpleChunk2DocRankDriver()
executor = MaxRanker()
driver.attach(executor=executor, pea=None)
driver._apply_all(doc.chunks, doc)
assert len(doc.matches) == 4
assert doc.matches[0].id == 70
assert doc.matches[0].score.value == 7
assert doc.matches[1].id == 60
assert doc.matches[1].score.value == 6
assert doc.matches[2].id == 50
assert doc.matches[2].score.value == 5
assert doc.matches[3].id == 40
assert doc.matches[3].score.value == 4
for match in doc.matches:
assert match.score.ref_id == doc.id
def test_chunk2doc_ranker_driver_min_ranker():
doc = create_document_to_score()
driver = SimpleChunk2DocRankDriver()
executor = MinRanker()
driver.attach(executor=executor, pea=None)
driver._apply_all(doc.chunks, doc)
assert len(doc.matches) == 4
assert doc.matches[0].id == 40
assert doc.matches[0].score.value == pytest.approx(1 / (1 + 4), 0.0001)
assert doc.matches[1].id == 50
assert doc.matches[1].score.value == pytest.approx(1 / (1 + 5), 0.0001)
assert doc.matches[2].id == 60
assert doc.matches[2].score.value == pytest.approx(1 / (1 + 6), 0.0001)
assert doc.matches[3].id == 70
assert doc.matches[3].score.value == pytest.approx(1 / (1 + 7), 0.0001)
for match in doc.matches:
assert match.score.ref_id == doc.id
def test_chunk2doc_ranker_driver_traverse_apply():
docs = [create_chunk_matches_to_score(), ]
driver = SimpleChunk2DocRankDriver(recur_range=(0, 1))
executor = MinRanker()
driver.attach(executor=executor, pea=None)
driver._traverse_apply(docs)
for doc in docs:
assert len(doc.matches) == 2
for idx, m in enumerate(doc.matches):
assert m.score.value == pytest.approx(1. / (1 + m.id * 2.), 0.0001)
def test_chunk2doc_ranker_driver_traverse_apply_larger_range():
docs = [create_chunk_chunk_matches_to_score(), ]
driver = SimpleChunk2DocRankDriver(granularity_range=(0, 2))
executor = MinRanker()
driver.attach(executor=executor, pea=None)
driver._traverse_apply(docs)
for doc in docs:
assert len(doc.matches) == 1
assert len(doc.chunks) == 1
chunk = doc.chunks[0]
assert len(chunk.matches) == 2
min_granularity_2 = chunk.matches[0].score.value
for idx, m in enumerate(chunk.matches):
if m.score.value < min_granularity_2:
min_granularity_2 = m.score.value
assert m.score.value == pytest.approx(1. / (1 + m.id * 2.), 0.0001)
assert m.score.ref_id == 101
match = doc.matches[0]
assert match.score.ref_id == 100
assert match.score.value == pytest.approx(1. / (1 + min_granularity_2), 0.0001)
| true
| true
|
790ce63bd651890a0c39fd212c1219cc44737519
| 9,647
|
py
|
Python
|
ts-avatar-service/base64toimage.py
|
docc-lab/train-ticket
|
350f62000e6658e0e543730580c599d8558253e7
|
[
"Apache-2.0"
] | 341
|
2018-11-23T15:19:33.000Z
|
2022-03-31T14:29:42.000Z
|
ts-avatar-service/base64toimage.py
|
docc-lab/train-ticket
|
350f62000e6658e0e543730580c599d8558253e7
|
[
"Apache-2.0"
] | 107
|
2018-12-27T11:10:09.000Z
|
2022-03-30T02:26:21.000Z
|
ts-avatar-service/base64toimage.py
|
docc-lab/train-ticket
|
350f62000e6658e0e543730580c599d8558253e7
|
[
"Apache-2.0"
] | 211
|
2018-12-06T15:49:32.000Z
|
2022-03-31T16:02:42.000Z
|
import base64
import numpy as np
import cv2
path_save = "./images/"
def base64_cv2(base64_str):
imgString = base64.b64decode(base64_str)
    nparr = np.frombuffer(imgString, np.uint8)
image = cv2.imdecode(nparr,cv2.IMREAD_COLOR)
a = cv2.imwrite(path_save + "img_face_1" + ".jpg", image)
print(a)
return image
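# Hypothetical companion helper (not part of the original service): the reverse conversion,
# encoding an OpenCV image back into a base64 string, assuming JPEG output is acceptable.
def cv2_base64(image):
    # cv2.imencode returns (success_flag, encoded byte buffer)
    ok, buf = cv2.imencode(".jpg", image)
    return base64.b64encode(buf).decode("ascii") if ok else None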
if __name__ == '__main__':
s = "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCACBAIEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDw6HTNKtNTXUtXvLqGIHe1zfYzLnkYB5OfTjmtyx8cWNvZPJpmhtZ2xP7y8ucebn2U9P1qm0nhK4tZz4Zs572WVtsmoXXzJIxP3lB4B+nFdX4R+C+peJZLbUdfgklWFQRZRnt/ePqa8GpWjA+qo0ZTRH4H0GdrWfxLdSHyJyzvO8mfO/8A1dq8u+MfiWLX9Qj0fSWKxC3fzZlAzFECOOnVm217v8XLuHwv4PuIn0vyLa1G20jQYAJGMn8a+YNe1KbQNCvNXvYdt1cTJHZK3LzysSQq/THSs8PVUpOQ61OUUoFDQ/B9x4m8W2nhkIy2VjAdQ1+YnKwRrggZ6c+lee/tAX0Gr+IrrXYoCjXcix6ZagdIUyi5Hocbvxr3/wAQafL8J/hpb+G711t9c8Qw/wBoa9dSf8srWP5jH7buAR3rwvwN4D8TfHX4pwadpVjOZb2cpZqW+SGEnl2/D7vpXRSrpxdSWyM3QfOqS3Z237C37Jc/xu8cQx31o7aHpbJPf6gv3bifcCI/p2r9U9F8Aad4c0C30/SbNYEiQJ5X/PMAYH51zH7K37O/hz4F/Dy20TTLICUQr57seWb1PrXpGoXUUoaJxsZRwB396+Zx2Iq1qj7I97B4dUY8p4p+0NooPgTVIXQF3t3KH3CmvyC+JfhKWxb7Ax/e3WquxPYfNzX7KfGeNLrRbu3uFBT7LIRn/dNfkV8YtRtYfFlpZ53ZuJ3BPs5FevkD92ovM8zOoyj7N90ed+KvDxsryyhVtyyWUi89zuNcv4j0ww/E29s1LD5I3VQexTFeh61FFNc6MZUDMzSoFI7A5rnPFmleV8W/tjx7hc6dA4A+hFfQnhJJnJ+JbKf+01nK5WWzU5I6gHBrN8SWDXNjd6LMnzLCHQ9xxmuw8WWJSzgkULvjhaF19MtkVheKS0V1Hc7BuuVVG+gGKlP3zOokloef2Ci805LUuGeTdEd394d6+9P+CM15qD2XiHTRcubNrwTMjciN9oGB6dK+DZLdLHU5oYRtG7zIsfw89q+8v+CIWrW0njXxp4NnjzHcwR3tuMfdIA3AVRz1G0tD9EfOt/8An4k/Kit37Hpv/PiP++aKDDnl3OE/Z+/Zrm1zUI9Y1CIw2OStlAwznvuAHHOf0r6XT4P6R4S0O20jR9Ljk1C9cxRIo/1aYy7N9K6n9nfwZDH4RsNSlgEEMdqBc38i/LbpjkKPXGOfeuJ/bb+O+nfBrwJqVr4BBHinV7IxpxuOnWB+US57PJ1x6CvkKq+s1eTufd0ZuhRcz43/AGy/G/hi98dH4WeH/JudL0APLq1+r5E9xnI59BXkPwO+H1t8XfiFc/F3xZ+78PaHC8lt5v3YypB3DPckAfiaxdfsLrxbrFl4C0GOaIXtyJ9Zu2yZZsn7pr1X9oPxbYfAj4UWnwZ8MWUQ1XVQk94qdYox9xcehBzz3UV3yXKo4ePQ44yTTrT3ex4r+0j4pPijxa/h2yvTL9quRc6lKHwAoOLexT/dzlvrX2J/wTh/ZIj0fSX8ceIdMQXEifugBwOSOOOnFfHXwg+FHjD4gfE3To38O3F+iyfa289cb5c/NI2OnoB7V933fx9+Onwt8KQWGjeHLa2toYAkKR2hYnHqcVlia8IuNNK6R1YOjUcXNrVn1FFpljYW/wBhWEcIBwRxXMeLrSO3U3cCHBGNwFfFup/8FGf2hvD17IbzwdZXJXOPMt2Qn8hUFl/wVT17ULxYvGnwtNrF0aSzkL/iVrx6ictj0KUorRn0J8SNl9atBIoMUvyvuHVcf44r8U/j/quoWXxPlsbmNlfTNWubYgevng/y5r9WLT9pLwZ8TbA3vhy9BddshhYncgzzkHpX5u/8FF/htJ4N+NN3r+jKBaeJP9MhkboJFxuH19q9HKJKlNw7nFm1Pnw0J9UeWXXjCRSl6E/eWkjCJ89eGH+FUbnxjNqPiEeJLuUZjt1hhB/iORWNdXsN6BJbSg2jjO7vuNU72CC6vLe0SYBBIuBnvnvX0zaex8vFqSujt/EV5bapK6Iyfwnn3FcN4tu3knAcAGBtq4NbVtdRDfACHcTYyD2Fc/rlvLPqMq/KuZAQCe1YwbdZp7FyjFw1OfvLVpHFyR3xmvqT/gkR47/4Qj9q+x0kSEQaxZvbybjwG5I/PFfNGq2zxWRYEDY+SPWu5/ZI8Uv4X+P/AIa1uG4MQi1eFHYHnlx/jXU5ROCqmkfvp/Ys3980Vj/8LDk/57R/nRUXRyn0d4n+MWjeAvBkelaLZC6mtIlje2EYxcTfwRL65J546V8D/t9/GJvAKX3h7xXfW+peIJ5k1HxFPBgpJKR/o9mh9I+4Hevp2fxjoXwk8Ca3+1D8RoilxpQaPwlpkq5HmuoC3Ei/xEZ4BzjFfnz8O/h54i/ba/aEvtf8W6yttoOk3bX+sapLzCGySS2O1fLULcntOvQ+8lCUvc6dTT/Z50SfwZ4Yu/jx8RwUaMNNLLOPuswyqrnqf9kc0fB74XeJP2o/iJqnxk8Z2rjTrGTF5PO33mYhoLJP+mmAWb0AArpviqn/AAv34g2HwU+HNwLLw7osoFu0owrqrDN3N6MB9xT175r6R8F+EtF8JeEtO8BfDfSJJdJ0ouUkEf7y8mb79xJ6u3OD6dMVNbEzhG32n+Hka0cJGrNS+zHT1Nf4B+AvBnwysm1/WNCt4BNzcSSsAEP9wZ546YFaHxn+MHgyy8Pvdy6AVseSst
2sdujD/ZMhBI9wK5nxjB+0B4jgOi/CbwjYQywAeRd6x8wVsH+E9TXwN+354F8V+CvF8Fh8YbXWvG2r+IfD0pi8QXepvb2+j3IXBhjhX5eCMjPWs8Bg5Y58kpWNsfjvqUeaMb+h7t4z/aE+DU2p7bizMKbTiWIJNHt9Tisybwz8K/GTQ6xp4t50kH+vhTaEz6rX58fsV/CDxb8QvGV5bavqWorpNlp7Jc3QuG+SXnbg5wa+mPhPqPiD4TfENvAur6mt7buf9Afd80691PPWqx2WLCO6lexyYHMp4puMoWPoXw98ARab9T8N6hiN/lj8w/OCf6VzH7Qv7GF58cPAF14U1HV1h1JAbjTLw52284HGf9k9MDvXrnwHu7/xVeNZWcTOY/vKx5TngfhzXtWvfDhtP0ea4nQrlAWwMjgV51CtJS5l0PTq0YVI8k9mfgN8XPgt8Y/gv4rm8JeOPCF3bzpISRHCXjmGc+YhHYjnHWuJvItZt5ftMNtcb1ORHJCymv18/aa1bQxqIh1iytrp4AVWWaIZiGevTmvmrxDovhjxXctHY6HbSz7iHKwKNo9cmvo6WbVHTXMtT53EZXCFS0HofDen+JNTt3802J3hssHyKq6prt5JqQup0kjVyD8wOCfQV9d6t4D+Bnhm/KeItR0+O66yISDj6YNc74t8A/Bjx3Y/ZrW/tgUciJYG2t9cYrphi5SfM4s5Z4F7KaPm+9vUvbKSQxOmCq4fvnvW18CGt7X4l6NfXbDaNXj+Vumd4A/UCtD4rfDjTPAUjT2uoyyQcYEwGR6fWqf7P2jy+Kfizo2nQpJ5EmqR9B0O7P8AOu2nL2kOZHm16TirH7S/25P/AM9LX8z/AI0Vm/8ACubn/npN+dFXys845z/gov8AtIX3xR8R3PwY8K6z5WkaFctHe3aLvE9y3/LNQPvBRtHHQg1wOjfFHVfA3w1tP2ePgho7y63ft5mop5JZp3cceaw6qD/D2q/4N/ZL+Mmp+GdHTwP4burrWfETPMLu5TP2dGOTO2eSWJYj2xX6Rf8ABOr/AIJJeFvgnpUfxC+J8banr9zGHuJ5k3E5GcDPTmvloVIRp8tPWR93LmT5qukevdo8J/Y4/wCCdfjTS9L/AOEp8Xxub7UcT6tPKSfOkPUJxyo7V9V2fwI0rwTZRW9pbOFhUbAVOa+p/wDhF9D0KwWxsNLjSKNcRjZjiuL8Z2VlPC7GFQT90YqJ4eUIuc3q9TrpYv2tlBWj0PBj4b02zuWR0IOMjjj8a8L/AGpfgF8OfidF53ieCJzEjFRNEHQHHavpDxbpcULyjeR3yteU+O2sr6N7aZMhUO4kda4VWnTm3FndCgqj1jdH55+Iv2frHwU02m+C7u0sLFZGfyrKAoW471wGk/s36dqnjm21yy1K/wDttrcrIsnmsUbnkYNfcPiH4faNqt3lLQD229a2fAfwI0KLVIpEsIyxOWIXin9Zry0buOeBo025R0Iv2NfgTeabdzeILqzbZdy8Bv7uOP619AfErwCkXhK4hSABhCRz9K7T4MfD2DTtHitYbNQFbsK674i+As+FZ5DBngj3+7W9HC/u23ocNSq1VSR+GP7b817pfxEm0meYEtOwAz/DzXzP8VvHvjTwro9ho3hfwzc3j6nOs+qtbQkMbVWGYgy9Cf5V9lf8FNfAFxoXxKGqBQoLMcEV8/8AgbW7mwnjEZQvjYu4ZGOta4acYNSavY5MXTlO6Ttc+PI/Ds/i/wCOBsNBtbu3s7vUy5tZXZvIjzna2fxr2n4/fDXwh4Os7LUPCTpBqaoqboJMbm9frXvmufDvwj4keTWbzQlhvjgtc2qKhPryBzXnnif4U6CL77UWuJPLcNH9ok3EV7M81o1I8qVmebHLpUVzuTZ4H8fEvv8AhDrO58QSO12IYU+Ycsc/rXr3/BNr4EJ49+LnhS2hR3vJtclupXhOUit7dASGHbnP5V57+1rpix6PosgDYk1EALjl+SFXH9K/RT/git+zbceEfh/ffG7xTY7L7VXaysIiMCFIxhmAPQsSc16GB5alDU8rGy5JcqPsD/hDbT/ngf8Avmiu1/sFPUUV3eyXc8o+jv2UPg3oqCXxXdaYiK7NDZJtBEMEZ2Io/AZ7da+jpPKtLBI4FHyrgDAFcv8ADbw7BoGhQacLUR+Uu1wON3PWuivJUERxnr0r5bC4eNCF1ufU4+s8TiVfZGFrd2Xg2s+MjGa878cXBHyqeF4yO9dxrUkZtSxHO7GPSuM122WfLMuQTXPipSuz1MHGNjyPxzd5eXII4rx7xjZz3LkwuVBbOQOo9K9t8d6aHmkKxkjpwK848SaLgALGRg9DXlODbufQ4erZJHmK20Vtc7nAJB6kV6N8ILW0mvzLKUK7h1riNf0mSNmmC8YOOe9bn7PE91q/xBtdBnzsdwWUHrTpJe0RWJ1gz7B+Gmh2M9jCygKD0xXReONJs30NoWX5QGH14pnhLwxf6bAj2jlUQD5CvWnfFLVYLLw4zi5jVgpyA2STXvJL6ufJ1K3Nikon5H/8FgfhpG+lrr1na4aNiC47ivz00nbbXiLEQxVgeDzn0r9Vf+Cm8llq/wAPriKXGRGSmeufTFfljotkJNaljVfuuSR75ryI7tHqVEuZHpmhSLqdqscp2/KOlUfF3hu0W2ZwvzY64q34Zj+zOok6ECpfGt5CbJxCx3BeFAqrIppKBh/s8/stWP7Q/wAc9A1LxJZiXw/4TvXvr+KVciWVAWVT7Gv0n/Zn0K2tvh+rW9rHbxzapcSwxRxbVRGc4AH4V84/sVeF5PCPwP1fxpexlJtYdjAzLjgDYpHtmvrz4SaMdK8GaZYOwJjtQz5GCCx3Y/Wvq8BBqij4fH1E8Q7nT/ZE9R+VFXvs8P8Ac/WivWPNPuq12BMDAOag1i1nu7B4LWcRyMflYCltX2yMH4OKkklTZuzwK+Xd7HvvdHM+IPLCYXHPXFcvqvlG2YN1zxjrXVeIYfLjMgA4HFcjqU29SpUDnrmuCqnfU93DPQ5DxZpccg3RHAzzXmvi+wdZTtQkAHkCvUte3SExoM8Vy2taRCsLSTclhnbXLUWmh6dOo4s8Y1jRbq8BgEf04xmui+FXgK88Gazb+MLO3CzxPn5jkEeldBZaPa3moFTECoPGa6E2JtljWDhV5xjioo07aoJYhtWaKfx0/az/AGhtE0GOL4LfCOLV7rbtxe6gIIj68is3Qfjrq3iz4YS698TNIXR9UsI8alp0d35qrIemG7ir2qXiW1q864LA/vF3cEZ7DtXx1+0v8W9e1XxpqnhLw34gezjitpGdbVgQSB/FjqaVWpVg9WOFOi43UEvM4/8Abq+Lg8VtLp1rMgjddqNMcBh6818VWOk21l4luIoZInJOWKHI/Csz4j/8Jf4r8es/iTxdqV0sLnyYnuWVQc+n9K2fD2k/ZWEoQZwMsWyTTpwcVfuctSsnW0OjsowrRlG4XPekTRbzxb4os/DWnxM0tzMquPRMjJ/KkhlECEsPvE5PoK9K/Zo8ORw6rc+LtYtt8u39wX/gA7iurDUnOumZY7E+y
wzfU+gtH0S10/RvDXwu0wKIWmRHSMjiFBzwPcZr6N8KRwFEZY8ZUADHHHFeA/CC6HiHxNceKWRTFDEILE47d2H1r6B8Jgi3QmvsKCskfATk5TbZ0eI/RaKbsb0orqIuj7G1DVI7S4eRyQD0UdqdbanDd2oaOXJIzj0rnfF+ri3EjlgAK4q1+JEmh3xeebdAT857r9K+DeKaqWlsfdxwMZUYyPQ/ENyDbkt0A5FclNEkuZNhxnjmrcni+x1a3TyZVk3c5XoPas+6vYI3KRvwR0qpVFNGtOk4mRq23cWYDg4ArmfFMqRwM/onSug124hC7gT69a43xXqkLWpTBLbDnBrHY7VscrN438OeGZXOpakqFeRIeF/M1yXxC/bp/Z0+FsCweIPiPYXN4w/48bWZXf6cHr7V43+0f+ydrf7THiC3027+IGpaRZ2kxbZY3DL5oHO0kHv0rzDV/wDgkJ+yrpmrt4hvZLxddQCSK5mvZGDsB1OWxRRtUi3LQ9HAZdTx7XO9e3c9q1X/AIKHw+PLC+g+G3gRJEjQp5l2ArAHvj8K+XPjb8Vv+EJ1hdYufD1rLq2sW0hZoTwi45JHr+Nc98Tvg5qPw2kn1HwP8TLqGZJirJIuQwA6DA6fWvmn47aB+0VqmjjVLb4kRwSO5ZblYyWVQfu4OQKqOGhUloz3quXPAUGvY9N9zZ1HWZdV1qPUxCXW4LFgo5j+talrK/mhI2+XHJzXlvwy+JXi+98Uad4L8T6PHczTDEuoxHaHxwTjtmvZ9Q0a0snWW1RmMgwsacnNOcXF2Pi6kYczaViTw7p134h1e10eAjM8gDFj0x1r2rwzpcOlzRaXDfnYjBZAh6+ozXI+CPAMvhzSBrN6P39yd6BhzHxW3ocgstRi+Y5L5OT1r2cHh3TpczWrPlswxvtans09j6b+FS2lpaxQ28aJGMYRRXtHgyUNECW4J4HpXz/8LNVSSKHc3XFe5eDbxRbja3Oa9qhsjw60W5HdbR/fFFUPtx9VorqMuVn0R8QtY3xSor9GzxXkPizxGVLujEBR0Brr/GniASpKofndnGO1ePeNNbmM0pUjHTOa/KcROXNc/WcPCLjY3NI+K+reFtstmnn2zNmWFm5A9vSu/wDD/wASdD8UwLPpl6rttzLGZDuSvnWXXFUKZG474qsniXUdHl/tfw7ceVOh4V2wGHXB/KnSxU0+WxFSgrcyPpi+vPte4hsqvA+bNctrdu9y+1W4Jwea5X4e/H/w/wCJZ10fWHGnaiqjzopW/dy/TNdjcXVvJKoUhi3IwMcdq6lU5nucyb7HEX2nSWd87QPs2gsNvrXmHxlv9YktDJYzlZ1Q4VYwQ31r2rVNNt5pXkfGADmvK/iJb2SR3BVRtweamTai7HTTk4SXI7Hxl8W9C8W+I9QkOpyoDj/Vqh5H4dK8c8e+CtQi037I88IhXqIY8sPzr6o8fW9qb2Rggx7V8+fFbVrfSri4SNcEDc5I4A6dacJ1OWy3O2rmmNUOXndrdTyjSPB+laHdtqsEZEnH7x1G4j+lfRf7OP7OeseLrQfEPxdA0Ol2z77GKQbftj/3h6KPT2qx+yJ+x9q3xguoPiZ8QtOa38MwTbraBn2vqEinOCDyIx9Oa+wdb0qwg0MaVp9ikMFuoS3hiHCKBivqMry6pKCq1lqj4PN80bg4U3fuz5c8e6SILySBEUKjYAQYUewriZIfJ1ILjo/Few/EbQzvmBj/AI68t1+z+yzllXgda9Sorux85CTWp6f8IdVCmKJjnaQOa+hfAt8kkargYr5R+E+sFLyNN3fivpH4eX5NvGCw61tR0Lk+bU9K+0+9FVftY9TRXUZnrPi//WS/7v8AjXkXjHo/+/RRX5NXP1nDdTjLn7g+lQn/AFK/9dBRRWcSp/w2cj4t/wCRjtv+uy/zr6T8H/8AIBtv+uIooropfEccfhLlz/yD5v8Ark38jXlPjv8A48ZfoaKK3l8JcPiR87fEv/j4P+61fN/xY/15/wCvhP8A0IUUVvg/40fUxxn8KXofo58J/wDkifhX/sGxf+ihTtQ+5J9KKK/SqX8JH5vX+16niPxM/wCPif8A368e8Wffk+lFFefU+NmS2RP8Mf8AkKR/71fSnw5/49ovrRRWlE06HotFFFdJB//Z"
base64_cv2(s)
| 438.5
| 9,266
| 0.94589
|
import base64
import numpy as np
import cv2
path_save = "./images/"
def base64_cv2(base64_str):
imgString = base64.b64decode(base64_str)
    nparr = np.frombuffer(imgString, np.uint8)
image = cv2.imdecode(nparr,cv2.IMREAD_COLOR)
a = cv2.imwrite(path_save + "img_face_1" + ".jpg", image)
print(a)
return image
if __name__ == '__main__':
s = "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCACBAIEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDw6HTNKtNTXUtXvLqGIHe1zfYzLnkYB5OfTjmtyx8cWNvZPJpmhtZ2xP7y8ucebn2U9P1qm0nhK4tZz4Zs572WVtsmoXXzJIxP3lB4B+nFdX4R+C+peJZLbUdfgklWFQRZRnt/ePqa8GpWjA+qo0ZTRH4H0GdrWfxLdSHyJyzvO8mfO/8A1dq8u+MfiWLX9Qj0fSWKxC3fzZlAzFECOOnVm217v8XLuHwv4PuIn0vyLa1G20jQYAJGMn8a+YNe1KbQNCvNXvYdt1cTJHZK3LzysSQq/THSs8PVUpOQ61OUUoFDQ/B9x4m8W2nhkIy2VjAdQ1+YnKwRrggZ6c+lee/tAX0Gr+IrrXYoCjXcix6ZagdIUyi5Hocbvxr3/wAQafL8J/hpb+G711t9c8Qw/wBoa9dSf8srWP5jH7buAR3rwvwN4D8TfHX4pwadpVjOZb2cpZqW+SGEnl2/D7vpXRSrpxdSWyM3QfOqS3Z237C37Jc/xu8cQx31o7aHpbJPf6gv3bifcCI/p2r9U9F8Aad4c0C30/SbNYEiQJ5X/PMAYH51zH7K37O/hz4F/Dy20TTLICUQr57seWb1PrXpGoXUUoaJxsZRwB396+Zx2Iq1qj7I97B4dUY8p4p+0NooPgTVIXQF3t3KH3CmvyC+JfhKWxb7Ax/e3WquxPYfNzX7KfGeNLrRbu3uFBT7LIRn/dNfkV8YtRtYfFlpZ53ZuJ3BPs5FevkD92ovM8zOoyj7N90ed+KvDxsryyhVtyyWUi89zuNcv4j0ww/E29s1LD5I3VQexTFeh61FFNc6MZUDMzSoFI7A5rnPFmleV8W/tjx7hc6dA4A+hFfQnhJJnJ+JbKf+01nK5WWzU5I6gHBrN8SWDXNjd6LMnzLCHQ9xxmuw8WWJSzgkULvjhaF19MtkVheKS0V1Hc7BuuVVG+gGKlP3zOokloef2Ci805LUuGeTdEd394d6+9P+CM15qD2XiHTRcubNrwTMjciN9oGB6dK+DZLdLHU5oYRtG7zIsfw89q+8v+CIWrW0njXxp4NnjzHcwR3tuMfdIA3AVRz1G0tD9EfOt/8An4k/Kit37Hpv/PiP++aKDDnl3OE/Z+/Zrm1zUI9Y1CIw2OStlAwznvuAHHOf0r6XT4P6R4S0O20jR9Ljk1C9cxRIo/1aYy7N9K6n9nfwZDH4RsNSlgEEMdqBc38i/LbpjkKPXGOfeuJ/bb+O+nfBrwJqVr4BBHinV7IxpxuOnWB+US57PJ1x6CvkKq+s1eTufd0ZuhRcz43/AGy/G/hi98dH4WeH/JudL0APLq1+r5E9xnI59BXkPwO+H1t8XfiFc/F3xZ+78PaHC8lt5v3YypB3DPckAfiaxdfsLrxbrFl4C0GOaIXtyJ9Zu2yZZsn7pr1X9oPxbYfAj4UWnwZ8MWUQ1XVQk94qdYox9xcehBzz3UV3yXKo4ePQ44yTTrT3ex4r+0j4pPijxa/h2yvTL9quRc6lKHwAoOLexT/dzlvrX2J/wTh/ZIj0fSX8ceIdMQXEifugBwOSOOOnFfHXwg+FHjD4gfE3To38O3F+iyfa289cb5c/NI2OnoB7V933fx9+Onwt8KQWGjeHLa2toYAkKR2hYnHqcVlia8IuNNK6R1YOjUcXNrVn1FFpljYW/wBhWEcIBwRxXMeLrSO3U3cCHBGNwFfFup/8FGf2hvD17IbzwdZXJXOPMt2Qn8hUFl/wVT17ULxYvGnwtNrF0aSzkL/iVrx6ictj0KUorRn0J8SNl9atBIoMUvyvuHVcf44r8U/j/quoWXxPlsbmNlfTNWubYgevng/y5r9WLT9pLwZ8TbA3vhy9BddshhYncgzzkHpX5u/8FF/htJ4N+NN3r+jKBaeJP9MhkboJFxuH19q9HKJKlNw7nFm1Pnw0J9UeWXXjCRSl6E/eWkjCJ89eGH+FUbnxjNqPiEeJLuUZjt1hhB/iORWNdXsN6BJbSg2jjO7vuNU72CC6vLe0SYBBIuBnvnvX0zaex8vFqSujt/EV5bapK6Iyfwnn3FcN4tu3knAcAGBtq4NbVtdRDfACHcTYyD2Fc/rlvLPqMq/KuZAQCe1YwbdZp7FyjFw1OfvLVpHFyR3xmvqT/gkR47/4Qj9q+x0kSEQaxZvbybjwG5I/PFfNGq2zxWRYEDY+SPWu5/ZI8Uv4X+P/AIa1uG4MQi1eFHYHnlx/jXU5ROCqmkfvp/Ys3980Vj/8LDk/57R/nRUXRyn0d4n+MWjeAvBkelaLZC6mtIlje2EYxcTfwRL65J546V8D/t9/GJvAKX3h7xXfW+peIJ5k1HxFPBgpJKR/o9mh9I+4Hevp2fxjoXwk8Ca3+1D8RoilxpQaPwlpkq5HmuoC3Ei/xEZ4BzjFfnz8O/h54i/ba/aEvtf8W6yttoOk3bX+sapLzCGySS2O1fLULcntOvQ+8lCUvc6dTT/Z50SfwZ4Yu/jx8RwUaMNNLLOPuswyqrnqf9kc0fB74XeJP2o/iJqnxk8Z2rjTrGTF5PO33mYhoLJP+mmAWb0AArpviqn/AAv34g2HwU+HNwLLw7osoFu0owrqrDN3N6MB9xT175r6R8F+EtF8JeEtO8BfDfSJJdJ0ouUkEf7y8mb79xJ6u3OD6dMVNbEzhG32n+Hka0cJGrNS+zHT1Nf4B+AvBnwysm1/WNCt4BNzcSSsAEP9wZ546YFaHxn+MHgyy8Pvdy6AVseSst
2sdujD/ZMhBI9wK5nxjB+0B4jgOi/CbwjYQywAeRd6x8wVsH+E9TXwN+354F8V+CvF8Fh8YbXWvG2r+IfD0pi8QXepvb2+j3IXBhjhX5eCMjPWs8Bg5Y58kpWNsfjvqUeaMb+h7t4z/aE+DU2p7bizMKbTiWIJNHt9Tisybwz8K/GTQ6xp4t50kH+vhTaEz6rX58fsV/CDxb8QvGV5bavqWorpNlp7Jc3QuG+SXnbg5wa+mPhPqPiD4TfENvAur6mt7buf9Afd80691PPWqx2WLCO6lexyYHMp4puMoWPoXw98ARab9T8N6hiN/lj8w/OCf6VzH7Qv7GF58cPAF14U1HV1h1JAbjTLw52284HGf9k9MDvXrnwHu7/xVeNZWcTOY/vKx5TngfhzXtWvfDhtP0ea4nQrlAWwMjgV51CtJS5l0PTq0YVI8k9mfgN8XPgt8Y/gv4rm8JeOPCF3bzpISRHCXjmGc+YhHYjnHWuJvItZt5ftMNtcb1ORHJCymv18/aa1bQxqIh1iytrp4AVWWaIZiGevTmvmrxDovhjxXctHY6HbSz7iHKwKNo9cmvo6WbVHTXMtT53EZXCFS0HofDen+JNTt3802J3hssHyKq6prt5JqQup0kjVyD8wOCfQV9d6t4D+Bnhm/KeItR0+O66yISDj6YNc74t8A/Bjx3Y/ZrW/tgUciJYG2t9cYrphi5SfM4s5Z4F7KaPm+9vUvbKSQxOmCq4fvnvW18CGt7X4l6NfXbDaNXj+Vumd4A/UCtD4rfDjTPAUjT2uoyyQcYEwGR6fWqf7P2jy+Kfizo2nQpJ5EmqR9B0O7P8AOu2nL2kOZHm16TirH7S/25P/AM9LX8z/AI0Vm/8ACubn/npN+dFXys845z/gov8AtIX3xR8R3PwY8K6z5WkaFctHe3aLvE9y3/LNQPvBRtHHQg1wOjfFHVfA3w1tP2ePgho7y63ft5mop5JZp3cceaw6qD/D2q/4N/ZL+Mmp+GdHTwP4burrWfETPMLu5TP2dGOTO2eSWJYj2xX6Rf8ABOr/AIJJeFvgnpUfxC+J8banr9zGHuJ5k3E5GcDPTmvloVIRp8tPWR93LmT5qukevdo8J/Y4/wCCdfjTS9L/AOEp8Xxub7UcT6tPKSfOkPUJxyo7V9V2fwI0rwTZRW9pbOFhUbAVOa+p/wDhF9D0KwWxsNLjSKNcRjZjiuL8Z2VlPC7GFQT90YqJ4eUIuc3q9TrpYv2tlBWj0PBj4b02zuWR0IOMjjj8a8L/AGpfgF8OfidF53ieCJzEjFRNEHQHHavpDxbpcULyjeR3yteU+O2sr6N7aZMhUO4kda4VWnTm3FndCgqj1jdH55+Iv2frHwU02m+C7u0sLFZGfyrKAoW471wGk/s36dqnjm21yy1K/wDttrcrIsnmsUbnkYNfcPiH4faNqt3lLQD229a2fAfwI0KLVIpEsIyxOWIXin9Zry0buOeBo025R0Iv2NfgTeabdzeILqzbZdy8Bv7uOP619AfErwCkXhK4hSABhCRz9K7T4MfD2DTtHitYbNQFbsK674i+As+FZ5DBngj3+7W9HC/u23ocNSq1VSR+GP7b817pfxEm0meYEtOwAz/DzXzP8VvHvjTwro9ho3hfwzc3j6nOs+qtbQkMbVWGYgy9Cf5V9lf8FNfAFxoXxKGqBQoLMcEV8/8AgbW7mwnjEZQvjYu4ZGOta4acYNSavY5MXTlO6Ttc+PI/Ds/i/wCOBsNBtbu3s7vUy5tZXZvIjzna2fxr2n4/fDXwh4Os7LUPCTpBqaoqboJMbm9frXvmufDvwj4keTWbzQlhvjgtc2qKhPryBzXnnif4U6CL77UWuJPLcNH9ok3EV7M81o1I8qVmebHLpUVzuTZ4H8fEvv8AhDrO58QSO12IYU+Ycsc/rXr3/BNr4EJ49+LnhS2hR3vJtclupXhOUit7dASGHbnP5V57+1rpix6PosgDYk1EALjl+SFXH9K/RT/git+zbceEfh/ffG7xTY7L7VXaysIiMCFIxhmAPQsSc16GB5alDU8rGy5JcqPsD/hDbT/ngf8Avmiu1/sFPUUV3eyXc8o+jv2UPg3oqCXxXdaYiK7NDZJtBEMEZ2Io/AZ7da+jpPKtLBI4FHyrgDAFcv8ADbw7BoGhQacLUR+Uu1wON3PWuivJUERxnr0r5bC4eNCF1ufU4+s8TiVfZGFrd2Xg2s+MjGa878cXBHyqeF4yO9dxrUkZtSxHO7GPSuM122WfLMuQTXPipSuz1MHGNjyPxzd5eXII4rx7xjZz3LkwuVBbOQOo9K9t8d6aHmkKxkjpwK848SaLgALGRg9DXlODbufQ4erZJHmK20Vtc7nAJB6kV6N8ILW0mvzLKUK7h1riNf0mSNmmC8YOOe9bn7PE91q/xBtdBnzsdwWUHrTpJe0RWJ1gz7B+Gmh2M9jCygKD0xXReONJs30NoWX5QGH14pnhLwxf6bAj2jlUQD5CvWnfFLVYLLw4zi5jVgpyA2STXvJL6ufJ1K3Nikon5H/8FgfhpG+lrr1na4aNiC47ivz00nbbXiLEQxVgeDzn0r9Vf+Cm8llq/wAPriKXGRGSmeufTFfljotkJNaljVfuuSR75ryI7tHqVEuZHpmhSLqdqscp2/KOlUfF3hu0W2ZwvzY64q34Zj+zOok6ECpfGt5CbJxCx3BeFAqrIppKBh/s8/stWP7Q/wAc9A1LxJZiXw/4TvXvr+KVciWVAWVT7Gv0n/Zn0K2tvh+rW9rHbxzapcSwxRxbVRGc4AH4V84/sVeF5PCPwP1fxpexlJtYdjAzLjgDYpHtmvrz4SaMdK8GaZYOwJjtQz5GCCx3Y/Wvq8BBqij4fH1E8Q7nT/ZE9R+VFXvs8P8Ac/WivWPNPuq12BMDAOag1i1nu7B4LWcRyMflYCltX2yMH4OKkklTZuzwK+Xd7HvvdHM+IPLCYXHPXFcvqvlG2YN1zxjrXVeIYfLjMgA4HFcjqU29SpUDnrmuCqnfU93DPQ5DxZpccg3RHAzzXmvi+wdZTtQkAHkCvUte3SExoM8Vy2taRCsLSTclhnbXLUWmh6dOo4s8Y1jRbq8BgEf04xmui+FXgK88Gazb+MLO3CzxPn5jkEeldBZaPa3moFTECoPGa6E2JtljWDhV5xjioo07aoJYhtWaKfx0/az/AGhtE0GOL4LfCOLV7rbtxe6gIIj68is3Qfjrq3iz4YS698TNIXR9UsI8alp0d35qrIemG7ir2qXiW1q864LA/vF3cEZ7DtXx1+0v8W9e1XxpqnhLw34gezjitpGdbVgQSB/FjqaVWpVg9WOFOi43UEvM4/8Abq+Lg8VtLp1rMgjddqNMcBh6818VWOk21l4luIoZInJOWKHI/Csz4j/8Jf4r8es/iTxdqV0sLnyYnuWVQc+n9K2fD2k/ZWEoQZwMsWyTTpwcVfuctSsnW0OjsowrRlG4XPekTRbzxb4os/DWnxM0tzMquPRMjJ/KkhlECEsPvE5PoK9K/Zo8ORw6rc+LtYtt8u39wX/gA7iurDUnOumZY7E+y
wzfU+gtH0S10/RvDXwu0wKIWmRHSMjiFBzwPcZr6N8KRwFEZY8ZUADHHHFeA/CC6HiHxNceKWRTFDEILE47d2H1r6B8Jgi3QmvsKCskfATk5TbZ0eI/RaKbsb0orqIuj7G1DVI7S4eRyQD0UdqdbanDd2oaOXJIzj0rnfF+ri3EjlgAK4q1+JEmh3xeebdAT857r9K+DeKaqWlsfdxwMZUYyPQ/ENyDbkt0A5FclNEkuZNhxnjmrcni+x1a3TyZVk3c5XoPas+6vYI3KRvwR0qpVFNGtOk4mRq23cWYDg4ArmfFMqRwM/onSug124hC7gT69a43xXqkLWpTBLbDnBrHY7VscrN438OeGZXOpakqFeRIeF/M1yXxC/bp/Z0+FsCweIPiPYXN4w/48bWZXf6cHr7V43+0f+ydrf7THiC3027+IGpaRZ2kxbZY3DL5oHO0kHv0rzDV/wDgkJ+yrpmrt4hvZLxddQCSK5mvZGDsB1OWxRRtUi3LQ9HAZdTx7XO9e3c9q1X/AIKHw+PLC+g+G3gRJEjQp5l2ArAHvj8K+XPjb8Vv+EJ1hdYufD1rLq2sW0hZoTwi45JHr+Nc98Tvg5qPw2kn1HwP8TLqGZJirJIuQwA6DA6fWvmn47aB+0VqmjjVLb4kRwSO5ZblYyWVQfu4OQKqOGhUloz3quXPAUGvY9N9zZ1HWZdV1qPUxCXW4LFgo5j+talrK/mhI2+XHJzXlvwy+JXi+98Uad4L8T6PHczTDEuoxHaHxwTjtmvZ9Q0a0snWW1RmMgwsacnNOcXF2Pi6kYczaViTw7p134h1e10eAjM8gDFj0x1r2rwzpcOlzRaXDfnYjBZAh6+ozXI+CPAMvhzSBrN6P39yd6BhzHxW3ocgstRi+Y5L5OT1r2cHh3TpczWrPlswxvtans09j6b+FS2lpaxQ28aJGMYRRXtHgyUNECW4J4HpXz/8LNVSSKHc3XFe5eDbxRbja3Oa9qhsjw60W5HdbR/fFFUPtx9VorqMuVn0R8QtY3xSor9GzxXkPizxGVLujEBR0Brr/GniASpKofndnGO1ePeNNbmM0pUjHTOa/KcROXNc/WcPCLjY3NI+K+reFtstmnn2zNmWFm5A9vSu/wDD/wASdD8UwLPpl6rttzLGZDuSvnWXXFUKZG474qsniXUdHl/tfw7ceVOh4V2wGHXB/KnSxU0+WxFSgrcyPpi+vPte4hsqvA+bNctrdu9y+1W4Jwea5X4e/H/w/wCJZ10fWHGnaiqjzopW/dy/TNdjcXVvJKoUhi3IwMcdq6lU5nucyb7HEX2nSWd87QPs2gsNvrXmHxlv9YktDJYzlZ1Q4VYwQ31r2rVNNt5pXkfGADmvK/iJb2SR3BVRtweamTai7HTTk4SXI7Hxl8W9C8W+I9QkOpyoDj/Vqh5H4dK8c8e+CtQi037I88IhXqIY8sPzr6o8fW9qb2Rggx7V8+fFbVrfSri4SNcEDc5I4A6dacJ1OWy3O2rmmNUOXndrdTyjSPB+laHdtqsEZEnH7x1G4j+lfRf7OP7OeseLrQfEPxdA0Ol2z77GKQbftj/3h6KPT2qx+yJ+x9q3xguoPiZ8QtOa38MwTbraBn2vqEinOCDyIx9Oa+wdb0qwg0MaVp9ikMFuoS3hiHCKBivqMry6pKCq1lqj4PN80bg4U3fuz5c8e6SILySBEUKjYAQYUewriZIfJ1ILjo/Few/EbQzvmBj/AI68t1+z+yzllXgda9Sorux85CTWp6f8IdVCmKJjnaQOa+hfAt8kkargYr5R+E+sFLyNN3fivpH4eX5NvGCw61tR0Lk+bU9K+0+9FVftY9TRXUZnrPi//WS/7v8AjXkXjHo/+/RRX5NXP1nDdTjLn7g+lQn/AFK/9dBRRWcSp/w2cj4t/wCRjtv+uy/zr6T8H/8AIBtv+uIooropfEccfhLlz/yD5v8Ark38jXlPjv8A48ZfoaKK3l8JcPiR87fEv/j4P+61fN/xY/15/wCvhP8A0IUUVvg/40fUxxn8KXofo58J/wDkifhX/sGxf+ihTtQ+5J9KKK/SqX8JH5vX+16niPxM/wCPif8A368e8Wffk+lFFefU+NmS2RP8Mf8AkKR/71fSnw5/49ovrRRWlE06HotFFFdJB//Z"
base64_cv2(s)
| true
| true
|
790ce7637a5ce9f336f48ba6550fc45f58aa80bf
| 1,108
|
py
|
Python
|
rate/users/tests/test_forms.py
|
Jeongkiwon/rate_everything
|
0931483a823288e75e0cb7a467b99594994911d4
|
[
"MIT"
] | 1
|
2019-01-10T04:48:44.000Z
|
2019-01-10T04:48:44.000Z
|
rate/users/tests/test_forms.py
|
Jeongkiwon/rate_everything
|
0931483a823288e75e0cb7a467b99594994911d4
|
[
"MIT"
] | 5
|
2020-06-05T19:54:51.000Z
|
2021-09-08T00:55:43.000Z
|
rate/users/tests/test_forms.py
|
Jeongkiwon/rate_everything
|
0931483a823288e75e0cb7a467b99594994911d4
|
[
"MIT"
] | null | null | null |
import pytest
from rate.users.forms import UserCreationForm
from rate.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
| 26.380952
| 59
| 0.590253
|
import pytest
from rate.users.forms import UserCreationForm
from rate.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
form.save()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
| true
| true
|
790ce9b259eeb276fefbe96bf7f91079330b857a
| 6,051
|
py
|
Python
|
cx_Oracle-doc/test/uLobVar.py
|
zaygeee/MASTER
|
6e11ec3383a13ae6f86ab1a23613bee7a2fc9ed5
|
[
"bzip2-1.0.6"
] | null | null | null |
cx_Oracle-doc/test/uLobVar.py
|
zaygeee/MASTER
|
6e11ec3383a13ae6f86ab1a23613bee7a2fc9ed5
|
[
"bzip2-1.0.6"
] | null | null | null |
cx_Oracle-doc/test/uLobVar.py
|
zaygeee/MASTER
|
6e11ec3383a13ae6f86ab1a23613bee7a2fc9ed5
|
[
"bzip2-1.0.6"
] | null | null | null |
"""Module for testing LOB (CLOB and BLOB) variables."""
class TestLobVar(BaseTestCase):
def __PerformTest(self, type, inputType):
if type.endswith("CLOB"):
longString = u""
else:
longString = ""
directType = getattr(cx_Oracle, type)
self.cursor.execute(u"truncate table Test%ss" % type)
for i in range(0, 11):
if i > 0:
if type.endswith("CLOB"):
char = unichr(ord('A') + i - 1)
else:
char = chr(ord('A') + i - 1)
longString += char * 25000
elif inputType != directType:
continue
self.cursor.setinputsizes(longString = inputType)
self.cursor.execute(u"""
insert into Test%ss (
IntCol,
%sCol
) values (
:integerValue,
:longString
)""" % (type, type),
integerValue = i,
longString = longString)
self.connection.commit()
self.cursor.execute(u"""
select *
from Test%ss
order by IntCol""" % type)
longString = ""
for row in self.cursor:
integerValue, lob = row
if integerValue == 0:
self.failUnlessEqual(lob.size(), 0)
self.failUnlessEqual(lob.read(), "")
else:
if type.endswith("CLOB"):
char = unichr(ord('A') + integerValue - 1)
prevChar = unichr(ord('A') + integerValue - 2)
actualValue = unicode(lob)
else:
char = chr(ord('A') + integerValue - 1)
prevChar = chr(ord('A') + integerValue - 2)
actualValue = str(lob)
longString += char * 25000
self.failUnlessEqual(lob.size(), len(longString))
self.failUnlessEqual(lob.read(), longString)
self.failUnlessEqual(actualValue, longString)
self.failUnlessEqual(lob.read(len(longString)), char)
if integerValue > 1:
offset = (integerValue - 1) * 25000 - 4
string = prevChar * 5 + char * 5
self.failUnlessEqual(lob.read(offset, 10), string)
def __TestTrim(self, type):
self.cursor.execute(u"truncate table Test%ss" % type)
self.cursor.setinputsizes(longString = getattr(cx_Oracle, type))
longString = "X" * 75000
if type.endswith("CLOB"):
longString = unicode(longString)
self.cursor.execute(u"""
insert into Test%ss (
IntCol,
%sCol
) values (
:integerValue,
:longString
)""" % (type, type),
integerValue = 1,
longString = longString)
self.cursor.execute(u"""
select %sCol
from Test%ss
where IntCol = 1""" % (type, type))
lob, = self.cursor.fetchone()
self.failUnlessEqual(lob.size(), 75000)
lob.trim(25000)
self.failUnlessEqual(lob.size(), 25000)
lob.trim()
self.failUnlessEqual(lob.size(), 0)
def testBLOBCursorDescription(self):
"test cursor description is accurate for BLOBs"
self.cursor.execute(u"select * from TestBLOBs")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'BLOBCOL', cx_Oracle.BLOB, -1, 4000, 0, 0, 0) ])
def testBLOBsDirect(self):
"test binding and fetching BLOB data (directly)"
self.__PerformTest("BLOB", cx_Oracle.BLOB)
def testBLOBsIndirect(self):
"test binding and fetching BLOB data (indirectly)"
self.__PerformTest("BLOB", cx_Oracle.LONG_BINARY)
def testBLOBTrim(self):
"test trimming a BLOB"
self.__TestTrim("BLOB")
def testCLOBCursorDescription(self):
"test cursor description is accurate for CLOBs"
self.cursor.execute(u"select * from TestCLOBs")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'CLOBCOL', cx_Oracle.CLOB, -1, 4000, 0, 0, 0) ])
def testCLOBsDirect(self):
"test binding and fetching CLOB data (directly)"
self.__PerformTest("CLOB", cx_Oracle.CLOB)
def testCLOBsIndirect(self):
"test binding and fetching CLOB data (indirectly)"
self.__PerformTest("CLOB", cx_Oracle.LONG_STRING)
def testCLOBTrim(self):
"test trimming a CLOB"
self.__TestTrim("CLOB")
def testMultipleFetch(self):
"test retrieving data from a CLOB after multiple fetches"
self.cursor.arraysize = 1
self.cursor.execute(u"select CLOBCol from TestCLOBS")
rows = self.cursor.fetchall()
self.failUnlessRaises(cx_Oracle.ProgrammingError, rows[1][0].read)
def testNCLOBCursorDescription(self):
"test cursor description is accurate for NCLOBs"
self.cursor.execute(u"select * from TestNCLOBs")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'NCLOBCOL', cx_Oracle.NCLOB, -1, 4000, 0, 0, 0) ])
def testNCLOBsDirect(self):
"test binding and fetching NCLOB data (directly)"
self.__PerformTest("NCLOB", cx_Oracle.NCLOB)
def testNCLOBsIndirect(self):
"test binding and fetching NCLOB data (indirectly)"
self.__PerformTest("NCLOB", cx_Oracle.LONG_STRING)
def testNCLOBTrim(self):
"test trimming a NCLOB"
self.__TestTrim("NCLOB")
| 39.54902
| 75
| 0.529334
|
class TestLobVar(BaseTestCase):
def __PerformTest(self, type, inputType):
if type.endswith("CLOB"):
longString = u""
else:
longString = ""
directType = getattr(cx_Oracle, type)
self.cursor.execute(u"truncate table Test%ss" % type)
for i in range(0, 11):
if i > 0:
if type.endswith("CLOB"):
char = unichr(ord('A') + i - 1)
else:
char = chr(ord('A') + i - 1)
longString += char * 25000
elif inputType != directType:
continue
self.cursor.setinputsizes(longString = inputType)
self.cursor.execute(u"""
insert into Test%ss (
IntCol,
%sCol
) values (
:integerValue,
:longString
)""" % (type, type),
integerValue = i,
longString = longString)
self.connection.commit()
self.cursor.execute(u"""
select *
from Test%ss
order by IntCol""" % type)
longString = ""
for row in self.cursor:
integerValue, lob = row
if integerValue == 0:
self.failUnlessEqual(lob.size(), 0)
self.failUnlessEqual(lob.read(), "")
else:
if type.endswith("CLOB"):
char = unichr(ord('A') + integerValue - 1)
prevChar = unichr(ord('A') + integerValue - 2)
actualValue = unicode(lob)
else:
char = chr(ord('A') + integerValue - 1)
prevChar = chr(ord('A') + integerValue - 2)
actualValue = str(lob)
longString += char * 25000
self.failUnlessEqual(lob.size(), len(longString))
self.failUnlessEqual(lob.read(), longString)
self.failUnlessEqual(actualValue, longString)
self.failUnlessEqual(lob.read(len(longString)), char)
if integerValue > 1:
offset = (integerValue - 1) * 25000 - 4
string = prevChar * 5 + char * 5
self.failUnlessEqual(lob.read(offset, 10), string)
def __TestTrim(self, type):
self.cursor.execute(u"truncate table Test%ss" % type)
self.cursor.setinputsizes(longString = getattr(cx_Oracle, type))
longString = "X" * 75000
if type.endswith("CLOB"):
longString = unicode(longString)
self.cursor.execute(u"""
insert into Test%ss (
IntCol,
%sCol
) values (
:integerValue,
:longString
)""" % (type, type),
integerValue = 1,
longString = longString)
self.cursor.execute(u"""
select %sCol
from Test%ss
where IntCol = 1""" % (type, type))
lob, = self.cursor.fetchone()
self.failUnlessEqual(lob.size(), 75000)
lob.trim(25000)
self.failUnlessEqual(lob.size(), 25000)
lob.trim()
self.failUnlessEqual(lob.size(), 0)
def testBLOBCursorDescription(self):
self.cursor.execute(u"select * from TestBLOBs")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'BLOBCOL', cx_Oracle.BLOB, -1, 4000, 0, 0, 0) ])
def testBLOBsDirect(self):
self.__PerformTest("BLOB", cx_Oracle.BLOB)
def testBLOBsIndirect(self):
self.__PerformTest("BLOB", cx_Oracle.LONG_BINARY)
def testBLOBTrim(self):
self.__TestTrim("BLOB")
def testCLOBCursorDescription(self):
self.cursor.execute(u"select * from TestCLOBs")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'CLOBCOL', cx_Oracle.CLOB, -1, 4000, 0, 0, 0) ])
def testCLOBsDirect(self):
self.__PerformTest("CLOB", cx_Oracle.CLOB)
def testCLOBsIndirect(self):
self.__PerformTest("CLOB", cx_Oracle.LONG_STRING)
def testCLOBTrim(self):
self.__TestTrim("CLOB")
def testMultipleFetch(self):
self.cursor.arraysize = 1
self.cursor.execute(u"select CLOBCol from TestCLOBS")
rows = self.cursor.fetchall()
self.failUnlessRaises(cx_Oracle.ProgrammingError, rows[1][0].read)
def testNCLOBCursorDescription(self):
self.cursor.execute(u"select * from TestNCLOBs")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'NCLOBCOL', cx_Oracle.NCLOB, -1, 4000, 0, 0, 0) ])
def testNCLOBsDirect(self):
self.__PerformTest("NCLOB", cx_Oracle.NCLOB)
def testNCLOBsIndirect(self):
self.__PerformTest("NCLOB", cx_Oracle.LONG_STRING)
def testNCLOBTrim(self):
self.__TestTrim("NCLOB")
| true
| true
|
790cea73f41da9665bff23934f6a6a8608f39f69
| 75
|
py
|
Python
|
vivit/extensions/secondorder/__init__.py
|
PwLo3K46/vivit
|
937642975be2ade122632d4eaef273461992d7ab
|
[
"MIT"
] | 7
|
2022-02-11T11:58:46.000Z
|
2022-02-15T01:40:36.000Z
|
vivit/extensions/secondorder/__init__.py
|
PwLo3K46/vivit
|
937642975be2ade122632d4eaef273461992d7ab
|
[
"MIT"
] | 18
|
2022-02-11T17:37:01.000Z
|
2022-03-20T16:46:53.000Z
|
vivit/extensions/secondorder/__init__.py
|
PwLo3K46/vivit
|
937642975be2ade122632d4eaef273461992d7ab
|
[
"MIT"
] | 1
|
2022-02-12T10:16:29.000Z
|
2022-02-12T10:16:29.000Z
|
"""BackPACK extensions/hooks for computing low-rank factors of the GGN."""
| 37.5
| 74
| 0.76
| true
| true
|
|
790ceae8930692601cf76d087c8e9730cab23cb6
| 2,038
|
py
|
Python
|
bin/pylama/lint/pylama_pycodestyle.py
|
ShadowLNC/linter-pylama
|
86e6960455f46c099bfd500c859e40c6bd3f9f7e
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
bin/pylama/lint/pylama_pycodestyle.py
|
ShadowLNC/linter-pylama
|
86e6960455f46c099bfd500c859e40c6bd3f9f7e
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
bin/pylama/lint/pylama_pycodestyle.py
|
ShadowLNC/linter-pylama
|
86e6960455f46c099bfd500c859e40c6bd3f9f7e
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
"""pycodestyle support."""
from pycodestyle import BaseReport, StyleGuide, get_parser, _parse_multi_options
from pylama.lint import Linter as Abstract
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Linter(Abstract):
"""pycodestyle runner."""
@staticmethod
def run(path, code=None, params=None, **meta):
"""Check code with pycodestyle.
:return list: List of errors.
"""
parser = get_parser()
for option in parser.option_list:
if option.dest and option.dest in params:
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ["filename", "exclude", "select", "ignore"]:
if key in params and isinstance(params[key], str):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines())
class _PycodestyleReport(BaseReport):
def __init__(self, *args, **kwargs):
super(_PycodestyleReport, self).__init__(*args, **kwargs)
self.errors = []
def init_file(self, filename, lines, expected, line_offset):
"""Prepare storage for errors."""
super(_PycodestyleReport, self).init_file(
filename, lines, expected, line_offset)
self.errors = []
def error(self, line_number, offset, text, check):
"""Save errors."""
code = super(_PycodestyleReport, self).error(
line_number, offset, text, check)
if code:
self.errors.append(dict(
text=text,
type=code.replace('E', 'C'),
col=offset + 1,
lnum=line_number,
))
def get_file_results(self):
"""Get errors.
:return list: List of errors.
"""
return self.errors
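# Illustrative usage only (not part of the plugin module): run the wrapper directly on a code
# string; keys in `params` mirror pycodestyle options such as max_line_length.
if __name__ == "__main__":
    sample = "x=1\n"
    for err in Linter.run("sample.py", code=sample, params={"max_line_length": 79}):
        print(err["lnum"], err["col"], err["type"], err["text"])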
| 28.704225
| 80
| 0.597645
|
from pycodestyle import BaseReport, StyleGuide, get_parser, _parse_multi_options
from pylama.lint import Linter as Abstract
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Linter(Abstract):
@staticmethod
def run(path, code=None, params=None, **meta):
parser = get_parser()
for option in parser.option_list:
if option.dest and option.dest in params:
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ["filename", "exclude", "select", "ignore"]:
if key in params and isinstance(params[key], str):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines())
class _PycodestyleReport(BaseReport):
def __init__(self, *args, **kwargs):
super(_PycodestyleReport, self).__init__(*args, **kwargs)
self.errors = []
def init_file(self, filename, lines, expected, line_offset):
super(_PycodestyleReport, self).init_file(
filename, lines, expected, line_offset)
self.errors = []
def error(self, line_number, offset, text, check):
code = super(_PycodestyleReport, self).error(
line_number, offset, text, check)
if code:
self.errors.append(dict(
text=text,
type=code.replace('E', 'C'),
col=offset + 1,
lnum=line_number,
))
def get_file_results(self):
return self.errors
| true
| true
|
790ceb38bb9c588d34f11ab8bc9e706b2fbb1076
| 982
|
py
|
Python
|
var/spack/repos/builtin/packages/parsplice/package.py
|
whitfin/spack
|
aabd2be31a511d0e00c1017f7311a421659319d9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-08-20T06:55:11.000Z
|
2018-08-20T06:55:11.000Z
|
var/spack/repos/builtin/packages/parsplice/package.py
|
whitfin/spack
|
aabd2be31a511d0e00c1017f7311a421659319d9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-04-29T22:36:27.000Z
|
2019-04-30T12:51:38.000Z
|
var/spack/repos/builtin/packages/parsplice/package.py
|
whitfin/spack
|
aabd2be31a511d0e00c1017f7311a421659319d9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-03-12T19:27:17.000Z
|
2020-03-12T19:27:17.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Parsplice(CMakePackage):
"""ParSplice code implements the Parallel Trajectory Splicing algorithm"""
homepage = "https://gitlab.com/exaalt/parsplice"
url = "https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1"
git = "https://gitlab.com/exaalt/parsplice.git"
tags = ['ecp', 'ecp-apps']
version('develop', branch='master')
version('1.1', '3a72340d49d731a076e8942f2ae2f4e9')
depends_on("cmake@3.1:", type='build')
depends_on("berkeley-db")
depends_on("nauty")
depends_on("boost")
depends_on("mpi")
depends_on("eigen@3:")
depends_on("lammps+lib@20170901:")
def cmake_args(self):
options = ['-DBUILD_SHARED_LIBS=ON']
return options
| 28.882353
| 105
| 0.685336
|
from spack import *
class Parsplice(CMakePackage):
homepage = "https://gitlab.com/exaalt/parsplice"
url = "https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1"
git = "https://gitlab.com/exaalt/parsplice.git"
tags = ['ecp', 'ecp-apps']
version('develop', branch='master')
version('1.1', '3a72340d49d731a076e8942f2ae2f4e9')
depends_on("cmake@3.1:", type='build')
depends_on("berkeley-db")
depends_on("nauty")
depends_on("boost")
depends_on("mpi")
depends_on("eigen@3:")
depends_on("lammps+lib@20170901:")
def cmake_args(self):
options = ['-DBUILD_SHARED_LIBS=ON']
return options
| true
| true
|
790cecfbcb6cea49dc4c4b612ae4395fb990ccb8
| 3,798
|
py
|
Python
|
tests/test_utils.py
|
lukerm48/dyc
|
f7b0a1daf9cdcc4d19bc48cbc4e22c5d5a9b8426
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
lukerm48/dyc
|
f7b0a1daf9cdcc4d19bc48cbc4e22c5d5a9b8426
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
lukerm48/dyc
|
f7b0a1daf9cdcc4d19bc48cbc4e22c5d5a9b8426
|
[
"MIT"
] | null | null | null |
from dyc.utils import (
get_leading_whitespace,
read_yaml,
get_indent_forward,
get_indent_backward,
get_extension,
is_comment,
)
class TestGetLeadingWhitespace:
def test_tabs(self):
"""Test tabs functionality"""
text = '\t\tHello'
expected = '\t\t'
got = get_leading_whitespace(text)
assert expected == got
def test_whitespace(self):
"""Test whitespace functionality"""
space = ' '
text = '{space}Such a long whitespace'.format(space=space)
expected = space
got = get_leading_whitespace(text)
assert expected == got
class TestReadYaml:
def test_should_return_none_if_not_found(self):
random_path = '/path/to/non/existing/file.yaml'
expected = None
got = read_yaml(random_path)
assert expected == got
class TestGetIndentForward:
def test_forward(self):
lines = []
lines.append( '\n')
lines.append('This is a Test')
assert get_indent_forward(lines, 0) == '\n'
class TestGetIndentBackward:
def test_backward(self):
lines = []
lines.append( '\n')
lines.append('This is a Test')
assert get_indent_backward(lines, 1) == 'This is a Test'
class TestGetExtension:
def test_existing_extension_valid(self):
ext = 'file.puk'
expected = 'puk'
got = get_extension(ext)
assert expected == got
def test_non_existing_extension(self):
ext = 'file'
expected = ''
got = get_extension(ext)
assert expected == got
def test_wrong_extension_type(self):
exts = [dict(), False, True, [], 123]
expected = ''
for ext in exts:
got = get_extension(ext)
assert expected == got
class TestIsComment:
def test_valid_comments(self):
"""Testing valid comments"""
text = '# Hello World'
assert is_comment(text, ['#']) == True
def test_invalid_comments(self):
"""Testing invalid comments"""
text = '# Hello World'
assert is_comment(text, ['//']) == False
class UtilsTest():
def __init__(self, whitespace, read_yaml, extension, comment,
indent_forward, indent_backward):
self.test_get_leading_white_space = whitespace
self.test_read_yaml = read_yaml
self.test_get_extension = extension
self.test_is_comment = comment
self.test_get_indent_forward = indent_forward
self.test_get_indent_backward = indent_backward
def test_whitespace(self):
self.test_get_leading_white_space.test_tabs()
self.test_get_leading_white_space.test_whitespace()
def test_readYaml(self):
self.test_read_yaml.test_should_return_none_if_not_found()
def test_extension(self):
self.test_get_extension.test_existing_extension_valid()
self.test_get_extension.test_non_existing_extension()
self.test_get_extension.test_wrong_extension_type()
def test_comment(self):
self.test_is_comment.test_valid_comments()
self.test_is_comment.test_invalid_comments()
def test_indent_forward(self):
self.test_get_indent_forward.test_forward()
def test_indent_backward(self):
self.test_get_indent_backward.test_backward()
utils_test = UtilsTest(TestGetLeadingWhitespace(),
TestReadYaml(),
TestGetExtension(),
TestIsComment(),
TestGetIndentForward(),
TestGetIndentBackward())
utils_test.test_whitespace()
utils_test.test_readYaml()
utils_test.test_extension()
utils_test.test_comment()
utils_test.test_indent_forward()
utils_test.test_indent_backward()
| 29.44186
| 66
| 0.644813
|
from dyc.utils import (
get_leading_whitespace,
read_yaml,
get_indent_forward,
get_indent_backward,
get_extension,
is_comment,
)
class TestGetLeadingWhitespace:
def test_tabs(self):
text = '\t\tHello'
expected = '\t\t'
got = get_leading_whitespace(text)
assert expected == got
def test_whitespace(self):
space = ' '
text = '{space}Such a long whitespace'.format(space=space)
expected = space
got = get_leading_whitespace(text)
assert expected == got
class TestReadYaml:
def test_should_return_none_if_not_found(self):
random_path = '/path/to/non/existing/file.yaml'
expected = None
got = read_yaml(random_path)
assert expected == got
class TestGetIndentForward:
def test_forward(self):
lines = []
lines.append( '\n')
lines.append('This is a Test')
assert get_indent_forward(lines, 0) == '\n'
class TestGetIndentBackward:
def test_backward(self):
lines = []
lines.append( '\n')
lines.append('This is a Test')
assert get_indent_backward(lines, 1) == 'This is a Test'
class TestGetExtension:
def test_existing_extension_valid(self):
ext = 'file.puk'
expected = 'puk'
got = get_extension(ext)
assert expected == got
def test_non_existing_extension(self):
ext = 'file'
expected = ''
got = get_extension(ext)
assert expected == got
def test_wrong_extension_type(self):
exts = [dict(), False, True, [], 123]
expected = ''
for ext in exts:
got = get_extension(ext)
assert expected == got
class TestIsComment:
def test_valid_comments(self):
text = '# Hello World'
assert is_comment(text, ['#']) == True
def test_invalid_comments(self):
text = '# Hello World'
assert is_comment(text, ['//']) == False
class UtilsTest():
def __init__(self, whitespace, read_yaml, extension, comment,
indent_forward, indent_backward):
self.test_get_leading_white_space = whitespace
self.test_read_yaml = read_yaml
self.test_get_extension = extension
self.test_is_comment = comment
self.test_get_indent_forward = indent_forward
self.test_get_indent_backward = indent_backward
def test_whitespace(self):
self.test_get_leading_white_space.test_tabs()
self.test_get_leading_white_space.test_whitespace()
def test_readYaml(self):
self.test_read_yaml.test_should_return_none_if_not_found()
def test_extension(self):
self.test_get_extension.test_existing_extension_valid()
self.test_get_extension.test_non_existing_extension()
self.test_get_extension.test_wrong_extension_type()
def test_comment(self):
self.test_is_comment.test_valid_comments()
self.test_is_comment.test_invalid_comments()
def test_indent_forward(self):
self.test_get_indent_forward.test_forward()
def test_indent_backward(self):
self.test_get_indent_backward.test_backward()
utils_test = UtilsTest(TestGetLeadingWhitespace(),
TestReadYaml(),
TestGetExtension(),
TestIsComment(),
TestGetIndentForward(),
TestGetIndentBackward())
utils_test.test_whitespace()
utils_test.test_readYaml()
utils_test.test_extension()
utils_test.test_comment()
utils_test.test_indent_forward()
utils_test.test_indent_backward()
| true
| true
|
790cee1ca81498cc735ffdeae364a130fd8ac3c8
| 5,714
|
py
|
Python
|
DIPDenoising/image_reading.py
|
junyuchen245/SPECT-Img-Denoising-DIP-Keras
|
5334c81de364438137a648302b208e58aef82d20
|
[
"MIT"
] | 1
|
2020-05-22T02:19:43.000Z
|
2020-05-22T02:19:43.000Z
|
DIPDenoising/image_reading.py
|
junyuchen245/SPECT-Img-Denoising-DIP-Keras
|
5334c81de364438137a648302b208e58aef82d20
|
[
"MIT"
] | null | null | null |
DIPDenoising/image_reading.py
|
junyuchen245/SPECT-Img-Denoising-DIP-Keras
|
5334c81de364438137a648302b208e58aef82d20
|
[
"MIT"
] | 2
|
2020-01-08T06:35:39.000Z
|
2021-04-10T08:27:06.000Z
|
import os
import numpy as np
import warnings
#import SimpleITK as sitk
import cv2
from scipy import misc
from scipy import ndimage
def load_image_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False):
"""loads images in the folder_path and returns a ndarray and threshold the label image"""
image_list = []
label_list = []
#counter = 0
for image_name in os.listdir(folder_path):
image_original = np.load(folder_path + image_name)
image_original = image_original['a']
#if image_original.shape[0] != 320:
# continue
#counter = counter + 1
#print image_name, counter
image_ct = image_original[:, 0:len(image_original)]
image_spect = image_original[:,len(image_original):len(image_original)*2]
label = image_original[:,len(image_original)*2:len(image_original)*3]
#image_ct = cv2.resize(image_ct, new_size)
#image_spect = cv2.resize(image_spect, new_size)
#label = cv2.resize(label, new_size)
#activate below for binary-class segmentation
#super_threshold_indices = label != 0
#label[super_threshold_indices] = 255
#label = label / 255.0
if HE == True:
image_ct = cv2.equalizeHist(image_ct)
image_spect = cv2.equalizeHist(image_spect)
elif Truc == True:
clahe = cv2.createCLAHE(clipLimit=0.1, tileGridSize=(8,8))
image_spect = clahe.apply(image_spect)
image_ct = clahe.apply(image_ct)
#ret, image = cv2.threshold(image,200,255,cv2.THRESH_TRUNC)
else:
image_spect = image_spect
image_ct = image_ct
#image augmentation method in the FusionNet paper
if Aug == True:
'''SPECT'''
imageSPECT_aug_1 = ndimage.rotate(image_spect, -90)
imageSPECT_aug_2 = np.flipud(imageSPECT_aug_1)
imageSPECT_aug_3 = ndimage.rotate(image_spect, -180)
imageSPECT_aug_4 = np.flipud(imageSPECT_aug_3)
imageSPECT_aug_5 = ndimage.rotate(image_spect, -270)
imageSPECT_aug_6 = np.flipud(imageSPECT_aug_5)
imageSPECT_aug_7 = np.flipud(image_spect)
'''CT'''
imageCT_aug_1 = ndimage.rotate(image_ct, -90)
imageCT_aug_2 = np.flipud(imageCT_aug_1)
imageCT_aug_3 = ndimage.rotate(image_ct, -180)
imageCT_aug_4 = np.flipud(imageCT_aug_3)
imageCT_aug_5 = ndimage.rotate(image_ct, -270)
imageCT_aug_6 = np.flipud(imageCT_aug_5)
imageCT_aug_7 = np.flipud(image_ct)
'''label'''
label_aug_1 = ndimage.rotate(label, -90)
label_aug_1 = label_aug_1.astype(int)
label_aug_2 = np.flipud(label_aug_1)
label_aug_2 = label_aug_2.astype(int)
label_aug_3 = ndimage.rotate(label, -180)
label_aug_3 = label_aug_3.astype(int)
label_aug_4 = np.flipud(label_aug_3)
label_aug_4 = label_aug_4.astype(int)
label_aug_5 = ndimage.rotate(label, -270)
label_aug_5 = label_aug_5.astype(int)
label_aug_6 = np.flipud(label_aug_5)
label_aug_6 = label_aug_6.astype(int)
label_aug_7 = np.flipud(label)
label_aug_7 = label_aug_7.astype(int)
image_all_0 = np.concatenate((image_ct,image_spect),axis=1)
image_all_1 = np.concatenate((imageCT_aug_1, imageSPECT_aug_1), axis=1)
image_all_2 = np.concatenate((imageCT_aug_2, imageSPECT_aug_2), axis=1)
image_all_3 = np.concatenate((imageCT_aug_3, imageSPECT_aug_3), axis=1)
image_all_4 = np.concatenate((imageCT_aug_4, imageSPECT_aug_4), axis=1)
image_all_5 = np.concatenate((imageCT_aug_5, imageSPECT_aug_5), axis=1)
image_all_6 = np.concatenate((imageCT_aug_6, imageSPECT_aug_6), axis=1)
image_all_7 = np.concatenate((imageCT_aug_7, imageSPECT_aug_7), axis=1)
image_list.append(image_all_0)
image_list.append(image_all_1)
image_list.append(image_all_2)
image_list.append(image_all_3)
image_list.append(image_all_4)
image_list.append(image_all_5)
image_list.append(image_all_6)
image_list.append(image_all_7)
label_list.append(label)
label_list.append(label_aug_1)
label_list.append(label_aug_2)
label_list.append(label_aug_3)
label_list.append(label_aug_4)
label_list.append(label_aug_5)
label_list.append(label_aug_6)
label_list.append(label_aug_7)
else:
image_all = np.concatenate((image_ct, image_spect), axis=1)
image_list.append(image_all)
label_list.append(label)
image_array = np.asarray(image_list)
label_array = np.asarray(label_list)
return image_array, label_array
def load_test_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False):
"""loads images in the folder_path and returns a ndarray and threshold the label image"""
image_list = []
#counter = 0
for image_name in os.listdir(folder_path):
image_original = np.load(folder_path + image_name)
image_original = image_original['a']
#counter = counter + 1
#print image_name, counter
image_ct = image_original[:, 0:len(image_original)]
image_spect = image_original[:,len(image_original):len(image_original)*2]
image_all = np.concatenate((image_ct, image_spect), axis=1)
image_list.append(image_all)
image_array = np.asarray(image_list)
return image_array
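A hedged usage sketch for the two loaders above; the folder paths are hypothetical, and each .npz file is assumed to store key 'a' with CT, SPECT and label panels concatenated side by side, as the code expects.
# Illustrative only: load (hypothetical) training and test folders and report shapes.
if __name__ == '__main__':
    images, labels = load_image_from_folder('/data/spect_train/', new_size=(128, 128), Aug=True)
    test_images = load_test_from_folder('/data/spect_test/', new_size=(128, 128))
    print(images.shape, labels.shape, test_images.shape)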
| 41.405797
| 93
| 0.645082
|
import os
import numpy as np
import warnings
import cv2
from scipy import misc
from scipy import ndimage
def load_image_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False):
image_list = []
label_list = []
for image_name in os.listdir(folder_path):
image_original = np.load(folder_path + image_name)
image_original = image_original['a']
image_ct = image_original[:, 0:len(image_original)]
image_spect = image_original[:,len(image_original):len(image_original)*2]
label = image_original[:,len(image_original)*2:len(image_original)*3]
if HE == True:
image_ct = cv2.equalizeHist(image_ct)
image_spect = cv2.equalizeHist(image_spect)
elif Truc == True:
clahe = cv2.createCLAHE(clipLimit=0.1, tileGridSize=(8,8))
image_spect = clahe.apply(image_spect)
image_ct = clahe.apply(image_ct)
else:
image_spect = image_spect
image_ct = image_ct
if Aug == True:
imageSPECT_aug_1 = ndimage.rotate(image_spect, -90)
imageSPECT_aug_2 = np.flipud(imageSPECT_aug_1)
imageSPECT_aug_3 = ndimage.rotate(image_spect, -180)
imageSPECT_aug_4 = np.flipud(imageSPECT_aug_3)
imageSPECT_aug_5 = ndimage.rotate(image_spect, -270)
imageSPECT_aug_6 = np.flipud(imageSPECT_aug_5)
imageSPECT_aug_7 = np.flipud(image_spect)
imageCT_aug_1 = ndimage.rotate(image_ct, -90)
imageCT_aug_2 = np.flipud(imageCT_aug_1)
imageCT_aug_3 = ndimage.rotate(image_ct, -180)
imageCT_aug_4 = np.flipud(imageCT_aug_3)
imageCT_aug_5 = ndimage.rotate(image_ct, -270)
imageCT_aug_6 = np.flipud(imageCT_aug_5)
imageCT_aug_7 = np.flipud(image_ct)
label_aug_1 = ndimage.rotate(label, -90)
label_aug_1 = label_aug_1.astype(int)
label_aug_2 = np.flipud(label_aug_1)
label_aug_2 = label_aug_2.astype(int)
label_aug_3 = ndimage.rotate(label, -180)
label_aug_3 = label_aug_3.astype(int)
label_aug_4 = np.flipud(label_aug_3)
label_aug_4 = label_aug_4.astype(int)
label_aug_5 = ndimage.rotate(label, -270)
label_aug_5 = label_aug_5.astype(int)
label_aug_6 = np.flipud(label_aug_5)
label_aug_6 = label_aug_6.astype(int)
label_aug_7 = np.flipud(label)
label_aug_7 = label_aug_7.astype(int)
image_all_0 = np.concatenate((image_ct,image_spect),axis=1)
image_all_1 = np.concatenate((imageCT_aug_1, imageSPECT_aug_1), axis=1)
image_all_2 = np.concatenate((imageCT_aug_2, imageSPECT_aug_2), axis=1)
image_all_3 = np.concatenate((imageCT_aug_3, imageSPECT_aug_3), axis=1)
image_all_4 = np.concatenate((imageCT_aug_4, imageSPECT_aug_4), axis=1)
image_all_5 = np.concatenate((imageCT_aug_5, imageSPECT_aug_5), axis=1)
image_all_6 = np.concatenate((imageCT_aug_6, imageSPECT_aug_6), axis=1)
image_all_7 = np.concatenate((imageCT_aug_7, imageSPECT_aug_7), axis=1)
image_list.append(image_all_0)
image_list.append(image_all_1)
image_list.append(image_all_2)
image_list.append(image_all_3)
image_list.append(image_all_4)
image_list.append(image_all_5)
image_list.append(image_all_6)
image_list.append(image_all_7)
label_list.append(label)
label_list.append(label_aug_1)
label_list.append(label_aug_2)
label_list.append(label_aug_3)
label_list.append(label_aug_4)
label_list.append(label_aug_5)
label_list.append(label_aug_6)
label_list.append(label_aug_7)
else:
image_all = np.concatenate((image_ct, image_spect), axis=1)
image_list.append(image_all)
label_list.append(label)
image_array = np.asarray(image_list)
label_array = np.asarray(label_list)
return image_array, label_array
def load_test_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False):
image_list = []
for image_name in os.listdir(folder_path):
image_original = np.load(folder_path + image_name)
image_original = image_original['a']
image_ct = image_original[:, 0:len(image_original)]
image_spect = image_original[:,len(image_original):len(image_original)*2]
image_all = np.concatenate((image_ct, image_spect), axis=1)
image_list.append(image_all)
image_array = np.asarray(image_list)
return image_array
| true
| true
|
790cee37b5bf06c4a4ecbb3615f999d2aaf405ae
| 13,157
|
py
|
Python
|
tensorflow/python/ops/boosted_trees_ops.py
|
vixadd/tensorflow
|
8c624204eb686a91779149dc500e6c8c60096074
|
[
"Apache-2.0"
] | 3
|
2019-11-19T14:07:27.000Z
|
2020-10-04T12:57:40.000Z
|
tensorflow/python/ops/boosted_trees_ops.py
|
vixadd/tensorflow
|
8c624204eb686a91779149dc500e6c8c60096074
|
[
"Apache-2.0"
] | 4
|
2020-04-09T16:22:20.000Z
|
2021-12-15T13:57:36.000Z
|
tensorflow/python/ops/boosted_trees_ops.py
|
vixadd/tensorflow
|
8c624204eb686a91779149dc500e6c8c60096074
|
[
"Apache-2.0"
] | 4
|
2022-01-13T11:23:44.000Z
|
2022-03-02T11:11:42.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for boosted_trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_boosted_trees_ops
from tensorflow.python.ops import resources
# Re-exporting ops used by other modules.
# pylint: disable=unused-import
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_aggregate_stats
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_bucketize
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split as calculate_best_feature_split
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split_v2 as calculate_best_feature_split_v2
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_create_quantile_stream_resource as create_quantile_stream_resource
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_quantile_summaries as make_quantile_summaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_add_summaries as quantile_add_summaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_deserialize as quantile_resource_deserialize
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_flush as quantile_flush
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_get_bucket_boundaries as get_bucket_boundaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as quantile_resource_handle_op
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_aggregate_stats
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_calculate_best_feature_split as sparse_calculate_best_feature_split
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble_v2 as update_ensemble_v2
from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as is_quantile_resource_initialized
# pylint: enable=unused-import
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
class PruningMode(object):
"""Class for working with Pruning modes."""
NO_PRUNING, PRE_PRUNING, POST_PRUNING = range(0, 3)
_map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING}
@classmethod
def from_str(cls, mode):
if mode in cls._map:
return cls._map[mode]
else:
raise ValueError(
'pruning_mode mode must be one of: {}. Found: {}'.format(', '.join(
sorted(cls._map)), mode))
class QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for QuantileAccumulator."""
def __init__(self, resource_handle, create_op, num_streams, name):
self._resource_handle = resource_handle
self._num_streams = num_streams
self._create_op = create_op
bucket_boundaries = get_bucket_boundaries(self._resource_handle,
self._num_streams)
slice_spec = ''
specs = []
def make_save_spec(tensor, suffix):
return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix)
for i in range(self._num_streams):
specs += [
make_save_spec(bucket_boundaries[i], '_bucket_boundaries_' + str(i))
]
super(QuantileAccumulatorSaveable, self).__init__(self._resource_handle,
specs, name)
def restore(self, restored_tensors, unused_tensor_shapes):
bucket_boundaries = restored_tensors
with ops.control_dependencies([self._create_op]):
return quantile_resource_deserialize(
self._resource_handle, bucket_boundaries=bucket_boundaries)
class QuantileAccumulator(tracking.TrackableResource):
"""SaveableObject implementation for QuantileAccumulator.
The bucket boundaries are serialized and deserialized from checkpointing.
"""
def __init__(self,
epsilon,
num_streams,
num_quantiles,
name=None,
max_elements=None):
self._eps = epsilon
self._num_streams = num_streams
self._num_quantiles = num_quantiles
super(QuantileAccumulator, self).__init__()
with ops.name_scope(name, 'QuantileAccumulator') as name:
self._name = name
self._resource_handle = self._create_resource()
self._init_op = self._initialize()
is_initialized_op = self.is_initialized()
resources.register_resource(self.resource_handle, self._init_op,
is_initialized_op)
self._saveable = QuantileAccumulatorSaveable(
self.resource_handle, self._init_op, self._num_streams,
self.resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
def _create_resource(self):
return quantile_resource_handle_op(
container='', shared_name=self._name, name=self._name)
def _initialize(self):
return create_quantile_stream_resource(self.resource_handle, self._eps,
self._num_streams)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return is_quantile_resource_initialized(self.resource_handle)
@property
def saveable(self):
return self._saveable
def _gather_saveables_for_checkpoint(self):
    return {'quantile_accumulator': self._saveable}
def add_summaries(self, float_columns, example_weights):
summaries = make_quantile_summaries(float_columns, example_weights,
self._eps)
summary_op = quantile_add_summaries(self.resource_handle, summaries)
return summary_op
def flush(self):
return quantile_flush(self.resource_handle, self._num_quantiles)
def get_bucket_boundaries(self):
return get_bucket_boundaries(self.resource_handle, self._num_streams)
class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeEnsemble."""
def __init__(self, resource_handle, create_op, name):
"""Creates a _TreeEnsembleSavable object.
Args:
resource_handle: handle to the decision tree ensemble variable.
create_op: the op to initialize the variable.
name: the name to save the tree ensemble variable under.
"""
stamp_token, serialized = (
gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle))
# slice_spec is useful for saving a slice from a variable.
    # It's not meaningful for the tree ensemble variable. So we just pass an empty
# value.
slice_spec = ''
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
name + '_stamp'),
saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec,
name + '_serialized'),
]
super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)
self._resource_handle = resource_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
self._resource_handle,
stamp_token=restored_tensors[0],
tree_ensemble_serialized=restored_tensors[1])
class TreeEnsemble(tracking.TrackableResource):
"""Creates TreeEnsemble resource."""
def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''):
self._stamp_token = stamp_token
self._serialized_proto = serialized_proto
self._is_local = is_local
with ops.name_scope(name, 'TreeEnsemble') as name:
self._name = name
self._resource_handle = self._create_resource()
self._init_op = self._initialize()
is_initialized_op = self.is_initialized()
# Adds the variable to the savable list.
if not is_local:
self._saveable = _TreeEnsembleSavable(
self.resource_handle, self.initializer, self.resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
resources.register_resource(
self.resource_handle,
self.initializer,
is_initialized_op,
is_shared=not is_local)
def _create_resource(self):
return gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
container='', shared_name=self._name, name=self._name)
def _initialize(self):
return gen_boosted_trees_ops.boosted_trees_create_ensemble(
self.resource_handle,
self._stamp_token,
tree_ensemble_serialized=self._serialized_proto)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized(
self.resource_handle)
def _gather_saveables_for_checkpoint(self):
if not self._is_local:
return {'tree_ensemble': self._saveable}
def get_stamp_token(self):
"""Returns the current stamp token of the resource."""
stamp_token, _, _, _, _ = (
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(
self.resource_handle))
return stamp_token
def get_states(self):
"""Returns states of the tree ensemble.
Returns:
stamp_token, num_trees, num_finalized_trees, num_attempted_layers and
range of the nodes in the latest layer.
"""
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = (
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(
self.resource_handle))
# Use identity to give names.
return (array_ops.identity(stamp_token, name='stamp_token'),
array_ops.identity(num_trees, name='num_trees'),
array_ops.identity(num_finalized_trees, name='num_finalized_trees'),
array_ops.identity(
num_attempted_layers, name='num_attempted_layers'),
array_ops.identity(nodes_range, name='last_layer_nodes_range'))
def serialize(self):
"""Serializes the ensemble into proto and returns the serialized proto.
Returns:
stamp_token: int64 scalar Tensor to denote the stamp of the resource.
serialized_proto: string scalar Tensor of the serialized proto.
"""
return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(
self.resource_handle)
def deserialize(self, stamp_token, serialized_proto):
"""Deserialize the input proto and resets the ensemble from it.
Args:
stamp_token: int64 scalar Tensor to denote the stamp of the resource.
serialized_proto: string scalar Tensor of the serialized proto.
Returns:
Operation (for dependencies).
"""
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
self.resource_handle, stamp_token, serialized_proto)
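A small sketch of the PruningMode helper defined above; the mode strings come from the class itself and the invalid value only demonstrates the ValueError path.
# Illustrative only: map pruning-mode strings to their integer codes.
for mode in ('none', 'pre', 'post'):
    print(mode, '->', PruningMode.from_str(mode))
try:
    PruningMode.from_str('aggressive')  # not a valid mode
except ValueError as err:
    print(err)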
| 42.996732
| 145
| 0.751995
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_aggregate_stats
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_bucketize
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split as calculate_best_feature_split
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split_v2 as calculate_best_feature_split_v2
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_create_quantile_stream_resource as create_quantile_stream_resource
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_quantile_summaries as make_quantile_summaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_add_summaries as quantile_add_summaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_deserialize as quantile_resource_deserialize
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_flush as quantile_flush
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_get_bucket_boundaries as get_bucket_boundaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as quantile_resource_handle_op
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_aggregate_stats
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_calculate_best_feature_split as sparse_calculate_best_feature_split
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble_v2 as update_ensemble_v2
from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as is_quantile_resource_initialized
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
class PruningMode(object):
NO_PRUNING, PRE_PRUNING, POST_PRUNING = range(0, 3)
_map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING}
@classmethod
def from_str(cls, mode):
if mode in cls._map:
return cls._map[mode]
else:
raise ValueError(
'pruning_mode mode must be one of: {}. Found: {}'.format(', '.join(
sorted(cls._map)), mode))
class QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject):
def __init__(self, resource_handle, create_op, num_streams, name):
self._resource_handle = resource_handle
self._num_streams = num_streams
self._create_op = create_op
bucket_boundaries = get_bucket_boundaries(self._resource_handle,
self._num_streams)
slice_spec = ''
specs = []
def make_save_spec(tensor, suffix):
return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix)
for i in range(self._num_streams):
specs += [
make_save_spec(bucket_boundaries[i], '_bucket_boundaries_' + str(i))
]
super(QuantileAccumulatorSaveable, self).__init__(self._resource_handle,
specs, name)
def restore(self, restored_tensors, unused_tensor_shapes):
bucket_boundaries = restored_tensors
with ops.control_dependencies([self._create_op]):
return quantile_resource_deserialize(
self._resource_handle, bucket_boundaries=bucket_boundaries)
class QuantileAccumulator(tracking.TrackableResource):
def __init__(self,
epsilon,
num_streams,
num_quantiles,
name=None,
max_elements=None):
self._eps = epsilon
self._num_streams = num_streams
self._num_quantiles = num_quantiles
super(QuantileAccumulator, self).__init__()
with ops.name_scope(name, 'QuantileAccumulator') as name:
self._name = name
self._resource_handle = self._create_resource()
self._init_op = self._initialize()
is_initialized_op = self.is_initialized()
resources.register_resource(self.resource_handle, self._init_op,
is_initialized_op)
self._saveable = QuantileAccumulatorSaveable(
self.resource_handle, self._init_op, self._num_streams,
self.resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
def _create_resource(self):
return quantile_resource_handle_op(
container='', shared_name=self._name, name=self._name)
def _initialize(self):
return create_quantile_stream_resource(self.resource_handle, self._eps,
self._num_streams)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return is_quantile_resource_initialized(self.resource_handle)
@property
def saveable(self):
return self._saveable
def _gather_saveables_for_checkpoint(self):
    return {'quantile_accumulator': self._saveable}
def add_summaries(self, float_columns, example_weights):
summaries = make_quantile_summaries(float_columns, example_weights,
self._eps)
summary_op = quantile_add_summaries(self.resource_handle, summaries)
return summary_op
def flush(self):
return quantile_flush(self.resource_handle, self._num_quantiles)
def get_bucket_boundaries(self):
return get_bucket_boundaries(self.resource_handle, self._num_streams)
class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject):
def __init__(self, resource_handle, create_op, name):
stamp_token, serialized = (
gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle))
# value.
slice_spec = ''
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
name + '_stamp'),
saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec,
name + '_serialized'),
]
super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)
self._resource_handle = resource_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
with ops.control_dependencies([self._create_op]):
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
self._resource_handle,
stamp_token=restored_tensors[0],
tree_ensemble_serialized=restored_tensors[1])
class TreeEnsemble(tracking.TrackableResource):
def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''):
self._stamp_token = stamp_token
self._serialized_proto = serialized_proto
self._is_local = is_local
with ops.name_scope(name, 'TreeEnsemble') as name:
self._name = name
self._resource_handle = self._create_resource()
self._init_op = self._initialize()
is_initialized_op = self.is_initialized()
# Adds the variable to the savable list.
if not is_local:
self._saveable = _TreeEnsembleSavable(
self.resource_handle, self.initializer, self.resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
resources.register_resource(
self.resource_handle,
self.initializer,
is_initialized_op,
is_shared=not is_local)
def _create_resource(self):
return gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
container='', shared_name=self._name, name=self._name)
def _initialize(self):
return gen_boosted_trees_ops.boosted_trees_create_ensemble(
self.resource_handle,
self._stamp_token,
tree_ensemble_serialized=self._serialized_proto)
@property
def initializer(self):
if self._init_op is None:
self._init_op = self._initialize()
return self._init_op
def is_initialized(self):
return gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized(
self.resource_handle)
def _gather_saveables_for_checkpoint(self):
if not self._is_local:
return {'tree_ensemble': self._saveable}
def get_stamp_token(self):
stamp_token, _, _, _, _ = (
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(
self.resource_handle))
return stamp_token
def get_states(self):
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = (
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(
self.resource_handle))
# Use identity to give names.
return (array_ops.identity(stamp_token, name='stamp_token'),
array_ops.identity(num_trees, name='num_trees'),
array_ops.identity(num_finalized_trees, name='num_finalized_trees'),
array_ops.identity(
num_attempted_layers, name='num_attempted_layers'),
array_ops.identity(nodes_range, name='last_layer_nodes_range'))
def serialize(self):
return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(
self.resource_handle)
def deserialize(self, stamp_token, serialized_proto):
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
self.resource_handle, stamp_token, serialized_proto)
| true
| true
|
790cefb87f9e42652e95c4ee938b171b4c8bc962
| 268
|
py
|
Python
|
backend/apps/cabins/migrations/0022_merge_20220210_1705.py
|
hovedstyret/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | 3
|
2021-11-18T09:29:14.000Z
|
2022-01-13T20:12:11.000Z
|
backend/apps/cabins/migrations/0022_merge_20220210_1705.py
|
rubberdok/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | 277
|
2022-01-17T18:16:44.000Z
|
2022-03-31T19:44:04.000Z
|
backend/apps/cabins/migrations/0022_merge_20220210_1705.py
|
hovedstyret/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.11 on 2022-02-10 16:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("cabins", "0020_auto_20211111_1825"),
("cabins", "0021_booking_is_declined"),
]
operations = []
| 19.142857
| 48
| 0.660448
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("cabins", "0020_auto_20211111_1825"),
("cabins", "0021_booking_is_declined"),
]
operations = []
| true
| true
|
790cefed3568de997b86c088df9e84f65fd2cf7f
| 736
|
py
|
Python
|
plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/actions/mysql/lock.py
|
a5a351e7/holland
|
58a12a5ce10206eed9434ab42b02217de29784bb
|
[
"BSD-3-Clause"
] | 1
|
2019-06-06T01:07:34.000Z
|
2019-06-06T01:07:34.000Z
|
plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/actions/mysql/lock.py
|
a5a351e7/holland
|
58a12a5ce10206eed9434ab42b02217de29784bb
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/actions/mysql/lock.py
|
a5a351e7/holland
|
58a12a5ce10206eed9434ab42b02217de29784bb
|
[
"BSD-3-Clause"
] | 2
|
2015-12-04T12:17:59.000Z
|
2022-03-23T07:22:02.000Z
|
import logging
LOG = logging.getLogger(__name__)
class FlushAndLockMySQLAction(object):
def __init__(self, client, extra_flush=True):
self.client = client
self.extra_flush = extra_flush
def __call__(self, event, snapshot_fsm, snapshot_vol):
if event == 'pre-snapshot':
if self.extra_flush:
LOG.debug("Executing FLUSH TABLES")
self.client.flush_tables()
LOG.debug("Executing FLUSH TABLES WITH READ LOCK")
LOG.info("Acquiring read-lock and flushing tables")
self.client.flush_tables_with_read_lock()
elif event == 'post-snapshot':
LOG.info("Releasing read-lock")
self.client.unlock_tables()
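A hedged sketch of how the action above is driven around a snapshot; the client is a stand-in that only implements the three methods the action calls.
# Illustrative only: a fake client showing the flush/lock/unlock sequence.
class FakeMySQLClient(object):
    def flush_tables(self):
        print("FLUSH TABLES")
    def flush_tables_with_read_lock(self):
        print("FLUSH TABLES WITH READ LOCK")
    def unlock_tables(self):
        print("UNLOCK TABLES")
action = FlushAndLockMySQLAction(FakeMySQLClient(), extra_flush=True)
action('pre-snapshot', snapshot_fsm=None, snapshot_vol=None)   # lock before taking the LVM snapshot
action('post-snapshot', snapshot_fsm=None, snapshot_vol=None)  # release afterwards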
| 35.047619
| 63
| 0.634511
|
import logging
LOG = logging.getLogger(__name__)
class FlushAndLockMySQLAction(object):
def __init__(self, client, extra_flush=True):
self.client = client
self.extra_flush = extra_flush
def __call__(self, event, snapshot_fsm, snapshot_vol):
if event == 'pre-snapshot':
if self.extra_flush:
LOG.debug("Executing FLUSH TABLES")
self.client.flush_tables()
LOG.debug("Executing FLUSH TABLES WITH READ LOCK")
LOG.info("Acquiring read-lock and flushing tables")
self.client.flush_tables_with_read_lock()
elif event == 'post-snapshot':
LOG.info("Releasing read-lock")
self.client.unlock_tables()
| true
| true
|
790cf05b1f2ffc4e39e3da9eee2f324a65bd2ac3
| 6,184
|
py
|
Python
|
src/desktopvirtualization/azext_desktopvirtualization/vendored_sdks/desktopvirtualization/_desktop_virtualization_api_client.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-01-25T07:33:18.000Z
|
2022-01-25T07:33:18.000Z
|
src/desktopvirtualization/azext_desktopvirtualization/vendored_sdks/desktopvirtualization/_desktop_virtualization_api_client.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 9
|
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/desktopvirtualization/azext_desktopvirtualization/vendored_sdks/desktopvirtualization/_desktop_virtualization_api_client.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import DesktopVirtualizationAPIClientConfiguration
from .operations import Operations
from .operations import WorkspacesOperations
from .operations import ScalingPlansOperations
from .operations import ApplicationGroupsOperations
from .operations import StartMenuItemsOperations
from .operations import ApplicationsOperations
from .operations import DesktopsOperations
from .operations import HostPoolsOperations
from .operations import UserSessionsOperations
from .operations import SessionHostsOperations
from .operations import MsixPackagesOperations
from .operations import MsixImagesOperations
from . import models
class DesktopVirtualizationAPIClient(object):
"""DesktopVirtualizationAPIClient.
:ivar operations: Operations operations
:vartype operations: desktop_virtualization_api_client.operations.Operations
:ivar workspaces: WorkspacesOperations operations
:vartype workspaces: desktop_virtualization_api_client.operations.WorkspacesOperations
:ivar scaling_plans: ScalingPlansOperations operations
:vartype scaling_plans: desktop_virtualization_api_client.operations.ScalingPlansOperations
:ivar application_groups: ApplicationGroupsOperations operations
:vartype application_groups: desktop_virtualization_api_client.operations.ApplicationGroupsOperations
:ivar start_menu_items: StartMenuItemsOperations operations
:vartype start_menu_items: desktop_virtualization_api_client.operations.StartMenuItemsOperations
:ivar applications: ApplicationsOperations operations
:vartype applications: desktop_virtualization_api_client.operations.ApplicationsOperations
:ivar desktops: DesktopsOperations operations
:vartype desktops: desktop_virtualization_api_client.operations.DesktopsOperations
:ivar host_pools: HostPoolsOperations operations
:vartype host_pools: desktop_virtualization_api_client.operations.HostPoolsOperations
:ivar user_sessions: UserSessionsOperations operations
:vartype user_sessions: desktop_virtualization_api_client.operations.UserSessionsOperations
:ivar session_hosts: SessionHostsOperations operations
:vartype session_hosts: desktop_virtualization_api_client.operations.SessionHostsOperations
:ivar msix_packages: MsixPackagesOperations operations
:vartype msix_packages: desktop_virtualization_api_client.operations.MsixPackagesOperations
:ivar msix_images: MsixImagesOperations operations
:vartype msix_images: desktop_virtualization_api_client.operations.MsixImagesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = DesktopVirtualizationAPIClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.workspaces = WorkspacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scaling_plans = ScalingPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_groups = ApplicationGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.start_menu_items = StartMenuItemsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.applications = ApplicationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.desktops = DesktopsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.host_pools = HostPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_sessions = UserSessionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.session_hosts = SessionHostsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.msix_packages = MsixPackagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.msix_images = MsixImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> DesktopVirtualizationAPIClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
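A hedged usage sketch for the generated client above, assuming azure-identity is installed; the subscription id is a placeholder and the list_by_subscription call follows the usual ARM operations pattern rather than being confirmed by this file.
# Illustrative only: instantiate the client and enumerate workspaces.
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
with DesktopVirtualizationAPIClient(credential, "00000000-0000-0000-0000-000000000000") as dv_client:
    for workspace in dv_client.workspaces.list_by_subscription():
        print(workspace.name)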
| 49.472
| 105
| 0.747413
|
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import DesktopVirtualizationAPIClientConfiguration
from .operations import Operations
from .operations import WorkspacesOperations
from .operations import ScalingPlansOperations
from .operations import ApplicationGroupsOperations
from .operations import StartMenuItemsOperations
from .operations import ApplicationsOperations
from .operations import DesktopsOperations
from .operations import HostPoolsOperations
from .operations import UserSessionsOperations
from .operations import SessionHostsOperations
from .operations import MsixPackagesOperations
from .operations import MsixImagesOperations
from . import models
class DesktopVirtualizationAPIClient(object):
def __init__(
self,
credential,
subscription_id,
base_url=None,
**kwargs
):
if not base_url:
base_url = 'https://management.azure.com'
self._config = DesktopVirtualizationAPIClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.workspaces = WorkspacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scaling_plans = ScalingPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_groups = ApplicationGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.start_menu_items = StartMenuItemsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.applications = ApplicationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.desktops = DesktopsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.host_pools = HostPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_sessions = UserSessionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.session_hosts = SessionHostsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.msix_packages = MsixPackagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.msix_images = MsixImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
| true
| true
|
790cf0917728b0fd2c56d1cef1a5a4d45cb2cea1
| 20
|
py
|
Python
|
tests/__init__.py
|
noamkatzir/palm-hand-reading
|
1a405759c03218fc74d661805bced8e4f4a92e74
|
[
"BSD-3-Clause"
] | 5
|
2018-10-21T13:07:36.000Z
|
2021-11-26T17:01:47.000Z
|
tests/__init__.py
|
noamkatzir/palm-hand-reading
|
1a405759c03218fc74d661805bced8e4f4a92e74
|
[
"BSD-3-Clause"
] | null | null | null |
tests/__init__.py
|
noamkatzir/palm-hand-reading
|
1a405759c03218fc74d661805bced8e4f4a92e74
|
[
"BSD-3-Clause"
] | 3
|
2019-06-08T07:04:36.000Z
|
2019-11-27T03:12:53.000Z
|
__author__ = 'noam'
| 10
| 19
| 0.7
|
__author__ = 'noam'
| true
| true
|
790cf1f006df6c4a1016d16f9fc43efc942a64d6
| 5,072
|
py
|
Python
|
tools/kernelspecs/kernels/R_kubernetes/scripts/launch_kubernetes.py
|
spotinst/wave-operator
|
c6cf6ad544b5df98bf80ae640245d309223f99fc
|
[
"Apache-2.0"
] | null | null | null |
tools/kernelspecs/kernels/R_kubernetes/scripts/launch_kubernetes.py
|
spotinst/wave-operator
|
c6cf6ad544b5df98bf80ae640245d309223f99fc
|
[
"Apache-2.0"
] | 9
|
2020-11-17T23:56:26.000Z
|
2021-04-26T22:26:29.000Z
|
tools/kernelspecs/kernels/R_kubernetes/scripts/launch_kubernetes.py
|
spotinst/wave-operator
|
c6cf6ad544b5df98bf80ae640245d309223f99fc
|
[
"Apache-2.0"
] | 1
|
2020-10-22T17:41:17.000Z
|
2020-10-22T17:41:17.000Z
|
import os
import sys
import yaml
import argparse
from kubernetes import client, config
import urllib3
from jinja2 import FileSystemLoader, Environment
urllib3.disable_warnings()
KERNEL_POD_TEMPLATE_PATH = '/kernel-pod.yaml.j2'
def generate_kernel_pod_yaml(keywords):
"""Return the kubernetes pod spec as a yaml string.
- load jinja2 template from this file directory.
- substitute template variables with keywords items.
"""
j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True)
    # jinja2 template substitutes template variables with None even if keywords doesn't contain a corresponding item.
    # Therefore, no need to check if any are left unsubstituted. The Kubernetes API server will validate the pod spec instead.
k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)
return k8s_yaml
def launch_kubernetes_kernel(kernel_id, port_range, response_addr, spark_context_init_mode):
# Launches a containerized kernel as a kubernetes pod.
config.load_incluster_config()
# Capture keywords and their values.
keywords = dict()
# Factory values...
# Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
# value since this is used to locate the kernel launch script within the image.
keywords['eg_port_range'] = port_range
keywords['eg_response_address'] = response_addr
keywords['kernel_id'] = kernel_id
keywords['kernel_name'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
keywords['kernel_spark_context_init_mode'] = spark_context_init_mode
# Walk env variables looking for names prefixed with KERNEL_. When found, set corresponding keyword value
# with name in lower case.
for name, value in os.environ.items():
if name.startswith('KERNEL_'):
keywords[name.lower()] = yaml.safe_load(value)
# Substitute all template variable (wrapped with {{ }}) and generate `yaml` string.
k8s_yaml = generate_kernel_pod_yaml(keywords)
# For each k8s object (kind), call the appropriate API method. Too bad there isn't a method
# that can take a set of objects.
#
# Creation for additional kinds of k8s objects can be added below. Refer to
# https://github.com/kubernetes-client/python for API signatures. Other examples can be found in
# https://github.com/jupyter-incubator/enterprise_gateway/blob/master/enterprise_gateway/services/processproxies/k8s.py
#
kernel_namespace = keywords['kernel_namespace']
k8s_objs = yaml.safe_load_all(k8s_yaml)
for k8s_obj in k8s_objs:
if k8s_obj.get('kind'):
if k8s_obj['kind'] == 'Pod':
#print("{}".format(k8s_obj)) # useful for debug
client.CoreV1Api(client.ApiClient()).create_namespaced_pod(body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'Secret':
client.CoreV1Api(client.ApiClient()).create_namespaced_secret(body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'PersistentVolumeClaim':
client.CoreV1Api(client.ApiClient()).create_namespaced_persistent_volume_claim(
body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'PersistentVolume':
client.CoreV1Api(client.ApiClient()).create_persistent_volume(body=k8s_obj)
else:
sys.exit("ERROR - Unhandled Kubernetes object kind '{}' found in yaml file - kernel launch terminating!".
format(k8s_obj['kind']))
else:
sys.exit("ERROR - Unknown Kubernetes object '{}' found in yaml file - kernel launch terminating!".
format(k8s_obj))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--RemoteProcessProxy.kernel-id', dest='kernel_id', nargs='?',
help='Indicates the id associated with the launched kernel.')
parser.add_argument('--RemoteProcessProxy.port-range', dest='port_range', nargs='?',
metavar='<lowerPort>..<upperPort>', help='Port range to impose for kernel ports')
parser.add_argument('--RemoteProcessProxy.response-address', dest='response_address', nargs='?',
metavar='<ip>:<port>', help='Connection address (<ip>:<port>) for returning connection file')
parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='spark_context_init_mode',
nargs='?', help='Indicates whether or how a spark context should be created',
default='none')
arguments = vars(parser.parse_args())
kernel_id = arguments['kernel_id']
port_range = arguments['port_range']
response_addr = arguments['response_address']
spark_context_init_mode = arguments['spark_context_init_mode']
launch_kubernetes_kernel(kernel_id, port_range, response_addr, spark_context_init_mode)
| 49.242718
| 123
| 0.699921
|
import os
import sys
import yaml
import argparse
from kubernetes import client, config
import urllib3
from jinja2 import FileSystemLoader, Environment
urllib3.disable_warnings()
KERNEL_POD_TEMPLATE_PATH = '/kernel-pod.yaml.j2'
def generate_kernel_pod_yaml(keywords):
j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True)
# Therefore, there is no need to check for unsubstituted variables here; the Kubernetes API server will validate the pod spec instead.
k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)
return k8s_yaml
def launch_kubernetes_kernel(kernel_id, port_range, response_addr, spark_context_init_mode):
# Launches a containerized kernel as a kubernetes pod.
config.load_incluster_config()
# Capture keywords and their values.
keywords = dict()
# Factory values...
# Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
# value since this is used to locate the kernel launch script within the image.
keywords['eg_port_range'] = port_range
keywords['eg_response_address'] = response_addr
keywords['kernel_id'] = kernel_id
keywords['kernel_name'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
keywords['kernel_spark_context_init_mode'] = spark_context_init_mode
# Walk env variables looking for names prefixed with KERNEL_. When found, set corresponding keyword value
# with name in lower case.
for name, value in os.environ.items():
if name.startswith('KERNEL_'):
keywords[name.lower()] = yaml.safe_load(value)
# Substitute all template variable (wrapped with {{ }}) and generate `yaml` string.
k8s_yaml = generate_kernel_pod_yaml(keywords)
# For each k8s object (kind), call the appropriate API method. Too bad there isn't a method that can take a set of objects.
kernel_namespace = keywords['kernel_namespace']
k8s_objs = yaml.safe_load_all(k8s_yaml)
for k8s_obj in k8s_objs:
if k8s_obj.get('kind'):
if k8s_obj['kind'] == 'Pod':
client.CoreV1Api(client.ApiClient()).create_namespaced_pod(body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'Secret':
client.CoreV1Api(client.ApiClient()).create_namespaced_secret(body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'PersistentVolumeClaim':
client.CoreV1Api(client.ApiClient()).create_namespaced_persistent_volume_claim(
body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'PersistentVolume':
client.CoreV1Api(client.ApiClient()).create_persistent_volume(body=k8s_obj)
else:
sys.exit("ERROR - Unhandled Kubernetes object kind '{}' found in yaml file - kernel launch terminating!".
format(k8s_obj['kind']))
else:
sys.exit("ERROR - Unknown Kubernetes object '{}' found in yaml file - kernel launch terminating!".
format(k8s_obj))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--RemoteProcessProxy.kernel-id', dest='kernel_id', nargs='?',
help='Indicates the id associated with the launched kernel.')
parser.add_argument('--RemoteProcessProxy.port-range', dest='port_range', nargs='?',
metavar='<lowerPort>..<upperPort>', help='Port range to impose for kernel ports')
parser.add_argument('--RemoteProcessProxy.response-address', dest='response_address', nargs='?',
metavar='<ip>:<port>', help='Connection address (<ip>:<port>) for returning connection file')
parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='spark_context_init_mode',
nargs='?', help='Indicates whether or how a spark context should be created',
default='none')
arguments = vars(parser.parse_args())
kernel_id = arguments['kernel_id']
port_range = arguments['port_range']
response_addr = arguments['response_address']
spark_context_init_mode = arguments['spark_context_init_mode']
launch_kubernetes_kernel(kernel_id, port_range, response_addr, spark_context_init_mode)
| true
| true
|
790cf1fc028c389713a0897411285dc88313b7b0
| 144
|
py
|
Python
|
src/bot/cocoa/src/basic/sessions/__init__.py
|
s-akanksha/DialoGraph_ICLR21
|
d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc
|
[
"Apache-2.0"
] | 12
|
2021-03-17T05:15:33.000Z
|
2022-01-19T06:09:21.000Z
|
src/bot/cocoa/src/basic/sessions/__init__.py
|
s-akanksha/DialoGraph_ICLR21
|
d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc
|
[
"Apache-2.0"
] | 2
|
2021-05-25T07:28:46.000Z
|
2022-02-11T01:54:43.000Z
|
src/bot/cocoa/src/basic/sessions/__init__.py
|
s-akanksha/DialoGraph_ICLR21
|
d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc
|
[
"Apache-2.0"
] | 4
|
2021-10-11T03:39:38.000Z
|
2022-02-01T23:58:50.000Z
|
__author__ = 'anushabala'
import sys
sys.path.append('/usr1/home/rjoshi2/negotiation_personality/src/negotiation/bot/cocoa/src/basic/sessions')
| 36
| 106
| 0.819444
|
__author__ = 'anushabala'
import sys
sys.path.append('/usr1/home/rjoshi2/negotiation_personality/src/negotiation/bot/cocoa/src/basic/sessions')
| true
| true
|
790cf2e0bcb6b2a4328753d39bb931054a1bdf96
| 5,891
|
py
|
Python
|
application/workprogramsapp/expertise/views.py
|
18ariana/analytics_backend
|
bfcda70564dd14dadb72de6a70fe2d66790eae85
|
[
"MIT"
] | null | null | null |
application/workprogramsapp/expertise/views.py
|
18ariana/analytics_backend
|
bfcda70564dd14dadb72de6a70fe2d66790eae85
|
[
"MIT"
] | null | null | null |
application/workprogramsapp/expertise/views.py
|
18ariana/analytics_backend
|
bfcda70564dd14dadb72de6a70fe2d66790eae85
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from rest_framework.exceptions import NotFound
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from workprogramsapp.expertise.models import UserExpertise, ExpertiseComments, Expertise
from workprogramsapp.expertise.serializers import UserExpertiseSerializer, CommentSerializer, ExpertiseSerializer
from workprogramsapp.permissions import IsMemberOfExpertise, IsRpdDeveloperOrReadOnly, IsMemberOfUserExpertise, \
IsExpertiseMaster, IsWorkProgramMemberOfExpertise
from workprogramsapp.workprogram_additions.models import UserStructuralUnit
class UserExpertiseListView(generics.ListAPIView):
"""
Returns all expertise information for an expert (based on the user's token, it automatically lists the expertise reviews the expert participates in).
To get one specific expertise for the user, pass its id.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfExpertise]
def get_queryset(self, *args, **kwargs):
if ('pk' in dict(self.kwargs)):
return UserExpertise.objects.filter(expertise=self.kwargs['pk'], expert=self.request.user)
else:
return UserExpertise.objects.filter(expert=self.request.user)
class UserExpertiseCreateView(generics.CreateAPIView):
"""
Create an expertise.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfExpertise]
class ExpertiseCommentsView(generics.ListAPIView):
"""
View for retrieving and posting comments.
Comments are retrieved or posted for the expertise whose id is given in the URL.
For GET requests, a comment block can optionally be specified in the query parameters.
"""
queryset = ExpertiseComments.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsMemberOfExpertise]
def get_queryset(self, *args, **kwargs):
if ('pk' in dict(self.kwargs)):
if self.request.query_params.get('block') != None:
return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'],
comment_block=self.request.query_params.get('block'))
else:
return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'])
else:
return ExpertiseComments.objects.all()
class ExpertiseCommentCreateView(generics.CreateAPIView):
"""
Create a comment on an expertise.
"""
queryset = ExpertiseComments.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsMemberOfExpertise]
class ExpertiseWorkProgramView(generics.RetrieveAPIView):
# TODO: Why is this view needed at all?
"""
Returns all expertise entries linked to the given work program id.
"""
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsWorkProgramMemberOfExpertise, IsRpdDeveloperOrReadOnly]
def get_object(self):
try:
return Expertise.objects.get(work_program__id=self.kwargs['pk'])
except Expertise.DoesNotExist:
raise NotFound()
class ExpertiseListView(generics.ListAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsMemberOfUserExpertise]
def list(self, request, **kwargs):
# The queryset is built inline according to the requesting user's role instead of using `self.queryset`
if request.user.groups.filter(name="expertise_master"):
queryset = Expertise.objects.all()
elif UserStructuralUnit.objects.filter(user=request.user, status__in=["leader", "deputy"]):
queryset = Expertise.objects.filter(
work_program__structural_unit__user_in_structural_unit__user=request.user,
work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct() | \
Expertise.objects.filter(expertse_users_in_rpd__expert=request.user).distinct()
else:
queryset = Expertise.objects.filter(expertse_users_in_rpd__expert=request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
newdata = dict(serializer.data[0])
return Response(newdata)
class ExpertiseViewById(generics.RetrieveAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsExpertiseMaster]
class ExpertiseCreateView(generics.CreateAPIView):
"""
Create an expertise.
Automatically adds the creating user as the expertise leader.
(See the serializer for details on how an expertise is created.)
"""
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsRpdDeveloperOrReadOnly]
class ChangeExpertiseView(generics.UpdateAPIView):
"""
Edit an expertise.
"""
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsExpertiseMaster]
class ChangeUserExpertiseView(generics.UpdateAPIView):
"""
Edit an individual user's expertise.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfUserExpertise]
class DeleteUserExpertise(generics.DestroyAPIView):
"""
Delete an individual user's expertise.
"""
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsExpertiseMaster]
| 38.253247
| 135
| 0.733831
|
from rest_framework import generics
from rest_framework.exceptions import NotFound
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from workprogramsapp.expertise.models import UserExpertise, ExpertiseComments, Expertise
from workprogramsapp.expertise.serializers import UserExpertiseSerializer, CommentSerializer, ExpertiseSerializer
from workprogramsapp.permissions import IsMemberOfExpertise, IsRpdDeveloperOrReadOnly, IsMemberOfUserExpertise, \
IsExpertiseMaster, IsWorkProgramMemberOfExpertise
from workprogramsapp.workprogram_additions.models import UserStructuralUnit
class UserExpertiseListView(generics.ListAPIView):
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfExpertise]
def get_queryset(self, *args, **kwargs):
if ('pk' in dict(self.kwargs)):
return UserExpertise.objects.filter(expertise=self.kwargs['pk'], expert=self.request.user)
else:
return UserExpertise.objects.filter(expert=self.request.user)
class UserExpertiseCreateView(generics.CreateAPIView):
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfExpertise]
class ExpertiseCommentsView(generics.ListAPIView):
queryset = ExpertiseComments.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsMemberOfExpertise]
def get_queryset(self, *args, **kwargs):
if ('pk' in dict(self.kwargs)):
if self.request.query_params.get('block') != None:
return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'],
comment_block=self.request.query_params.get('block'))
else:
return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'])
else:
return ExpertiseComments.objects.all()
class ExpertiseCommentCreateView(generics.CreateAPIView):
queryset = ExpertiseComments.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsMemberOfExpertise]
class ExpertiseWorkProgramView(generics.RetrieveAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsWorkProgramMemberOfExpertise, IsRpdDeveloperOrReadOnly]
def get_object(self):
try:
return Expertise.objects.get(work_program__id=self.kwargs['pk'])
except Expertise.DoesNotExist:
raise NotFound()
class ExpertiseListView(generics.ListAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsMemberOfUserExpertise]
def list(self, request, **kwargs):
if request.user.groups.filter(name="expertise_master"):
queryset = Expertise.objects.all()
elif UserStructuralUnit.objects.filter(user=request.user, status__in=["leader", "deputy"]):
queryset = Expertise.objects.filter(
work_program__structural_unit__user_in_structural_unit__user=request.user,
work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct() | \
Expertise.objects.filter(expertse_users_in_rpd__expert=request.user).distinct()
else:
queryset = Expertise.objects.filter(expertse_users_in_rpd__expert=request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
newdata = dict(serializer.data[0])
return Response(newdata)
class ExpertiseViewById(generics.RetrieveAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsExpertiseMaster]
class ExpertiseCreateView(generics.CreateAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsRpdDeveloperOrReadOnly]
class ChangeExpertiseView(generics.UpdateAPIView):
queryset = Expertise.objects.all()
serializer_class = ExpertiseSerializer
permission_classes = [IsExpertiseMaster]
class ChangeUserExpertiseView(generics.UpdateAPIView):
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsMemberOfUserExpertise]
class DeleteUserExpertise(generics.DestroyAPIView):
queryset = UserExpertise.objects.all()
serializer_class = UserExpertiseSerializer
permission_classes = [IsExpertiseMaster]
| true
| true
|
790cf4aaf35e246daa9960fc72e7aa450963d30f
| 11,681
|
py
|
Python
|
fedlearner/trainer/sparse_estimator.py
|
Hsy-Intel/fedlearner
|
d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d
|
[
"Apache-2.0"
] | 772
|
2020-01-21T13:59:42.000Z
|
2022-03-30T08:20:16.000Z
|
fedlearner/trainer/sparse_estimator.py
|
Hsy-Intel/fedlearner
|
d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d
|
[
"Apache-2.0"
] | 126
|
2020-03-03T07:54:39.000Z
|
2022-03-08T23:24:03.000Z
|
fedlearner/trainer/sparse_estimator.py
|
Hsy-Intel/fedlearner
|
d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d
|
[
"Apache-2.0"
] | 198
|
2020-01-22T02:16:17.000Z
|
2022-03-31T01:13:05.000Z
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import tensorflow.compat.v1 as tf
from tensorflow.contrib import graph_editor as ge
from fedlearner.trainer import embedding
from fedlearner.trainer import estimator
from fedlearner.trainer import feature
from fedlearner.trainer import operator
from fedlearner.trainer import utils
class ConfigRunError(Exception):
pass
class SparseFLModel(estimator.FLModel):
def __init__(self, role, bridge, example_ids, exporting=False,
config_run=True,
bias_tensor=None, vec_tensor=None,
bias_embedding=None, vec_embedding=None,
feature_columns=None):
super(SparseFLModel, self).__init__(role,
bridge, example_ids, exporting)
self._config_run = config_run
self._num_shards = 1
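# Note: when config_run is True the model is built in a throwaway graph with
# placeholder bias/vec tensors; model_fn runs only until freeze_slots() raises
# ConfigRunError, which lets SparseFLEstimator collect the slot/embedding configs
# before the real training graph is constructed.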
if config_run:
self._bias_tensor = tf.placeholder(tf.float32, shape=[None, None])
self._vec_tensor = tf.placeholder(tf.float32, shape=[None, None])
else:
self._bias_tensor = bias_tensor
self._vec_tensor = vec_tensor
self._bias_embedding = bias_embedding
self._vec_embedding = vec_embedding
self._feature_columns = feature_columns
self._frozen = False
self._slot_ids = []
self._feature_slots = {}
self._feature_column_v1s = {}
self._use_fid_v2 = False
self._num_embedding_groups = 3
def add_feature_slot(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fs = feature.FeatureSlot(*args, **kwargs)
if self._use_fid_v2:
assert 0 <= fs.slot_id < utils.MAX_SLOTS_v2, \
"Invalid slot id %d"%fs.slot_id
else:
assert 0 <= fs.slot_id < utils.MAX_SLOTS, \
"Invalid slot id %d"%fs.slot_id
self._slot_ids.append(fs.slot_id)
self._feature_slots[fs.slot_id] = fs
return fs
def add_feature_column(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fc = feature.FeatureColumnV1(*args, **kwargs)
slot_id = fc.feature_slot.slot_id
assert slot_id in self._feature_slots and \
self._feature_slots[slot_id] is fc.feature_slot, \
"FeatureSlot with id %d must be added to Model first"%slot_id
assert slot_id not in self._feature_column_v1s, \
"Only one FeatureColumnV1 can be created for each slot"
self._feature_column_v1s[slot_id] = fc
return fc
def set_use_fid_v2(self, use_fid_v2):
self._use_fid_v2 = use_fid_v2
def get_bias(self):
return self._bias_tensor
def get_vec(self):
return self._vec_tensor
def _get_bias_slot_configs(self):
if not self._config_run:
return self._bias_embedding.config if self._bias_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
fs = self._feature_slots[slot_id]
key = (id(fs._bias_initializer), id(fs._bias_optimizer))
fs_map[key] = fs
slot_list.append((fs.slot_id, 1, fs.hash_table_size, key))
if not slot_list:
return None
bias_config = utils._compute_slot_config(slot_list, 1,
self._use_fid_v2)
bias_config['name'] = 'bias'
bias_config['slot_list'] = slot_list
bias_config['initializers'] = [fs_map[i]._bias_initializer
for i in bias_config['weight_group_keys']]
bias_config['optimizers'] = [fs_map[i]._bias_optimizer
for i in bias_config['weight_group_keys']]
bias_config['use_fid_v2'] = self._use_fid_v2
return bias_config
def _get_vec_slot_configs(self):
if not self._config_run:
return self._vec_embedding.config if self._vec_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
if slot_id not in self._feature_column_v1s:
continue
fc = self._feature_column_v1s[slot_id]
fs = fc.feature_slot
if fc.feature_slot.dim > 1:
key = (id(fs._vec_initializer), id(fs._vec_optimizer))
fs_map[key] = fs
slot_list.append((slot_id, fs.dim - 1, fs.hash_table_size, key))
if not slot_list:
return None
vec_config = utils._compute_slot_config(slot_list,
self._num_embedding_groups,
self._use_fid_v2)
vec_config['name'] = 'vec'
vec_config['slot_list'] = slot_list
vec_config['initializers'] = [fs_map[i]._vec_initializer
for i in vec_config['weight_group_keys']]
vec_config['optimizers'] = [fs_map[i]._vec_optimizer
for i in vec_config['weight_group_keys']]
vec_config['use_fid_v2'] = self._use_fid_v2
return vec_config
def get_feature_columns(self):
return self._feature_column_v1s
def freeze_slots(self, features):
assert not self._frozen, "Already finalized"
if self._config_run:
raise ConfigRunError()
self._sparse_v2opt = {}
bias_config = self._get_bias_slot_configs()
if bias_config:
bias_weights = self._bias_embedding.weights
for i, opt in enumerate(bias_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[bias_weights[i][j]] = opt
vec_config = self._get_vec_slot_configs()
if vec_config:
vec_weights = self._vec_embedding.weights
for i, opt in enumerate(vec_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[vec_weights[i][j]] = opt
placeholders = []
dims = []
for slot_id, _, _, _ in vec_config['slot_list']:
fc = self._feature_column_v1s[slot_id]
for sslice in fc.feature_slot.feature_slices:
dims.append(sslice.len)
placeholders.append(fc.get_vector(sslice))
vec_split = tf.split(self._vec_tensor, dims, axis=1)
ge.swap_ts(vec_split, placeholders)
for slot in self._feature_slots.values():
slot._frozen = True
self._frozen = True
class SparseFLEstimator(estimator.FLEstimator):
def __init__(self,
cluster_server,
trainer_master,
bridge,
role,
model_fn,
is_chief=False):
super(SparseFLEstimator, self).__init__(
cluster_server, trainer_master, bridge, role, model_fn, is_chief)
self._bias_slot_configs = None
self._vec_slot_configs = None
self._slot_configs = None
try:
ps_indices = cluster_server.cluster_spec.task_indices('ps')
except ValueError:
ps_indices = None
finally:
self._embedding_devices = [None,] if not ps_indices else \
['/job:ps/task:%d'%i for i in ps_indices]
self._num_shards = len(self._embedding_devices)
def _preprocess_fids(self, fids, configs):
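# Flatten 2-D sparse indices to 1-D, then pre-shard the feature ids of every slot
# config across the embedding devices (one shard per ps task, or a single shard
# when no parameter servers are configured).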
if fids.indices.shape.rank == 2:
fids = tf.IndexedSlices(indices=fids.indices[:, 0],
values=fids.values,
dense_shape=fids.dense_shape)
features = {}
for config in configs:
features.update(operator._multidevice_preprocess_fids(
fids, config, num_shards=self._num_shards))
return features
def _set_model_configs(self, mode): #features, labels, mode):
with tf.Graph().as_default() as g:
M = SparseFLModel(self._role,
self._bridge,
None, #features['example_id'],
config_run=True)
try:
self._model_fn(M, None, None, mode) # features, labels, mode)
except ConfigRunError as e:
self._bias_slot_configs = M._get_bias_slot_configs()
self._vec_slot_configs = M._get_vec_slot_configs()
self._feature_columns = M.get_feature_columns()
self._slot_configs = [self._bias_slot_configs,
self._vec_slot_configs]
return self._slot_configs
raise UserWarning("Failed to get model config. Did you forget to call \
freeze_slots in model_fn?")
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
slot_configs = self._set_model_configs(mode) # features, labels, mode)
def input_fn_wrapper(*args, **kwargs):
dataset = input_fn(self._bridge, self._trainer_master)
def mapper(features, *args):
features.update(self._preprocess_fids(features.pop('fids'),
slot_configs))
return (features,) + args if args else features
dataset = dataset.map(
mapper, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(2)
return dataset
return super(SparseFLEstimator, self
)._get_features_and_labels_from_input_fn(input_fn_wrapper, mode)
def _get_model_spec(self, features, labels, mode):
features = features.copy()
if mode == tf.estimator.ModeKeys.PREDICT:
fids = tf.IndexedSlices(
indices=features.pop('fids_indices'),
values=features.pop('fids_values'),
dense_shape=features.pop('fids_dense_shape'))
features.update(self._preprocess_fids(
fids, self._slot_configs))
bias_embedding = embedding.Embedding(self._bias_slot_configs,
devices=self._embedding_devices)
bias_tensor = bias_embedding.lookup(features)
if self._vec_slot_configs is not None:
vec_embedding = embedding.Embedding(self._vec_slot_configs,
devices=self._embedding_devices)
vec_tensor = vec_embedding.lookup(features)
else:
vec_embedding = None
vec_tensor = None
model = SparseFLModel(self._role, self._bridge,
features.get('example_id', None),
config_run=False,
bias_tensor=bias_tensor,
bias_embedding=bias_embedding,
vec_tensor=vec_tensor,
vec_embedding=vec_embedding,
feature_columns=self._feature_columns)
spec = self._model_fn(model, features, labels, mode)
assert model._frozen, "Please finalize model in model_fn"
return spec, model
| 40.559028
| 80
| 0.606712
|
import tensorflow.compat.v1 as tf
from tensorflow.contrib import graph_editor as ge
from fedlearner.trainer import embedding
from fedlearner.trainer import estimator
from fedlearner.trainer import feature
from fedlearner.trainer import operator
from fedlearner.trainer import utils
class ConfigRunError(Exception):
pass
class SparseFLModel(estimator.FLModel):
def __init__(self, role, bridge, example_ids, exporting=False,
config_run=True,
bias_tensor=None, vec_tensor=None,
bias_embedding=None, vec_embedding=None,
feature_columns=None):
super(SparseFLModel, self).__init__(role,
bridge, example_ids, exporting)
self._config_run = config_run
self._num_shards = 1
if config_run:
self._bias_tensor = tf.placeholder(tf.float32, shape=[None, None])
self._vec_tensor = tf.placeholder(tf.float32, shape=[None, None])
else:
self._bias_tensor = bias_tensor
self._vec_tensor = vec_tensor
self._bias_embedding = bias_embedding
self._vec_embedding = vec_embedding
self._feature_columns = feature_columns
self._frozen = False
self._slot_ids = []
self._feature_slots = {}
self._feature_column_v1s = {}
self._use_fid_v2 = False
self._num_embedding_groups = 3
def add_feature_slot(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fs = feature.FeatureSlot(*args, **kwargs)
if self._use_fid_v2:
assert 0 <= fs.slot_id < utils.MAX_SLOTS_v2, \
"Invalid slot id %d"%fs.slot_id
else:
assert 0 <= fs.slot_id < utils.MAX_SLOTS, \
"Invalid slot id %d"%fs.slot_id
self._slot_ids.append(fs.slot_id)
self._feature_slots[fs.slot_id] = fs
return fs
def add_feature_column(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fc = feature.FeatureColumnV1(*args, **kwargs)
slot_id = fc.feature_slot.slot_id
assert slot_id in self._feature_slots and \
self._feature_slots[slot_id] is fc.feature_slot, \
"FeatureSlot with id %d must be added to Model first"%slot_id
assert slot_id not in self._feature_column_v1s, \
"Only one FeatureColumnV1 can be created for each slot"
self._feature_column_v1s[slot_id] = fc
return fc
def set_use_fid_v2(self, use_fid_v2):
self._use_fid_v2 = use_fid_v2
def get_bias(self):
return self._bias_tensor
def get_vec(self):
return self._vec_tensor
def _get_bias_slot_configs(self):
if not self._config_run:
return self._bias_embedding.config if self._bias_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
fs = self._feature_slots[slot_id]
key = (id(fs._bias_initializer), id(fs._bias_optimizer))
fs_map[key] = fs
slot_list.append((fs.slot_id, 1, fs.hash_table_size, key))
if not slot_list:
return None
bias_config = utils._compute_slot_config(slot_list, 1,
self._use_fid_v2)
bias_config['name'] = 'bias'
bias_config['slot_list'] = slot_list
bias_config['initializers'] = [fs_map[i]._bias_initializer
for i in bias_config['weight_group_keys']]
bias_config['optimizers'] = [fs_map[i]._bias_optimizer
for i in bias_config['weight_group_keys']]
bias_config['use_fid_v2'] = self._use_fid_v2
return bias_config
def _get_vec_slot_configs(self):
if not self._config_run:
return self._vec_embedding.config if self._vec_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
if slot_id not in self._feature_column_v1s:
continue
fc = self._feature_column_v1s[slot_id]
fs = fc.feature_slot
if fc.feature_slot.dim > 1:
key = (id(fs._vec_initializer), id(fs._vec_optimizer))
fs_map[key] = fs
slot_list.append((slot_id, fs.dim - 1, fs.hash_table_size, key))
if not slot_list:
return None
vec_config = utils._compute_slot_config(slot_list,
self._num_embedding_groups,
self._use_fid_v2)
vec_config['name'] = 'vec'
vec_config['slot_list'] = slot_list
vec_config['initializers'] = [fs_map[i]._vec_initializer
for i in vec_config['weight_group_keys']]
vec_config['optimizers'] = [fs_map[i]._vec_optimizer
for i in vec_config['weight_group_keys']]
vec_config['use_fid_v2'] = self._use_fid_v2
return vec_config
def get_feature_columns(self):
return self._feature_column_v1s
def freeze_slots(self, features):
assert not self._frozen, "Already finalized"
if self._config_run:
raise ConfigRunError()
self._sparse_v2opt = {}
bias_config = self._get_bias_slot_configs()
if bias_config:
bias_weights = self._bias_embedding.weights
for i, opt in enumerate(bias_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[bias_weights[i][j]] = opt
vec_config = self._get_vec_slot_configs()
if vec_config:
vec_weights = self._vec_embedding.weights
for i, opt in enumerate(vec_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[vec_weights[i][j]] = opt
placeholders = []
dims = []
for slot_id, _, _, _ in vec_config['slot_list']:
fc = self._feature_column_v1s[slot_id]
for sslice in fc.feature_slot.feature_slices:
dims.append(sslice.len)
placeholders.append(fc.get_vector(sslice))
vec_split = tf.split(self._vec_tensor, dims, axis=1)
ge.swap_ts(vec_split, placeholders)
for slot in self._feature_slots.values():
slot._frozen = True
self._frozen = True
class SparseFLEstimator(estimator.FLEstimator):
def __init__(self,
cluster_server,
trainer_master,
bridge,
role,
model_fn,
is_chief=False):
super(SparseFLEstimator, self).__init__(
cluster_server, trainer_master, bridge, role, model_fn, is_chief)
self._bias_slot_configs = None
self._vec_slot_configs = None
self._slot_configs = None
try:
ps_indices = cluster_server.cluster_spec.task_indices('ps')
except ValueError:
ps_indices = None
finally:
self._embedding_devices = [None,] if not ps_indices else \
['/job:ps/task:%d'%i for i in ps_indices]
self._num_shards = len(self._embedding_devices)
def _preprocess_fids(self, fids, configs):
if fids.indices.shape.rank == 2:
fids = tf.IndexedSlices(indices=fids.indices[:, 0],
values=fids.values,
dense_shape=fids.dense_shape)
features = {}
for config in configs:
features.update(operator._multidevice_preprocess_fids(
fids, config, num_shards=self._num_shards))
return features
def _set_model_configs(self, mode):
with tf.Graph().as_default() as g:
M = SparseFLModel(self._role,
self._bridge,
None,
config_run=True)
try:
self._model_fn(M, None, None, mode)
except ConfigRunError as e:
self._bias_slot_configs = M._get_bias_slot_configs()
self._vec_slot_configs = M._get_vec_slot_configs()
self._feature_columns = M.get_feature_columns()
self._slot_configs = [self._bias_slot_configs,
self._vec_slot_configs]
return self._slot_configs
raise UserWarning("Failed to get model config. Did you forget to call \
freeze_slots in model_fn?")
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
slot_configs = self._set_model_configs(mode)
def input_fn_wrapper(*args, **kwargs):
dataset = input_fn(self._bridge, self._trainer_master)
def mapper(features, *args):
features.update(self._preprocess_fids(features.pop('fids'),
slot_configs))
return (features,) + args if args else features
dataset = dataset.map(
mapper, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(2)
return dataset
return super(SparseFLEstimator, self
)._get_features_and_labels_from_input_fn(input_fn_wrapper, mode)
def _get_model_spec(self, features, labels, mode):
features = features.copy()
if mode == tf.estimator.ModeKeys.PREDICT:
fids = tf.IndexedSlices(
indices=features.pop('fids_indices'),
values=features.pop('fids_values'),
dense_shape=features.pop('fids_dense_shape'))
features.update(self._preprocess_fids(
fids, self._slot_configs))
bias_embedding = embedding.Embedding(self._bias_slot_configs,
devices=self._embedding_devices)
bias_tensor = bias_embedding.lookup(features)
if self._vec_slot_configs is not None:
vec_embedding = embedding.Embedding(self._vec_slot_configs,
devices=self._embedding_devices)
vec_tensor = vec_embedding.lookup(features)
else:
vec_embedding = None
vec_tensor = None
model = SparseFLModel(self._role, self._bridge,
features.get('example_id', None),
config_run=False,
bias_tensor=bias_tensor,
bias_embedding=bias_embedding,
vec_tensor=vec_tensor,
vec_embedding=vec_embedding,
feature_columns=self._feature_columns)
spec = self._model_fn(model, features, labels, mode)
assert model._frozen, "Please finalize model in model_fn"
return spec, model
| true
| true
|
790cf563324939c7d34424931157221f1225e53d
| 473
|
py
|
Python
|
sphinx/source/docs/first_steps/examples/first_steps_4_background.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 15,193
|
2015-01-01T05:11:45.000Z
|
2022-03-31T19:30:20.000Z
|
sphinx/source/docs/first_steps/examples/first_steps_4_background.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 9,554
|
2015-01-01T03:16:54.000Z
|
2022-03-31T22:59:39.000Z
|
sphinx/source/docs/first_steps/examples/first_steps_4_background.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 4,829
|
2015-01-02T03:35:32.000Z
|
2022-03-30T16:40:26.000Z
|
from bokeh.plotting import figure, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [4, 5, 5, 7, 2]
# create a plot
p = figure(
title="Background colors example",
sizing_mode="stretch_width",
max_width=500,
height=250,
)
# add a renderer
p.line(x, y, line_color="green", line_width=2)
# change the fill colors
p.background_fill_color = (204, 255, 255)
p.border_fill_color = (102, 204, 255)
p.outline_line_color = (0, 0, 255)
# show the results
show(p)
| 18.92
| 46
| 0.668076
|
from bokeh.plotting import figure, show
x = [1, 2, 3, 4, 5]
y = [4, 5, 5, 7, 2]
p = figure(
title="Background colors example",
sizing_mode="stretch_width",
max_width=500,
height=250,
)
p.line(x, y, line_color="green", line_width=2)
p.background_fill_color = (204, 255, 255)
p.border_fill_color = (102, 204, 255)
p.outline_line_color = (0, 0, 255)
show(p)
| true
| true
|
790cf7caeb741e9e1c22fc1370b58849da1405f3
| 3,902
|
py
|
Python
|
python/init.py
|
ur4ltz/ollypython
|
0193c64892c19ca5ada2545f1a63560d4dbc1360
|
[
"BSD-3-Clause"
] | null | null | null |
python/init.py
|
ur4ltz/ollypython
|
0193c64892c19ca5ada2545f1a63560d4dbc1360
|
[
"BSD-3-Clause"
] | null | null | null |
python/init.py
|
ur4ltz/ollypython
|
0193c64892c19ca5ada2545f1a63560d4dbc1360
|
[
"BSD-3-Clause"
] | 1
|
2016-11-14T14:11:42.000Z
|
2016-11-14T14:11:42.000Z
|
import os
import sys
import time
import _ollyapi
def addscriptpath(script):
"""
Add the path part of the scriptfile to the system path to
allow modules to be loaded from the same place.
Each path is added only once.
"""
pathfound = 0
scriptpath = os.path.dirname(script)
for pathitem in sys.path:
if pathitem == scriptpath:
pathfound = 1
break
if pathfound == 0:
sys.path.append(scriptpath)
def runscript(script):
"""
Run the specified script after adding its directory path to
system path.
This function is used by the low-level plugin code.
"""
addscriptpath(script)
watchdog.reset()
argv = sys.argv
sys.argv = [ script ]
execfile(script, globals())
sys.argv = argv
#-----------------------------------------------------------
# Take over the standard text outputs
#-----------------------------------------------------------
class MyStdOut:
"""
Dummy file-like class that receives stout and stderr
"""
def write(self, text):
# OllyDbg can't handle newlines so strip them out
fixed = text.replace('\n', '')
if fixed != '':
_ollyapi.Addtolist(0, 0, fixed)
def flush(self):
pass
def isatty(self):
return False
# Redirect stderr and stdout to the OllyDbg log window
sys.stdout = sys.stderr = MyStdOut()
# Assign a default sys.argv
sys.argv = [ "" ]
# Have to make sure Python finds our modules
sys.path.append(OLLYPYTHON_PATH)
from ollyapi import *
from ollyutils import *
#-------------------------------------------------------------
# Watchdog to catch runaway scripts after a specified timeout
#
# Usage:
# watchdog.install()
# watchdog.activate(10) # Use 10-second timeout
#
# Note: The watchdog only works for code running inside
# functions, not in global/module namespace.
#-------------------------------------------------------------
class WatchDog():
"""
Python tracer-based watchdog class
"""
def __init__(self, timeout=10):
self.timestamp = 0
self.timeout = timeout
self.installed = False
self.active = False
def install(self):
""" Install the tracer function, required for the watchdog """
if not self.installed:
sys.settrace(self.tracer)
self.installed = True
def activate(self, timeout=None):
""" Activate the watchdog, with optional timeout change """
assert self.installed, "WatchDog must be installed before activating"
if timeout:
self.timeout = timeout
self.reset()
self.active = True
def deactivate(self):
""" Deactivate the watchdog """
self.active = True
def reset(self):
""" Reset the timer, useful for long-running scripts """
self.timestamp = time.clock()
def tracer(self, frame, event, arg):
""" Tracer function that receives the tracing events """
if not self.active:
return None
#if event == 'line':
# if time.clock() - self.timestamp > self.timeout:
# if AskYN(0, "The script has not finished in %d seconds\nWould you like to stop it now?" % self.timeout) == 1:
# raise KeyboardInterrupt
# else:
# self.timestamp = time.clock()
return self.tracer
watchdog = WatchDog(10)
# Load the user's personal init file
# Plugin callback handlers
ollypython_shortcuts = []
def add_shortcut_handler(func):
# Need to also make sure the function is the right type
ollypython_shortcuts.append(func)
def remove_shortcut_handler(func):
ollypython_shortcuts.remove(func)
| 28.071942
| 127
| 0.563557
|
import os
import sys
import time
import _ollyapi
def addscriptpath(script):
pathfound = 0
scriptpath = os.path.dirname(script)
for pathitem in sys.path:
if pathitem == scriptpath:
pathfound = 1
break
if pathfound == 0:
sys.path.append(scriptpath)
def runscript(script):
addscriptpath(script)
watchdog.reset()
argv = sys.argv
sys.argv = [ script ]
execfile(script, globals())
sys.argv = argv
class MyStdOut:
def write(self, text):
fixed = text.replace('\n', '')
if fixed != '':
_ollyapi.Addtolist(0, 0, fixed)
def flush(self):
pass
def isatty(self):
return False
# Redirect stderr and stdout to the OllyDbg log window
sys.stdout = sys.stderr = MyStdOut()
# Assign a default sys.argv
sys.argv = [ "" ]
# Have to make sure Python finds our modules
sys.path.append(OLLYPYTHON_PATH)
from ollyapi import *
from ollyutils import *
#-------------------------------------------------------------
# Watchdog to catch runaway scripts after a specified timeout
#
# Usage:
# watchdog.install()
# watchdog.activate(10) # Use 10-second timeout
#
# Note: The watchdog only works for code running inside
# functions, not in global/module namespace.
#-------------------------------------------------------------
class WatchDog():
def __init__(self, timeout=10):
self.timestamp = 0
self.timeout = timeout
self.installed = False
self.active = False
def install(self):
if not self.installed:
sys.settrace(self.tracer)
self.installed = True
def activate(self, timeout=None):
assert self.installed, "WatchDog must be installed before activating"
if timeout:
self.timeout = timeout
self.reset()
self.active = True
def deactivate(self):
self.active = True
def reset(self):
self.timestamp = time.clock()
def tracer(self, frame, event, arg):
if not self.active:
return None
#if event == 'line':
# if time.clock() - self.timestamp > self.timeout:
# if AskYN(0, "The script has not finished in %d seconds\nWould you like to stop it now?" % self.timeout) == 1:
# raise KeyboardInterrupt
# else:
# self.timestamp = time.clock()
return self.tracer
watchdog = WatchDog(10)
# Load the user's personal init file
# Plugin callback handlers
ollypython_shortcuts = []
def add_shortcut_handler(func):
# Need to also make sure the function is the right type
ollypython_shortcuts.append(func)
def remove_shortcut_handler(func):
ollypython_shortcuts.remove(func)
| true
| true
|
790cf84f46eb94506d78b0f795b48648a2f8c55e
| 19,575
|
py
|
Python
|
IntOpt/shortespath/shortespath.py
|
Patyrn/Divide-and-Learn
|
ff03689c7ab6a7155ebd019babce8f79d0757a53
|
[
"MIT"
] | 7
|
2020-11-06T01:29:48.000Z
|
2022-01-02T12:49:40.000Z
|
IntOpt/shortespath/shortespath.py
|
Patyrn/Divide-and-Learn
|
ff03689c7ab6a7155ebd019babce8f79d0757a53
|
[
"MIT"
] | 2
|
2021-01-19T16:59:04.000Z
|
2021-01-25T10:17:46.000Z
|
IntOpt/shortespath/shortespath.py
|
Patyrn/Divide-and-Learn
|
ff03689c7ab6a7155ebd019babce8f79d0757a53
|
[
"MIT"
] | 5
|
2021-07-13T04:47:13.000Z
|
2022-01-17T14:05:06.000Z
|
import torch
from torch import nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import random
import numpy as np
import scipy as sp
import gurobipy as gp
from qpthlocal.qp import QPFunction
from qpthlocal.qp import QPSolvers
from qpthlocal.qp import make_gurobi_model
import pickle
import sys
import datetime
from collections import defaultdict
import math
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
import logging
import datetime
import time
from collections import defaultdict
from sklearn.metrics import mean_squared_error as mse
from scipy.special import expit, logit
import copy
from warnings import warn
from scipy.optimize import OptimizeWarning
sys.path.insert(0,'../Interior/')
sys.path.insert(0,'../..')
# from ip_model import *
from ip_model_whole import *
from remove_redundancy import _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense
from sgd_learner import *
import pandas as pd
def bceloss(inputs,target):
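# Binary cross-entropy on logits: since log(sigmoid(z)) = z + log(1 - sigmoid(z)),
# -(log(1 - expit(z)) + target*z).mean() equals the usual -(t*log(p) + (1-t)*log(1-p)).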
return -(np.log(1-expit(inputs)) + target*inputs).mean()
def _remove_redundant_rows (A_eq):
# remove redundant (linearly dependent) rows from equality constraints
n_rows_A = A_eq.shape[0]
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
"improve performance, check the problem formulation "
"for redundant equality constraints.")
# if (sps.issparse(A_eq)):
# if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
# A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)
# if A_eq.shape[0] < n_rows_A:
# warn(redundancy_warning, OptimizeWarning, stacklevel=1)
# if status != 0:
# complete = True
# return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,
# x, x0, undo, complete, status, message)
# This is a wild guess for which redundancy removal algorithm will be
# faster. More testing would be good.
small_nullspace = 5
if A_eq.size > 0:
try: # TODO: instead use results of first SVD in _remove_redundancy
rank = np.linalg.matrix_rank(A_eq)
except Exception: # oh well, we'll have to go with _remove_redundancy_dense
rank = 0
if A_eq.size > 0 and rank < A_eq.shape[0]:
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
dim_row_nullspace = A_eq.shape[0]-rank
if dim_row_nullspace <= small_nullspace:
d_removed, status, message = _remove_redundancy(A_eq)
if dim_row_nullspace > small_nullspace :
d_removed, status, message = _remove_redundancy_dense(A_eq)
if A_eq.shape[0] < rank:
message = ("Due to numerical issues, redundant equality "
"constraints could not be removed automatically. "
"Try providing your constraint matrices as sparse "
"matrices to activate sparse presolve, try turning "
"off redundancy removal, or try turning off presolve "
"altogether.")
status = 4
if status != 0:
complete = True
return d_removed
def get_loss(net,A, X, y,instances):
net.eval()
rslt = []
c_pred = net(torch.from_numpy(X).float()).squeeze().detach().numpy()
c = y
for k,v in instances.items():
source, destination = v
b = np.zeros(len(A))
b [source] =1
b[destination ]=-1
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape=A.shape[1], vtype=gp.GRB.BINARY, name="x")
model.setObjective(c_pred @x, gp.GRB.MINIMIZE)
model.addConstr(A @ x == b, name="eq")
model.optimize()
if model.status ==2:
sol =x.X
rslt.append( c.dot(sol))
else:
print(model.status, k,v)
net.train()
return mse(c_pred,c), sum(rslt)
def validation_module(net,A, X,y, training_instances,validation_instances, test_instances,time,
epoch,subepoch,**kwargs):
# return bceloss(c_pred,c), sum(rslt)
dict_validation = {}
losses_test = get_loss(net, A, X,y,test_instances)
dict_validation['test_prediction_loss'] = losses_test[0]
dict_validation['test_task_loss'] = losses_test[1]
losses_train = get_loss(net, A, X,y,training_instances)
dict_validation['train_prediction_loss'] = losses_train[0]
dict_validation['train_task_loss'] = losses_train[1]
losses_validation = get_loss(net, A, X,y,validation_instances)
dict_validation['validation_prediction_loss'] = losses_validation[0]
dict_validation['validation_task_loss'] = losses_validation[1]
dict_validation['batch'] = subepoch
dict_validation['epoch'] = epoch
dict_validation['time'] = time
return dict_validation
def make_fc(num_layers, num_features, num_targets=1,
activation_fn = nn.ReLU,intermediate_size=50, regularizers = True):
net_layers = [nn.Linear(num_features, intermediate_size),
activation_fn()]
for hidden in range(num_layers-2):
net_layers.append(nn.Linear(intermediate_size, intermediate_size))
net_layers.append(activation_fn())
net_layers.append(nn.Linear(intermediate_size, num_targets))
net_layers.append(nn.ReLU())
return nn.Sequential(*net_layers)
class two_stage_matching:
def __init__(self,A,num_features, num_layers, intermediate_size,
activation_fn = nn.ReLU, num_instance=1,
epochs=10,batchsize= 256, optimizer=optim.Adam,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.epochs = epochs
self.batchsize = batchsize
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
indexes = np.arange(n_train)
loss_fn = nn.MSELoss()# nn.KLDivLoss(reduction='batchmean')
for e in range(self.epochs):
start_time = time.time()
np.random.shuffle(indexes)
num_batches = len(indexes) //(self.batchsize)
bi = 0#batch-index
for b in range(num_batches):
self.optimizer.zero_grad()
X_np = X[indexes[bi:(bi+self.batchsize)]]
y_np = y[indexes[bi:(bi+self.batchsize)]]
bi += self.batchsize
X_torch = torch.from_numpy(X_np).float()
y_torch = torch.from_numpy(y_np).float()
c_pred = self.net(X_torch).squeeze()
loss = loss_fn(c_pred,y_torch)
loss.backward()
self.optimizer.step()
end_time = time.time()
time_ += end_time - start_time
if self.validation:
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances, test_instances,time_,e,b))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class qptl:
def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
gamma=1e-5,validation=False,
**hyperparams):
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.A = A
self.num_instance = num_instance
self.epochs = epochs
self.optimizer = optimizer
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
self.gamma= gamma
def fit(self,X,y,instances):
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
logging.info("training started")
# rows_to_be_removed = _remove_redundant_rows(self.A)
# A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
A_torch = torch.from_numpy(self.A).float()
Q_torch = self.gamma*torch.eye(A_torch.shape[1])
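# QPTL-style smoothing: the shortest-path LP itself has no informative gradient, so a
# small quadratic term with Q = gamma*I is added and the resulting QP is solved and
# differentiated through QPFunction below; gamma trades LP faithfulness for gradient quality.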
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
G_torch = -1*torch.eye(A_torch.shape[1])
h_torch = torch.zeros(A_torch.shape[1])
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
# b = np.zeros(len(self.A))
# b[source] =1
# b[dest ]=-1
# b= np.delete(b, rows_to_be_removed)
# b_torch = torch.from_numpy(b).float()
b_torch = torch.zeros(len(self.A))
b_torch[source] =1
b_torch[dest ]=-1
model_params_quad = make_gurobi_model(G_torch.detach().numpy(),
h_torch.detach().numpy(),A_torch.detach().numpy(),
b_torch.detach().numpy(), Q_torch.detach().numpy())
# model_params_quad = make_gurobi_model(None,None,
# A_torch.detach().numpy(),
# b_torch.detach().numpy(), Q_torch.detach().numpy())
c_pred = self.net(X_torch)
if any(torch.isnan(torch.flatten(c_pred)).tolist()):
logging.info("**Alert** nan in param c_pred ")
if any(torch.isinf(torch.flatten(c_pred)).tolist()):
logging.info("**Alert** inf in param c_pred ")
logging.info("shapes c {} A {} b {} G {} h {} Q {}".format(c_pred.shape,
A_torch.shape,b_torch.shape,G_torch.shape,h_torch.shape,
Q_torch.shape ))
x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
c_pred.squeeze(),G_torch.expand(1, *G_torch.shape),
h_torch.expand(1, *h_torch.shape),
A_torch.expand(1, *A_torch.shape),
b_torch.expand(1, *b_torch.shape))
# x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
# model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
# c_pred.squeeze(),torch.Tensor(),
# torch.Tensor(),
# A_torch.expand(1, *A_torch.shape),
# b_torch.expand(1, *b_torch.shape))
c_pred.retain_grad()
loss = (y_torch*x).mean()
loss.backward()
c_grad = copy.deepcopy(c_pred.grad)
if any(torch.isnan(torch.flatten(c_grad)).tolist()):
logging.info("**Alert** nan in param c_grad ")
self.optimizer.step()
# logging.info("bkwd done")
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0):
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class intopt:
def __init__(self,A, num_features, num_layers, intermediate_size,
num_instance= 1,activation_fn = nn.ReLU,epochs=10,optimizer=optim.Adam,
method=1,max_iter=100,smoothing=False,thr = None,mu0=None,full_row_rank=True,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.num_instance = num_instance
self.method = method
self.epochs = epochs
self.method = method
self.optimizer = optimizer
self.max_iter = max_iter
self.smoothing = smoothing
self.thr = thr
self.mu0 = mu0
self.validation = validation
self.full_row_rank = full_row_rank
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
#A_torch = torch.from_numpy(self.A).float()
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
# model = gp.Model()
# model.setParam('OutputFlag', 0)
# x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
if self.full_row_rank:
rows_to_be_removed = _remove_redundant_rows(self.A)
A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
else:
A_torch = torch.from_numpy(self.A).float()
logging.info("shape of A {} shape of A-torch {}".format(self.A.shape,A_torch.shape))
# A_ = np.delete(A_, rows_to_be_removed, axis=0)
# b_ = np.delete(b_, rows_to_be_removed)
# A_torch = torch.from_numpy(self.A).float()
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
logging.info("training started")
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
if self.full_row_rank:
b = np.zeros(len(self.A))
b[source] =1
b[dest ]=-1
b= np.delete(b, rows_to_be_removed)
b_torch = torch.from_numpy(b).float()
else:
b_torch = torch.zeros(len(self.A))
b_torch[source] = 1
b_torch[dest] = -1
c_pred = self.net(X_torch).squeeze()
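# IPOfunc (from the interior-point module imported above) solves the shortest-path LP
# min c_pred.x s.t. A x = b, x >= 0 and backpropagates through the (optionally smoothed)
# interior-point iterations; max_iter, mu0, thr and method configure that solver.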
x = IPOfunc(A_torch,b_torch,torch.Tensor(),torch.Tensor(),
bounds= [(0., None)],
max_iter=self.max_iter,mu0 = self.mu0,
thr=self.thr,method = self.method,
smoothing=self.smoothing)(c_pred)
loss = (y_torch*x).mean()
loss.backward()
self.optimizer.step()
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0) :
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
return df
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class SPO:
def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.epochs = epochs
self.num_instance = num_instance
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
#A_torch = torch.from_numpy(self.A).float()
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
true_solution ={}
logging.info("training started")
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
b = np.zeros(len(self.A))
b[source] =1
b[dest ]=-1
if i not in true_solution:
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(self.A @ x == b, name="eq")
model.setObjective((y_torch.detach().numpy())@x, gp.GRB.MINIMIZE)
model.optimize()
x_true = x.X
true_solution[i] = np.copy(x_true)
x_true = true_solution[i]
c_pred = self.net(X_torch).squeeze()
c_spo = (2*c_pred - y_torch)
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= self.A.shape[1], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(self.A @ x == b, name="eq")
model.setObjective((c_spo.detach().numpy())@x, gp.GRB.MINIMIZE)
model.optimize()
#print(model.status)
x_spo = x.X
grad = torch.from_numpy( x_true - x_spo).float()
loss = self.net(X_torch).squeeze()
loss.backward(gradient=grad)
self.optimizer.step()
logging.info("bkwd done")
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0):
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
# print(validation_module(self.net,self.A,
# X,y,train_instances,validation_instances,
# test_instances,time_,e,i))
# pred = self.predict(X)
# print(mse(pred,y))
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df if self.validation else None
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
        return pred.detach().numpy().squeeze()
| 34.463028
| 101
| 0.678161
|
import torch
from torch import nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import random
import numpy as np
import scipy as sp
import gurobipy as gp
from qpthlocal.qp import QPFunction
from qpthlocal.qp import QPSolvers
from qpthlocal.qp import make_gurobi_model
import pickle
import sys
import datetime
from collections import defaultdict
import math
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
import logging
import time
from sklearn.metrics import mean_squared_error as mse
from scipy.special import expit, logit
import copy
from warnings import warn
from scipy.optimize import OptimizeWarning
sys.path.insert(0,'../Interior/')
sys.path.insert(0,'../..')
from ip_model_whole import *
from remove_redundancy import _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense
from sgd_learner import *
import pandas as pd
def bceloss(inputs,target):
return -(np.log(1-expit(inputs)) + target*inputs).mean()
def _remove_redundant_rows (A_eq):
n_rows_A = A_eq.shape[0]
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
"improve performance, check the problem formulation "
"for redundant equality constraints.")
small_nullspace = 5
if A_eq.size > 0:
try:
rank = np.linalg.matrix_rank(A_eq)
except Exception:
rank = 0
if A_eq.size > 0 and rank < A_eq.shape[0]:
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
dim_row_nullspace = A_eq.shape[0]-rank
if dim_row_nullspace <= small_nullspace:
d_removed, status, message = _remove_redundancy(A_eq)
if dim_row_nullspace > small_nullspace :
d_removed, status, message = _remove_redundancy_dense(A_eq)
if A_eq.shape[0] < rank:
message = ("Due to numerical issues, redundant equality "
"constraints could not be removed automatically. "
"Try providing your constraint matrices as sparse "
"matrices to activate sparse presolve, try turning "
"off redundancy removal, or try turning off presolve "
"altogether.")
status = 4
if status != 0:
complete = True
return d_removed
def get_loss(net,A, X, y,instances):
net.eval()
rslt = []
c_pred = net(torch.from_numpy(X).float()).squeeze().detach().numpy()
c = y
for k,v in instances.items():
source, destination = v
b = np.zeros(len(A))
b [source] =1
b[destination ]=-1
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape=A.shape[1], vtype=gp.GRB.BINARY, name="x")
model.setObjective(c_pred @x, gp.GRB.MINIMIZE)
model.addConstr(A @ x == b, name="eq")
model.optimize()
if model.status ==2:
sol =x.X
rslt.append( c.dot(sol))
else:
print(model.status, k,v)
net.train()
return mse(c_pred,c), sum(rslt)
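# Editor's note (illustrative, not part of the original file): get_loss above reports
# two numbers -- the MSE between predicted and true arc costs, and the summed true cost
# of the paths Gurobi picks under the *predicted* costs (the task loss). The
# `instances` argument is assumed to be a mapping from an instance id to a
# (source_node, destination_node) pair, e.g. {0: (12, 87), 1: (3, 54)} with
# hypothetical node ids, matching how it is unpacked in the loop above.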
def validation_module(net,A, X,y, training_instances,validation_instances, test_instances,time,
epoch,subepoch,**kwargs):
# return bceloss(c_pred,c), sum(rslt)
dict_validation = {}
losses_test = get_loss(net, A, X,y,test_instances)
dict_validation['test_prediction_loss'] = losses_test[0]
dict_validation['test_task_loss'] = losses_test[1]
losses_train = get_loss(net, A, X,y,training_instances)
dict_validation['train_prediction_loss'] = losses_train[0]
dict_validation['train_task_loss'] = losses_train[1]
losses_validation = get_loss(net, A, X,y,validation_instances)
dict_validation['validation_prediction_loss'] = losses_validation[0]
dict_validation['validation_task_loss'] = losses_validation[1]
dict_validation['batch'] = subepoch
dict_validation['epoch'] = epoch
dict_validation['time'] = time
return dict_validation
def make_fc(num_layers, num_features, num_targets=1,
activation_fn = nn.ReLU,intermediate_size=50, regularizers = True):
net_layers = [nn.Linear(num_features, intermediate_size),
activation_fn()]
for hidden in range(num_layers-2):
net_layers.append(nn.Linear(intermediate_size, intermediate_size))
net_layers.append(activation_fn())
net_layers.append(nn.Linear(intermediate_size, num_targets))
net_layers.append(nn.ReLU())
return nn.Sequential(*net_layers)
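# Editor's note (illustrative, not part of the original file): a minimal sketch of
# what make_fc above produces. num_layers counts the Linear layers; the trailing
# ReLU presumably keeps the predicted costs non-negative.
def _make_fc_example():
    net = make_fc(num_layers=3, num_features=8, intermediate_size=32)
    # -> Sequential(Linear(8, 32), ReLU, Linear(32, 32), ReLU, Linear(32, 1), ReLU)
    out = net(torch.zeros(5, 8))    # a batch of 5 feature vectors
    return out.shape                # torch.Size([5, 1])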
class two_stage_matching:
def __init__(self,A,num_features, num_layers, intermediate_size,
activation_fn = nn.ReLU, num_instance=1,
epochs=10,batchsize= 256, optimizer=optim.Adam,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.epochs = epochs
self.batchsize = batchsize
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
indexes = np.arange(n_train)
loss_fn = nn.MSELoss()# nn.KLDivLoss(reduction='batchmean')
for e in range(self.epochs):
start_time = time.time()
np.random.shuffle(indexes)
num_batches = len(indexes) //(self.batchsize)
bi = 0#batch-index
for b in range(num_batches):
self.optimizer.zero_grad()
X_np = X[indexes[bi:(bi+self.batchsize)]]
y_np = y[indexes[bi:(bi+self.batchsize)]]
bi += self.batchsize
X_torch = torch.from_numpy(X_np).float()
y_torch = torch.from_numpy(y_np).float()
c_pred = self.net(X_torch).squeeze()
loss = loss_fn(c_pred,y_torch)
loss.backward()
self.optimizer.step()
end_time = time.time()
time_ += end_time - start_time
if self.validation:
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances, test_instances,time_,e,b))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df if self.validation else None
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class qptl:
def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
gamma=1e-5,validation=False,
**hyperparams):
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.A = A
self.num_instance = num_instance
self.epochs = epochs
self.optimizer = optimizer
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
self.gamma= gamma
def fit(self,X,y,instances):
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
logging.info("training started")
# rows_to_be_removed = _remove_redundant_rows(self.A)
# A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
A_torch = torch.from_numpy(self.A).float()
Q_torch = self.gamma*torch.eye(A_torch.shape[1])
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
G_torch = -1*torch.eye(A_torch.shape[1])
h_torch = torch.zeros(A_torch.shape[1])
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
# b = np.zeros(len(self.A))
# b[source] =1
# b[dest ]=-1
# b= np.delete(b, rows_to_be_removed)
# b_torch = torch.from_numpy(b).float()
b_torch = torch.zeros(len(self.A))
b_torch[source] =1
b_torch[dest ]=-1
model_params_quad = make_gurobi_model(G_torch.detach().numpy(),
h_torch.detach().numpy(),A_torch.detach().numpy(),
b_torch.detach().numpy(), Q_torch.detach().numpy())
# model_params_quad = make_gurobi_model(None,None,
# A_torch.detach().numpy(),
# b_torch.detach().numpy(), Q_torch.detach().numpy())
c_pred = self.net(X_torch)
if any(torch.isnan(torch.flatten(c_pred)).tolist()):
logging.info("**Alert** nan in param c_pred ")
if any(torch.isinf(torch.flatten(c_pred)).tolist()):
logging.info("**Alert** inf in param c_pred ")
logging.info("shapes c {} A {} b {} G {} h {} Q {}".format(c_pred.shape,
A_torch.shape,b_torch.shape,G_torch.shape,h_torch.shape,
Q_torch.shape ))
x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
c_pred.squeeze(),G_torch.expand(1, *G_torch.shape),
h_torch.expand(1, *h_torch.shape),
A_torch.expand(1, *A_torch.shape),
b_torch.expand(1, *b_torch.shape))
# x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
# model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
# c_pred.squeeze(),torch.Tensor(),
# torch.Tensor(),
# A_torch.expand(1, *A_torch.shape),
# b_torch.expand(1, *b_torch.shape))
c_pred.retain_grad()
loss = (y_torch*x).mean()
loss.backward()
c_grad = copy.deepcopy(c_pred.grad)
if any(torch.isnan(torch.flatten(c_grad)).tolist()):
logging.info("**Alert** nan in param c_grad ")
self.optimizer.step()
# logging.info("bkwd done")
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0):
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df if self.validation else None
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class intopt:
def __init__(self,A, num_features, num_layers, intermediate_size,
num_instance= 1,activation_fn = nn.ReLU,epochs=10,optimizer=optim.Adam,
method=1,max_iter=100,smoothing=False,thr = None,mu0=None,full_row_rank=True,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.num_instance = num_instance
self.method = method
self.epochs = epochs
self.method = method
self.optimizer = optimizer
self.max_iter = max_iter
self.smoothing = smoothing
self.thr = thr
self.mu0 = mu0
self.validation = validation
self.full_row_rank = full_row_rank
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
#A_torch = torch.from_numpy(self.A).float()
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
# model = gp.Model()
# model.setParam('OutputFlag', 0)
# x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
if self.full_row_rank:
rows_to_be_removed = _remove_redundant_rows(self.A)
A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
else:
A_torch = torch.from_numpy(self.A).float()
logging.info("shape of A {} shape of A-torch {}".format(self.A.shape,A_torch.shape))
# A_ = np.delete(A_, rows_to_be_removed, axis=0)
# b_ = np.delete(b_, rows_to_be_removed)
# A_torch = torch.from_numpy(self.A).float()
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
logging.info("training started")
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
if self.full_row_rank:
b = np.zeros(len(self.A))
b[source] =1
b[dest ]=-1
b= np.delete(b, rows_to_be_removed)
b_torch = torch.from_numpy(b).float()
else:
b_torch = torch.zeros(len(self.A))
b_torch[source] = 1
b_torch[dest] = -1
c_pred = self.net(X_torch).squeeze()
x = IPOfunc(A_torch,b_torch,torch.Tensor(),torch.Tensor(),
bounds= [(0., None)],
max_iter=self.max_iter,mu0 = self.mu0,
thr=self.thr,method = self.method,
smoothing=self.smoothing)(c_pred)
loss = (y_torch*x).mean()
loss.backward()
self.optimizer.step()
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0) :
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df if self.validation else None
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
        return pred.detach().numpy().squeeze()
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
class SPO:
def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
validation=False,**hyperparams):
self.A = A
self.num_features = num_features
self.num_layers = num_layers
self.activation_fn = activation_fn
self.intermediate_size = intermediate_size
self.epochs = epochs
self.num_instance = num_instance
self.validation = validation
self.net = make_fc(num_layers=num_layers, num_features=num_features,
activation_fn= activation_fn,
intermediate_size= intermediate_size)
self.optimizer = optimizer(self.net.parameters(), **hyperparams)
def fit(self,X,y,instances):
#A_torch = torch.from_numpy(self.A).float()
test_instances = instances['test']
validation_instances = instances['validation']
train_instances = instances['train']
time_ = 0
self.model_time = 0
n_train = X.shape[0]
if self.validation:
validation_list = []
X_torch = torch.from_numpy(X).float()
y_torch = torch.from_numpy(y).float()
true_solution ={}
logging.info("training started")
for e in range(self.epochs):
for i in range(self.num_instance):
start_time = time.time()
self.optimizer.zero_grad()
source, dest = train_instances[i]
b = np.zeros(len(self.A))
b[source] =1
b[dest ]=-1
if i not in true_solution:
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(self.A @ x == b, name="eq")
model.setObjective((y_torch.detach().numpy())@x, gp.GRB.MINIMIZE)
model.optimize()
x_true = x.X
true_solution[i] = np.copy(x_true)
x_true = true_solution[i]
c_pred = self.net(X_torch).squeeze()
c_spo = (2*c_pred - y_torch)
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= self.A.shape[1], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(self.A @ x == b, name="eq")
model.setObjective((c_spo.detach().numpy())@x, gp.GRB.MINIMIZE)
model.optimize()
#print(model.status)
x_spo = x.X
grad = torch.from_numpy( x_true - x_spo).float()
loss = self.net(X_torch).squeeze()
loss.backward(gradient=grad)
self.optimizer.step()
logging.info("bkwd done")
end_time = time.time()
time_ += end_time - start_time
if self.validation:
if ((i+1)%20==0):
validation_list.append( validation_module(self.net,self.A,
X,y,train_instances,validation_instances,
test_instances,time_,e,i))
print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
datetime.datetime.now()))
if self.validation :
dd = defaultdict(list)
for d in validation_list:
for key, value in d.items():
dd[key].append(value)
df = pd.DataFrame.from_dict(dd)
# print(validation_module(self.net,self.A,
# X,y,train_instances,validation_instances,
# test_instances,time_,e,i))
# pred = self.predict(X)
# print(mse(pred,y))
logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df if self.validation else None
def validation_result(self,X,y, instances):
validation_rslt = get_loss(self.net, self.A, X,y,instances)
return validation_rslt[0], validation_rslt[1]
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
        return pred.detach().numpy().squeeze()
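# Editor's note (illustrative, not part of the original file): the SPO class above
# takes an SPO+-style subgradient step -- it solves the LP once with the true costs y
# and once with the perturbed costs (2*c_pred - y), then backpropagates the
# difference of the two solutions (x_true - x_spo) through the prediction net.
# A hedged construction sketch; the argument names and data shapes are assumptions:
def _spo_construction_sketch(A, num_arc_features):
    reg = SPO(A, num_features=num_arc_features, num_layers=2,
              intermediate_size=32, num_instance=1, epochs=1, lr=1e-3)
    # reg.fit(X, y, instances) then expects X of shape (n_arcs, num_arc_features),
    # y of shape (n_arcs,) with true costs, and instances['train'] as (source, dest) pairs.
    return reg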
| true
| true
|
790cf8aa27b10f4715d6d4ea418e882eee85a64e
| 11,748
|
py
|
Python
|
src/dapt_pretraining.py
|
TysonYu/AdaptSum
|
a4f17060e7a8e6f9b86d33a930804445e4226ba4
|
[
"CC-BY-4.0"
] | 29
|
2021-03-18T03:43:27.000Z
|
2022-03-23T02:13:46.000Z
|
src/dapt_pretraining.py
|
TysonYu/AdaptSum
|
a4f17060e7a8e6f9b86d33a930804445e4226ba4
|
[
"CC-BY-4.0"
] | 4
|
2021-04-17T13:33:29.000Z
|
2021-12-13T13:52:45.000Z
|
src/dapt_pretraining.py
|
TysonYu/AdaptSum
|
a4f17060e7a8e6f9b86d33a930804445e4226ba4
|
[
"CC-BY-4.0"
] | 1
|
2021-06-07T08:30:35.000Z
|
2021-06-07T08:30:35.000Z
|
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BartForConditionalGeneration, BartTokenizer, get_linear_schedule_with_warmup
from others.logging import logger
from others.utils import pad_sents, get_mask
from others.optimizer import build_optim
from tqdm import tqdm
import numpy as np
import argparse
import random
import os
from nltk.tokenize import sent_tokenize
def text_infilling(sent, mask_probability=0.05, lamda=3):
'''
inputs:
sent: a sentence string
mask_probability: probability for masking tokens
        lamda: lambda parameter of the Poisson distribution used to draw mask span lengths
outputs:
sent: a list of tokens with masked tokens
'''
sent = sent.split()
length = len(sent)
mask_indices = (np.random.uniform(0, 1, length) < mask_probability) * 1
    span_list = np.random.poisson(lamda, length) # span lengths drawn from a Poisson distribution
nonzero_idx = np.nonzero(mask_indices)[0]
for item in nonzero_idx:
        span = min(span_list[item], 5) # mask at most 5 consecutive tokens
for i in range(span):
if item+i >= length:
continue
mask_indices[item+i] = 1
for i in range(length):
if mask_indices[i] == 1:
sent[i] = '<mask>'
# merge the <mask>s to one <mask>
final_sent = []
mask_flag = 0
for word in sent:
if word != '<mask>':
mask_flag = 0
final_sent.append(word)
else:
if mask_flag == 0:
final_sent.append(word)
mask_flag = 1
return final_sent
def sent_permutation(sent):
'''
inputs:
sent: a sentence string
outputs:
shuffle_sent: a string after sentence permutations
'''
    # split the text into sentences with NLTK's sent_tokenize before shuffling their order
splits = sent_tokenize(sent)
random.shuffle(splits)
return " ".join(splits)
def add_noise(sents, mask_probability):
noisy_sent_list = []
for sent in sents:
noisy_sent = sent_permutation(sent)
noisy_sent = text_infilling(noisy_sent, mask_probability)
noisy_sent = " ".join(noisy_sent)
noisy_sent_list.append(noisy_sent)
return noisy_sent_list
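# Editor's note (illustrative, not part of the original file): a small seeded sketch
# of the BART-style noising above -- sentence permutation followed by text infilling.
# The sentence is made up and the exact output depends on the random draws; spans of
# masked tokens collapse into a single <mask> placeholder.
def _noising_demo():
    random.seed(0)
    np.random.seed(0)
    clean = "The model reads noisy text. It must reconstruct the original sentence."
    noisy = add_noise([clean], mask_probability=0.3)[0]
    # e.g. "It must reconstruct the original sentence. The model <mask> noisy text."
    return noisy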
class CorpusDataset(Dataset):
def __init__(self, data_path, denoising_flag=False):
self.data = []
with open(data_path, "r", ) as f:
for i, line in enumerate(f):
line = line.strip()
if denoising_flag:
line = "denoising: " + line
self.data.append(line) # append a list of tokens each time
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class BartLMTrainer(object):
def __init__(self, model, dataloader, tokenizer, args, pretrained_model=None):
self.args = args
self.model = model
self.pretrained_model = pretrained_model
self.optimizer = build_optim(args, model, None, pretrained_model)
self.dataloader = dataloader
self.tokenizer = tokenizer
self.epoch = args.epoch
self.mask_probability = args.mask_prob
self.accumulation_steps = args.accum_step
self.clip = args.clip
self.domain = args.dm
self.path = args.path
if args.recadam:
if args.max_steps > 0:
t_total = args.max_steps
self.epoch = args.max_steps // (len(self.dataloader) // self.accumulation_steps) + 1
else:
t_total = len(self.dataloader) // self.accumulation_steps * self.epoch
self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
def train(self):
print('Start finetuning BART language model')
iteration = 0
for epoch_i in range(self.epoch):
self.model.train()
if self.pretrained_model is not None:
self.pretrained_model.eval()
print('[ Epoch : {}]'.format(epoch_i))
loss_list = []
dist_sum, dist_num = 0.0, 0
pbar = tqdm(self.dataloader, total=len(self.dataloader))
for sents in pbar:
sents = [self.shorten_sent(sent) for sent in sents]
iteration += 1
tokenized_sents = self.tokenize(sents)
decoder_ids = [[self.tokenizer.bos_token_id] + item for item in tokenized_sents]
label_ids = [item + [self.tokenizer.eos_token_id] for item in tokenized_sents]
# print("before:")
# print(sents[0])
# print("tokenized sents:")
# print(tokenized_sents[0])
# sents: a list of sentence, each item inside is a string
noisy_text = add_noise(sents, self.mask_probability)
# noisy_text: a list of sentence, each item inside is a string
# print("after:")
# print(noisy_text[0])
inputs_ids = self.tokenize(noisy_text)
# print("tokenized noisy text:")
# print(inputs_ids[0])
# prepare data for training
mask = torch.tensor(get_mask(inputs_ids, max_len=512)).cuda()
inputs_ids = torch.tensor(pad_sents(inputs_ids, pad_token=self.tokenizer.pad_token_id, max_len=512)[0]).cuda()
decoder_ids = torch.tensor(pad_sents(decoder_ids, pad_token=self.tokenizer.pad_token_id, max_len=512)[0]).cuda()
label_ids = torch.tensor(pad_sents(label_ids, pad_token=-100, max_len=512)[0]).cuda()
#optimize model
loss = self.model(input_ids=inputs_ids, attention_mask=mask, decoder_input_ids=decoder_ids, labels=label_ids)[0]
loss_list.append(loss.item())
loss = loss / self.accumulation_steps
loss.backward()
if self.args.logging_Euclid_dist:
dist = torch.sum(torch.abs(torch.cat(
[p.view(-1) for n, p in self.model.named_parameters()]) - torch.cat(
[p.view(-1) for n, p in self.pretrained_model.named_parameters()])) ** 2).item()
dist_sum += dist
dist_num += 1
if iteration % self.accumulation_steps == 0:
if self.args.recadam:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
self.optimizer.step()
if self.args.recadam:
self.scheduler.step()
self.model.zero_grad()
loss_list = [np.mean(loss_list)]
if self.args.logging_Euclid_dist:
# pbar.set_description("(Epoch {}) LOSS: {:.6f} Euclid dist: {:.6f} LR: {:.6f}".format(epoch_i, np.mean(loss_list), dist_sum / dist_num, self.scheduler.get_last_lr()[0]))
pbar.set_description("(Epoch {}) LOSS: {:.6f} Euclid dist: {:.6f}".format(epoch_i, np.mean(loss_list), dist_sum / dist_num))
else:
pbar.set_description("(Epoch {}) LOSS: {:.6f} LearningRate: {:.10f}".format(epoch_i, np.mean(loss_list), self.optimizer.learning_rate))
                if iteration % self.args.save_interval == 0:
self.save_model(iteration)
def shorten_sent(self, sent):
split_sent = sent.split()
if len(split_sent) > 400:
sent = ' '.join(split_sent[:400])
return sent
def tokenize(self, sents):
tokenized_text = [self.tokenizer.encode(sent, add_special_tokens=False) for sent in sents]
return tokenized_text
def save_model(self, iter_num):
print("saving model")
        os.makedirs('DAPT_save', exist_ok=True)  # make sure the checkpoint directory exists
        saved_path = os.path.join('DAPT_save', '{}_{}.chkpt'.format(self.domain, iter_num))
torch.save(self.model, saved_path)
if __name__ == "__main__":
# configuration
parser = argparse.ArgumentParser()
parser.add_argument('-visible_gpu', default='1', type=str)
parser.add_argument('-bsz', type=int, default=4, help="batch size")
parser.add_argument('-path', type=str, default="", help="data path")
parser.add_argument('-epoch', type=int, default=10, help="epoch size")
parser.add_argument('-mask_prob', type=float, default=0.15, help="mask probability")
parser.add_argument('-dm', type=str, default="", help="domain name")
parser.add_argument('-random_seed', type=int, default=0)
parser.add_argument('-save_interval', default=10000, type=int)
# optimizer configuration
parser.add_argument('-lr', default=0.05, type=float)
parser.add_argument('-optim', default='adam', type=str)
parser.add_argument('-max_grad_norm', default=0, type=float)
parser.add_argument('-beta1', default=0.9, type=float)
parser.add_argument('-beta2', default=0.998, type=float)
parser.add_argument('-warmup_steps', default=10000, type=int)
parser.add_argument('-decay_method', default='noam', type=str)
parser.add_argument('-enc_hidden_size', default=768, type=int)
parser.add_argument('-clip', type=float, default=1.0, help="gradient clip")
parser.add_argument('-accum_step', type=int, default=10, help="accumulation steps")
parser.add_argument('-train_from', default='', type=str)
# using RecAdam
parser.add_argument("-adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument('-recadam', default=False, action='store_true')
parser.add_argument("-weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("-anneal_w", type=float, default=1.0, help="Weight for the annealing function in RecAdam. Default 1.0.")
parser.add_argument("-anneal_fun", type=str, default='sigmoid', choices=["sigmoid", "linear", 'constant'], help="the type of annealing function in RecAdam. Default sigmoid")
parser.add_argument("-anneal_t0", type=int, default=1000, help="t0 for the annealing function in RecAdam.")
parser.add_argument("-anneal_k", type=float, default=0.1, help="k for the annealing function in RecAdam.")
parser.add_argument("-pretrain_cof", type=float, default=5000.0, help="Coefficient of the quadratic penalty in RecAdam. Default 5000.0.")
parser.add_argument("-logging_Euclid_dist", action="store_true", help="Whether to log the Euclidean distance between the pretrained model and fine-tuning model")
parser.add_argument("-max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("-model_type", type=str, default="layers")
args = parser.parse_args()
# set random seed
random.seed(args.random_seed)
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
torch.backends.cudnn.deterministic = True
# set gpu
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpu
print("Loading datasets ...")
dataset = CorpusDataset(args.path)
dataloader = DataLoader(dataset=dataset, batch_size=args.bsz, shuffle=True)
if args.train_from:
model = torch.load(args.train_from, map_location='cpu')
else:
model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
model.cuda()
tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
if args.recadam:
pretrained_model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
pretrained_model.cuda()
else:
pretrained_model = None
bart_lm_trainer = BartLMTrainer(model, dataloader, tokenizer, args, pretrained_model)
bart_lm_trainer.train()
| 44.165414
| 190
| 0.636279
|
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BartForConditionalGeneration, BartTokenizer, get_linear_schedule_with_warmup
from others.logging import logger
from others.utils import pad_sents, get_mask
from others.optimizer import build_optim
from tqdm import tqdm
import numpy as np
import argparse
import random
import os
from nltk.tokenize import sent_tokenize
def text_infilling(sent, mask_probability=0.05, lamda=3):
sent = sent.split()
length = len(sent)
mask_indices = (np.random.uniform(0, 1, length) < mask_probability) * 1
span_list = np.random.poisson(lamda, length)
nonzero_idx = np.nonzero(mask_indices)[0]
for item in nonzero_idx:
span = min(span_list[item], 5)
for i in range(span):
if item+i >= length:
continue
mask_indices[item+i] = 1
for i in range(length):
if mask_indices[i] == 1:
sent[i] = '<mask>'
final_sent = []
mask_flag = 0
for word in sent:
if word != '<mask>':
mask_flag = 0
final_sent.append(word)
else:
if mask_flag == 0:
final_sent.append(word)
mask_flag = 1
return final_sent
def sent_permutation(sent):
splits = sent_tokenize(sent)
random.shuffle(splits)
return " ".join(splits)
def add_noise(sents, mask_probability):
noisy_sent_list = []
for sent in sents:
noisy_sent = sent_permutation(sent)
noisy_sent = text_infilling(noisy_sent, mask_probability)
noisy_sent = " ".join(noisy_sent)
noisy_sent_list.append(noisy_sent)
return noisy_sent_list
class CorpusDataset(Dataset):
def __init__(self, data_path, denoising_flag=False):
self.data = []
with open(data_path, "r", ) as f:
for i, line in enumerate(f):
line = line.strip()
if denoising_flag:
line = "denoising: " + line
self.data.append(line)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class BartLMTrainer(object):
def __init__(self, model, dataloader, tokenizer, args, pretrained_model=None):
self.args = args
self.model = model
self.pretrained_model = pretrained_model
self.optimizer = build_optim(args, model, None, pretrained_model)
self.dataloader = dataloader
self.tokenizer = tokenizer
self.epoch = args.epoch
self.mask_probability = args.mask_prob
self.accumulation_steps = args.accum_step
self.clip = args.clip
self.domain = args.dm
self.path = args.path
if args.recadam:
if args.max_steps > 0:
t_total = args.max_steps
self.epoch = args.max_steps // (len(self.dataloader) // self.accumulation_steps) + 1
else:
t_total = len(self.dataloader) // self.accumulation_steps * self.epoch
self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
def train(self):
print('Start finetuning BART language model')
iteration = 0
for epoch_i in range(self.epoch):
self.model.train()
if self.pretrained_model is not None:
self.pretrained_model.eval()
print('[ Epoch : {}]'.format(epoch_i))
loss_list = []
dist_sum, dist_num = 0.0, 0
pbar = tqdm(self.dataloader, total=len(self.dataloader))
for sents in pbar:
sents = [self.shorten_sent(sent) for sent in sents]
iteration += 1
tokenized_sents = self.tokenize(sents)
decoder_ids = [[self.tokenizer.bos_token_id] + item for item in tokenized_sents]
label_ids = [item + [self.tokenizer.eos_token_id] for item in tokenized_sents]
noisy_text = add_noise(sents, self.mask_probability)
inputs_ids = self.tokenize(noisy_text)
mask = torch.tensor(get_mask(inputs_ids, max_len=512)).cuda()
inputs_ids = torch.tensor(pad_sents(inputs_ids, pad_token=self.tokenizer.pad_token_id, max_len=512)[0]).cuda()
decoder_ids = torch.tensor(pad_sents(decoder_ids, pad_token=self.tokenizer.pad_token_id, max_len=512)[0]).cuda()
label_ids = torch.tensor(pad_sents(label_ids, pad_token=-100, max_len=512)[0]).cuda()
loss = self.model(input_ids=inputs_ids, attention_mask=mask, decoder_input_ids=decoder_ids, labels=label_ids)[0]
loss_list.append(loss.item())
loss = loss / self.accumulation_steps
loss.backward()
if self.args.logging_Euclid_dist:
dist = torch.sum(torch.abs(torch.cat(
[p.view(-1) for n, p in self.model.named_parameters()]) - torch.cat(
[p.view(-1) for n, p in self.pretrained_model.named_parameters()])) ** 2).item()
dist_sum += dist
dist_num += 1
if iteration % self.accumulation_steps == 0:
if self.args.recadam:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
self.optimizer.step()
if self.args.recadam:
self.scheduler.step()
self.model.zero_grad()
loss_list = [np.mean(loss_list)]
if self.args.logging_Euclid_dist:
pbar.set_description("(Epoch {}) LOSS: {:.6f} Euclid dist: {:.6f}".format(epoch_i, np.mean(loss_list), dist_sum / dist_num))
else:
pbar.set_description("(Epoch {}) LOSS: {:.6f} LearningRate: {:.10f}".format(epoch_i, np.mean(loss_list), self.optimizer.learning_rate))
                if iteration % self.args.save_interval == 0:
self.save_model(iteration)
def shorten_sent(self, sent):
split_sent = sent.split()
if len(split_sent) > 400:
sent = ' '.join(split_sent[:400])
return sent
def tokenize(self, sents):
tokenized_text = [self.tokenizer.encode(sent, add_special_tokens=False) for sent in sents]
return tokenized_text
def save_model(self, iter_num):
print("saving model")
        os.makedirs('DAPT_save', exist_ok=True)  # make sure the checkpoint directory exists
        saved_path = os.path.join('DAPT_save', '{}_{}.chkpt'.format(self.domain, iter_num))
torch.save(self.model, saved_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-visible_gpu', default='1', type=str)
parser.add_argument('-bsz', type=int, default=4, help="batch size")
parser.add_argument('-path', type=str, default="", help="data path")
parser.add_argument('-epoch', type=int, default=10, help="epoch size")
parser.add_argument('-mask_prob', type=float, default=0.15, help="mask probability")
parser.add_argument('-dm', type=str, default="", help="domain name")
parser.add_argument('-random_seed', type=int, default=0)
parser.add_argument('-save_interval', default=10000, type=int)
parser.add_argument('-lr', default=0.05, type=float)
parser.add_argument('-optim', default='adam', type=str)
parser.add_argument('-max_grad_norm', default=0, type=float)
parser.add_argument('-beta1', default=0.9, type=float)
parser.add_argument('-beta2', default=0.998, type=float)
parser.add_argument('-warmup_steps', default=10000, type=int)
parser.add_argument('-decay_method', default='noam', type=str)
parser.add_argument('-enc_hidden_size', default=768, type=int)
parser.add_argument('-clip', type=float, default=1.0, help="gradient clip")
parser.add_argument('-accum_step', type=int, default=10, help="accumulation steps")
parser.add_argument('-train_from', default='', type=str)
parser.add_argument("-adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument('-recadam', default=False, action='store_true')
parser.add_argument("-weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("-anneal_w", type=float, default=1.0, help="Weight for the annealing function in RecAdam. Default 1.0.")
parser.add_argument("-anneal_fun", type=str, default='sigmoid', choices=["sigmoid", "linear", 'constant'], help="the type of annealing function in RecAdam. Default sigmoid")
parser.add_argument("-anneal_t0", type=int, default=1000, help="t0 for the annealing function in RecAdam.")
parser.add_argument("-anneal_k", type=float, default=0.1, help="k for the annealing function in RecAdam.")
parser.add_argument("-pretrain_cof", type=float, default=5000.0, help="Coefficient of the quadratic penalty in RecAdam. Default 5000.0.")
parser.add_argument("-logging_Euclid_dist", action="store_true", help="Whether to log the Euclidean distance between the pretrained model and fine-tuning model")
parser.add_argument("-max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("-model_type", type=str, default="layers")
args = parser.parse_args()
random.seed(args.random_seed)
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
torch.backends.cudnn.deterministic = True
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpu
print("Loading datasets ...")
dataset = CorpusDataset(args.path)
dataloader = DataLoader(dataset=dataset, batch_size=args.bsz, shuffle=True)
if args.train_from:
model = torch.load(args.train_from, map_location='cpu')
else:
model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
model.cuda()
tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
if args.recadam:
pretrained_model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
pretrained_model.cuda()
else:
pretrained_model = None
bart_lm_trainer = BartLMTrainer(model, dataloader, tokenizer, args, pretrained_model)
bart_lm_trainer.train()
| true
| true
|
790cf9193aabd5c8184321cab368b4311e4b7f54
| 786
|
py
|
Python
|
ao2j/lt1300/055/A.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
ao2j/lt1300/055/A.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
ao2j/lt1300/055/A.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
n = int(input())
arr = [[None for i in range(2*n+1)]for i in range(2*n+1)]
m = (2*n + 1) // 2
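# Editor's note (descriptive comment, not in the original): arr is a (2n+1) x (2n+1)
# grid and m is its centre index. The middle column is filled with 0..n..0 below, and
# the two loop nests then copy each value one step diagonally outwards, producing the
# number rhombus; remaining None cells become spaces and trailing blanks are stripped.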
for i in range(n):
arr[i][m] = i
arr[n][m] = n
for i in range(n+1,2*n+1):
arr[i][m] = arr[i-1][m]-1
for y in range(1,m+1):
for x in range(len(arr[0])):
if x < m:
arr[y][x] = arr[y-1][x+1]
if x > m:
arr[y][x] = arr[y-1][x-1]
for y in range(2*n-1,m,-1):
for x in range(len(arr[0])):
if x < m:
arr[y][x] = arr[y+1][x+1]
if x > m:
arr[y][x] = arr[y+1][x-1]
for y in range(len(arr)):
for x in range(len(arr[0])):
if arr[y][x] is None:
arr[y][x] = ' '
else:
arr[y][x] = str(arr[y][x])
out = [" ".join(xs).rstrip() for xs in arr]
print("\n".join(out))
| 21.833333
| 57
| 0.431298
|
n = int(input())
arr = [[None for i in range(2*n+1)]for i in range(2*n+1)]
m = (2*n + 1) // 2
for i in range(n):
arr[i][m] = i
arr[n][m] = n
for i in range(n+1,2*n+1):
arr[i][m] = arr[i-1][m]-1
for y in range(1,m+1):
for x in range(len(arr[0])):
if x < m:
arr[y][x] = arr[y-1][x+1]
if x > m:
arr[y][x] = arr[y-1][x-1]
for y in range(2*n-1,m,-1):
for x in range(len(arr[0])):
if x < m:
arr[y][x] = arr[y+1][x+1]
if x > m:
arr[y][x] = arr[y+1][x-1]
for y in range(len(arr)):
for x in range(len(arr[0])):
if arr[y][x] is None:
arr[y][x] = ' '
else:
arr[y][x] = str(arr[y][x])
out = [" ".join(xs).rstrip() for xs in arr]
print("\n".join(out))
| true
| true
|
790cf9506ce83c39688251073c3a5cd139466739
| 3,278
|
py
|
Python
|
pyRoutines/angle_transformation.py
|
aasensio/hazel
|
899c8461324061bacc14da7165b9ac7eed35c96b
|
[
"MIT"
] | 6
|
2016-01-11T05:03:00.000Z
|
2018-08-31T11:13:24.000Z
|
pyRoutines/angle_transformation.py
|
aasensio/hazel
|
899c8461324061bacc14da7165b9ac7eed35c96b
|
[
"MIT"
] | 12
|
2017-04-22T16:10:43.000Z
|
2021-01-11T14:03:59.000Z
|
pyRoutines/angle_transformation.py
|
aasensio/hazel
|
899c8461324061bacc14da7165b9ac7eed35c96b
|
[
"MIT"
] | 4
|
2016-02-25T19:35:07.000Z
|
2018-10-01T17:12:52.000Z
|
# cdiazbas@iac.es
import numpy as np
# Return the angles in the plane of the sky given angles with respect
# to the vertical for observations on the limb (in degrees!)
def absolute_to_sky(thetaB, chiB):
thetaB = np.deg2rad(thetaB)
chiB = np.deg2rad(chiB)
t1 = np.sin(thetaB) * np.sin(chiB)
t2 = -np.cos(thetaB)
t3 = np.sin(thetaB) * np.cos(chiB)
thetaSky = np.arccos(t3)
sinthSky = np.sqrt(1.e0 - t3**2)
sinChiSky = t1 / sinthSky
cosChiSky = t2 / sinthSky
# Test for the quadrant
chiSky_preliminary = np.arccos(cosChiSky)
if (np.sign(sinChiSky) > 0.e0):
chiSky = chiSky_preliminary
else:
chiSky = -chiSky_preliminary
return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]
# Return the angles in the vertical system given angles in the
# plane of the sky for observations on the limb (in degrees!)
def sky_to_absolute(thetaSky, chiSky):
thetaSky = np.deg2rad(thetaSky)
chiSky = np.deg2rad(chiSky)
t1 = np.sin(thetaSky) * np.sin(chiSky)
t2 = np.cos(thetaSky)
t3 = -np.sin(thetaSky) * np.cos(chiSky)
thetaB = np.arccos(t3)
sinthB = np.sqrt(1.e0 - t3**2)
sinChiB = t1 / sinthB
cosChiB = t2 / sinthB
# Test for the quadrant
chiB_preliminary = np.arccos(cosChiB)
if (np.sign(sinChiB) > 0.e0):
chiB = chiB_preliminary
else:
chiB = -chiB_preliminary
return [np.rad2deg(thetaB), np.rad2deg(chiB)]
# Return the angles in the plane of the sky given angles with respect
# to the vertical for observations at angle theta (in degrees!)
def absolute_to_sky_general(theta, thetaB, chiB):
theta = np.deg2rad(theta)
thetaB = np.deg2rad(thetaB)
chiB = np.deg2rad(chiB)
cosThetaSky = np.cos(theta) * np.cos(thetaB) + \
np.sin(theta) * np.sin(thetaB) * np.cos(chiB)
sinThetaSky = np.sqrt(1.e0 - cosThetaSky**2)
thetaSky = np.arccos(cosThetaSky)
cosChiSky = (np.cos(theta) * np.sin(thetaB) * np.cos(chiB) -
np.cos(thetaB) * np.sin(theta)) / sinThetaSky
sinChiSky = (np.sin(thetaB) * np.sin(chiB)) / sinThetaSky
# Test for the quadrant
chiSky_preliminary = np.arccos(cosChiSky)
if (np.sign(sinChiSky) > 0.e0):
chiSky = chiSky_preliminary
else:
chiSky = -chiSky_preliminary
return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]
# Return the angles in the vertical (absolute) system given angles in the
# plane of the sky for observations at angle theta (in degrees!)
def sky_to_absolute_general(theta, thetaSky, chiSky):
theta = np.deg2rad(theta)
thetaSky = np.deg2rad(thetaSky)
chiSky = np.deg2rad(chiSky)
cosThetaB = np.cos(theta) * np.cos(thetaSky) - \
np.sin(theta) * np.sin(thetaSky) * np.cos(chiSky)
sinThetaB = np.sqrt(1.e0 - cosThetaB**2)
thetaB = np.arccos(cosThetaB)
cosChiB = (np.cos(theta) * np.sin(thetaSky) * np.cos(chiSky) +
np.cos(thetaSky) * np.sin(theta)) / sinThetaB
sinChiB = (np.sin(thetaSky) * np.sin(chiSky)) / sinThetaB
# Test for the quadrant
chiB_preliminary = np.arccos(cosChiB)
if (np.sign(sinChiB) > 0.e0):
chiB = chiB_preliminary
else:
chiB = -chiB_preliminary
return [np.rad2deg(thetaB), np.rad2deg(chiB)]
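# Editor's note (illustrative, not part of the original file): a quick consistency
# sketch -- converting vertical-frame angles to the plane of the sky and back at the
# same observing angle theta should recover the input up to floating-point error.
# The angle values are made up.
def _roundtrip_example():
    theta, thetaB, chiB = 35.0, 60.0, 20.0
    thetaSky, chiSky = absolute_to_sky_general(theta, thetaB, chiB)
    thetaB2, chiB2 = sky_to_absolute_general(theta, thetaSky, chiSky)
    return np.allclose([thetaB2, chiB2], [thetaB, chiB])   # expected: True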
if __name__ == '__main__':
pass
| 28.754386
| 69
| 0.655583
|
import numpy as np
def absolute_to_sky(thetaB, chiB):
thetaB = np.deg2rad(thetaB)
chiB = np.deg2rad(chiB)
t1 = np.sin(thetaB) * np.sin(chiB)
t2 = -np.cos(thetaB)
t3 = np.sin(thetaB) * np.cos(chiB)
thetaSky = np.arccos(t3)
sinthSky = np.sqrt(1.e0 - t3**2)
sinChiSky = t1 / sinthSky
cosChiSky = t2 / sinthSky
chiSky_preliminary = np.arccos(cosChiSky)
if (np.sign(sinChiSky) > 0.e0):
chiSky = chiSky_preliminary
else:
chiSky = -chiSky_preliminary
return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]
def sky_to_absolute(thetaSky, chiSky):
thetaSky = np.deg2rad(thetaSky)
chiSky = np.deg2rad(chiSky)
t1 = np.sin(thetaSky) * np.sin(chiSky)
t2 = np.cos(thetaSky)
t3 = -np.sin(thetaSky) * np.cos(chiSky)
thetaB = np.arccos(t3)
sinthB = np.sqrt(1.e0 - t3**2)
sinChiB = t1 / sinthB
cosChiB = t2 / sinthB
chiB_preliminary = np.arccos(cosChiB)
if (np.sign(sinChiB) > 0.e0):
chiB = chiB_preliminary
else:
chiB = -chiB_preliminary
return [np.rad2deg(thetaB), np.rad2deg(chiB)]
def absolute_to_sky_general(theta, thetaB, chiB):
theta = np.deg2rad(theta)
thetaB = np.deg2rad(thetaB)
chiB = np.deg2rad(chiB)
cosThetaSky = np.cos(theta) * np.cos(thetaB) + \
np.sin(theta) * np.sin(thetaB) * np.cos(chiB)
sinThetaSky = np.sqrt(1.e0 - cosThetaSky**2)
thetaSky = np.arccos(cosThetaSky)
cosChiSky = (np.cos(theta) * np.sin(thetaB) * np.cos(chiB) -
np.cos(thetaB) * np.sin(theta)) / sinThetaSky
sinChiSky = (np.sin(thetaB) * np.sin(chiB)) / sinThetaSky
chiSky_preliminary = np.arccos(cosChiSky)
if (np.sign(sinChiSky) > 0.e0):
chiSky = chiSky_preliminary
else:
chiSky = -chiSky_preliminary
return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]
def sky_to_absolute_general(theta, thetaSky, chiSky):
theta = np.deg2rad(theta)
thetaSky = np.deg2rad(thetaSky)
chiSky = np.deg2rad(chiSky)
cosThetaB = np.cos(theta) * np.cos(thetaSky) - \
np.sin(theta) * np.sin(thetaSky) * np.cos(chiSky)
sinThetaB = np.sqrt(1.e0 - cosThetaB**2)
thetaB = np.arccos(cosThetaB)
cosChiB = (np.cos(theta) * np.sin(thetaSky) * np.cos(chiSky) +
np.cos(thetaSky) * np.sin(theta)) / sinThetaB
sinChiB = (np.sin(thetaSky) * np.sin(chiSky)) / sinThetaB
chiB_preliminary = np.arccos(cosChiB)
if (np.sign(sinChiB) > 0.e0):
chiB = chiB_preliminary
else:
chiB = -chiB_preliminary
return [np.rad2deg(thetaB), np.rad2deg(chiB)]
if __name__ == '__main__':
pass
| true
| true
|
790cf99896e3378cf4d5dfe9ee9b7a79e978e407
| 520
|
py
|
Python
|
14.py
|
profamaroca/Lista3-1
|
4a90d9d5293cd823b0da8dbb618668a6e4455910
|
[
"Unlicense"
] | null | null | null |
14.py
|
profamaroca/Lista3-1
|
4a90d9d5293cd823b0da8dbb618668a6e4455910
|
[
"Unlicense"
] | null | null | null |
14.py
|
profamaroca/Lista3-1
|
4a90d9d5293cd823b0da8dbb618668a6e4455910
|
[
"Unlicense"
] | null | null | null |
import math
numero_turmas = int(input('Qual o número de turmas? '))
for _ in range(numero_turmas):
numero_alunos = int(input('Qual o número de alunos? '))
soma = 0
menor = math.inf
maior = 0
for i in range(numero_alunos):
nota = float(input(f'Qual a nota do aluno {i + 1}? '))
soma += nota
if menor > nota:
menor = nota
if maior < nota:
maior = nota
print(f'A média é {soma / numero_alunos}. A menor nota é {menor}, e a maior é {maior}.')
| 30.588235
| 92
| 0.578846
|
import math
numero_turmas = int(input('Qual o número de turmas? '))
for _ in range(numero_turmas):
numero_alunos = int(input('Qual o número de alunos? '))
soma = 0
menor = math.inf
maior = 0
for i in range(numero_alunos):
nota = float(input(f'Qual a nota do aluno {i + 1}? '))
soma += nota
if menor > nota:
menor = nota
if maior < nota:
maior = nota
print(f'A média é {soma / numero_alunos}. A menor nota é {menor}, e a maior é {maior}.')
| true
| true
|
790cf9adae4bc44bf1f478eb7dbd3d59a8f174ea
| 1,346
|
py
|
Python
|
saleor/graphql/shop/schema.py
|
fooliscool/saleor
|
9502467c0e745eb8afdbfa373d634814d133e864
|
[
"CC-BY-4.0"
] | 1
|
2020-11-13T14:25:51.000Z
|
2020-11-13T14:25:51.000Z
|
saleor/graphql/shop/schema.py
|
fooliscool/saleor
|
9502467c0e745eb8afdbfa373d634814d133e864
|
[
"CC-BY-4.0"
] | 51
|
2019-12-06T08:06:07.000Z
|
2021-05-06T02:10:50.000Z
|
saleor/graphql/shop/schema.py
|
jnbao2020/saleor
|
e1773b42a8ecd78114cf4485d553b09469b5f1f8
|
[
"CC-BY-4.0"
] | null | null | null |
import graphene
from ..translations.mutations import ShopSettingsTranslate
from .mutations import (
AuthorizationKeyAdd,
AuthorizationKeyDelete,
HomepageCollectionUpdate,
ShopAddressUpdate,
ShopDomainUpdate,
ShopFetchTaxRates,
ShopSettingsUpdate,
StaffNotificationRecipientCreate,
StaffNotificationRecipientDelete,
StaffNotificationRecipientUpdate,
)
from .types import Shop
class ShopQueries(graphene.ObjectType):
shop = graphene.Field(Shop, description="Return information about the shop.")
def resolve_shop(self, _info):
return Shop()
class ShopMutations(graphene.ObjectType):
authorization_key_add = AuthorizationKeyAdd.Field()
authorization_key_delete = AuthorizationKeyDelete.Field()
staff_notification_recipient_create = StaffNotificationRecipientCreate.Field()
staff_notification_recipient_update = StaffNotificationRecipientUpdate.Field()
staff_notification_recipient_delete = StaffNotificationRecipientDelete.Field()
homepage_collection_update = HomepageCollectionUpdate.Field()
shop_domain_update = ShopDomainUpdate.Field()
shop_settings_update = ShopSettingsUpdate.Field()
shop_fetch_tax_rates = ShopFetchTaxRates.Field()
shop_settings_translate = ShopSettingsTranslate.Field()
shop_address_update = ShopAddressUpdate.Field()
| 33.65
| 82
| 0.801634
|
import graphene
from ..translations.mutations import ShopSettingsTranslate
from .mutations import (
AuthorizationKeyAdd,
AuthorizationKeyDelete,
HomepageCollectionUpdate,
ShopAddressUpdate,
ShopDomainUpdate,
ShopFetchTaxRates,
ShopSettingsUpdate,
StaffNotificationRecipientCreate,
StaffNotificationRecipientDelete,
StaffNotificationRecipientUpdate,
)
from .types import Shop
class ShopQueries(graphene.ObjectType):
shop = graphene.Field(Shop, description="Return information about the shop.")
def resolve_shop(self, _info):
return Shop()
class ShopMutations(graphene.ObjectType):
authorization_key_add = AuthorizationKeyAdd.Field()
authorization_key_delete = AuthorizationKeyDelete.Field()
staff_notification_recipient_create = StaffNotificationRecipientCreate.Field()
staff_notification_recipient_update = StaffNotificationRecipientUpdate.Field()
staff_notification_recipient_delete = StaffNotificationRecipientDelete.Field()
homepage_collection_update = HomepageCollectionUpdate.Field()
shop_domain_update = ShopDomainUpdate.Field()
shop_settings_update = ShopSettingsUpdate.Field()
shop_fetch_tax_rates = ShopFetchTaxRates.Field()
shop_settings_translate = ShopSettingsTranslate.Field()
shop_address_update = ShopAddressUpdate.Field()
| true
| true
|
790cfadbb88ba354a6a5c3ad91a35f82094de4d4
| 10,763
|
py
|
Python
|
generate_plabel_dark_zurich.py
|
qimw/UACDA
|
75d8d03786cba009f56cdb1efd2d6d5abe0c5f77
|
[
"MIT"
] | 7
|
2021-03-08T04:28:55.000Z
|
2021-04-29T04:55:11.000Z
|
generate_plabel_dark_zurich.py
|
qimw/UACDA
|
75d8d03786cba009f56cdb1efd2d6d5abe0c5f77
|
[
"MIT"
] | null | null | null |
generate_plabel_dark_zurich.py
|
qimw/UACDA
|
75d8d03786cba009f56cdb1efd2d6d5abe0c5f77
|
[
"MIT"
] | null | null | null |
import argparse
import scipy
from scipy import ndimage
import numpy as np
import sys
import re
from packaging import version
import torch
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
from model.deeplab import Res_Deeplab
from model.deeplab_multi import DeeplabMulti
from model.deeplab_vgg import DeeplabVGG
from dataset.dark_zurich_dataset import DarkZurichDataSet
import os
from PIL import Image
from utils.tool import fliplr
import matplotlib.pyplot as plt
import torch.nn as nn
import yaml
import imageio as iio
torch.backends.cudnn.benchmark=True
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
DATA_DIRECTORY = './data/Cityscapes/data'
DATA_LIST_PATH = './dataset/cityscapes_list/train.txt'
SAVE_PATH = './data/Dark_zurich/data/pseudo_ohl-1/test'
if not os.path.isdir('./data/Dark_zurich/data/pseudo_ohl-1/'):
os.makedirs('./data/Dark_zurich/data/pseudo_ohl-1/')
os.makedirs(SAVE_PATH)
IGNORE_LABEL = 255
NUM_CLASSES = 19
RESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth'
RESTORE_FROM_VGG = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_vgg-ac4ac9f6.pth'
RESTORE_FROM_ORC = 'http://vllab1.ucmerced.edu/~whung/adaptSeg/cityscapes_oracle-b7b9934.pth'
SET = 'train' # We generate pseudo label for training set
INPUT_SIZE = '800,512'
MODEL = 'DeeplabMulti'
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
      The parsed argument namespace.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice (DeeplabMulti/DeeplabVGG/Oracle).")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the Cityscapes dataset.")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--batchsize", type=int, default=4,
help="choose gpu device.")
parser.add_argument("--set", type=str, default=SET,
help="choose evaluation set.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of source images.")
return parser.parse_args()
def save_heatmap(output_name):
output, name = output_name
fig = plt.figure()
plt.axis('off')
heatmap = plt.imshow(output, cmap='viridis')
fig.colorbar(heatmap)
fig.savefig('%s_heatmap.png' % (name.split('.jpg')[0]))
return
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
w, h = map(int, args.input_size.split(','))
config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')
with open(config_path, 'r') as stream:
        config = yaml.load(stream, Loader=yaml.FullLoader)
args.model = config['model']
print('ModelType:%s'%args.model)
print('NormType:%s'%config['norm_style'])
gpu0 = args.gpu
batchsize = args.batchsize
model_name = os.path.basename( os.path.dirname(args.restore_from) )
#args.save += model_name
if not os.path.exists(args.save):
os.makedirs(args.save)
confidence_path = os.path.join(args.save, 'submit/confidence')
label_path = os.path.join(args.save, 'submit/labelTrainIds')
label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')
for path in [confidence_path, label_path, label_invalid_path]:
if not os.path.exists(path):
os.makedirs(path)
if args.model == 'DeepLab':
model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])
elif args.model == 'Oracle':
model = Res_Deeplab(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_ORC
elif args.model == 'DeeplabVGG':
model = DeeplabVGG(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_VGG
if args.restore_from[:4] == 'http' :
saved_state_dict = model_zoo.load_url(args.restore_from)
else:
saved_state_dict = torch.load(args.restore_from)
try:
model.load_state_dict(saved_state_dict)
    except Exception:  # the checkpoint may come from a DataParallel model; retry with the wrapper
model = torch.nn.DataParallel(model)
model.load_state_dict(saved_state_dict)
model.eval()
model.cuda(gpu0)
testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 1.25
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(1080, 1920), mode='bilinear')
sm = torch.nn.Softmax(dim = 1)
log_sm = torch.nn.LogSoftmax(dim = 1)
kl_distance = nn.KLDivLoss( reduction = 'none')
prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]
prior = torch.from_numpy(prior)
for index, img_data in enumerate(zip(testloader, testloader2) ):
batch, batch2 = img_data
image, _, name = batch
image2, _, name2 = batch2
inputs = image.cuda()
inputs2 = image2.cuda()
print('\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')
if args.model == 'DeepLab':
with torch.no_grad():
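                # Test-time augmentation: the softmax of the fused two-head
                # prediction (0.5 * output1 + output2) is accumulated over the
                # original image, its horizontal flip, and a 1.25x-scaled copy
                # plus its flip, then averaged; the per-pixel KL divergence
                # between the two heads is kept as an uncertainty heatmap.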
output1, output2 = model(inputs)
output_batch = interp(sm(0.5* output1 + output2))
heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)
output1, output2 = model(fliplr(inputs))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
del output1, output2, inputs
output1, output2 = model(inputs2)
output_batch += interp(sm(0.5* output1 + output2))
output1, output2 = model(fliplr(inputs2))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
del output1, output2, inputs2
ratio = 0.95
output_batch = output_batch.cpu() / 4
# output_batch = output_batch *(ratio + (1 - ratio) * prior)
output_batch = output_batch.data.numpy()
heatmap_batch = heatmap_batch.cpu().data.numpy()
elif args.model == 'DeeplabVGG' or args.model == 'Oracle':
output_batch = model(Variable(image).cuda())
output_batch = interp(output_batch).cpu().data.numpy()
output_batch = output_batch.transpose(0,2,3,1)
score_batch = np.max(output_batch, axis=3)
output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
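        # Pixels whose maximum softmax score falls below this confidence
        # threshold are later relabelled as invalid (255) in the
        # labelTrainIds_invalid output.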
threshold = 0.3274
for i in range(output_batch.shape[0]):
output_single = output_batch[i,:,:]
output_col = colorize_mask(output_single)
output = Image.fromarray(output_single)
name_tmp = name[i].split('/')[-1]
dir_name = name[i].split('/')[-2]
save_path = args.save + '/' + dir_name
if not os.path.isdir(save_path):
os.mkdir(save_path)
output.save('%s/%s' % (save_path, name_tmp))
print('%s/%s' % (save_path, name_tmp))
output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))
# heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])
# fig = plt.figure()
# plt.axis('off')
# heatmap = plt.imshow(heatmap_tmp, cmap='viridis')
# fig.colorbar(heatmap)
# fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))
if args.set == 'test' or args.set == 'val':
# label
output.save('%s/%s' % (label_path, name_tmp))
# label invalid
output_single[score_batch[i, :, :] < threshold] = 255
output = Image.fromarray(output_single)
output.save('%s/%s' % (label_invalid_path, name_tmp))
                # confidence
confidence = score_batch[i, :, :] * 65535
confidence = np.asarray(confidence, dtype=np.uint16)
print(confidence.min(), confidence.max())
iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)
return args.save
if __name__ == '__main__':
with torch.no_grad():
save_path = main()
#os.system('python compute_iou.py ./data/Cityscapes/data/gtFine/train %s'%save_path)
| 42.207843
| 231
| 0.632816
|
import argparse
import scipy
from scipy import ndimage
import numpy as np
import sys
import re
from packaging import version
import torch
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
from model.deeplab import Res_Deeplab
from model.deeplab_multi import DeeplabMulti
from model.deeplab_vgg import DeeplabVGG
from dataset.dark_zurich_dataset import DarkZurichDataSet
import os
from PIL import Image
from utils.tool import fliplr
import matplotlib.pyplot as plt
import torch.nn as nn
import yaml
import imageio as iio
torch.backends.cudnn.benchmark=True
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
DATA_DIRECTORY = './data/Cityscapes/data'
DATA_LIST_PATH = './dataset/cityscapes_list/train.txt'
SAVE_PATH = './data/Dark_zurich/data/pseudo_ohl-1/test'
if not os.path.isdir('./data/Dark_zurich/data/pseudo_ohl-1/'):
os.makedirs('./data/Dark_zurich/data/pseudo_ohl-1/')
os.makedirs(SAVE_PATH)
IGNORE_LABEL = 255
NUM_CLASSES = 19
RESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth'
RESTORE_FROM_VGG = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_vgg-ac4ac9f6.pth'
RESTORE_FROM_ORC = 'http://vllab1.ucmerced.edu/~whung/adaptSeg/cityscapes_oracle-b7b9934.pth'
SET = 'train'
INPUT_SIZE = '800,512'
MODEL = 'DeeplabMulti'
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def get_arguments():
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice (DeeplabMulti/DeeplabVGG/Oracle).")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the Cityscapes dataset.")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--batchsize", type=int, default=4,
help="choose gpu device.")
parser.add_argument("--set", type=str, default=SET,
help="choose evaluation set.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of source images.")
return parser.parse_args()
def save_heatmap(output_name):
output, name = output_name
fig = plt.figure()
plt.axis('off')
heatmap = plt.imshow(output, cmap='viridis')
fig.colorbar(heatmap)
fig.savefig('%s_heatmap.png' % (name.split('.jpg')[0]))
return
def main():
args = get_arguments()
w, h = map(int, args.input_size.split(','))
config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')
with open(config_path, 'r') as stream:
config = yaml.load(stream)
args.model = config['model']
print('ModelType:%s'%args.model)
print('NormType:%s'%config['norm_style'])
gpu0 = args.gpu
batchsize = args.batchsize
model_name = os.path.basename( os.path.dirname(args.restore_from) )
if not os.path.exists(args.save):
os.makedirs(args.save)
confidence_path = os.path.join(args.save, 'submit/confidence')
label_path = os.path.join(args.save, 'submit/labelTrainIds')
label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')
for path in [confidence_path, label_path, label_invalid_path]:
if not os.path.exists(path):
os.makedirs(path)
if args.model == 'DeepLab':
model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])
elif args.model == 'Oracle':
model = Res_Deeplab(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_ORC
elif args.model == 'DeeplabVGG':
model = DeeplabVGG(num_classes=args.num_classes)
if args.restore_from == RESTORE_FROM:
args.restore_from = RESTORE_FROM_VGG
if args.restore_from[:4] == 'http' :
saved_state_dict = model_zoo.load_url(args.restore_from)
else:
saved_state_dict = torch.load(args.restore_from)
try:
model.load_state_dict(saved_state_dict)
except:
model = torch.nn.DataParallel(model)
model.load_state_dict(saved_state_dict)
model.eval()
model.cuda(gpu0)
testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
scale = 1.25
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(1080, 1920), mode='bilinear')
sm = torch.nn.Softmax(dim = 1)
log_sm = torch.nn.LogSoftmax(dim = 1)
kl_distance = nn.KLDivLoss( reduction = 'none')
prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]
prior = torch.from_numpy(prior)
for index, img_data in enumerate(zip(testloader, testloader2) ):
batch, batch2 = img_data
image, _, name = batch
image2, _, name2 = batch2
inputs = image.cuda()
inputs2 = image2.cuda()
print('\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')
if args.model == 'DeepLab':
with torch.no_grad():
output1, output2 = model(inputs)
output_batch = interp(sm(0.5* output1 + output2))
heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)
output1, output2 = model(fliplr(inputs))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
del output1, output2, inputs
output1, output2 = model(inputs2)
output_batch += interp(sm(0.5* output1 + output2))
output1, output2 = model(fliplr(inputs2))
output1, output2 = fliplr(output1), fliplr(output2)
output_batch += interp(sm(0.5 * output1 + output2))
del output1, output2, inputs2
ratio = 0.95
output_batch = output_batch.cpu() / 4
output_batch = output_batch.data.numpy()
heatmap_batch = heatmap_batch.cpu().data.numpy()
elif args.model == 'DeeplabVGG' or args.model == 'Oracle':
output_batch = model(Variable(image).cuda())
output_batch = interp(output_batch).cpu().data.numpy()
output_batch = output_batch.transpose(0,2,3,1)
score_batch = np.max(output_batch, axis=3)
output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
threshold = 0.3274
for i in range(output_batch.shape[0]):
output_single = output_batch[i,:,:]
output_col = colorize_mask(output_single)
output = Image.fromarray(output_single)
name_tmp = name[i].split('/')[-1]
dir_name = name[i].split('/')[-2]
save_path = args.save + '/' + dir_name
if not os.path.isdir(save_path):
os.mkdir(save_path)
output.save('%s/%s' % (save_path, name_tmp))
print('%s/%s' % (save_path, name_tmp))
output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))
if args.set == 'test' or args.set == 'val':
output.save('%s/%s' % (label_path, name_tmp))
output_single[score_batch[i, :, :] < threshold] = 255
output = Image.fromarray(output_single)
output.save('%s/%s' % (label_invalid_path, name_tmp))
confidence = score_batch[i, :, :] * 65535
confidence = np.asarray(confidence, dtype=np.uint16)
print(confidence.min(), confidence.max())
iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)
return args.save
if __name__ == '__main__':
with torch.no_grad():
save_path = main()
| true
| true
|
790cfc197cc05559af2afb829af887577346f90f
| 2,352
|
py
|
Python
|
model/classifier.py
|
haifangong/TNSC-classification-baseline
|
2fb8696699b44fbeb0512fd60deda792b464a958
|
[
"MIT"
] | 10
|
2020-07-31T14:24:26.000Z
|
2021-08-20T05:34:11.000Z
|
model/classifier.py
|
haifangong/TNSC-classification-baseline
|
2fb8696699b44fbeb0512fd60deda792b464a958
|
[
"MIT"
] | null | null | null |
model/classifier.py
|
haifangong/TNSC-classification-baseline
|
2fb8696699b44fbeb0512fd60deda792b464a958
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
class SCNN(nn.Module):
def __init__(self, in_channels, n_classes):
super(SCNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(16, out_channels=32, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(32, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.fc = nn.Sequential(
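            # 43264 = 64 * 26 * 26: the flattened conv3 output, which assumes
            # 224x224 inputs (26x26 spatial size with 64 channels after the
            # three conv3x3 + maxpool2 stages above).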
nn.Linear(43264, 4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(4096, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(512, n_classes),
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class Classifier(nn.Module):
def __init__(self, in_channels, n_classes):
super(Classifier, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_channels, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(1024, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(512, n_classes),
# nn.Softmax(dim=1)
)
self._init_weight()
def forward(self, x):
x = self.avg_pool(x)
x = torch.flatten(x, 1)
out = self.fc(x)
return out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
| 30.153846
| 67
| 0.519983
|
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
class SCNN(nn.Module):
def __init__(self, in_channels, n_classes):
super(SCNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(16, out_channels=32, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(32, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.fc = nn.Sequential(
nn.Linear(43264, 4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(4096, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(512, n_classes),
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class Classifier(nn.Module):
def __init__(self, in_channels, n_classes):
super(Classifier, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_channels, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(1024, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(512, n_classes),
)
self._init_weight()
def forward(self, x):
x = self.avg_pool(x)
x = torch.flatten(x, 1)
out = self.fc(x)
return out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
| true
| true
|
790cfce881c85684705a873733f42fcdf6cb74cd
| 792
|
py
|
Python
|
app_util.py
|
Bhaskers-Blu-Org1/long-way-home-callforcode
|
81cc683f4b2e86f3d3afaafb8b2ced915707ea2b
|
[
"Apache-2.0"
] | 6
|
2019-07-29T06:16:35.000Z
|
2021-11-08T09:34:00.000Z
|
app_util.py
|
Bhaskers-Blu-Org1/long-way-home-callforcode
|
81cc683f4b2e86f3d3afaafb8b2ced915707ea2b
|
[
"Apache-2.0"
] | 15
|
2019-08-27T09:57:58.000Z
|
2022-02-26T10:52:55.000Z
|
app_util.py
|
IBM/long-way-home-callforcode
|
7a86266d33c67f84b6e471912a3710d7db0bec6f
|
[
"Apache-2.0"
] | 2
|
2019-11-02T08:54:00.000Z
|
2020-06-29T14:30:31.000Z
|
from flask import g
import logging
from datetime import datetime
import config
def get_logger(name):
# type: (str) -> logging.Logger
logging.basicConfig()
logger = logging.getLogger(name)
logger.setLevel(config.GLOBAL_LOGGING_LEVEL)
ch = logging.StreamHandler()
ch.setLevel(config.GLOBAL_LOGGING_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# logger.addHandler(ch)
return logger
logger = get_logger('util')
def get_db_client(conn_pool, *args, **kws):
logger.debug("Getting DB Connection")
if 'db' not in g:
logger.debug("Creating new DB connection")
g.db = conn_pool.get()
return g.db
def teardown_db(conn_pool):
db = g.pop('db', None)
if db is not None:
conn_pool.put(db)
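# A hypothetical wiring sketch (not part of the original module): with a Flask
# app and a connection pool in hand, these helpers are typically hooked up per
# request, e.g.
#     app.teardown_appcontext(lambda exc: teardown_db(conn_pool))
#     db = get_db_client(conn_pool)  # inside a request handler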
| 26.4
| 87
| 0.715909
|
from flask import g
import logging
from datetime import datetime
import config
def get_logger(name):
logging.basicConfig()
logger = logging.getLogger(name)
logger.setLevel(config.GLOBAL_LOGGING_LEVEL)
ch = logging.StreamHandler()
ch.setLevel(config.GLOBAL_LOGGING_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
return logger
logger = get_logger('util')
def get_db_client(conn_pool, *args, **kws):
logger.debug("Getting DB Connection")
if 'db' not in g:
logger.debug("Creating new DB connection")
g.db = conn_pool.get()
return g.db
def teardown_db(conn_pool):
db = g.pop('db', None)
if db is not None:
conn_pool.put(db)
| true
| true
|
790cfd31a2cc573ec974c74a888c2263a3fdae84
| 2,275
|
py
|
Python
|
aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 153
|
2016-12-23T20:59:03.000Z
|
2019-07-02T06:47:52.000Z
|
aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 2,466
|
2016-12-24T01:03:52.000Z
|
2019-07-04T13:41:08.000Z
|
aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2016-12-23T16:28:00.000Z
|
2019-07-01T15:55:20.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Change `db_dbnode.type` for base `Data` types.
The base Data types Bool, Float, Int and Str have been moved in the source code, which means that their
module path changes, which determines the plugin type string that is stored in the database.
The type string now will have a type string prefix that is unique to each sub type.
Revision ID: django_0009
Revises: django_0008
"""
from alembic import op
revision = 'django_0009'
down_revision = 'django_0008'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
op.execute(
"""
UPDATE db_dbnode SET type = 'data.bool.Bool.' WHERE type = 'data.base.Bool.';
UPDATE db_dbnode SET type = 'data.float.Float.' WHERE type = 'data.base.Float.';
UPDATE db_dbnode SET type = 'data.int.Int.' WHERE type = 'data.base.Int.';
UPDATE db_dbnode SET type = 'data.str.Str.' WHERE type = 'data.base.Str.';
UPDATE db_dbnode SET type = 'data.list.List.' WHERE type = 'data.base.List.';
"""
)
def downgrade():
"""Migrations for the downgrade."""
op.execute(
"""
UPDATE db_dbnode SET type = 'data.base.Bool.' WHERE type = 'data.bool.Bool.';
UPDATE db_dbnode SET type = 'data.base.Float.' WHERE type = 'data.float.Float.';
UPDATE db_dbnode SET type = 'data.base.Int.' WHERE type = 'data.int.Int.';
UPDATE db_dbnode SET type = 'data.base.Str.' WHERE type = 'data.str.Str.';
UPDATE db_dbnode SET type = 'data.base.List.' WHERE type = 'data.list.List.';
"""
)
| 42.924528
| 103
| 0.566154
| true
| true
|
|
790cfdb12440dec3a0e2fe3bf2b9299b4a29c812
| 7
|
py
|
Python
|
examples/py33-0012-uprefix1.py
|
jwilk-forks/python-grammar-changes
|
5cbc14e520fadfef8539760a4ffdbe14b9d02f39
|
[
"MIT"
] | 8
|
2020-11-21T22:39:41.000Z
|
2022-03-13T18:45:53.000Z
|
examples/py33-0012-uprefix1.py
|
jwilk-forks/python-grammar-changes
|
5cbc14e520fadfef8539760a4ffdbe14b9d02f39
|
[
"MIT"
] | 1
|
2021-12-10T10:45:38.000Z
|
2021-12-10T10:45:38.000Z
|
examples/py33-0012-uprefix1.py
|
jwilk-forks/python-grammar-changes
|
5cbc14e520fadfef8539760a4ffdbe14b9d02f39
|
[
"MIT"
] | 1
|
2022-02-07T11:16:38.000Z
|
2022-02-07T11:16:38.000Z
|
u"foo"
| 3.5
| 6
| 0.571429
| true
| true
|
|
790cfdf50546893f5f06d536c2cf418c85d0d312
| 12,326
|
py
|
Python
|
noxfile.py
|
texnofobix/python-genbadge
|
67ec6b5031a57a0b577bee5c1111437ef1037130
|
[
"BSD-3-Clause"
] | null | null | null |
noxfile.py
|
texnofobix/python-genbadge
|
67ec6b5031a57a0b577bee5c1111437ef1037130
|
[
"BSD-3-Clause"
] | null | null | null |
noxfile.py
|
texnofobix/python-genbadge
|
67ec6b5031a57a0b577bee5c1111437ef1037130
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import product
from json import dumps
import logging
import nox # noqa
from pathlib import Path # noqa
import sys
# add parent folder to python path so that we can import noxfile_utils.py
# note that you need to "pip install -r noxfile-requirements.txt" for this file to work.
sys.path.append(str(Path(__file__).parent / "ci_tools"))
from nox_utils import PY27, PY37, PY36, PY35, PY38, PY39, power_session, rm_folder, rm_file, PowerSession # noqa
pkg_name = "genbadge"
gh_org = "smarie"
gh_repo = "python-genbadge"
ENVS = {
PY39: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY27: {"coverage": False, "pkg_specs": {"pip": ">10"}},
PY35: {"coverage": False, "pkg_specs": {"pip": ">10"}},
PY36: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY38: {"coverage": False, "pkg_specs": {"pip": ">19"}},
# IMPORTANT: this should be last so that the folder docs/reports is not deleted afterwards
PY37: {"coverage": True, "pkg_specs": {"pip": ">19"}}, # , "pytest-html": "1.9.0"
}
# set the default activated sessions, minimal for CI
nox.options.sessions = ["tests", "flake8"] # , "docs", "gh_pages"
nox.options.reuse_existing_virtualenvs = True # this can be done using -r
# if platform.system() == "Windows": >> always use this for better control
nox.options.default_venv_backend = "conda"
# os.environ["NO_COLOR"] = "True" # nox.options.nocolor = True does not work
# nox.options.verbose = True
nox_logger = logging.getLogger("nox")
# nox_logger.setLevel(logging.INFO) NO !!!! this prevents the "verbose" nox flag from working !
class Folders:
root = Path(__file__).parent
ci_tools = root / "ci_tools"
runlogs = root / Path(nox.options.envdir or ".nox") / "_runlogs"
runlogs.mkdir(parents=True, exist_ok=True)
dist = root / "dist"
site = root / "site"
site_reports = site / "reports"
reports_root = root / "docs" / "reports"
test_reports = reports_root / "junit"
test_xml = test_reports / "junit.xml"
test_html = test_reports / "report.html"
test_badge = test_reports / "junit-badge.svg"
coverage_reports = reports_root / "coverage"
coverage_xml = coverage_reports / "coverage.xml"
coverage_intermediate_file = root / ".coverage"
coverage_badge = coverage_reports / "coverage-badge.svg"
flake8_reports = reports_root / "flake8"
flake8_intermediate_file = root / "flake8stats.txt"
flake8_badge = flake8_reports / "flake8-badge.svg"
@power_session(envs=ENVS, logsdir=Folders.runlogs)
def tests(session: PowerSession, coverage, pkg_specs):
"""Run the test suite, including test reports generation and coverage reports. """
# As soon as this runs, we delete the target site and coverage files to avoid reporting wrong coverage/etc.
rm_folder(Folders.site)
rm_folder(Folders.reports_root)
# delete the .coverage files if any (they are not supposed to be any, but just in case)
rm_file(Folders.coverage_intermediate_file)
rm_file(Folders.root / "coverage.xml")
# CI-only dependencies
# Did we receive a flag through positional arguments ? (nox -s tests -- <flag>)
# install_ci_deps = False
# if len(session.posargs) == 1:
# assert session.posargs[0] == "keyrings.alt"
# install_ci_deps = True
# elif len(session.posargs) > 1:
# raise ValueError("Only a single positional argument is accepted, received: %r" % session.posargs)
# uncomment and edit if you wish to uninstall something without deleting the whole env
# session.run2("pip uninstall pytest-asyncio --yes")
# install all requirements
# session.install_reqs(phase="pip", phase_reqs=("pip",), versions_dct=pkg_specs)
session.install_reqs(setup=True, install=True, tests=True, extras=("all",), versions_dct=pkg_specs)
# install CI-only dependencies
# if install_ci_deps:
# session.install2("keyrings.alt")
# list all (conda list alone does not work correctly on github actions)
# session.run2("conda list")
conda_prefix = Path(session.bin)
if conda_prefix.name == "bin":
conda_prefix = conda_prefix.parent
session.run2("conda list", env={"CONDA_PREFIX": str(conda_prefix), "CONDA_DEFAULT_ENV": session.get_session_id()})
# Fail if the assumed python version is not the actual one
session.run2("python ci_tools/check_python_version.py %s" % session.python)
# install self so that it is recognized by pytest
session.run2("pip install -e . --no-deps")
# check that it can be imported even from a different folder
session.run2(['python', '-c', '"import os; os.chdir(\'./docs/\'); import %s"' % pkg_name])
# finally run all tests
if not coverage:
# simple: pytest only
session.run2("python -m pytest --cache-clear -v %s/tests/" % pkg_name)
else:
# coverage + junit html reports + badge generation
session.install_reqs(phase="coverage", phase_reqs=["coverage", "pytest-html", "requests"],
versions_dct=pkg_specs)
# --coverage + junit html reports
session.run2("coverage run --source {pkg_name} "
"-m pytest --cache-clear --junitxml={test_xml} --html={test_html} -v {pkg_name}/tests/"
"".format(pkg_name=pkg_name, test_xml=Folders.test_xml, test_html=Folders.test_html))
session.run2("coverage report")
session.run2("coverage xml -o {covxml}".format(covxml=Folders.coverage_xml))
session.run2("coverage html -d {dst}".format(dst=Folders.coverage_reports))
# delete this intermediate file, it is not needed anymore
rm_file(Folders.coverage_intermediate_file)
# --generates the badge for the test results and fail build if less than x% tests pass
nox_logger.info("Generating badge for tests coverage")
# Use our own package to generate the badge
session.run2("genbadge tests -i %s -o %s -t 100" % (Folders.test_xml, Folders.test_badge))
session.run2("genbadge coverage -i %s -o %s" % (Folders.coverage_xml, Folders.coverage_badge))
@power_session(python=PY38, logsdir=Folders.runlogs)
def flake8(session: PowerSession):
"""Launch flake8 qualimetry."""
session.install("-r", str(Folders.ci_tools / "flake8-requirements.txt"))
session.run2("pip install -e .[flake8]")
rm_folder(Folders.flake8_reports)
rm_file(Folders.flake8_intermediate_file)
# Options are set in `setup.cfg` file
session.run("flake8", pkg_name, "--exit-zero", "--format=html", "--htmldir", str(Folders.flake8_reports),
"--statistics", "--tee", "--output-file", str(Folders.flake8_intermediate_file))
# generate our badge
session.run2("genbadge flake8 -i %s -o %s" % (Folders.flake8_intermediate_file, Folders.flake8_badge))
rm_file(Folders.flake8_intermediate_file)
@power_session(python=[PY37])
def docs(session: PowerSession):
"""Generates the doc and serves it on a local http server. Pass '-- build' to build statically instead."""
session.install_reqs(phase="docs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])
if session.posargs:
# use posargs instead of "serve"
session.run2("mkdocs -f ./docs/mkdocs.yml %s" % " ".join(session.posargs))
else:
session.run2("mkdocs serve -f ./docs/mkdocs.yml")
@power_session(python=[PY37])
def publish(session: PowerSession):
"""Deploy the docs+reports on github pages. Note: this rebuilds the docs"""
session.install_reqs(phase="mkdocs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])
# possibly rebuild the docs in a static way (mkdocs serve does not build locally)
session.run2("mkdocs build -f ./docs/mkdocs.yml")
# check that the doc has been generated with coverage
if not Folders.site_reports.exists():
raise ValueError("Test reports have not been built yet. Please run 'nox -s tests-3.7' first")
# publish the docs
session.run2("mkdocs gh-deploy -f ./docs/mkdocs.yml")
# publish the coverage - now in github actions only
# session.install_reqs(phase="codecov", phase_reqs=["codecov", "keyring"])
# # keyring set https://app.codecov.io/gh/<org>/<repo> token
# import keyring # (note that this import is not from the session env but the main nox env)
    # codecov_token = keyring.get_password("https://app.codecov.io/gh/<org>/<repo>", "token")
# # note: do not use --root nor -f ! otherwise "There was an error processing coverage reports"
# session.run2('codecov -t %s -f %s' % (codecov_token, Folders.coverage_xml))
@power_session(python=[PY37])
def release(session: PowerSession):
"""Create a release on github corresponding to the latest tag"""
# Get current tag using setuptools_scm and make sure this is not a dirty/dev one
from setuptools_scm import get_version # (note that this import is not from the session env but the main nox env)
from setuptools_scm.version import guess_next_dev_version
version = []
def my_scheme(version_):
version.append(version_)
return guess_next_dev_version(version_)
current_tag = get_version(".", version_scheme=my_scheme)
# create the package
session.install_reqs(phase="setup.py#dist", phase_reqs=["setuptools_scm"])
rm_folder(Folders.dist)
session.run2("python setup.py sdist bdist_wheel")
if version[0].dirty or not version[0].exact:
raise ValueError("You need to execute this action on a clean tag version with no local changes.")
# Did we receive a token through positional arguments ? (nox -s release -- <token>)
if len(session.posargs) == 1:
# Run from within github actions - no need to publish on pypi
gh_token = session.posargs[0]
publish_on_pypi = False
elif len(session.posargs) == 0:
# Run from local commandline - assume we want to manually publish on PyPi
publish_on_pypi = True
# keyring set https://docs.github.com/en/rest token
import keyring # (note that this import is not from the session env but the main nox env)
gh_token = keyring.get_password("https://docs.github.com/en/rest", "token")
assert len(gh_token) > 0
else:
raise ValueError("Only a single positional arg is allowed for now")
# publish the package on PyPi
if publish_on_pypi:
# keyring set https://upload.pypi.org/legacy/ your-username
# keyring set https://test.pypi.org/legacy/ your-username
session.install_reqs(phase="PyPi", phase_reqs=["twine"])
session.run2("twine upload dist/* -u smarie") # -r testpypi
# create the github release
session.install_reqs(phase="release", phase_reqs=["click", "PyGithub"])
session.run2("python ci_tools/github_release.py -s {gh_token} "
"--repo-slug {gh_org}/{gh_repo} -cf ./docs/changelog.md "
"-d https://{gh_org}.github.io/{gh_repo}/changelog.html {tag}"
"".format(gh_token=gh_token, gh_org=gh_org, gh_repo=gh_repo, tag=current_tag))
@nox.session(python=False)
def gha_list(session):
"""(mandatory arg: <base_session_name>) Prints all sessions available for <base_session_name>, for GithubActions."""
# see https://stackoverflow.com/q/66747359/7262247
# get the desired base session to generate the list for
if len(session.posargs) != 1:
raise ValueError("This session has a mandatory argument: <base_session_name>")
session_func = globals()[session.posargs[0]]
# list all sessions for this base session
try:
session_func.parametrize
except AttributeError:
sessions_list = ["%s-%s" % (session_func.__name__, py) for py in session_func.python]
else:
sessions_list = ["%s-%s(%s)" % (session_func.__name__, py, param)
for py, param in product(session_func.python, session_func.parametrize)]
# print the list so that it can be caught by GHA.
# Note that json.dumps is optional since this is a list of string.
# However it is to remind us that GHA expects a well-formatted json list of strings.
print(dumps(sessions_list))
# if __name__ == '__main__':
# # allow this file to be executable for easy debugging in any IDE
# nox.run(globals())
| 44.498195
| 120
| 0.682865
|
from itertools import product
from json import dumps
import logging
import nox
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent / "ci_tools"))
from nox_utils import PY27, PY37, PY36, PY35, PY38, PY39, power_session, rm_folder, rm_file, PowerSession
pkg_name = "genbadge"
gh_org = "smarie"
gh_repo = "python-genbadge"
ENVS = {
PY39: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY27: {"coverage": False, "pkg_specs": {"pip": ">10"}},
PY35: {"coverage": False, "pkg_specs": {"pip": ">10"}},
PY36: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY38: {"coverage": False, "pkg_specs": {"pip": ">19"}},
PY37: {"coverage": True, "pkg_specs": {"pip": ">19"}},
}
nox.options.sessions = ["tests", "flake8"]
nox.options.reuse_existing_virtualenvs = True
nox.options.default_venv_backend = "conda"
class Folders:
root = Path(__file__).parent
ci_tools = root / "ci_tools"
runlogs = root / Path(nox.options.envdir or ".nox") / "_runlogs"
runlogs.mkdir(parents=True, exist_ok=True)
dist = root / "dist"
site = root / "site"
site_reports = site / "reports"
reports_root = root / "docs" / "reports"
test_reports = reports_root / "junit"
test_xml = test_reports / "junit.xml"
test_html = test_reports / "report.html"
test_badge = test_reports / "junit-badge.svg"
coverage_reports = reports_root / "coverage"
coverage_xml = coverage_reports / "coverage.xml"
coverage_intermediate_file = root / ".coverage"
coverage_badge = coverage_reports / "coverage-badge.svg"
flake8_reports = reports_root / "flake8"
flake8_intermediate_file = root / "flake8stats.txt"
flake8_badge = flake8_reports / "flake8-badge.svg"
@power_session(envs=ENVS, logsdir=Folders.runlogs)
def tests(session: PowerSession, coverage, pkg_specs):
rm_folder(Folders.site)
rm_folder(Folders.reports_root)
rm_file(Folders.coverage_intermediate_file)
rm_file(Folders.root / "coverage.xml")
session.install_reqs(setup=True, install=True, tests=True, extras=("all",), versions_dct=pkg_specs)
conda_prefix = Path(session.bin)
if conda_prefix.name == "bin":
conda_prefix = conda_prefix.parent
session.run2("conda list", env={"CONDA_PREFIX": str(conda_prefix), "CONDA_DEFAULT_ENV": session.get_session_id()})
session.run2("python ci_tools/check_python_version.py %s" % session.python)
session.run2("pip install -e . --no-deps")
session.run2(['python', '-c', '"import os; os.chdir(\'./docs/\'); import %s"' % pkg_name])
if not coverage:
session.run2("python -m pytest --cache-clear -v %s/tests/" % pkg_name)
else:
session.install_reqs(phase="coverage", phase_reqs=["coverage", "pytest-html", "requests"],
versions_dct=pkg_specs)
session.run2("coverage run --source {pkg_name} "
"-m pytest --cache-clear --junitxml={test_xml} --html={test_html} -v {pkg_name}/tests/"
"".format(pkg_name=pkg_name, test_xml=Folders.test_xml, test_html=Folders.test_html))
session.run2("coverage report")
session.run2("coverage xml -o {covxml}".format(covxml=Folders.coverage_xml))
session.run2("coverage html -d {dst}".format(dst=Folders.coverage_reports))
rm_file(Folders.coverage_intermediate_file)
nox_logger.info("Generating badge for tests coverage")
session.run2("genbadge tests -i %s -o %s -t 100" % (Folders.test_xml, Folders.test_badge))
session.run2("genbadge coverage -i %s -o %s" % (Folders.coverage_xml, Folders.coverage_badge))
@power_session(python=PY38, logsdir=Folders.runlogs)
def flake8(session: PowerSession):
session.install("-r", str(Folders.ci_tools / "flake8-requirements.txt"))
session.run2("pip install -e .[flake8]")
rm_folder(Folders.flake8_reports)
rm_file(Folders.flake8_intermediate_file)
session.run("flake8", pkg_name, "--exit-zero", "--format=html", "--htmldir", str(Folders.flake8_reports),
"--statistics", "--tee", "--output-file", str(Folders.flake8_intermediate_file))
session.run2("genbadge flake8 -i %s -o %s" % (Folders.flake8_intermediate_file, Folders.flake8_badge))
rm_file(Folders.flake8_intermediate_file)
@power_session(python=[PY37])
def docs(session: PowerSession):
session.install_reqs(phase="docs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])
if session.posargs:
session.run2("mkdocs -f ./docs/mkdocs.yml %s" % " ".join(session.posargs))
else:
session.run2("mkdocs serve -f ./docs/mkdocs.yml")
@power_session(python=[PY37])
def publish(session: PowerSession):
session.install_reqs(phase="mkdocs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])
session.run2("mkdocs build -f ./docs/mkdocs.yml")
if not Folders.site_reports.exists():
raise ValueError("Test reports have not been built yet. Please run 'nox -s tests-3.7' first")
session.run2("mkdocs gh-deploy -f ./docs/mkdocs.yml")
@power_session(python=[PY37])
def release(session: PowerSession):
    from setuptools_scm import get_version
    from setuptools_scm.version import guess_next_dev_version
    version = []
    def my_scheme(version_):
version.append(version_)
return guess_next_dev_version(version_)
current_tag = get_version(".", version_scheme=my_scheme)
session.install_reqs(phase="setup.py#dist", phase_reqs=["setuptools_scm"])
rm_folder(Folders.dist)
session.run2("python setup.py sdist bdist_wheel")
if version[0].dirty or not version[0].exact:
raise ValueError("You need to execute this action on a clean tag version with no local changes.")
if len(session.posargs) == 1:
gh_token = session.posargs[0]
publish_on_pypi = False
elif len(session.posargs) == 0:
publish_on_pypi = True
import keyring
gh_token = keyring.get_password("https://docs.github.com/en/rest", "token")
assert len(gh_token) > 0
else:
raise ValueError("Only a single positional arg is allowed for now")
if publish_on_pypi:
session.install_reqs(phase="PyPi", phase_reqs=["twine"])
session.run2("twine upload dist/* -u smarie")
session.install_reqs(phase="release", phase_reqs=["click", "PyGithub"])
session.run2("python ci_tools/github_release.py -s {gh_token} "
"--repo-slug {gh_org}/{gh_repo} -cf ./docs/changelog.md "
"-d https://{gh_org}.github.io/{gh_repo}/changelog.html {tag}"
"".format(gh_token=gh_token, gh_org=gh_org, gh_repo=gh_repo, tag=current_tag))
@nox.session(python=False)
def gha_list(session):
if len(session.posargs) != 1:
raise ValueError("This session has a mandatory argument: <base_session_name>")
session_func = globals()[session.posargs[0]]
try:
session_func.parametrize
except AttributeError:
sessions_list = ["%s-%s" % (session_func.__name__, py) for py in session_func.python]
else:
sessions_list = ["%s-%s(%s)" % (session_func.__name__, py, param)
for py, param in product(session_func.python, session_func.parametrize)]
print(dumps(sessions_list))
| true
| true
|
790cfe04b88f499e32acf9459348ea860c18fcc7
| 559
|
py
|
Python
|
Haberman Data/deal_data.py
|
hrsu/disturb
|
38396fceb6c7b11fbc369166c7eea048c4188391
|
[
"Apache-2.0"
] | 1
|
2019-02-27T06:45:11.000Z
|
2019-02-27T06:45:11.000Z
|
Haberman Data/deal_data.py
|
hrsu/disturb
|
38396fceb6c7b11fbc369166c7eea048c4188391
|
[
"Apache-2.0"
] | null | null | null |
Haberman Data/deal_data.py
|
hrsu/disturb
|
38396fceb6c7b11fbc369166c7eea048c4188391
|
[
"Apache-2.0"
] | null | null | null |
def deal(infilename,outfilename):
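    # Move the class label (first character of the last comma-separated field)
    # to the front of each record and write the reordered rows back out.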
infile = open(infilename)
lines = infile.readlines()
out = []
for line in lines:
line = line.split(',')
val = line[-1][0]
line = line[:-1]
line.insert(0,val)
out.append(line)
print(out)
str = ''
for line in out:
line_str =''
for each in line:
line_str = line_str + '{},'.format(each)
str = str + line_str[:-1]+'\n'
outfile=open(outfilename,'w')
outfile.write(str)
deal('Haberman Data Set.txt','Haberman_data.txt')
| 27.95
| 52
| 0.545617
|
def deal(infilename,outfilename):
infile = open(infilename)
lines = infile.readlines()
out = []
for line in lines:
line = line.split(',')
val = line[-1][0]
line = line[:-1]
line.insert(0,val)
out.append(line)
print(out)
str = ''
for line in out:
line_str =''
for each in line:
line_str = line_str + '{},'.format(each)
str = str + line_str[:-1]+'\n'
outfile=open(outfilename,'w')
outfile.write(str)
deal('Haberman Data Set.txt','Haberman_data.txt')
| true
| true
|
790cfe747d847095ab3a279b418644d134d9ace5
| 1,024
|
py
|
Python
|
main.py
|
a892574222/game
|
db1ca156a8fbf77019bc05b8137c928c1a907ec0
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
a892574222/game
|
db1ca156a8fbf77019bc05b8137c928c1a907ec0
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
a892574222/game
|
db1ca156a8fbf77019bc05b8137c928c1a907ec0
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from src.tk import TK
import argparse
parser = argparse.ArgumentParser()
parser.register('type', 'bool', (lambda x: x.lower() in ('True', "yes", "true", "t", "1")))
parser.add_argument('--mode', default='main', help='')
args = parser.parse_args()
if args.mode == 'main':
window = TK()
window.start()
elif args.mode == 'N_0_2':
from src.N_0_2 import KO
ko = KO()
ko.solve()
elif args.mode == 'test':
from src.test import KO
ko = KO()
ko.solve()
elif args.mode == 'E_2_4':
from src.E_2_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_3_4':
from src.E_3_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_4_4':
from src.E_4_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'paper':
from src.paper import KO
ko = KO()
ko.solve()
elif args.mode == 'N_5_6':
from src.N_5_6 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_10_4':
from src.E_10_4 import KO
ko = KO()
ko.solve()
else:
pass
| 21.333333
| 91
| 0.586914
|
from src.tk import TK
import argparse
parser = argparse.ArgumentParser()
parser.register('type', 'bool', (lambda x: x.lower() in ('True', "yes", "true", "t", "1")))
parser.add_argument('--mode', default='main', help='')
args = parser.parse_args()
if args.mode == 'main':
window = TK()
window.start()
elif args.mode == 'N_0_2':
from src.N_0_2 import KO
ko = KO()
ko.solve()
elif args.mode == 'test':
from src.test import KO
ko = KO()
ko.solve()
elif args.mode == 'E_2_4':
from src.E_2_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_3_4':
from src.E_3_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_4_4':
from src.E_4_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'paper':
from src.paper import KO
ko = KO()
ko.solve()
elif args.mode == 'N_5_6':
from src.N_5_6 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_10_4':
from src.E_10_4 import KO
ko = KO()
ko.solve()
else:
pass
| true
| true
|
790cff7f1379496b3b9251e03735a67ac4f27f85
| 1,956
|
py
|
Python
|
src/load.py
|
philkr/voc-classification
|
c2097796951ea49eb4f7a919a4091b25b3ae2b52
|
[
"BSD-2-Clause"
] | 55
|
2016-08-14T19:09:59.000Z
|
2021-11-30T01:27:51.000Z
|
src/load.py
|
jeffdonahue/voc-classification
|
585dbcaeae8d30503c4b7781e2a1ee3f57067c30
|
[
"BSD-2-Clause"
] | 8
|
2016-07-27T00:29:55.000Z
|
2018-12-29T05:38:34.000Z
|
src/load.py
|
philkr/voc-classification
|
c2097796951ea49eb4f7a919a4091b25b3ae2b52
|
[
"BSD-2-Clause"
] | 20
|
2016-08-01T02:50:51.000Z
|
2020-08-24T01:34:54.000Z
|
from caffe_all import *
def parseProtoString(s):
from google.protobuf import text_format
proto_net = pb.NetParameter()
text_format.Merge(s, proto_net)
return proto_net
def get_param(l, exclude=set(['top', 'bottom', 'name', 'type'])):
if not hasattr(l,'ListFields'):
if hasattr(l,'__delitem__'):
return [get_param(i) for i in l]
return l
r = dict()
for f, v in l.ListFields():
if f.name not in exclude:
r[f.name] = get_param(v, [])
return r
class ProtoDesc:
def __init__(self, prototxt):
from os import path
self.prototxt = prototxt
self.parsed_proto = parseProtoString(open(self.prototxt, 'r').read())
# Guess the input dimension
self.input_dim = (3, 227, 227)
net = self.parsed_proto
if len(net.input_dim) > 0:
self.input_dim = net.input_dim[1:]
else:
lrs = net.layer
cs = [l.transform_param.crop_size for l in lrs
if l.HasField('transform_param')]
if len(cs):
self.input_dim = (3, cs[0], cs[0])
def __call__(self, clip=None, **inputs):
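        # Replay every non-data layer of the parsed prototxt on top of the
        # given input blobs, building a caffe NetSpec graph; stop once the
        # layer named `clip` has been added and return the last blob produced.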
from collections import OrderedDict
net = self.parsed_proto
blobs = OrderedDict(inputs)
for l in net.layer:
if l.type not in ['Data', 'ImageData']:
in_place = l.top == l.bottom
param = get_param(l)
tops = getattr(L, l.type)(*[blobs[b] for b in l.bottom],
ntop=len(l.top), in_place=in_place,
name=l.name,
**param)
if len(l.top) <= 1:
tops = [tops]
for i, t in enumerate(l.top):
blobs[t] = tops[i]
if l.name == clip:
break
return list(blobs.values())[-1]
| 34.315789
| 77
| 0.506646
|
from caffe_all import *
def parseProtoString(s):
from google.protobuf import text_format
proto_net = pb.NetParameter()
text_format.Merge(s, proto_net)
return proto_net
def get_param(l, exclude=set(['top', 'bottom', 'name', 'type'])):
if not hasattr(l,'ListFields'):
if hasattr(l,'__delitem__'):
return [get_param(i) for i in l]
return l
r = dict()
for f, v in l.ListFields():
if f.name not in exclude:
r[f.name] = get_param(v, [])
return r
class ProtoDesc:
def __init__(self, prototxt):
from os import path
self.prototxt = prototxt
self.parsed_proto = parseProtoString(open(self.prototxt, 'r').read())
self.input_dim = (3, 227, 227)
net = self.parsed_proto
if len(net.input_dim) > 0:
self.input_dim = net.input_dim[1:]
else:
lrs = net.layer
cs = [l.transform_param.crop_size for l in lrs
if l.HasField('transform_param')]
if len(cs):
self.input_dim = (3, cs[0], cs[0])
def __call__(self, clip=None, **inputs):
from collections import OrderedDict
net = self.parsed_proto
blobs = OrderedDict(inputs)
for l in net.layer:
if l.type not in ['Data', 'ImageData']:
in_place = l.top == l.bottom
param = get_param(l)
tops = getattr(L, l.type)(*[blobs[b] for b in l.bottom],
ntop=len(l.top), in_place=in_place,
name=l.name,
**param)
if len(l.top) <= 1:
tops = [tops]
for i, t in enumerate(l.top):
blobs[t] = tops[i]
if l.name == clip:
break
return list(blobs.values())[-1]
| true
| true
|
790cff95827fc7d17d1cd74f5ffe045e5f4ccfd9
| 46,520
|
py
|
Python
|
pandas/io/formats/style.py
|
harunpehlivan/pandas
|
2e38d5552a5c7b2c0091cecddd483f4f08ad1d2c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-18T00:32:20.000Z
|
2021-02-18T00:32:20.000Z
|
pandas/io/formats/style.py
|
DeanLa/pandas
|
09633b868f2f999599e29d32a326e112fdbbf3ec
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/io/formats/style.py
|
DeanLa/pandas
|
09633b868f2f999599e29d32a326e112fdbbf3ec
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
precision : int
precision to round floats to, defaults to pd.options.display.precision
table_styles : list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid : str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption : str, default None
caption to attach to the table
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
pandas.DataFrame.style
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
self.cell_ids = cell_ids
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
# only add an id if the cell has a style
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs):
"""
Render the built up styles to HTML.
Parameters
----------
`**kwargs` : Any additional keyword arguments are passed through
to ``self.template.render``. This is useful when you need to provide
additional variables for a custom template.
.. versionadded:: 0.20
Returns
-------
rendered : str
The rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
# so we have the nested anys below
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs):
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self):
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
"""
Reset the styler, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the convention that all style functions go through
.apply or .applymap. They append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis,
result_type='expand', **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
"Function {func!r} must return a DataFrame when "
"passed to `Styler.apply` with axis=None"
.format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
msg = ('Result of {func!r} must have identical index and '
'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
msg = ("Function {func!r} returned the wrong shape.\n"
"Result has shape: {res}\n"
"Expected shape: {expect}".format(func=func,
res=result.shape,
expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
...         for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset), kwargs))
return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
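# Editor's note: a minimal usage sketch for ``where`` (added for
# illustration; not part of the original pandas source). It assumes a
# small numeric DataFrame named ``df``:
#
# >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
# >>> df.style.where(lambda v: v > 0, 'color: green', other='color: red')
#
# Each cell receives 'color: green' where the condition holds and
# 'color: red' otherwise, which is equivalent to
# ``df.style.applymap(lambda v: 'color: green' if v > 0 else 'color: red')``.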
def set_precision(self, precision):
"""
Set the precision used to render.
Parameters
----------
precision : int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes):
"""
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
in addition to the automatic (by default) id.
Parameters
----------
attributes : string
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self):
"""
Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
Parameters
----------
styles : list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
Parameters
----------
uuid : str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption):
"""
Set the caption on a Styler
Parameters
----------
caption : str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles : list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
def hide_index(self):
"""
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset):
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
"""
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
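# Editor's note: a minimal usage sketch for ``hide_index``/``hide_columns``
# (added for illustration; not part of the original pandas source):
#
# >>> df = pd.DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
# >>> df.style.hide_columns(['b'])          # column 'b' is not rendered
# >>> df.style.hide_index().hide_columns(['a', 'c'])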
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None, text_color_threshold=0.408):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : int or str
1 or 'columns' for columnwise, 0 or 'index' for rowwise
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high,
text_color_threshold=text_color_threshold)
return self
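# Editor's note: a minimal usage sketch for ``background_gradient`` (added
# for illustration; not part of the original pandas source; requires
# matplotlib to be installed):
#
# >>> df = pd.DataFrame(np.random.randn(10, 4), columns=list('abcd'))
# >>> df.style.background_gradient(cmap='viridis')
# >>> df.style.background_gradient(subset=['a', 'b'], low=0.5, high=0.5)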
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.03928 else (((x + 0.055) / 1.055) ** 2.4)
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
)
def set_properties(self, subset=None, **kwargs):
"""
Convenience method for setting one or more non-data dependent
properties for each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""
Generate CSS code to draw a bar from start to end.
"""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
)
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left', vmin=None, vmax=None):
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 and 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero', 'mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
"""
if align not in ('left', 'zero', 'mid'):
raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like"
" of length 2: [`color_neg`, `color_pos`]"
" (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis,
align=align, colors=color, width=width,
vmin=vmin, vmax=vmax)
return self
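# Editor's note: a minimal usage sketch for ``bar`` (added for illustration;
# not part of the original pandas source):
#
# >>> df = pd.DataFrame({'x': [-3, -1, 0, 2, 4]})
# >>> df.style.bar(align='mid', color=['#d65f5f', '#5fba7d'])
# >>> df.style.bar(subset=['x'], align='zero', vmin=-5, vmax=5)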
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
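# Editor's note: a minimal usage sketch for the highlighting helpers (added
# for illustration; not part of the original pandas source):
#
# >>> df = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])
# >>> df.style.highlight_max(color='lightgreen')   # per-column maxima
# >>> df.style.highlight_min(axis=1)               # per-row minima
# >>> df.style.highlight_max(axis=None)            # single table-wide maximum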
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for rendering
Returns
-------
MyStyler : subclass of Styler
Has the correct ``env`` and ``template`` class attributes set.
"""
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
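# Editor's note: a minimal usage sketch for ``from_custom_template`` (added
# for illustration; not part of the original pandas source). The directory
# name, template name, and extra template variable below are hypothetical:
#
# >>> MyStyler = Styler.from_custom_template("my_templates", "myhtml.tpl")
# >>> MyStyler(df).render(table_title="Extended template example")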
def pipe(self, func, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
.. versionadded:: 0.24.0
Parameters
----------
func : function
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
*args, **kwargs :
Arguments passed to `func`.
Returns
-------
object :
The value returned by ``func``.
See Also
--------
DataFrame.pipe : Analogous method for DataFrame.
Styler.apply : Apply a function row-wise, column-wise, or table-wise to
modify the dataframe's styling.
Notes
-----
Like :meth:`DataFrame.pipe`, this method can simplify the
application of several user-defined functions to a styler. Instead
of writing:
.. code-block:: python
f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
users can write:
.. code-block:: python
(df.style.set_precision(3)
.pipe(g, arg1=a)
.pipe(f, arg2=b, arg3=c))
In particular, this allows users to define functions that take a
styler object, along with other parameters, and return the styler after
making styling changes (such as calling :meth:`Styler.apply` or
:meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
style "transformations" can be interleaved with calls to the built-in
Styler interface.
Examples
--------
>>> def format_conversion(styler):
... return (styler.set_properties(**{'text-align': 'right'})
... .format({'conversion': '{:.1%}'}))
The user-defined ``format_conversion`` function above can be called
within a sequence of other style modifications:
>>> df = pd.DataFrame({'trial': list(range(5)),
... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
>>> (df.style
... .highlight_min(subset=['conversion'], color='yellow')
... .pipe(format_conversion)
... .set_caption("Results with minimum conversion highlighted."))
"""
return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}.
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
"""
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
Result is a dictionary of (level, initial_position): span
"""
sentinel = object()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if hidden_elements is None:
hidden_elements = []
lengths = {}
if index.nlevels == 1:
for i, value in enumerate(levels):
if(i not in hidden_elements):
lengths[(0, i)] = 1
return lengths
for i, lvl in enumerate(levels):
for j, row in enumerate(lvl):
if not get_option('display.multi_sparse'):
lengths[(i, j)] = 1
elif (row != sentinel) and (j not in hidden_elements):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
# even if it's hidden, keep track of it in case
# length >1 and later elements are visible
last_label = j
lengths[(i, last_label)] = 0
elif(j not in hidden_elements):
lengths[(i, last_label)] += 1
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1}
return non_zero_lengths
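# Editor's note: a small worked example for ``_get_level_lengths`` (added
# for illustration; not part of the original pandas source). With the
# default sparsified display, the returned dict maps
# (level, initial_position) -> number of index entries spanned:
#
# >>> idx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
# >>> _get_level_lengths(idx)
# {(0, 0): 2, (0, 2): 2, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}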
def _maybe_wrap_formatter(formatter):
if is_string_like(formatter):
return lambda x: formatter.format(x)
elif callable(formatter):
return formatter
else:
msg = ("Expected a template string or callable, got {formatter} "
"instead".format(formatter=formatter))
raise TypeError(msg)
| 34.005848
| 79
| 0.529923
|
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from uuid import uuid1
import numpy as np
from pandas.compat import range
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float, is_string_like
from pandas.core.dtypes.generic import ABCSeries
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
self.cell_ids = cell_ids
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs):
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
# so we have the nested anys below
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs):
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self):
return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
self.ctx.clear()
self._todo = []
def _compute(self):
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis,
result_type='expand', **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
"Function {func!r} must return a DataFrame when "
"passed to `Styler.apply` with axis=None"
.format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
msg = ('Result of {func!r} must have identical index and '
'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
msg = ("Function {func!r} returned the wrong shape.\n"
"Result has shape: {res}\n"
"Expected shape: {expect}".format(func=func,
res=result.shape,
expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset), kwargs))
return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func, subset=None, **kwargs):
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
def where(self, cond, value, other=None, subset=None, **kwargs):
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
def set_precision(self, precision):
self.precision = precision
return self
def set_table_attributes(self, attributes):
self.table_attributes = attributes
return self
def export(self):
return self._todo
def use(self, styles):
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
self.uuid = uuid
return self
def set_caption(self, caption):
self.caption = caption
return self
def set_table_styles(self, table_styles):
self.table_styles = table_styles
return self
def hide_index(self):
self.hidden_index = True
return self
def hide_columns(self, subset):
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None, text_color_threshold=0.408):
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high,
text_color_threshold=text_color_threshold)
return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
r, g, b = (
x / 12.92 if x <= 0.03928 else (((x + 0.055) / 1.055) ** 2.4)
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
)
def set_properties(self, subset=None, **kwargs):
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
)
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left', vmin=None, vmax=None):
if align not in ('left', 'zero', 'mid'):
raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like"
" of length 2: [`color_neg`, `color_pos`]"
" (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis,
align=align, colors=color, width=width,
vmin=vmin, vmax=vmax)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .tee
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
sentinel = object()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if hidden_elements is None:
hidden_elements = []
lengths = {}
if index.nlevels == 1:
for i, value in enumerate(levels):
if(i not in hidden_elements):
lengths[(0, i)] = 1
return lengths
for i, lvl in enumerate(levels):
for j, row in enumerate(lvl):
if not get_option('display.multi_sparse'):
lengths[(i, j)] = 1
elif (row != sentinel) and (j not in hidden_elements):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
# even if it's hidden, keep track of it in case
# length >1 and later elements are visible
last_label = j
lengths[(i, last_label)] = 0
elif(j not in hidden_elements):
lengths[(i, last_label)] += 1
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1}
return non_zero_lengths
def _maybe_wrap_formatter(formatter):
if is_string_like(formatter):
return lambda x: formatter.format(x)
elif callable(formatter):
return formatter
else:
msg = ("Expected a template string or callable, got {formatter} "
"instead".format(formatter=formatter))
raise TypeError(msg)
| true
| true
|
790d00344232a574dc3e54ad2b287f0f6c4410d6
| 770
|
py
|
Python
|
palimport/_utils.py
|
asmodehn/lark_import
|
f98fc66e786c5ad9894fc75ad7cc2857702994fe
|
[
"MIT"
] | 2
|
2019-09-19T14:28:04.000Z
|
2021-09-27T09:26:27.000Z
|
palimport/_utils.py
|
asmodehn/lark_import
|
f98fc66e786c5ad9894fc75ad7cc2857702994fe
|
[
"MIT"
] | 3
|
2018-05-15T07:54:39.000Z
|
2018-05-29T07:51:20.000Z
|
palimport/_utils.py
|
asmodehn/palimport
|
f98fc66e786c5ad9894fc75ad7cc2857702994fe
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function
import sys
def _verbose_message(message, *args, **kwargs):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
verbosity = kwargs.pop('verbosity', 1)
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
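# Editor's note: a minimal usage sketch for ``_verbose_message`` (added for
# illustration; not part of the original source). The module name and path
# are made up. With ``python -v`` (or PYTHONVERBOSE set) the formatted
# message is written to stderr, prefixed with '# ':
#
# >>> _verbose_message("loading {} from {}", "palimport", "some/path")
# # loading palimport from some/path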
try:
ImportError('msg', name='name', path='path')
except TypeError:
class _ImportError(ImportError):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name', None)
self.path = kwargs.pop('path', None)
super(_ImportError, self).__init__(*args, **kwargs)
else:
_ImportError = ImportError
| 30.8
| 71
| 0.637662
|
from __future__ import absolute_import, print_function
import sys
def _verbose_message(message, *args, **kwargs):
verbosity = kwargs.pop('verbosity', 1)
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
try:
ImportError('msg', name='name', path='path')
except TypeError:
class _ImportError(ImportError):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name', None)
self.path = kwargs.pop('path', None)
super(_ImportError, self).__init__(*args, **kwargs)
else:
_ImportError = ImportError
| true
| true
|
790d0034e77697b07ad94204308782dff8c27267
| 6,439
|
py
|
Python
|
plenum/server/view_change/pre_view_change_strategies.py
|
andkononykhin/indy-plenum-copy
|
46c48feaf75e5578c9dceb76d4b6d09f7e63add5
|
[
"Apache-2.0"
] | 1
|
2019-03-19T23:44:56.000Z
|
2019-03-19T23:44:56.000Z
|
plenum/server/view_change/pre_view_change_strategies.py
|
andkononykhin/indy-plenum-copy
|
46c48feaf75e5578c9dceb76d4b6d09f7e63add5
|
[
"Apache-2.0"
] | null | null | null |
plenum/server/view_change/pre_view_change_strategies.py
|
andkononykhin/indy-plenum-copy
|
46c48feaf75e5578c9dceb76d4b6d09f7e63add5
|
[
"Apache-2.0"
] | null | null | null |
from abc import abstractmethod, ABCMeta
from collections import deque
from functools import partial
from plenum.common.constants import VIEW_CHANGE_START, PreVCStrategies, VIEW_CHANGE_CONTINUE
from plenum.common.messages.node_messages import ViewChangeStartMessage, ViewChangeContinueMessage, PrePrepare, Prepare, \
Commit, Ordered
from stp_zmq.zstack import Quota
from stp_core.common.log import getlogger
logger = getlogger()
class PreViewChangeStrategy(metaclass=ABCMeta):
"""Abstract class for routines before starting viewChange procedure"""
def __init__(self, view_changer, node):
self.view_changer = view_changer
self.node = node
@abstractmethod
def prepare_view_change(self, proposed_view_no: int):
raise NotImplementedError()
@staticmethod
@abstractmethod
def on_view_change_started(obj, msg, frm):
raise NotImplementedError()
@staticmethod
@abstractmethod
def on_view_change_continued(obj, msg):
raise NotImplementedError()
@abstractmethod
def on_strategy_complete(self):
raise NotImplementedError()
class VCStartMsgStrategy(PreViewChangeStrategy):
"""Strategy logic:
- when the startViewChange method is called, put a 'local' ViewChangeStart message into the node's inbox and set the corresponding handlers
- when the ViewChangeStart message is processed on the nodeInBoxRouter's side, the following steps are performed:
- call the nodestack.service method with extended quota parameters to pull as many 3PC
messages as possible from the ZMQ side
- process all messages from the nodeInBox queue and stash everything that is not 3PC
- append a ViewChangeContinueMessage to the replica's inBox queue
- the replica's inBox queue is then processed, and after the ViewChangeContinueMessage the view_change
procedure continues in the normal way
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stashedNodeInBox = deque()
self.replica = self.node.master_replica
self.is_preparing = False
def prepare_view_change(self, proposed_view_no: int):
if not self.is_preparing:
logger.info("VCStartMsgStrategy: Starting prepare_view_change process")
self._set_req_handlers()
vcs_msg = ViewChangeStartMessage(proposed_view_no)
nodeInBox = self.node.nodeInBox
nodeInBox.append((vcs_msg, self.node.name))
self.is_preparing = True
def on_strategy_complete(self):
logger.info("VCStartMsgStrategy: on_strategy_complete - View Change can be started")
self.unstash_messages()
self.is_preparing = False
@staticmethod
async def _process_node_inbox_3PC(node):
current_view_no = node.viewNo
stashed_not_3PC = deque()
types_3PC = (PrePrepare, Prepare, Commit, Ordered)
while node.nodeInBox:
m = node.nodeInBox.popleft()
if len(m) == 2 and isinstance(m[0], types_3PC) and \
m[0].viewNo == current_view_no and \
m[0].instId == node.instances.masterId:
await node.process_one_node_message(m)
else:
stashed_not_3PC.append(m)
return stashed_not_3PC
"""Handler for processing ViewChangeStart message on node's nodeInBoxRouter"""
@staticmethod
async def on_view_change_started(node, msg: ViewChangeStartMessage, frm):
strategy = node.view_changer.pre_vc_strategy
proposed_view_no = msg.proposed_view_no
logger.info("VCStartMsgStrategy: got ViewChangeStartMessage with proposed_view_no: {}".format(proposed_view_no))
if proposed_view_no > node.view_changer.view_no:
vcc_msg = ViewChangeContinueMessage(proposed_view_no)
quota = Quota(
count=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.count,
size=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.size)
msgs_count = await node.nodestack.service(limit=None,
quota=quota)
logger.info("VCStartMsgStrategy: Got {} messages from nodestack".format(msgs_count))
strategy.stashedNodeInBox = await VCStartMsgStrategy._process_node_inbox_3PC(node)
logger.info("VCStartMsgStrategy: {} non-3PC msgs were stashed".format(len(strategy.stashedNodeInBox)))
node.master_replica.inBox.append(vcc_msg)
"""Handler for processing ViewChangeContinue message on replica's inBoxRouter"""
@staticmethod
def on_view_change_continued(replica, msg: ViewChangeContinueMessage):
strategy = replica.node.view_changer.pre_vc_strategy
proposed_view_no = msg.proposed_view_no
replica.logger.info("VCStartMsgStrategy: got ViewChangeContinueMessage with proposed_view_no: {}".format(proposed_view_no))
if proposed_view_no > replica.node.viewNo:
"""
Return stashed non-3PC msgs to the nodeInBox queue and start the ViewChange.
Critical assumption: all 3PC msgs passed from the node have already been processed.
"""
strategy.unstash_messages()
replica.logger.info("VCStartMsgStrategy: continue view_change procedure in a normal way")
replica.node.view_changer.startViewChange(proposed_view_no, continue_vc=True)
strategy.is_preparing = False
def unstash_messages(self):
logger.info("VCStartMsgStrategy: unstash all non-3PC msgs back to the nodeInBox queue")
while self.stashedNodeInBox:
self.node.nodeInBox.appendleft(self.stashedNodeInBox.pop())
def _set_req_handlers(self):
node_msg_router = self.node.nodeMsgRouter
replica_msg_router = self.replica.inBoxRouter
if ViewChangeStartMessage not in node_msg_router.routes:
processor = partial(VCStartMsgStrategy.on_view_change_started,
self.node)
node_msg_router.add((ViewChangeStartMessage, processor))
if ViewChangeContinueMessage not in replica_msg_router.routes:
processor = partial(VCStartMsgStrategy.on_view_change_continued,
self.replica)
replica_msg_router.add((ViewChangeContinueMessage, processor))
preVCStrategies = {
PreVCStrategies.VC_START_MSG_STRATEGY: VCStartMsgStrategy
}
| 44.715278
| 131
| 0.701196
|
from abc import abstractmethod, ABCMeta
from collections import deque
from functools import partial
from plenum.common.constants import VIEW_CHANGE_START, PreVCStrategies, VIEW_CHANGE_CONTINUE
from plenum.common.messages.node_messages import ViewChangeStartMessage, ViewChangeContinueMessage, PrePrepare, Prepare, \
Commit, Ordered
from stp_zmq.zstack import Quota
from stp_core.common.log import getlogger
logger = getlogger()
class PreViewChangeStrategy(metaclass=ABCMeta):
def __init__(self, view_changer, node):
self.view_changer = view_changer
self.node = node
@abstractmethod
def prepare_view_change(self, proposed_view_no: int):
raise NotImplementedError()
@staticmethod
@abstractmethod
def on_view_change_started(obj, msg, frm):
raise NotImplementedError()
@staticmethod
@abstractmethod
def on_view_change_continued(obj, msg):
raise NotImplementedError()
@abstractmethod
def on_strategy_complete(self):
raise NotImplementedError()
class VCStartMsgStrategy(PreViewChangeStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stashedNodeInBox = deque()
self.replica = self.node.master_replica
self.is_preparing = False
def prepare_view_change(self, proposed_view_no: int):
if not self.is_preparing:
logger.info("VCStartMsgStrategy: Starting prepare_view_change process")
self._set_req_handlers()
vcs_msg = ViewChangeStartMessage(proposed_view_no)
nodeInBox = self.node.nodeInBox
nodeInBox.append((vcs_msg, self.node.name))
self.is_preparing = True
def on_strategy_complete(self):
logger.info("VCStartMsgStrategy: on_strategy_complete - View Change can be started")
self.unstash_messages()
self.is_preparing = False
@staticmethod
async def _process_node_inbox_3PC(node):
current_view_no = node.viewNo
stashed_not_3PC = deque()
types_3PC = (PrePrepare, Prepare, Commit, Ordered)
while node.nodeInBox:
m = node.nodeInBox.popleft()
if len(m) == 2 and isinstance(m[0], types_3PC) and \
m[0].viewNo == current_view_no and \
m[0].instId == node.instances.masterId:
await node.process_one_node_message(m)
else:
stashed_not_3PC.append(m)
return stashed_not_3PC
@staticmethod
async def on_view_change_started(node, msg: ViewChangeStartMessage, frm):
strategy = node.view_changer.pre_vc_strategy
proposed_view_no = msg.proposed_view_no
logger.info("VCStartMsgStrategy: got ViewChangeStartMessage with proposed_view_no: {}".format(proposed_view_no))
if proposed_view_no > node.view_changer.view_no:
vcc_msg = ViewChangeContinueMessage(proposed_view_no)
quota = Quota(
count=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.count,
size=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.size)
msgs_count = await node.nodestack.service(limit=None,
quota=quota)
logger.info("VCStartMsgStrategy: Got {} messages from nodestack".format(msgs_count))
strategy.stashedNodeInBox = await VCStartMsgStrategy._process_node_inbox_3PC(node)
logger.info("VCStartMsgStrategy: {} non-3PC msgs were stashed".format(len(strategy.stashedNodeInBox)))
node.master_replica.inBox.append(vcc_msg)
@staticmethod
def on_view_change_continued(replica, msg: ViewChangeContinueMessage):
strategy = replica.node.view_changer.pre_vc_strategy
proposed_view_no = msg.proposed_view_no
replica.logger.info("VCStartMsgStrategy: got ViewChangeContinueMessage with proposed_view_no: {}".format(proposed_view_no))
if proposed_view_no > replica.node.viewNo:
strategy.unstash_messages()
replica.logger.info("VCStartMsgStrategy: continue view_change procedure in a normal way")
replica.node.view_changer.startViewChange(proposed_view_no, continue_vc=True)
strategy.is_preparing = False
def unstash_messages(self):
logger.info("VCStartMsgStrategy: unstash all non-3PC msgs back to the nodeInBox queue")
while self.stashedNodeInBox:
self.node.nodeInBox.appendleft(self.stashedNodeInBox.pop())
def _set_req_handlers(self):
node_msg_router = self.node.nodeMsgRouter
replica_msg_router = self.replica.inBoxRouter
if ViewChangeStartMessage not in node_msg_router.routes:
processor = partial(VCStartMsgStrategy.on_view_change_started,
self.node)
node_msg_router.add((ViewChangeStartMessage, processor))
if ViewChangeContinueMessage not in replica_msg_router.routes:
processor = partial(VCStartMsgStrategy.on_view_change_continued,
self.replica)
replica_msg_router.add((ViewChangeContinueMessage, processor))
preVCStrategies = {
PreVCStrategies.VC_START_MSG_STRATEGY: VCStartMsgStrategy
}
| true
| true
|
790d007368ae40c90ca3ead37878146b24ec5ed8
| 3,037
|
py
|
Python
|
contrib/linearize/linearize-hashes.py
|
listedlinked/sors
|
99992f4acdcdbeb30e707cb67697dcc9bdd0db73
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
listedlinked/sors
|
99992f4acdcdbeb30e707cb67697dcc9bdd0db73
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
listedlinked/sors
|
99992f4acdcdbeb30e707cb67697dcc9bdd0db73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 60100
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
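# --- Illustrative sketch (not part of the original script) ---
# Minimal example of driving the BitcoinRPC helper directly against a local
# bitcoind; the host, port and credentials below are placeholder assumptions.
def example_print_first_blocks():
	rpc = BitcoinRPC('127.0.0.1', 8332, 'rpcuser', 'rpcpassword')
	# build a small JSON-RPC batch of getblockhash calls for heights 0..4
	batch = [rpc.build_request(i, 'getblockhash', [i]) for i in range(5)]
	for resp in rpc.execute(batch):
		if not rpc.response_is_error(resp):
			print(resp['result'])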
| 26.640351
| 90
| 0.682581
|
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x)
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
m = re.search('^\s*#', line)
if m:
continue
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 60100
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| true
| true
|
790d008f566269ced30d6bdcef22ad14cf2617a5
| 3,979
|
py
|
Python
|
pywick/optimizers/ralamb.py
|
achaiah/pywick
|
9d663faf0c1660a9b8359a6472c164f658dfc8cb
|
[
"MIT"
] | 408
|
2019-05-16T16:12:41.000Z
|
2022-03-26T17:27:12.000Z
|
pywick/optimizers/ralamb.py
|
achaiah/pywick
|
9d663faf0c1660a9b8359a6472c164f658dfc8cb
|
[
"MIT"
] | 13
|
2019-05-17T05:47:06.000Z
|
2021-06-21T19:02:30.000Z
|
pywick/optimizers/ralamb.py
|
achaiah/pywick
|
9d663faf0c1660a9b8359a6472c164f658dfc8cb
|
[
"MIT"
] | 42
|
2019-05-16T19:57:12.000Z
|
2022-03-06T15:23:18.000Z
|
# Source: https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
from torch.optim.optimizer import Optimizer
import torch
import math
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size, exp_avg, denom)
else:
radam_step.add_(-radam_step_size, exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if 0 in (weight_norm, radam_norm):
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
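# --- Illustrative usage sketch (not part of the original module) ---
# Drives Ralamb like any torch.optim optimizer on a tiny random regression;
# assumes a PyTorch version that still accepts the legacy add_/addcmul_
# signatures used in step() above.
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Linear(4, 1)
    optimizer = Ralamb(model.parameters(), lr=1e-3)
    data, target = torch.randn(16, 4), torch.randn(16, 1)
    for _ in range(3):
        optimizer.zero_grad()
        loss = ((model(data) - target) ** 2).mean()
        loss.backward()
        optimizer.step()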
| 40.191919
| 195
| 0.508922
|
from torch.optim.optimizer import Optimizer
import torch
import math
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma >= 5:
radam_step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size, exp_avg, denom)
else:
radam_step.add_(-radam_step_size, exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if 0 in (weight_norm, radam_norm):
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
| true
| true
|
790d00c5a223d8b8edeefd87b662eb702c908c85
| 792
|
py
|
Python
|
package/config.py
|
sooftware/char-rnnlm
|
fc6573bde13b151f373fc081f63e3f563debf56c
|
[
"MIT"
] | 7
|
2020-04-16T17:37:56.000Z
|
2022-02-02T11:00:30.000Z
|
package/config.py
|
sooftware/char-rnnlm
|
fc6573bde13b151f373fc081f63e3f563debf56c
|
[
"MIT"
] | null | null | null |
package/config.py
|
sooftware/char-rnnlm
|
fc6573bde13b151f373fc081f63e3f563debf56c
|
[
"MIT"
] | 3
|
2020-04-16T17:39:19.000Z
|
2020-12-28T03:45:04.000Z
|
class Config:
def __init__(self,
use_cuda=True,
hidden_size=512,
dropout_p=0.5,
n_layers=4,
batch_size=32,
max_epochs=40,
lr=0.0001,
teacher_forcing_ratio=1.0,
seed=1,
max_len=428,
worker_num=1
):
self.use_cuda = use_cuda
self.hidden_size = hidden_size
self.dropout_p = dropout_p
self.n_layers = n_layers
self.batch_size = batch_size
self.max_epochs = max_epochs
self.lr = lr
self.teacher_forcing_ratio = teacher_forcing_ratio
self.seed = seed
self.max_len = max_len
self.worker_num = worker_num
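# --- Illustrative usage sketch (not part of the original module) ---
# Shows overriding a couple of defaults while keeping the rest.
if __name__ == '__main__':
    config = Config(batch_size=16, max_epochs=10)
    print(config.batch_size, config.hidden_size, config.use_cuda)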
| 30.461538
| 58
| 0.5
|
class Config:
def __init__(self,
use_cuda=True,
hidden_size=512,
dropout_p=0.5,
n_layers=4,
batch_size=32,
max_epochs=40,
lr=0.0001,
teacher_forcing_ratio=1.0,
seed=1,
max_len=428,
worker_num=1
):
self.use_cuda = use_cuda
self.hidden_size = hidden_size
self.dropout_p = dropout_p
self.n_layers = n_layers
self.batch_size = batch_size
self.max_epochs = max_epochs
self.lr = lr
self.teacher_forcing_ratio = teacher_forcing_ratio
self.seed = seed
self.max_len = max_len
self.worker_num = worker_num
| true
| true
|
790d02420ad1f5d3448692d666d225f8f191208a
| 1,533
|
py
|
Python
|
hacking/checks/dictlist.py
|
UbuntuEvangelist/hacking
|
c1bf3fa5a2122e0d2b83ac47ec2861387d06a8c3
|
[
"Apache-2.0"
] | 1
|
2016-04-29T17:33:40.000Z
|
2016-04-29T17:33:40.000Z
|
hacking/checks/dictlist.py
|
UbuntuEvangelist/hacking
|
c1bf3fa5a2122e0d2b83ac47ec2861387d06a8c3
|
[
"Apache-2.0"
] | null | null | null |
hacking/checks/dictlist.py
|
UbuntuEvangelist/hacking
|
c1bf3fa5a2122e0d2b83ac47ec2861387d06a8c3
|
[
"Apache-2.0"
] | 16
|
2017-01-12T09:38:55.000Z
|
2019-04-18T20:52:34.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tokenize
from hacking import core
LOCALS_TEXT_MAP = {
'locals': 'locals()',
'self': 'self.__dict__'
}
@core.flake8ext
def hacking_no_locals(logical_line, physical_line, tokens, noqa):
"""Do not use locals() or self.__dict__ for string formatting.
Okay: 'locals()'
Okay: 'locals'
Okay: locals()
Okay: print(locals())
H501: print("%(something)" % locals())
H501: LOG.info(_("%(something)") % self.__dict__)
Okay: print("%(something)" % locals()) # noqa
"""
if noqa:
return
for_formatting = False
for token_type, text, start, _, _ in tokens:
if text == "%" and token_type == tokenize.OP:
for_formatting = True
if for_formatting and token_type == tokenize.NAME:
for k, v in LOCALS_TEXT_MAP.items():
if text == k and v in logical_line:
yield (start[1],
"H501: Do not use %s for string formatting" % v)
| 32.617021
| 76
| 0.643183
|
import tokenize
from hacking import core
LOCALS_TEXT_MAP = {
'locals': 'locals()',
'self': 'self.__dict__'
}
@core.flake8ext
def hacking_no_locals(logical_line, physical_line, tokens, noqa):
if noqa:
return
for_formatting = False
for token_type, text, start, _, _ in tokens:
if text == "%" and token_type == tokenize.OP:
for_formatting = True
if for_formatting and token_type == tokenize.NAME:
for k, v in LOCALS_TEXT_MAP.items():
if text == k and v in logical_line:
yield (start[1],
"H501: Do not use %s for string formatting" % v)
| true
| true
|
790d029148e060461c2ca87346f345b09776d25b
| 405
|
py
|
Python
|
__init__.py
|
rogermoore6872/mycroft-fortune
|
23e1c82fbb8c4b24553c419b82c57fd9c15d19ae
|
[
"MIT"
] | null | null | null |
__init__.py
|
rogermoore6872/mycroft-fortune
|
23e1c82fbb8c4b24553c419b82c57fd9c15d19ae
|
[
"MIT"
] | null | null | null |
__init__.py
|
rogermoore6872/mycroft-fortune
|
23e1c82fbb8c4b24553c419b82c57fd9c15d19ae
|
[
"MIT"
] | null | null | null |
from mycroft import MycroftSkill, intent_file_handler
import subprocess
class Fortune(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('fortune.intent')
def handle_fortune(self, message):
result = subprocess.run("fortune", capture_output=True, text=True)
        # the fortune output is raw text, so speak it directly rather than
        # treating it as the name of a .dialog file
        self.speak(result.stdout)
def create_skill():
return Fortune()
| 23.823529
| 74
| 0.728395
|
from mycroft import MycroftSkill, intent_file_handler
import subprocess
class Fortune(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('fortune.intent')
def handle_fortune(self, message):
result = subprocess.run("fortune", capture_output=True, text=True)
        self.speak(result.stdout)
def create_skill():
return Fortune()
| true
| true
|
790d03197ad47b0b968b011b5824f33a34bbd869
| 418
|
py
|
Python
|
colaboradados_django/wsgi.py
|
dennys-bd/colaboradados_django
|
0c2b78e670924fd5ac094598bfad2c81a86cf74f
|
[
"MIT"
] | null | null | null |
colaboradados_django/wsgi.py
|
dennys-bd/colaboradados_django
|
0c2b78e670924fd5ac094598bfad2c81a86cf74f
|
[
"MIT"
] | null | null | null |
colaboradados_django/wsgi.py
|
dennys-bd/colaboradados_django
|
0c2b78e670924fd5ac094598bfad2c81a86cf74f
|
[
"MIT"
] | null | null | null |
"""
WSGI config for colaboradados_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'colaboradados_django.settings')
application = get_wsgi_application()
| 23.222222
| 80
| 0.796651
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'colaboradados_django.settings')
application = get_wsgi_application()
| true
| true
|
790d036d225b557e23d8e46233e190899240bc79
| 163
|
py
|
Python
|
bin/cubes/pentacubes-stepped-pyramid-5.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/cubes/pentacubes-stepped-pyramid-5.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/cubes/pentacubes-stepped-pyramid-5.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""many solutions"""
import puzzler
from puzzler.puzzles.pentacubes import PentacubesSteppedPyramid5 as puzzle
puzzler.run(puzzle)
| 16.3
| 74
| 0.773006
|
import puzzler
from puzzler.puzzles.pentacubes import PentacubesSteppedPyramid5 as puzzle
puzzler.run(puzzle)
| true
| true
|
790d0373d580aadbe327fd158a968804bbfd2262
| 2,074
|
py
|
Python
|
ml_from_scratch/logistic_regression.py
|
peimengsui/ml_from_scratch
|
5f5d276fee8f25ab91fd4342434aa23eb154a405
|
[
"MIT"
] | null | null | null |
ml_from_scratch/logistic_regression.py
|
peimengsui/ml_from_scratch
|
5f5d276fee8f25ab91fd4342434aa23eb154a405
|
[
"MIT"
] | null | null | null |
ml_from_scratch/logistic_regression.py
|
peimengsui/ml_from_scratch
|
5f5d276fee8f25ab91fd4342434aa23eb154a405
|
[
"MIT"
] | 1
|
2020-08-09T19:39:27.000Z
|
2020-08-09T19:39:27.000Z
|
import numpy as np
import math
from ml_from_scratch.activation_functions import Sigmoid
from ml_from_scratch.utils import make_diagonal
class LogisticRegression():
""" Logistic Regression classifier.
Parameters:
-----------
n_iters: int
Number of iterations running gradient descent, default is 1000
lr: float
learning rate
gradient_descent: boolean
True or false depending if gradient descent should be used when training. If
false then we use Newton Method.
"""
def __init__(self, n_iters=1000, lr=.1, gradient_descent=True):
self.param = None
self.n_iters = n_iters
self.lr = lr
self.gradient_descent = gradient_descent
self.sigmoid = Sigmoid()
def _initialize_parameters(self, X):
n_features = np.shape(X)[1]
# Initialize parameters between [-1/sqrt(N), 1/sqrt(N)]
limit = 1 / math.sqrt(n_features)
self.param = np.random.uniform(-limit, limit, (n_features,))
def fit(self, X, y):
self._initialize_parameters(X)
# Tune parameters for n iterations
for i in range(self.n_iters):
# Make a new prediction
y_pred = self.sigmoid(X.dot(self.param))
if self.gradient_descent:
# Move against the gradient of the loss function with
# respect to the parameters to minimize the loss
self.param -= self.lr * (y_pred - y).dot(X)
else:
# Make a diagonal matrix of the sigmoid gradient column vector
diag_gradient = make_diagonal(self.sigmoid.gradient(X.dot(self.param)))
# Batch opt:
self.param = np.linalg.pinv(X.T.dot(diag_gradient).dot(X)).\
dot(X.T).dot(diag_gradient.dot(X).dot(self.param) + y - y_pred)
def predict(self, X):
y_pred = np.round(self.sigmoid(X.dot(self.param))).astype(int)
return y_pred
def predict_proba(self, X):
p_pred = self.sigmoid(X.dot(self.param))
return p_pred
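# --- Illustrative usage sketch (not part of the original module) ---
# Fits the classifier on a tiny, linearly separable toy problem using the
# default gradient-descent path; the numbers are arbitrary illustrations.
if __name__ == '__main__':
    X_toy = np.array([[0.0, 1.0], [1.0, 2.0], [4.0, 5.0], [5.0, 6.0]])
    y_toy = np.array([0, 0, 1, 1])
    clf = LogisticRegression(n_iters=500, lr=0.1)
    clf.fit(X_toy, y_toy)
    print(clf.predict(X_toy))
    print(clf.predict_proba(X_toy))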
| 37.035714
| 87
| 0.619094
|
import numpy as np
import math
from ml_from_scratch.activation_functions import Sigmoid
from ml_from_scratch.utils import make_diagonal
class LogisticRegression():
def __init__(self, n_iters=1000, lr=.1, gradient_descent=True):
self.param = None
self.n_iters = n_iters
self.lr = lr
self.gradient_descent = gradient_descent
self.sigmoid = Sigmoid()
def _initialize_parameters(self, X):
n_features = np.shape(X)[1]
limit = 1 / math.sqrt(n_features)
self.param = np.random.uniform(-limit, limit, (n_features,))
def fit(self, X, y):
self._initialize_parameters(X)
for i in range(self.n_iters):
y_pred = self.sigmoid(X.dot(self.param))
if self.gradient_descent:
self.param -= self.lr * (y_pred - y).dot(X)
else:
diag_gradient = make_diagonal(self.sigmoid.gradient(X.dot(self.param)))
self.param = np.linalg.pinv(X.T.dot(diag_gradient).dot(X)).\
dot(X.T).dot(diag_gradient.dot(X).dot(self.param) + y - y_pred)
def predict(self, X):
y_pred = np.round(self.sigmoid(X.dot(self.param))).astype(int)
return y_pred
def predict_proba(self, X):
p_pred = self.sigmoid(X.dot(self.param))
return p_pred
| true
| true
|
790d0381cb1ede5d99111b85f3a651ded60b7712
| 6,969
|
py
|
Python
|
DeepHyperion-BNG/self_driving/beamng_member.py
|
IharBakhanovich/DeepHyperion
|
f7f696ba95124125dfe967ea4890d944a9958d77
|
[
"MIT"
] | null | null | null |
DeepHyperion-BNG/self_driving/beamng_member.py
|
IharBakhanovich/DeepHyperion
|
f7f696ba95124125dfe967ea4890d944a9958d77
|
[
"MIT"
] | null | null | null |
DeepHyperion-BNG/self_driving/beamng_member.py
|
IharBakhanovich/DeepHyperion
|
f7f696ba95124125dfe967ea4890d944a9958d77
|
[
"MIT"
] | null | null | null |
import hashlib
import random
from typing import Tuple, Dict
from self_driving.beamng_config import BeamNGConfig
from self_driving.beamng_evaluator import BeamNGEvaluator
from core.member import Member
from self_driving.catmull_rom import catmull_rom
from self_driving.road_bbox import RoadBoundingBox
from self_driving.road_polygon import RoadPolygon
from self_driving.edit_distance_polyline import iterative_levenshtein
Tuple4F = Tuple[float, float, float, float]
Tuple2F = Tuple[float, float]
class BeamNGMember(Member):
"""A class representing a road returned by the RoadGenerator."""
counter = 0
def __init__(self, control_nodes: Tuple4F, sample_nodes: Tuple4F, num_spline_nodes: int,
road_bbox: RoadBoundingBox):
super().__init__()
BeamNGMember.counter += 1
self.name = f'mbr{str(BeamNGMember.counter)}'
self.name_ljust = self.name.ljust(7)
self.control_nodes = control_nodes
self.sample_nodes = sample_nodes
self.num_spline_nodes = num_spline_nodes
self.road_bbox = road_bbox
self.config: BeamNGConfig = None
self.problem: 'BeamNGProblem' = None
self._evaluator: BeamNGEvaluator = None
def clone(self):
res = BeamNGMember(list(self.control_nodes), list(self.sample_nodes), self.num_spline_nodes, self.road_bbox)
res.config = self.config
res.problem = self.problem
res.distance_to_boundary = self.distance_to_boundary
return res
def to_dict(self) -> dict:
return {
'control_nodes': self.control_nodes,
'sample_nodes': self.sample_nodes,
'num_spline_nodes': self.num_spline_nodes,
'road_bbox_size': self.road_bbox.bbox.bounds,
'distance_to_boundary': self.distance_to_boundary
}
@classmethod
def from_dict(cls, dict: Dict):
road_bbox = RoadBoundingBox(dict['road_bbox_size'])
res = BeamNGMember([tuple(t) for t in dict['control_nodes']],
[tuple(t) for t in dict['sample_nodes']],
dict['num_spline_nodes'], road_bbox)
res.distance_to_boundary = dict['distance_to_boundary']
return res
def evaluate(self):
if self.needs_evaluation():
self.simulation = self.problem._get_evaluator().evaluate([self])
print('eval mbr', self)
#assert not self.needs_evaluation()
def needs_evaluation(self):
return self.distance_to_boundary is None or self.simulation is None
def clear_evaluation(self):
self.distance_to_boundary = None
def is_valid(self):
return (RoadPolygon.from_nodes(self.sample_nodes).is_valid() and
self.road_bbox.contains(RoadPolygon.from_nodes(self.control_nodes[1:-1])))
def distance(self, other: 'BeamNGMember'):
#TODO
#return frechet_dist(self.sample_nodes, other.sample_nodes)
return iterative_levenshtein(self.sample_nodes, other.sample_nodes)
#return frechet_dist(self.sample_nodes[0::3], other.sample_nodes[0::3])
def to_tuple(self):
import numpy as np
barycenter = np.mean(self.control_nodes, axis=0)[:2]
return barycenter
def mutate(self) -> 'BeamNGMember':
RoadMutator(self, lower_bound=-int(self.problem.config.MUTATION_EXTENT), upper_bound=int(self.problem.config.MUTATION_EXTENT)).mutate()
self.distance_to_boundary = None
return self
def __repr__(self):
eval_boundary = 'na'
if self.distance_to_boundary:
eval_boundary = str(self.distance_to_boundary)
if self.distance_to_boundary > 0:
eval_boundary = '+' + eval_boundary
eval_boundary = '~' + eval_boundary
eval_boundary = eval_boundary[:7].ljust(7)
h = hashlib.sha256(str([tuple(node) for node in self.control_nodes]).encode('UTF-8')).hexdigest()[-5:]
return f'{self.name_ljust} h={h} b={eval_boundary}'
class RoadMutator:
NUM_UNDO_ATTEMPTS = 20
def __init__(self, road: BeamNGMember, lower_bound=-2, upper_bound=2):
self.road = road
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def mutate_gene(self, index, xy_prob=0.5) -> Tuple[int, int]:
gene = list(self.road.control_nodes[index])
# Choose the mutation extent
candidate_mut_values = [i for i in range(self.lower_bound, self.upper_bound) if i !=0]
mut_value = random.choice(candidate_mut_values)
#mut_value = random.randint(self.lower_bound, self.upper_bound)
# Avoid to choose 0
#if mut_value == 0:
# mut_value += 1
# Select coordinate to mutate
if random.random() < xy_prob:
c = 1
else:
c = 0
gene[c] += mut_value
self.road.control_nodes[index] = tuple(gene)
self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
return c, mut_value
def undo_mutation(self, index, c, mut_value):
gene = list(self.road.control_nodes[index])
gene[c] -= mut_value
self.road.control_nodes[index] = tuple(gene)
self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
def mutate(self, num_undo_attempts=10):
backup_nodes = list(self.road.control_nodes)
attempted_genes = set()
n = len(self.road.control_nodes) - 2
seglength = 3
candidate_length = n - (2 * seglength)
assert(candidate_length > 0)
def next_gene_index() -> int:
if len(attempted_genes) == candidate_length:
return -1
i = None
condition = False
while not condition:
i = random.randint(seglength, n - seglength)
if i not in attempted_genes:
condition = True
assert(i is not None)
assert seglength <= i <= n - seglength
# i = random.randint(3, n - 3)
# while i in attempted_genes:
# i = random.randint(3, n-3)
attempted_genes.add(i)
return i
gene_index = next_gene_index()
while gene_index != -1:
c, mut_value = self.mutate_gene(gene_index)
attempt = 0
is_valid = self.road.is_valid()
while not is_valid and attempt < num_undo_attempts:
self.undo_mutation(gene_index, c, mut_value)
c, mut_value = self.mutate_gene(gene_index)
attempt += 1
is_valid = self.road.is_valid()
if is_valid:
break
else:
gene_index = next_gene_index()
if gene_index == -1:
raise ValueError("No gene can be mutated")
assert self.road.is_valid()
assert self.road.control_nodes != backup_nodes
| 37.67027
| 143
| 0.634094
|
import hashlib
import random
from typing import Tuple, Dict
from self_driving.beamng_config import BeamNGConfig
from self_driving.beamng_evaluator import BeamNGEvaluator
from core.member import Member
from self_driving.catmull_rom import catmull_rom
from self_driving.road_bbox import RoadBoundingBox
from self_driving.road_polygon import RoadPolygon
from self_driving.edit_distance_polyline import iterative_levenshtein
Tuple4F = Tuple[float, float, float, float]
Tuple2F = Tuple[float, float]
class BeamNGMember(Member):
counter = 0
def __init__(self, control_nodes: Tuple4F, sample_nodes: Tuple4F, num_spline_nodes: int,
road_bbox: RoadBoundingBox):
super().__init__()
BeamNGMember.counter += 1
self.name = f'mbr{str(BeamNGMember.counter)}'
self.name_ljust = self.name.ljust(7)
self.control_nodes = control_nodes
self.sample_nodes = sample_nodes
self.num_spline_nodes = num_spline_nodes
self.road_bbox = road_bbox
self.config: BeamNGConfig = None
self.problem: 'BeamNGProblem' = None
self._evaluator: BeamNGEvaluator = None
def clone(self):
res = BeamNGMember(list(self.control_nodes), list(self.sample_nodes), self.num_spline_nodes, self.road_bbox)
res.config = self.config
res.problem = self.problem
res.distance_to_boundary = self.distance_to_boundary
return res
def to_dict(self) -> dict:
return {
'control_nodes': self.control_nodes,
'sample_nodes': self.sample_nodes,
'num_spline_nodes': self.num_spline_nodes,
'road_bbox_size': self.road_bbox.bbox.bounds,
'distance_to_boundary': self.distance_to_boundary
}
@classmethod
def from_dict(cls, dict: Dict):
road_bbox = RoadBoundingBox(dict['road_bbox_size'])
res = BeamNGMember([tuple(t) for t in dict['control_nodes']],
[tuple(t) for t in dict['sample_nodes']],
dict['num_spline_nodes'], road_bbox)
res.distance_to_boundary = dict['distance_to_boundary']
return res
def evaluate(self):
if self.needs_evaluation():
self.simulation = self.problem._get_evaluator().evaluate([self])
print('eval mbr', self)
def needs_evaluation(self):
return self.distance_to_boundary is None or self.simulation is None
def clear_evaluation(self):
self.distance_to_boundary = None
def is_valid(self):
return (RoadPolygon.from_nodes(self.sample_nodes).is_valid() and
self.road_bbox.contains(RoadPolygon.from_nodes(self.control_nodes[1:-1])))
def distance(self, other: 'BeamNGMember'):
return iterative_levenshtein(self.sample_nodes, other.sample_nodes)
def to_tuple(self):
import numpy as np
barycenter = np.mean(self.control_nodes, axis=0)[:2]
return barycenter
def mutate(self) -> 'BeamNGMember':
RoadMutator(self, lower_bound=-int(self.problem.config.MUTATION_EXTENT), upper_bound=int(self.problem.config.MUTATION_EXTENT)).mutate()
self.distance_to_boundary = None
return self
def __repr__(self):
eval_boundary = 'na'
if self.distance_to_boundary:
eval_boundary = str(self.distance_to_boundary)
if self.distance_to_boundary > 0:
eval_boundary = '+' + eval_boundary
eval_boundary = '~' + eval_boundary
eval_boundary = eval_boundary[:7].ljust(7)
h = hashlib.sha256(str([tuple(node) for node in self.control_nodes]).encode('UTF-8')).hexdigest()[-5:]
return f'{self.name_ljust} h={h} b={eval_boundary}'
class RoadMutator:
NUM_UNDO_ATTEMPTS = 20
def __init__(self, road: BeamNGMember, lower_bound=-2, upper_bound=2):
self.road = road
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def mutate_gene(self, index, xy_prob=0.5) -> Tuple[int, int]:
gene = list(self.road.control_nodes[index])
candidate_mut_values = [i for i in range(self.lower_bound, self.upper_bound) if i !=0]
mut_value = random.choice(candidate_mut_values)
if random.random() < xy_prob:
c = 1
else:
c = 0
gene[c] += mut_value
self.road.control_nodes[index] = tuple(gene)
self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
return c, mut_value
def undo_mutation(self, index, c, mut_value):
gene = list(self.road.control_nodes[index])
gene[c] -= mut_value
self.road.control_nodes[index] = tuple(gene)
self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
def mutate(self, num_undo_attempts=10):
backup_nodes = list(self.road.control_nodes)
attempted_genes = set()
n = len(self.road.control_nodes) - 2
seglength = 3
candidate_length = n - (2 * seglength)
assert(candidate_length > 0)
def next_gene_index() -> int:
if len(attempted_genes) == candidate_length:
return -1
i = None
condition = False
while not condition:
i = random.randint(seglength, n - seglength)
if i not in attempted_genes:
condition = True
assert(i is not None)
assert seglength <= i <= n - seglength
attempted_genes.add(i)
return i
gene_index = next_gene_index()
while gene_index != -1:
c, mut_value = self.mutate_gene(gene_index)
attempt = 0
is_valid = self.road.is_valid()
while not is_valid and attempt < num_undo_attempts:
self.undo_mutation(gene_index, c, mut_value)
c, mut_value = self.mutate_gene(gene_index)
attempt += 1
is_valid = self.road.is_valid()
if is_valid:
break
else:
gene_index = next_gene_index()
if gene_index == -1:
raise ValueError("No gene can be mutated")
assert self.road.is_valid()
assert self.road.control_nodes != backup_nodes
| true
| true
|
790d03d1b7da1dcc25982c68c84f0b3d13d04f5a
| 494
|
py
|
Python
|
moviemaker3/math/angle.py
|
friedrichromstedt/moviemaker3
|
7941a06d43bbbb63e45496044040a163ab97d78d
|
[
"MIT"
] | 1
|
2018-12-30T18:40:07.000Z
|
2018-12-30T18:40:07.000Z
|
moviemaker3/math/angle.py
|
friedrichromstedt/moviemaker3
|
7941a06d43bbbb63e45496044040a163ab97d78d
|
[
"MIT"
] | null | null | null |
moviemaker3/math/angle.py
|
friedrichromstedt/moviemaker3
|
7941a06d43bbbb63e45496044040a163ab97d78d
|
[
"MIT"
] | null | null | null |
import numpy
from fframework import asfunction, OpFunction
__all__ = ['Angle']
class Angle(OpFunction):
"""Transforms a mesh into the angle of the mesh to the x axis."""
def __init__(self, mesh):
"""*mesh* is the mesh Function."""
self.mesh = asfunction(mesh)
def __call__(self, ps):
"""Returns the arctan2. The (y, x) coordinate is in the last
dimension."""
meshT = self.mesh(ps).T
return numpy.arctan2(meshT[0], meshT[1]).T
| 24.7
| 70
| 0.621457
|
import numpy
from fframework import asfunction, OpFunction
__all__ = ['Angle']
class Angle(OpFunction):
def __init__(self, mesh):
self.mesh = asfunction(mesh)
def __call__(self, ps):
meshT = self.mesh(ps).T
return numpy.arctan2(meshT[0], meshT[1]).T
| true
| true
|
790d0461ac16c3aab6091d7c9573443d7faa76fe
| 2,047
|
py
|
Python
|
python3/koans/about_dice_project.py
|
benrki/python_koans
|
501bdf1c942bf543d36f8db3b9f9586205697b59
|
[
"MIT"
] | null | null | null |
python3/koans/about_dice_project.py
|
benrki/python_koans
|
501bdf1c942bf543d36f8db3b9f9586205697b59
|
[
"MIT"
] | null | null | null |
python3/koans/about_dice_project.py
|
benrki/python_koans
|
501bdf1c942bf543d36f8db3b9f9586205697b59
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet:
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
# Needs implementing!
# Tip: random.randint(min, max) can be used to generate random numbers
        self._values = [random.randint(1, 6) for _ in range(n)]
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(value >= 1 and value <= 6, "value " +
str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time,
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
        # One option: roll two different DiceSet instances and check that each
        # produced its own set of values.
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
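    # --- Illustrative sketch (not part of the original koan): one possible answer to the
    # THINK ABOUT IT question above -- keep re-rolling, with a bounded number of attempts,
    # so that a legitimately equal pair of random rolls cannot fail the test.
    def test_rolls_eventually_differ_sketch(self):
        dice = DiceSet()
        dice.roll(5)
        first_time = dice.values
        for _ in range(10):
            dice.roll(5)
            if dice.values != first_time:
                break
        self.assertNotEqual(first_time, dice.values)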
| 28.041096
| 78
| 0.618955
|
from runner.koan import *
import random
class DiceSet:
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
        self._values = [random.randint(1, 6) for _ in range(n)]
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(value >= 1 and value <= 6, "value " +
str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time,
"Two rolls should not be equal")
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
| true
| true
|
790d0606b205cdb3b56bbfcdcde511f028d7d6f3
| 16,952
|
py
|
Python
|
gui_calculator.py
|
Eqwe-wewe/accounting-calculator
|
02b9f830f116435e42dae84096fc5e326acf21db
|
[
"MIT"
] | 1
|
2022-02-22T14:12:58.000Z
|
2022-02-22T14:12:58.000Z
|
gui_calculator.py
|
Eqwe-Wewe/accounting-calc
|
02b9f830f116435e42dae84096fc5e326acf21db
|
[
"MIT"
] | null | null | null |
gui_calculator.py
|
Eqwe-Wewe/accounting-calc
|
02b9f830f116435e42dae84096fc5e326acf21db
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculator2.ui'
#
# Created by: PyQt5 UI code generator 5.15.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.setFixedSize(QtCore.QSize(471, 400))
MainWindow.setTabletTracking(False)
MainWindow.setDockNestingEnabled(False)
MainWindow.setUnifiedTitleAndToolBarOnMac(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(471, 390))
self.centralwidget.setMaximumSize(QtCore.QSize(471, 390))
self.centralwidget.setObjectName("centralwidget")
self.lcdNumber = QtWidgets.QLCDNumber(self.centralwidget)
self.lcdNumber.setGeometry(QtCore.QRect(10, 40, 451, 101))
self.lcdNumber.setStyleSheet("background-color: rgb(255, 255, 255);")
self.lcdNumber.setFrameShape(QtWidgets.QFrame.Box)
self.lcdNumber.setSmallDecimalPoint(False)
self.lcdNumber.setDigitCount(14)
self.lcdNumber.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
self.lcdNumber.setObjectName("lcdNumber")
self.num_1 = QtWidgets.QPushButton(self.centralwidget)
self.num_1.setGeometry(QtCore.QRect(10, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_1.setFont(font)
self.num_1.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_1.setObjectName("num_1")
self.buttonGroup = QtWidgets.QButtonGroup(MainWindow)
self.buttonGroup.setObjectName("buttonGroup")
self.buttonGroup.addButton(self.num_1)
self.num_2 = QtWidgets.QPushButton(self.centralwidget)
self.num_2.setGeometry(QtCore.QRect(100, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_2.setFont(font)
self.num_2.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_2.setObjectName("num_2")
self.buttonGroup.addButton(self.num_2)
self.num_3 = QtWidgets.QPushButton(self.centralwidget)
self.num_3.setGeometry(QtCore.QRect(190, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_3.setFont(font)
self.num_3.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_3.setObjectName("num_3")
self.buttonGroup.addButton(self.num_3)
self.num_plus = QtWidgets.QPushButton(self.centralwidget)
self.num_plus.setGeometry(QtCore.QRect(280, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_plus.setFont(font)
self.num_plus.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_plus.setObjectName("num_plus")
self.num_4 = QtWidgets.QPushButton(self.centralwidget)
self.num_4.setGeometry(QtCore.QRect(10, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_4.setFont(font)
self.num_4.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_4.setObjectName("num_4")
self.buttonGroup.addButton(self.num_4)
self.num_5 = QtWidgets.QPushButton(self.centralwidget)
self.num_5.setGeometry(QtCore.QRect(100, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_5.setFont(font)
self.num_5.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_5.setObjectName("num_5")
self.buttonGroup.addButton(self.num_5)
self.num_6 = QtWidgets.QPushButton(self.centralwidget)
self.num_6.setGeometry(QtCore.QRect(190, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_6.setFont(font)
self.num_6.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_6.setObjectName("num_6")
self.buttonGroup.addButton(self.num_6)
self.num_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_minus.setGeometry(QtCore.QRect(280, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_minus.setFont(font)
self.num_minus.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_minus.setObjectName("num_minus")
self.num_7 = QtWidgets.QPushButton(self.centralwidget)
self.num_7.setGeometry(QtCore.QRect(10, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_7.setFont(font)
self.num_7.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_7.setObjectName("num_7")
self.buttonGroup.addButton(self.num_7)
self.num_8 = QtWidgets.QPushButton(self.centralwidget)
self.num_8.setGeometry(QtCore.QRect(100, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_8.setFont(font)
self.num_8.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_8.setObjectName("num_8")
self.buttonGroup.addButton(self.num_8)
self.num_9 = QtWidgets.QPushButton(self.centralwidget)
self.num_9.setGeometry(QtCore.QRect(190, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_9.setFont(font)
self.num_9.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_9.setObjectName("num_9")
self.buttonGroup.addButton(self.num_9)
self.num_mult = QtWidgets.QPushButton(self.centralwidget)
self.num_mult.setGeometry(QtCore.QRect(280, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mult.setFont(font)
self.num_mult.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_mult.setObjectName("num_mult")
self.num_point = QtWidgets.QPushButton(self.centralwidget)
self.num_point.setGeometry(QtCore.QRect(10, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_point.setFont(font)
self.num_point.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_point.setObjectName("num_point")
self.buttonGroup.addButton(self.num_point)
self.num_0 = QtWidgets.QPushButton(self.centralwidget)
self.num_0.setGeometry(QtCore.QRect(100, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_0.setFont(font)
self.num_0.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_0.setObjectName("num_0")
self.buttonGroup.addButton(self.num_0)
self.num_eq = QtWidgets.QPushButton(self.centralwidget)
self.num_eq.setGeometry(QtCore.QRect(370, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_eq.setFont(font)
self.num_eq.setStyleSheet(
"background-color: rgb(170, 0, 0);\n"
"color: rgb(255, 255, 255);")
self.num_eq.setObjectName("num_eq")
self.num_division = QtWidgets.QPushButton(self.centralwidget)
self.num_division.setGeometry(QtCore.QRect(280, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_division.setFont(font)
self.num_division.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_division.setObjectName("num_division")
self.num_c = QtWidgets.QPushButton(self.centralwidget)
self.num_c.setGeometry(QtCore.QRect(370, 150, 91, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_c.setFont(font)
self.num_c.setStyleSheet(
"background-color: rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);")
self.num_c.setObjectName("num_c")
self.num_ce = QtWidgets.QPushButton(self.centralwidget)
self.num_ce.setGeometry(QtCore.QRect(280, 150, 91, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_ce.setFont(font)
self.num_ce.setStyleSheet(
"background-color: rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);")
self.num_ce.setShortcut("")
self.num_ce.setAutoDefault(False)
self.num_ce.setDefault(False)
self.num_ce.setFlat(False)
self.num_ce.setObjectName("num_ce")
self.num_backspace = QtWidgets.QPushButton(self.centralwidget)
self.num_backspace.setGeometry(QtCore.QRect(370, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(30)
self.num_backspace.setFont(font)
self.num_backspace.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_backspace.setObjectName("num_backspace")
self.num_procent = QtWidgets.QPushButton(self.centralwidget)
self.num_procent.setGeometry(QtCore.QRect(370, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_procent.setFont(font)
self.num_procent.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_procent.setObjectName("num_procent")
self.num_plus_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_plus_minus.setGeometry(QtCore.QRect(190, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_plus_minus.setFont(font)
self.num_plus_minus.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_plus_minus.setObjectName("num_plus_minus")
self.buttonGroup.addButton(self.num_plus_minus)
self.history = QtWidgets.QLabel(self.centralwidget)
self.history.setGeometry(QtCore.QRect(10, 10, 451, 21))
font = QtGui.QFont()
font.setPointSize(12)
self.history.setFont(font)
self.history.setLayoutDirection(QtCore.Qt.LeftToRight)
self.history.setText("")
self.history.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.history.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.history.setObjectName("history")
self.num_mc = QtWidgets.QPushButton(self.centralwidget)
self.num_mc.setGeometry(QtCore.QRect(10, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mc.setFont(font)
self.num_mc.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_mc.setObjectName("num_mc")
self.num_mr = QtWidgets.QPushButton(self.centralwidget)
self.num_mr.setGeometry(QtCore.QRect(77, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mr.setFont(font)
self.num_mr.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_mr.setObjectName("num_mr")
self.num_m_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_m_minus.setGeometry(QtCore.QRect(144, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_m_minus.setFont(font)
self.num_m_minus.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_m_minus.setObjectName("num_m_minus")
self.num_sqrt = QtWidgets.QPushButton(self.centralwidget)
self.num_sqrt.setGeometry(QtCore.QRect(370, 240, 91, 51))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(20)
font.setBold(False)
font.setWeight(50)
self.num_sqrt.setFont(font)
self.num_sqrt.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_sqrt.setObjectName("num_sqrt")
self.num_m_plus = QtWidgets.QPushButton(self.centralwidget)
self.num_m_plus.setGeometry(QtCore.QRect(211, 150, 70, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_m_plus.setFont(font)
self.num_m_plus.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_m_plus.setObjectName("num_m_plus")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(15, 43, 20, 20))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.lcdNumber.raise_()
self.history.raise_()
self.num_mc.raise_()
self.num_mr.raise_()
self.num_m_minus.raise_()
self.num_m_plus.raise_()
self.num_ce.raise_()
self.num_c.raise_()
self.num_7.raise_()
self.num_8.raise_()
self.num_9.raise_()
self.num_plus.raise_()
self.num_backspace.raise_()
self.num_4.raise_()
self.num_5.raise_()
self.num_6.raise_()
self.num_1.raise_()
self.num_2.raise_()
self.num_3.raise_()
self.num_point.raise_()
self.num_0.raise_()
self.num_minus.raise_()
self.num_mult.raise_()
self.num_plus_minus.raise_()
self.num_division.raise_()
self.label.raise_()
self.num_sqrt.raise_()
self.num_procent.raise_()
self.num_eq.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Калькулятор v1.1"))
self.num_1.setText(_translate("MainWindow", "1"))
self.num_2.setText(_translate("MainWindow", "2"))
self.num_3.setText(_translate("MainWindow", "3"))
self.num_plus.setText(_translate("MainWindow", "+"))
self.num_4.setText(_translate("MainWindow", "4"))
self.num_5.setText(_translate("MainWindow", "5"))
self.num_6.setText(_translate("MainWindow", "6"))
self.num_minus.setText(_translate("MainWindow", "-"))
self.num_7.setText(_translate("MainWindow", "7"))
self.num_8.setText(_translate("MainWindow", "8"))
self.num_9.setText(_translate("MainWindow", "9"))
self.num_mult.setText(_translate("MainWindow", "*"))
self.num_point.setText(_translate("MainWindow", "."))
self.num_0.setText(_translate("MainWindow", "0"))
self.num_eq.setText(_translate("MainWindow", "="))
self.num_division.setText(_translate("MainWindow", "÷"))
self.num_c.setText(_translate("MainWindow", "C"))
self.num_ce.setText(_translate("MainWindow", "CE"))
self.num_backspace.setText(_translate("MainWindow", "←"))
self.num_procent.setText(_translate("MainWindow", "%"))
self.num_plus_minus.setText(_translate("MainWindow", "+/-"))
self.num_mc.setText(_translate("MainWindow", "MC"))
self.num_mr.setText(_translate("MainWindow", "MR"))
self.num_m_minus.setText(_translate("MainWindow", "M-"))
self.num_sqrt.setText(_translate("MainWindow", "√"))
self.num_m_plus.setText(_translate("MainWindow", "M+"))
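# --- Illustrative launch sketch (not emitted by pyuic5 here; standard PyQt5 boilerplate) ---
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())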
| 45.326203
| 80
| 0.616505
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.setFixedSize(QtCore.QSize(471, 400))
MainWindow.setTabletTracking(False)
MainWindow.setDockNestingEnabled(False)
MainWindow.setUnifiedTitleAndToolBarOnMac(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(471, 390))
self.centralwidget.setMaximumSize(QtCore.QSize(471, 390))
self.centralwidget.setObjectName("centralwidget")
self.lcdNumber = QtWidgets.QLCDNumber(self.centralwidget)
self.lcdNumber.setGeometry(QtCore.QRect(10, 40, 451, 101))
self.lcdNumber.setStyleSheet("background-color: rgb(255, 255, 255);")
self.lcdNumber.setFrameShape(QtWidgets.QFrame.Box)
self.lcdNumber.setSmallDecimalPoint(False)
self.lcdNumber.setDigitCount(14)
self.lcdNumber.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
self.lcdNumber.setObjectName("lcdNumber")
self.num_1 = QtWidgets.QPushButton(self.centralwidget)
self.num_1.setGeometry(QtCore.QRect(10, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_1.setFont(font)
self.num_1.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_1.setObjectName("num_1")
self.buttonGroup = QtWidgets.QButtonGroup(MainWindow)
self.buttonGroup.setObjectName("buttonGroup")
self.buttonGroup.addButton(self.num_1)
self.num_2 = QtWidgets.QPushButton(self.centralwidget)
self.num_2.setGeometry(QtCore.QRect(100, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_2.setFont(font)
self.num_2.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_2.setObjectName("num_2")
self.buttonGroup.addButton(self.num_2)
self.num_3 = QtWidgets.QPushButton(self.centralwidget)
self.num_3.setGeometry(QtCore.QRect(190, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_3.setFont(font)
self.num_3.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_3.setObjectName("num_3")
self.buttonGroup.addButton(self.num_3)
self.num_plus = QtWidgets.QPushButton(self.centralwidget)
self.num_plus.setGeometry(QtCore.QRect(280, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_plus.setFont(font)
self.num_plus.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_plus.setObjectName("num_plus")
self.num_4 = QtWidgets.QPushButton(self.centralwidget)
self.num_4.setGeometry(QtCore.QRect(10, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_4.setFont(font)
self.num_4.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_4.setObjectName("num_4")
self.buttonGroup.addButton(self.num_4)
self.num_5 = QtWidgets.QPushButton(self.centralwidget)
self.num_5.setGeometry(QtCore.QRect(100, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_5.setFont(font)
self.num_5.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_5.setObjectName("num_5")
self.buttonGroup.addButton(self.num_5)
self.num_6 = QtWidgets.QPushButton(self.centralwidget)
self.num_6.setGeometry(QtCore.QRect(190, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_6.setFont(font)
self.num_6.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_6.setObjectName("num_6")
self.buttonGroup.addButton(self.num_6)
self.num_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_minus.setGeometry(QtCore.QRect(280, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_minus.setFont(font)
self.num_minus.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_minus.setObjectName("num_minus")
self.num_7 = QtWidgets.QPushButton(self.centralwidget)
self.num_7.setGeometry(QtCore.QRect(10, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_7.setFont(font)
self.num_7.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_7.setObjectName("num_7")
self.buttonGroup.addButton(self.num_7)
self.num_8 = QtWidgets.QPushButton(self.centralwidget)
self.num_8.setGeometry(QtCore.QRect(100, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_8.setFont(font)
self.num_8.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_8.setObjectName("num_8")
self.buttonGroup.addButton(self.num_8)
self.num_9 = QtWidgets.QPushButton(self.centralwidget)
self.num_9.setGeometry(QtCore.QRect(190, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_9.setFont(font)
self.num_9.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_9.setObjectName("num_9")
self.buttonGroup.addButton(self.num_9)
self.num_mult = QtWidgets.QPushButton(self.centralwidget)
self.num_mult.setGeometry(QtCore.QRect(280, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mult.setFont(font)
self.num_mult.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_mult.setObjectName("num_mult")
self.num_point = QtWidgets.QPushButton(self.centralwidget)
self.num_point.setGeometry(QtCore.QRect(10, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_point.setFont(font)
self.num_point.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_point.setObjectName("num_point")
self.buttonGroup.addButton(self.num_point)
self.num_0 = QtWidgets.QPushButton(self.centralwidget)
self.num_0.setGeometry(QtCore.QRect(100, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_0.setFont(font)
self.num_0.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_0.setObjectName("num_0")
self.buttonGroup.addButton(self.num_0)
self.num_eq = QtWidgets.QPushButton(self.centralwidget)
self.num_eq.setGeometry(QtCore.QRect(370, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_eq.setFont(font)
self.num_eq.setStyleSheet(
"background-color: rgb(170, 0, 0);\n"
"color: rgb(255, 255, 255);")
self.num_eq.setObjectName("num_eq")
self.num_division = QtWidgets.QPushButton(self.centralwidget)
self.num_division.setGeometry(QtCore.QRect(280, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_division.setFont(font)
self.num_division.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_division.setObjectName("num_division")
self.num_c = QtWidgets.QPushButton(self.centralwidget)
self.num_c.setGeometry(QtCore.QRect(370, 150, 91, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_c.setFont(font)
self.num_c.setStyleSheet(
"background-color: rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);")
self.num_c.setObjectName("num_c")
self.num_ce = QtWidgets.QPushButton(self.centralwidget)
self.num_ce.setGeometry(QtCore.QRect(280, 150, 91, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_ce.setFont(font)
self.num_ce.setStyleSheet(
"background-color: rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);")
self.num_ce.setShortcut("")
self.num_ce.setAutoDefault(False)
self.num_ce.setDefault(False)
self.num_ce.setFlat(False)
self.num_ce.setObjectName("num_ce")
self.num_backspace = QtWidgets.QPushButton(self.centralwidget)
self.num_backspace.setGeometry(QtCore.QRect(370, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(30)
self.num_backspace.setFont(font)
self.num_backspace.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_backspace.setObjectName("num_backspace")
self.num_procent = QtWidgets.QPushButton(self.centralwidget)
self.num_procent.setGeometry(QtCore.QRect(370, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_procent.setFont(font)
self.num_procent.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_procent.setObjectName("num_procent")
self.num_plus_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_plus_minus.setGeometry(QtCore.QRect(190, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_plus_minus.setFont(font)
self.num_plus_minus.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_plus_minus.setObjectName("num_plus_minus")
self.buttonGroup.addButton(self.num_plus_minus)
self.history = QtWidgets.QLabel(self.centralwidget)
self.history.setGeometry(QtCore.QRect(10, 10, 451, 21))
font = QtGui.QFont()
font.setPointSize(12)
self.history.setFont(font)
self.history.setLayoutDirection(QtCore.Qt.LeftToRight)
self.history.setText("")
self.history.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.history.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.history.setObjectName("history")
self.num_mc = QtWidgets.QPushButton(self.centralwidget)
self.num_mc.setGeometry(QtCore.QRect(10, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mc.setFont(font)
self.num_mc.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_mc.setObjectName("num_mc")
self.num_mr = QtWidgets.QPushButton(self.centralwidget)
self.num_mr.setGeometry(QtCore.QRect(77, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mr.setFont(font)
self.num_mr.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_mr.setObjectName("num_mr")
self.num_m_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_m_minus.setGeometry(QtCore.QRect(144, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_m_minus.setFont(font)
self.num_m_minus.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_m_minus.setObjectName("num_m_minus")
self.num_sqrt = QtWidgets.QPushButton(self.centralwidget)
self.num_sqrt.setGeometry(QtCore.QRect(370, 240, 91, 51))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(20)
font.setBold(False)
font.setWeight(50)
self.num_sqrt.setFont(font)
self.num_sqrt.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_sqrt.setObjectName("num_sqrt")
self.num_m_plus = QtWidgets.QPushButton(self.centralwidget)
self.num_m_plus.setGeometry(QtCore.QRect(211, 150, 70, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_m_plus.setFont(font)
self.num_m_plus.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_m_plus.setObjectName("num_m_plus")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(15, 43, 20, 20))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.lcdNumber.raise_()
self.history.raise_()
self.num_mc.raise_()
self.num_mr.raise_()
self.num_m_minus.raise_()
self.num_m_plus.raise_()
self.num_ce.raise_()
self.num_c.raise_()
self.num_7.raise_()
self.num_8.raise_()
self.num_9.raise_()
self.num_plus.raise_()
self.num_backspace.raise_()
self.num_4.raise_()
self.num_5.raise_()
self.num_6.raise_()
self.num_1.raise_()
self.num_2.raise_()
self.num_3.raise_()
self.num_point.raise_()
self.num_0.raise_()
self.num_minus.raise_()
self.num_mult.raise_()
self.num_plus_minus.raise_()
self.num_division.raise_()
self.label.raise_()
self.num_sqrt.raise_()
self.num_procent.raise_()
self.num_eq.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Калькулятор v1.1"))
self.num_1.setText(_translate("MainWindow", "1"))
self.num_2.setText(_translate("MainWindow", "2"))
self.num_3.setText(_translate("MainWindow", "3"))
self.num_plus.setText(_translate("MainWindow", "+"))
self.num_4.setText(_translate("MainWindow", "4"))
self.num_5.setText(_translate("MainWindow", "5"))
self.num_6.setText(_translate("MainWindow", "6"))
self.num_minus.setText(_translate("MainWindow", "-"))
self.num_7.setText(_translate("MainWindow", "7"))
self.num_8.setText(_translate("MainWindow", "8"))
self.num_9.setText(_translate("MainWindow", "9"))
self.num_mult.setText(_translate("MainWindow", "*"))
self.num_point.setText(_translate("MainWindow", "."))
self.num_0.setText(_translate("MainWindow", "0"))
self.num_eq.setText(_translate("MainWindow", "="))
self.num_division.setText(_translate("MainWindow", "÷"))
self.num_c.setText(_translate("MainWindow", "C"))
self.num_ce.setText(_translate("MainWindow", "CE"))
self.num_backspace.setText(_translate("MainWindow", "←"))
self.num_procent.setText(_translate("MainWindow", "%"))
self.num_plus_minus.setText(_translate("MainWindow", "+/-"))
self.num_mc.setText(_translate("MainWindow", "MC"))
self.num_mr.setText(_translate("MainWindow", "MR"))
self.num_m_minus.setText(_translate("MainWindow", "M-"))
self.num_sqrt.setText(_translate("MainWindow", "√"))
self.num_m_plus.setText(_translate("MainWindow", "M+"))
| true
| true
|
790d074322597c94718db1631ea332656c11063e
| 5,431
|
py
|
Python
|
Python/env/Lib/site-packages/mysqlx/authentication.py
|
D12-ctrl/ProyectoFinal
|
666047042308750d581328e32967f502a4476948
|
[
"MIT"
] | 3
|
2021-01-07T18:27:35.000Z
|
2021-01-13T19:15:01.000Z
|
Python/env/Lib/site-packages/mysqlx/authentication.py
|
D12-ctrl/ProyectoFinal
|
666047042308750d581328e32967f502a4476948
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/mysqlx/authentication.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 4
|
2021-07-13T19:44:06.000Z
|
2021-08-13T07:49:35.000Z
|
# Copyright (c) 2016, 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementation of MySQL Authentication Plugin."""
import hashlib
import struct
from .helpers import hexlify
def xor_string(hash1, hash2, hash_size):
"""Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
    Args:
        hash1 (str): The first hash.
        hash2 (str): The second hash.
        hash_size (int): The hash size.
Returns:
str: A string with the xor applied.
"""
xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
return struct.pack("{0}B".format(hash_size), *xored)
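# A minimal illustrative sketch, not part of the original connector code:
# XOR-ing twice with the same key restores the input, which is why the same
# xor_string helper serves for both scrambling and unscrambling. The key and
# payload below are assumed example values only.
def _example_xor_string_roundtrip():
    key = hashlib.sha1(b"server-nonce").digest()        # 20-byte key
    payload = hashlib.sha1(b"client-secret").digest()   # 20-byte payload
    scrambled = xor_string(payload, key, 20)
    restored = xor_string(scrambled, key, 20)
    return restored == payload  # True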
class BaseAuthPlugin(object):
"""Base class for implementing the authentication plugins."""
def __init__(self, username=None, password=None):
self._username = username
self._password = password
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
raise NotImplementedError
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
raise NotImplementedError
class MySQL41AuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL Native Password authentication plugin."""
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
return "MySQL 4.1 Authentication Plugin"
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
return "MYSQL41"
def auth_data(self, data):
"""Hashing for MySQL 4.1 authentication.
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
"""
if self._password:
password = self._password.encode("utf-8") \
if isinstance(self._password, str) else self._password
hash1 = hashlib.sha1(password).digest()
hash2 = hashlib.sha1(hash1).digest()
xored = xor_string(hash1, hashlib.sha1(data + hash2).digest(), 20)
return "{0}\0{1}\0*{2}\0".format("", self._username, hexlify(xored))
return "{0}\0{1}\0".format("", self._username)
class PlainAuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL Plain authentication plugin."""
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
return "Plain Authentication Plugin"
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
return "PLAIN"
def auth_data(self):
"""Returns the authentication data.
Returns:
str: The authentication data.
"""
return "\0{0}\0{1}".format(self._username, self._password)
class Sha256MemoryAuthPlugin(BaseAuthPlugin):
"""Class implementing the SHA256_MEMORY authentication plugin."""
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
return "SHA256_MEMORY Authentication Plugin"
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
return "SHA256_MEMORY"
def auth_data(self, data):
"""Hashing for SHA256_MEMORY authentication.
The scramble is of the form:
SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
"""
password = self._password.encode("utf-8") \
if isinstance(self._password, str) else self._password
hash1 = hashlib.sha256(password).digest()
hash2 = hashlib.sha256(hashlib.sha256(hash1).digest() + data).digest()
xored = xor_string(hash2, hash1, 32)
return "\0{0}\0{1}".format(self._username, hexlify(xored))
| 31.034286
| 80
| 0.643528
|
import hashlib
import struct
from .helpers import hexlify
def xor_string(hash1, hash2, hash_size):
xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
return struct.pack("{0}B".format(hash_size), *xored)
class BaseAuthPlugin(object):
def __init__(self, username=None, password=None):
self._username = username
self._password = password
def name(self):
raise NotImplementedError
def auth_name(self):
raise NotImplementedError
class MySQL41AuthPlugin(BaseAuthPlugin):
def name(self):
return "MySQL 4.1 Authentication Plugin"
def auth_name(self):
return "MYSQL41"
def auth_data(self, data):
if self._password:
password = self._password.encode("utf-8") \
if isinstance(self._password, str) else self._password
hash1 = hashlib.sha1(password).digest()
hash2 = hashlib.sha1(hash1).digest()
xored = xor_string(hash1, hashlib.sha1(data + hash2).digest(), 20)
return "{0}\0{1}\0*{2}\0".format("", self._username, hexlify(xored))
return "{0}\0{1}\0".format("", self._username)
class PlainAuthPlugin(BaseAuthPlugin):
def name(self):
return "Plain Authentication Plugin"
def auth_name(self):
return "PLAIN"
def auth_data(self):
return "\0{0}\0{1}".format(self._username, self._password)
class Sha256MemoryAuthPlugin(BaseAuthPlugin):
def name(self):
return "SHA256_MEMORY Authentication Plugin"
def auth_name(self):
return "SHA256_MEMORY"
def auth_data(self, data):
password = self._password.encode("utf-8") \
if isinstance(self._password, str) else self._password
hash1 = hashlib.sha256(password).digest()
hash2 = hashlib.sha256(hashlib.sha256(hash1).digest() + data).digest()
xored = xor_string(hash2, hash1, 32)
return "\0{0}\0{1}".format(self._username, hexlify(xored))
| true
| true
|
790d08711e2336cba8b696ba7cafeb010a704e0b
| 29,715
|
py
|
Python
|
det3d/core/bbox/box_np_ops.py
|
motional/polarstream
|
74af9548cad69a4f546b83dae7b87454bc590c9e
|
[
"MIT"
] | 9
|
2022-03-29T04:53:14.000Z
|
2022-03-30T02:29:28.000Z
|
det3d/core/bbox/box_np_ops.py
|
motional/polarstream
|
74af9548cad69a4f546b83dae7b87454bc590c9e
|
[
"MIT"
] | null | null | null |
det3d/core/bbox/box_np_ops.py
|
motional/polarstream
|
74af9548cad69a4f546b83dae7b87454bc590c9e
|
[
"MIT"
] | 1
|
2022-03-29T04:31:53.000Z
|
2022-03-29T04:31:53.000Z
|
from pathlib import Path
import numba
import numpy as np
from det3d.core.bbox.geometry import (
points_count_convex_polygon_3d_jit,
points_in_convex_polygon_3d_jit,
)
try:
from spconv.utils import rbbox_intersection, rbbox_iou
except ImportError:
    print("Import spconv failed, no support for sparse convolution!")
def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
rbbox_corners = center_to_corner_box3d(
rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
)
surfaces = corner_to_surfaces_3d(rbbox_corners)
return points_count_convex_polygon_3d_jit(points[:, :3], surfaces)
def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    # less than 50ms when used in SECOND with one thread; 10x slower than the gpu version
boxes_corners = center_to_corner_box2d(
rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
)
boxes_standup = corner_to_standup_nd(boxes_corners)
qboxes_corners = center_to_corner_box2d(
qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
)
qboxes_standup = corner_to_standup_nd(qboxes_corners)
    # if the standup boxes do not overlap, the rotated boxes cannot overlap either.
standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh)
def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    # less than 50ms when used in SECOND with one thread; 10x slower than the gpu version
boxes_corners = center_to_corner_box2d(
rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
)
boxes_standup = corner_to_standup_nd(boxes_corners)
qboxes_corners = center_to_corner_box2d(
qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
)
qboxes_standup = corner_to_standup_nd(qboxes_corners)
    # if the standup boxes do not overlap, the rotated boxes cannot overlap either.
standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
return rbbox_intersection(
boxes_corners, qboxes_corners, standup_iou, standup_thresh
)
def corners_nd(dims, origin=0.5):
"""generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
        origin (list or array or float): origin point relative to the smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
"""
ndim = int(dims.shape[1])
corners_norm = np.stack(
np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1
).astype(dims.dtype)
# now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
# (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
    # so we need to convert it to a format that is convenient for further computation.
    # for 2d boxes, the format is clockwise, starting from the minimum point.
    # for 3d boxes, please draw the lines by hand.
if ndim == 2:
# generate clockwise box corners
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim])
return corners
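# A minimal illustrative sketch, not part of the original module: the corner
# layout described in the docstring above, for one 2d box of size 2 x 1 with
# origin=0.5 (i.e. centred on its own centre). The expected array follows the
# clockwise ordering x0y0, x0y1, x1y1, x1y0.
def _example_corners_nd_layout():
    dims = np.array([[2.0, 1.0]])
    corners = corners_nd(dims, origin=0.5)  # shape [1, 4, 2]
    expected = np.array([[[-1.0, -0.5], [-1.0, 0.5], [1.0, 0.5], [1.0, -0.5]]])
    return np.allclose(corners, expected)   # True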
@numba.njit
def corners_2d_jit(dims, origin=0.5):
ndim = 2
corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype)
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
return corners
@numba.njit
def corners_3d_jit(dims, origin=0.5):
ndim = 3
corners_norm = np.array(
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
dtype=dims.dtype,
).reshape((8, 3))
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
return corners
@numba.njit
def corner_to_standup_nd_jit(boxes_corner):
num_boxes = boxes_corner.shape[0]
ndim = boxes_corner.shape[-1]
result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
for i in range(num_boxes):
for j in range(ndim):
result[i, j] = np.min(boxes_corner[i, :, j])
for j in range(ndim):
result[i, j + ndim] = np.max(boxes_corner[i, :, j])
return result
def corner_to_standup_nd(boxes_corner):
assert len(boxes_corner.shape) == 3
standup_boxes = []
standup_boxes.append(np.min(boxes_corner, axis=1))
standup_boxes.append(np.max(boxes_corner, axis=1))
return np.concatenate(standup_boxes, -1)
def rbbox2d_to_near_bbox(rbboxes):
"""convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
"""
rots = rbboxes[..., -1]
rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
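# A minimal illustrative sketch, not part of the original module: a box rotated
# by roughly 90 degrees gets its x/y extents swapped before the conversion to an
# axis-aligned (xmin, ymin, xmax, ymax) box, as described in the docstring above.
def _example_rbbox2d_to_near_bbox():
    rbboxes = np.array([[0.0, 0.0, 4.0, 2.0, 0.0],         # axis aligned
                        [0.0, 0.0, 4.0, 2.0, np.pi / 2]])  # rotated by 90 degrees
    return rbbox2d_to_near_bbox(rbboxes)
    # -> [[-2., -1., 2., 1.],
    #     [-1., -2., 1., 2.]]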
def rotation_3d_in_axis(points, angles, axis=0):
# points: [N, point_size, 3]
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
ones = np.ones_like(rot_cos)
zeros = np.zeros_like(rot_cos)
if axis == 1:
rot_mat_T = np.stack(
[
[rot_cos, zeros, -rot_sin],
[zeros, ones, zeros],
[rot_sin, zeros, rot_cos],
]
)
elif axis == 2 or axis == -1:
rot_mat_T = np.stack(
[
[rot_cos, -rot_sin, zeros],
[rot_sin, rot_cos, zeros],
[zeros, zeros, ones],
]
)
elif axis == 0:
rot_mat_T = np.stack(
[
[zeros, rot_cos, -rot_sin],
[zeros, rot_sin, rot_cos],
[ones, zeros, zeros],
]
)
else:
raise ValueError("axis should in range")
return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_points_single_angle(points, angle, axis=0):
# points: [N, 3]
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
if axis == 1:
rot_mat_T = np.array(
[[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],
dtype=points.dtype,
)
elif axis == 2 or axis == -1:
rot_mat_T = np.array(
[[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
dtype=points.dtype,
)
elif axis == 0:
rot_mat_T = np.array(
[[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],
dtype=points.dtype,
)
else:
raise ValueError("axis should in range")
return points @ rot_mat_T
def rotation_2d(points, angles):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_box(box_corners, angle):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T = np.array(
[[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype
)
return box_corners @ rot_mat_T
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
"""convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
        origin (list or array or float): origin point relative to the smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
        float array, shape=[N, 8, 3]: box corners in 3d.
"""
# 'length' in kitti format is in x axis.
# yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 8, 3]
if angles is not None:
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.reshape([-1, 1, 3])
return corners
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
"""convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
        float array, shape=[N, 4, 2]: box corners in 2d.
"""
# 'length' in kitti format is in x axis.
# xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 4, 2]
if angles is not None:
corners = rotation_2d(corners, angles)
corners += centers.reshape([-1, 1, 2])
return corners
@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
num_box = boxes.shape[0]
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
for i in range(num_box):
rot_sin = np.sin(boxes[i, -1])
rot_cos = np.cos(boxes[i, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
return box_corners
def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2):
return center_to_corner_box3d(
rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis
)
def rbbox3d_to_bev_corners(rbboxes, origin=0.5):
return center_to_corner_box2d(
rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin
)
def minmax_to_corner_2d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center
return center_to_corner_box2d(center, dims, origin=0.0)
def minmax_to_corner_2d_v2(minmax_box):
# N, 4 -> N 4 2
return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2)
def minmax_to_corner_3d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center
return center_to_corner_box3d(center, dims, origin=0.0)
def minmax_to_center_2d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center_min = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center_min
center = center_min + 0.5 * dims
return np.concatenate([center, dims], axis=-1)
def center_to_minmax_2d_0_5(centers, dims):
return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)
def center_to_minmax_2d(centers, dims, origin=0.5):
if origin == 0.5:
return center_to_minmax_2d_0_5(centers, dims)
corners = center_to_corner_box2d(centers, dims, origin=origin)
return corners[:, [0, 2]].reshape([-1, 4])
def limit_period(val, offset=0.5, period=np.pi):
return val - np.floor(val / period + offset) * period
def projection_matrix_to_CRT_kitti(proj):
# P = C @ [R|T]
    # C is an upper triangular matrix, so we invert CR and QR-decompose the
    # inverse; this is stable for all kitti camera projection matrices
CR = proj[0:3, 0:3]
CT = proj[0:3, 3]
RinvCinv = np.linalg.inv(CR)
Rinv, Cinv = np.linalg.qr(RinvCinv)
C = np.linalg.inv(Cinv)
R = np.linalg.inv(Rinv)
T = Cinv @ CT
return C, R, T
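# A minimal illustrative sketch, not part of the original module: rebuilding the
# projection matrix from the returned C, R, T confirms the factorization
# P = C @ [R|T]. The matrix below is an assumed stand-in for a KITTI P2 matrix.
def _example_projection_matrix_to_CRT():
    P = np.array([[721.5, 0.0, 609.6, 44.9],
                  [0.0, 721.5, 172.9, 0.2],
                  [0.0, 0.0, 1.0, 0.003]])
    C, R, T = projection_matrix_to_CRT_kitti(P)
    P_rebuilt = C @ np.concatenate([R, T[:, np.newaxis]], axis=1)
    return np.allclose(P, P_rebuilt)  # True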
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
b = bbox_image
box_corners = np.array(
[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
return ret_xyz
def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
num_box = bboxes.shape[0]
z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[
np.newaxis, :, np.newaxis
]
z_points = np.tile(z_points, [num_box, 1, 1])
box_corners = minmax_to_corner_2d_v2(bboxes)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1) # [8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=-1)
return ret_xyz
@numba.njit
def _add_rgb_to_points_kernel(points_2d, image, points_rgb):
num_points = points_2d.shape[0]
image_h, image_w = image.shape[:2]
for i in range(num_points):
img_pos = np.floor(points_2d[i]).astype(np.int32)
if img_pos[0] >= 0 and img_pos[0] < image_w:
if img_pos[1] >= 0 and img_pos[1] < image_h:
points_rgb[i, :] = image[img_pos[1], img_pos[0], :]
# image[img_pos[1], img_pos[0]] = 0
def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]):
kernel = np.ones(mean_size, np.float32) / np.prod(mean_size)
# image = cv2.filter2D(image, -1, kernel)
points_cam = lidar_to_camera(points[:, :3], rect, Trv2c)
points_2d = project_to_image(points_cam, P2)
points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype)
_add_rgb_to_points_kernel(points_2d, image, points_rgb)
return points_rgb
def project_to_image(points_3d, proj_mat):
points_shape = list(points_3d.shape)
points_shape[-1] = 1
points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1)
point_2d = points_4 @ proj_mat.T
point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
return point_2d_res
def camera_to_lidar(points, r_rect, velo2cam):
points_shape = list(points.shape[0:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
points_shape = list(points.shape[:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
camera_points = points @ (r_rect @ velo2cam).T
return camera_points[..., :3]
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
xyz_lidar = data[:, 0:3]
w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
return np.concatenate([xyz, l, h, w, r], axis=1)
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
# 5x faster than remove_outside_points_v1(2ms vs 10ms)
C, R, T = projection_matrix_to_CRT_kitti(P2)
image_bbox = [0, 0, image_shape[1], image_shape[0]]
frustum = get_frustum(image_bbox, C)
frustum -= T
frustum = np.linalg.inv(R) @ frustum.T
frustum = camera_to_lidar(frustum.T, rect, Trv2c)
frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
points = points[indices.reshape([-1])]
return points
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
"""calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * (
query_boxes[k, 3] - query_boxes[k, 1] + eps
)
for n in range(N):
iw = (
min(boxes[n, 2], query_boxes[k, 2])
- max(boxes[n, 0], query_boxes[k, 0])
+ eps
)
if iw > 0:
ih = (
min(boxes[n, 3], query_boxes[k, 3])
- max(boxes[n, 1], query_boxes[k, 1])
+ eps
)
if ih > 0:
ua = (
(boxes[n, 2] - boxes[n, 0] + eps)
* (boxes[n, 3] - boxes[n, 1] + eps)
+ box_area
- iw * ih
)
overlaps[n, k] = iw * ih / ua
return overlaps
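# A minimal illustrative sketch, not part of the original module: two 2x2 boxes
# overlapping in a 1x1 patch, evaluated with eps=0.0 so coordinates are treated
# as continuous rather than pixel indices.
def _example_iou_jit():
    boxes = np.array([[0.0, 0.0, 2.0, 2.0]])
    query_boxes = np.array([[1.0, 1.0, 3.0, 3.0]])
    return iou_jit(boxes, query_boxes, eps=0.0)  # -> [[1 / 7]] (inter 1, union 7)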
@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
"""calculate box iou3d,
----------
boxes: (N, 6) ndarray of float
query_boxes: (K, 6) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
for k in range(K):
box_area = (
(query_boxes[k, 3] - query_boxes[k, 0] + add1)
* (query_boxes[k, 4] - query_boxes[k, 1] + add1)
* (query_boxes[k, 5] - query_boxes[k, 2] + add1)
)
for n in range(N):
iw = (
min(boxes[n, 3], query_boxes[k, 3])
- max(boxes[n, 0], query_boxes[k, 0])
+ add1
)
if iw > 0:
ih = (
min(boxes[n, 4], query_boxes[k, 4])
- max(boxes[n, 1], query_boxes[k, 1])
+ add1
)
if ih > 0:
il = (
min(boxes[n, 5], query_boxes[k, 5])
- max(boxes[n, 2], query_boxes[k, 2])
+ add1
)
if il > 0:
ua = float(
(boxes[n, 3] - boxes[n, 0] + add1)
* (boxes[n, 4] - boxes[n, 1] + add1)
* (boxes[n, 5] - boxes[n, 2] + add1)
+ box_area
- iw * ih * il
)
overlaps[n, k] = iw * ih * il / ua
return overlaps
@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
"""calculate box iou nd, 2x slower than iou_jit.
----------
boxes: (N, ndim * 2) ndarray of float
query_boxes: (K, ndim * 2) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
ndim = boxes.shape[1] // 2
overlaps = np.zeros((N, K), dtype=boxes.dtype)
side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
invalid = False
for k in range(K):
qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1
for i in range(1, ndim):
qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1
for n in range(N):
invalid = False
for i in range(ndim):
side_length = (
min(boxes[n, i + ndim], query_boxes[k, i + ndim])
- max(boxes[n, i], query_boxes[k, i])
+ add1
)
if side_length <= 0:
invalid = True
break
side_lengths[i] = side_length
if not invalid:
box_area = boxes[n, ndim] - boxes[n, 0] + add1
for i in range(1, ndim):
box_area *= boxes[n, ndim + i] - boxes[n, i] + add1
inter = side_lengths[0]
for i in range(1, ndim):
inter *= side_lengths[i]
# inter = np.prod(side_lengths)
ua = float(box_area + qbox_area - inter)
overlaps[n, k] = inter / ua
return overlaps
def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
rbbox_corners = center_to_corner_box3d(
rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
)
surfaces = corner_to_surfaces_3d(rbbox_corners)
indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
return indices
def corner_to_surfaces_3d(corners):
"""convert 3d box corners from corner function above
    to surfaces whose normal vectors all point inward.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
surfaces = np.array(
[
[corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],
[corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],
[corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],
[corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],
[corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],
[corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],
]
).transpose([2, 0, 1, 3])
return surfaces
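# A minimal illustrative sketch, not part of the original module: the surfaces
# produced here feed points_in_convex_polygon_3d_jit, so a quick way to see that
# the normals point inward is to check that a box centre is reported as inside.
def _example_corner_to_surfaces_3d():
    corners = center_to_corner_box3d(np.zeros((1, 3)), 2.0 * np.ones((1, 3)), np.zeros(1))
    surfaces = corner_to_surfaces_3d(corners)
    inside = points_in_convex_polygon_3d_jit(np.zeros((1, 3)), surfaces)
    return bool(inside[0, 0])  # True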
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
"""convert 3d box corners from corner function above
    to surfaces whose normal vectors all point inward.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array(
[0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
return surfaces
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
    the center of the voxel is inside any gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_centers = voxel_origins + voxel_size * 0.5
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3] - voxel_size * 0.5,
gt_boxes[:, 3:6] + voxel_size,
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2,
)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
return np.any(ret, axis=1).astype(np.int64)
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
    the center of the voxel is inside any gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_maxes = voxel_origins + voxel_size
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
voxel_corners = minmax_to_corner_3d(voxel_minmax)
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2,
)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
voxel_corners_flat = voxel_corners.reshape([-1, 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([-1, 8, ret.shape[-1]])
return ret.any(-1).any(-1).astype(np.int64)
def image_box_region_area(img_cumsum, bbox):
"""check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = ID-IB-IC+IA
Args:
img_cumsum: [M, H, W](yx) cumsumed image.
bbox: [N, 4](xyxy) bounding box,
"""
N = bbox.shape[0]
M = img_cumsum.shape[0]
ret = np.zeros([N, M], dtype=img_cumsum.dtype)
ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
ret = ID - IB - IC + IA
return ret
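# A minimal illustrative sketch, not part of the original module: building the
# summed-area table with two cumulative sums and reading one box back via the
# inclusion/exclusion formula Iabcd = ID - IB - IC + IA from the docstring.
def _example_image_box_region_area():
    img = np.ones((1, 4, 4))                        # one channel, all ones
    img_cumsum = img.cumsum(axis=1).cumsum(axis=2)  # summed-area table, [M, H, W]
    bbox = np.array([[1, 1, 3, 3]])                 # (xmin, ymin, xmax, ymax)
    return image_box_region_area(img_cumsum, bbox)  # -> [[4.]] for the 2x2 region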
def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6):
x_vsize = voxel_size[0]
y_vsize = voxel_size[1]
max_x = points[:, 0].max()
max_y = points[:, 1].max()
min_x = points[:, 0].min()
min_y = points[:, 1].min()
max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample)
max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample)
min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample)
min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample)
max_x = np.minimum(max_x + margin, bound[2])
max_y = np.minimum(max_y + margin, bound[3])
min_x = np.maximum(min_x - margin, bound[0])
min_y = np.maximum(min_y - margin, bound[1])
return np.array([min_x, min_y, max_x, max_y])
def box3d_to_bbox(box3d, rect, Trv2c, P2):
box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c)
box_corners = center_to_corner_box3d(
box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1
)
box_corners_in_image = project_to_image(box_corners, P2)
# box_corners_in_image: [N, 8, 2]
minxy = np.min(box_corners_in_image, axis=1)
maxxy = np.max(box_corners_in_image, axis=1)
bbox = np.concatenate([minxy, maxxy], axis=1)
return bbox
def change_box3d_center_(box3d, src, dst):
dst = np.array(dst, dtype=box3d.dtype)
src = np.array(src, dtype=box3d.dtype)
box3d[..., :3] += box3d[..., 3:6] * (dst - src)
def encode_parts(relative_shifts):
parts = np.zeros((len(relative_shifts),), dtype=np.int32)
mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0)
parts[mask] = 0
mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0)
parts[mask] = 1
mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0)
parts[mask] = 2
mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0)
parts[mask] = 3
return parts
| 36.460123
| 88
| 0.57126
|
from pathlib import Path
import numba
import numpy as np
from det3d.core.bbox.geometry import (
points_count_convex_polygon_3d_jit,
points_in_convex_polygon_3d_jit,
)
try:
from spconv.utils import rbbox_intersection, rbbox_iou
except ImportError:
    print("Import spconv failed, no support for sparse convolution!")
def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
rbbox_corners = center_to_corner_box3d(
rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
)
surfaces = corner_to_surfaces_3d(rbbox_corners)
return points_count_convex_polygon_3d_jit(points[:, :3], surfaces)
def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):
boxes_corners = center_to_corner_box2d(
rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
)
boxes_standup = corner_to_standup_nd(boxes_corners)
qboxes_corners = center_to_corner_box2d(
qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
)
qboxes_standup = corner_to_standup_nd(qboxes_corners)
standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh)
def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):
boxes_corners = center_to_corner_box2d(
rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
)
boxes_standup = corner_to_standup_nd(boxes_corners)
qboxes_corners = center_to_corner_box2d(
qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
)
qboxes_standup = corner_to_standup_nd(qboxes_corners)
standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)
return rbbox_intersection(
boxes_corners, qboxes_corners, standup_iou, standup_thresh
)
def corners_nd(dims, origin=0.5):
ndim = int(dims.shape[1])
corners_norm = np.stack(
np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1
).astype(dims.dtype)
if ndim == 2:
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim])
return corners
@numba.njit
def corners_2d_jit(dims, origin=0.5):
ndim = 2
corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype)
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
return corners
@numba.njit
def corners_3d_jit(dims, origin=0.5):
ndim = 3
corners_norm = np.array(
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
dtype=dims.dtype,
).reshape((8, 3))
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
return corners
@numba.njit
def corner_to_standup_nd_jit(boxes_corner):
num_boxes = boxes_corner.shape[0]
ndim = boxes_corner.shape[-1]
result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
for i in range(num_boxes):
for j in range(ndim):
result[i, j] = np.min(boxes_corner[i, :, j])
for j in range(ndim):
result[i, j + ndim] = np.max(boxes_corner[i, :, j])
return result
def corner_to_standup_nd(boxes_corner):
assert len(boxes_corner.shape) == 3
standup_boxes = []
standup_boxes.append(np.min(boxes_corner, axis=1))
standup_boxes.append(np.max(boxes_corner, axis=1))
return np.concatenate(standup_boxes, -1)
def rbbox2d_to_near_bbox(rbboxes):
rots = rbboxes[..., -1]
rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
def rotation_3d_in_axis(points, angles, axis=0):
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
ones = np.ones_like(rot_cos)
zeros = np.zeros_like(rot_cos)
if axis == 1:
rot_mat_T = np.stack(
[
[rot_cos, zeros, -rot_sin],
[zeros, ones, zeros],
[rot_sin, zeros, rot_cos],
]
)
elif axis == 2 or axis == -1:
rot_mat_T = np.stack(
[
[rot_cos, -rot_sin, zeros],
[rot_sin, rot_cos, zeros],
[zeros, zeros, ones],
]
)
elif axis == 0:
rot_mat_T = np.stack(
[
[zeros, rot_cos, -rot_sin],
[zeros, rot_sin, rot_cos],
[ones, zeros, zeros],
]
)
else:
raise ValueError("axis should in range")
return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_points_single_angle(points, angle, axis=0):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
if axis == 1:
rot_mat_T = np.array(
[[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],
dtype=points.dtype,
)
elif axis == 2 or axis == -1:
rot_mat_T = np.array(
[[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
dtype=points.dtype,
)
elif axis == 0:
rot_mat_T = np.array(
[[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],
dtype=points.dtype,
)
else:
raise ValueError("axis should in range")
return points @ rot_mat_T
def rotation_2d(points, angles):
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_box(box_corners, angle):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T = np.array(
[[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype
)
return box_corners @ rot_mat_T
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
corners = corners_nd(dims, origin=origin)
if angles is not None:
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.reshape([-1, 1, 3])
return corners
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
corners = corners_nd(dims, origin=origin)
if angles is not None:
corners = rotation_2d(corners, angles)
corners += centers.reshape([-1, 1, 2])
return corners
@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
num_box = boxes.shape[0]
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
for i in range(num_box):
rot_sin = np.sin(boxes[i, -1])
rot_cos = np.cos(boxes[i, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
return box_corners
def rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2):
return center_to_corner_box3d(
rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis
)
def rbbox3d_to_bev_corners(rbboxes, origin=0.5):
return center_to_corner_box2d(
rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin
)
def minmax_to_corner_2d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center
return center_to_corner_box2d(center, dims, origin=0.0)
def minmax_to_corner_2d_v2(minmax_box):
return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2)
def minmax_to_corner_3d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center
return center_to_corner_box3d(center, dims, origin=0.0)
def minmax_to_center_2d(minmax_box):
ndim = minmax_box.shape[-1] // 2
center_min = minmax_box[..., :ndim]
dims = minmax_box[..., ndim:] - center_min
center = center_min + 0.5 * dims
return np.concatenate([center, dims], axis=-1)
def center_to_minmax_2d_0_5(centers, dims):
return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)
def center_to_minmax_2d(centers, dims, origin=0.5):
if origin == 0.5:
return center_to_minmax_2d_0_5(centers, dims)
corners = center_to_corner_box2d(centers, dims, origin=origin)
return corners[:, [0, 2]].reshape([-1, 4])
def limit_period(val, offset=0.5, period=np.pi):
return val - np.floor(val / period + offset) * period
def projection_matrix_to_CRT_kitti(proj):
CR = proj[0:3, 0:3]
CT = proj[0:3, 3]
RinvCinv = np.linalg.inv(CR)
Rinv, Cinv = np.linalg.qr(RinvCinv)
C = np.linalg.inv(Cinv)
R = np.linalg.inv(Rinv)
T = Cinv @ CT
return C, R, T
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
b = bbox_image
box_corners = np.array(
[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0)
ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
return ret_xyz
def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
num_box = bboxes.shape[0]
z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[
np.newaxis, :, np.newaxis
]
z_points = np.tile(z_points, [num_box, 1, 1])
box_corners = minmax_to_corner_2d_v2(bboxes)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=1)
ret_xyz = np.concatenate([ret_xy, z_points], axis=-1)
return ret_xyz
@numba.njit
def _add_rgb_to_points_kernel(points_2d, image, points_rgb):
num_points = points_2d.shape[0]
image_h, image_w = image.shape[:2]
for i in range(num_points):
img_pos = np.floor(points_2d[i]).astype(np.int32)
if img_pos[0] >= 0 and img_pos[0] < image_w:
if img_pos[1] >= 0 and img_pos[1] < image_h:
points_rgb[i, :] = image[img_pos[1], img_pos[0], :]
def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]):
kernel = np.ones(mean_size, np.float32) / np.prod(mean_size)
points_cam = lidar_to_camera(points[:, :3], rect, Trv2c)
points_2d = project_to_image(points_cam, P2)
points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype)
_add_rgb_to_points_kernel(points_2d, image, points_rgb)
return points_rgb
def project_to_image(points_3d, proj_mat):
points_shape = list(points_3d.shape)
points_shape[-1] = 1
points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1)
point_2d = points_4 @ proj_mat.T
point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
return point_2d_res
def camera_to_lidar(points, r_rect, velo2cam):
points_shape = list(points.shape[0:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
points_shape = list(points.shape[:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
camera_points = points @ (r_rect @ velo2cam).T
return camera_points[..., :3]
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
xyz_lidar = data[:, 0:3]
w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
return np.concatenate([xyz, l, h, w, r], axis=1)
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
C, R, T = projection_matrix_to_CRT_kitti(P2)
image_bbox = [0, 0, image_shape[1], image_shape[0]]
frustum = get_frustum(image_bbox, C)
frustum -= T
frustum = np.linalg.inv(R) @ frustum.T
frustum = camera_to_lidar(frustum.T, rect, Trv2c)
frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
points = points[indices.reshape([-1])]
return points
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * (
query_boxes[k, 3] - query_boxes[k, 1] + eps
)
for n in range(N):
iw = (
min(boxes[n, 2], query_boxes[k, 2])
- max(boxes[n, 0], query_boxes[k, 0])
+ eps
)
if iw > 0:
ih = (
min(boxes[n, 3], query_boxes[k, 3])
- max(boxes[n, 1], query_boxes[k, 1])
+ eps
)
if ih > 0:
ua = (
(boxes[n, 2] - boxes[n, 0] + eps)
* (boxes[n, 3] - boxes[n, 1] + eps)
+ box_area
- iw * ih
)
overlaps[n, k] = iw * ih / ua
return overlaps
@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
for k in range(K):
box_area = (
(query_boxes[k, 3] - query_boxes[k, 0] + add1)
* (query_boxes[k, 4] - query_boxes[k, 1] + add1)
* (query_boxes[k, 5] - query_boxes[k, 2] + add1)
)
for n in range(N):
iw = (
min(boxes[n, 3], query_boxes[k, 3])
- max(boxes[n, 0], query_boxes[k, 0])
+ add1
)
if iw > 0:
ih = (
min(boxes[n, 4], query_boxes[k, 4])
- max(boxes[n, 1], query_boxes[k, 1])
+ add1
)
if ih > 0:
il = (
min(boxes[n, 5], query_boxes[k, 5])
- max(boxes[n, 2], query_boxes[k, 2])
+ add1
)
if il > 0:
ua = float(
(boxes[n, 3] - boxes[n, 0] + add1)
* (boxes[n, 4] - boxes[n, 1] + add1)
* (boxes[n, 5] - boxes[n, 2] + add1)
+ box_area
- iw * ih * il
)
overlaps[n, k] = iw * ih * il / ua
return overlaps
@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
N = boxes.shape[0]
K = query_boxes.shape[0]
ndim = boxes.shape[1] // 2
overlaps = np.zeros((N, K), dtype=boxes.dtype)
side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
invalid = False
for k in range(K):
qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1
for i in range(1, ndim):
qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1
for n in range(N):
invalid = False
for i in range(ndim):
side_length = (
min(boxes[n, i + ndim], query_boxes[k, i + ndim])
- max(boxes[n, i], query_boxes[k, i])
+ add1
)
if side_length <= 0:
invalid = True
break
side_lengths[i] = side_length
if not invalid:
box_area = boxes[n, ndim] - boxes[n, 0] + add1
for i in range(1, ndim):
box_area *= boxes[n, ndim + i] - boxes[n, i] + add1
inter = side_lengths[0]
for i in range(1, ndim):
inter *= side_lengths[i]
ua = float(box_area + qbox_area - inter)
overlaps[n, k] = inter / ua
return overlaps
def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
rbbox_corners = center_to_corner_box3d(
rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
)
surfaces = corner_to_surfaces_3d(rbbox_corners)
indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
return indices
def corner_to_surfaces_3d(corners):
surfaces = np.array(
[
[corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],
[corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],
[corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],
[corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],
[corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],
[corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],
]
).transpose([2, 0, 1, 3])
return surfaces
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array(
[0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
return surfaces
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_centers = voxel_origins + voxel_size * 0.5
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3] - voxel_size * 0.5,
gt_boxes[:, 3:6] + voxel_size,
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2,
)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
return np.any(ret, axis=1).astype(np.int64)
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_maxes = voxel_origins + voxel_size
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
voxel_corners = minmax_to_corner_3d(voxel_minmax)
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2,
)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
voxel_corners_flat = voxel_corners.reshape([-1, 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([-1, 8, ret.shape[-1]])
return ret.any(-1).any(-1).astype(np.int64)
def image_box_region_area(img_cumsum, bbox):
N = bbox.shape[0]
M = img_cumsum.shape[0]
ret = np.zeros([N, M], dtype=img_cumsum.dtype)
ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
ret = ID - IB - IC + IA
return ret
def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6):
x_vsize = voxel_size[0]
y_vsize = voxel_size[1]
max_x = points[:, 0].max()
max_y = points[:, 1].max()
min_x = points[:, 0].min()
min_y = points[:, 1].min()
max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample)
max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample)
min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample)
min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample)
max_x = np.minimum(max_x + margin, bound[2])
max_y = np.minimum(max_y + margin, bound[3])
min_x = np.maximum(min_x - margin, bound[0])
min_y = np.maximum(min_y - margin, bound[1])
return np.array([min_x, min_y, max_x, max_y])
def box3d_to_bbox(box3d, rect, Trv2c, P2):
box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c)
box_corners = center_to_corner_box3d(
box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1
)
box_corners_in_image = project_to_image(box_corners, P2)
minxy = np.min(box_corners_in_image, axis=1)
maxxy = np.max(box_corners_in_image, axis=1)
bbox = np.concatenate([minxy, maxxy], axis=1)
return bbox
def change_box3d_center_(box3d, src, dst):
dst = np.array(dst, dtype=box3d.dtype)
src = np.array(src, dtype=box3d.dtype)
box3d[..., :3] += box3d[..., 3:6] * (dst - src)
def encode_parts(relative_shifts):
parts = np.zeros((len(relative_shifts),), dtype=np.int32)
mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0)
parts[mask] = 0
mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0)
parts[mask] = 1
mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0)
parts[mask] = 2
mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0)
parts[mask] = 3
return parts
| true
| true
|
790d08818998c5080469cdf9b93c4f5672c1cd17
| 2,531
|
py
|
Python
|
ivy_builder/specs/network_spec.py
|
ivy-dl/builder
|
a24d7254e90476332b962f9aba9a02222c55e035
|
[
"Apache-2.0"
] | 1
|
2022-02-20T15:40:01.000Z
|
2022-02-20T15:40:01.000Z
|
ivy_builder/specs/network_spec.py
|
ivy-dl/builder
|
a24d7254e90476332b962f9aba9a02222c55e035
|
[
"Apache-2.0"
] | null | null | null |
ivy_builder/specs/network_spec.py
|
ivy-dl/builder
|
a24d7254e90476332b962f9aba9a02222c55e035
|
[
"Apache-2.0"
] | 1
|
2022-03-29T15:21:56.000Z
|
2022-03-29T15:21:56.000Z
|
# global
import ivy
import abc
import importlib
from typing import List
# local
from ivy_builder.specs.spec import Spec
from ivy_builder.specs import DatasetSpec
from ivy_builder.specs.spec import locals_to_kwargs
# ToDo: fix cyclic imports, so this method can be imported from the builder module
def load_class_from_str(full_str):
mod_str = '.'.join(full_str.split('.')[:-1])
class_str = full_str.split('.')[-1]
return getattr(importlib.import_module(mod_str), class_str)
class NetworkSpec(Spec, abc.ABC):
def __init__(self, dataset_spec: DatasetSpec = None, dev_strs: List[str] = None,
v_keychains=None, keep_v_keychains=False, build_mode='explicit', **kwargs) -> None:
"""
base class for storing general specifications of the neural network
"""
kw = locals_to_kwargs(locals())
super().__init__(dataset_spec=dataset_spec,
dev_strs=dev_strs,
v_keychains=v_keychains,
keep_v_keychains=keep_v_keychains,
build_mode=build_mode,
**kwargs)
if 'subnets' in self:
for k, subet_spec in self.subnets.items():
if 'network_spec_class' in subet_spec:
if isinstance(subet_spec.network_spec_class, str):
spec_class = load_class_from_str(subet_spec.network_spec_class)
else:
spec_class = subet_spec.network_spec_class
if isinstance(kwargs['subnets'][k], spec_class):
subet_spec = kwargs['subnets'][k]
else:
subet_spec = spec_class(**{**kwargs['subnets'][k],
**dict(dataset_spec=dataset_spec, dev_strs=dev_strs)})
self.subnets[k] = subet_spec
if isinstance(subet_spec.network_class, str):
self.subnets[k].network_class = load_class_from_str(subet_spec.network_class)
else:
self.subnets[k].network_class = subet_spec.network_class
self.subnets[k].store_vars = ivy.default(self.subnets[k].if_exists('store_vars'), True)
self.subnets[k].build_mode = ivy.default(self.subnets[k].if_exists('build_mode'), self.build_mode)
self.subnets[k].dataset_spec = dataset_spec
self.subnets[k].dev_strs = dev_strs
self._kwargs = kw
| 45.196429
| 114
| 0.594232
|
import ivy
import abc
import importlib
from typing import List
from ivy_builder.specs.spec import Spec
from ivy_builder.specs import DatasetSpec
from ivy_builder.specs.spec import locals_to_kwargs
def load_class_from_str(full_str):
mod_str = '.'.join(full_str.split('.')[:-1])
class_str = full_str.split('.')[-1]
return getattr(importlib.import_module(mod_str), class_str)
class NetworkSpec(Spec, abc.ABC):
def __init__(self, dataset_spec: DatasetSpec = None, dev_strs: List[str] = None,
v_keychains=None, keep_v_keychains=False, build_mode='explicit', **kwargs) -> None:
kw = locals_to_kwargs(locals())
super().__init__(dataset_spec=dataset_spec,
dev_strs=dev_strs,
v_keychains=v_keychains,
keep_v_keychains=keep_v_keychains,
build_mode=build_mode,
**kwargs)
if 'subnets' in self:
for k, subet_spec in self.subnets.items():
if 'network_spec_class' in subet_spec:
if isinstance(subet_spec.network_spec_class, str):
spec_class = load_class_from_str(subet_spec.network_spec_class)
else:
spec_class = subet_spec.network_spec_class
if isinstance(kwargs['subnets'][k], spec_class):
subet_spec = kwargs['subnets'][k]
else:
subet_spec = spec_class(**{**kwargs['subnets'][k],
**dict(dataset_spec=dataset_spec, dev_strs=dev_strs)})
self.subnets[k] = subet_spec
if isinstance(subet_spec.network_class, str):
self.subnets[k].network_class = load_class_from_str(subet_spec.network_class)
else:
self.subnets[k].network_class = subet_spec.network_class
self.subnets[k].store_vars = ivy.default(self.subnets[k].if_exists('store_vars'), True)
self.subnets[k].build_mode = ivy.default(self.subnets[k].if_exists('build_mode'), self.build_mode)
self.subnets[k].dataset_spec = dataset_spec
self.subnets[k].dev_strs = dev_strs
self._kwargs = kw
| true
| true
|
790d09b5e9809a10e91a44b3c53d8a7d68078a8c
| 64
|
py
|
Python
|
warsa/precipitation/satellite/__init__.py
|
JRoehrig/pywarsa
|
d2fcc6cebbadaff742bf2ac870a01b0cb534ebde
|
[
"MIT"
] | null | null | null |
warsa/precipitation/satellite/__init__.py
|
JRoehrig/pywarsa
|
d2fcc6cebbadaff742bf2ac870a01b0cb534ebde
|
[
"MIT"
] | null | null | null |
warsa/precipitation/satellite/__init__.py
|
JRoehrig/pywarsa
|
d2fcc6cebbadaff742bf2ac870a01b0cb534ebde
|
[
"MIT"
] | 1
|
2020-12-17T15:49:13.000Z
|
2020-12-17T15:49:13.000Z
|
__author__ = 'roehrig'
"""Satellite and reanalysis products
"""
| 16
| 36
| 0.734375
|
__author__ = 'roehrig'
| true
| true
|
790d0a13937996fb917e410c64335f4df346e4f2
| 3,893
|
py
|
Python
|
lib/python2.7/site-packages/ldap3/protocol/formatters/validators.py
|
crav7/ProjectDjango
|
10dc03919b1fcfc34d2ddc93b85989638399e3e9
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/ldap3/protocol/formatters/validators.py
|
crav7/ProjectDjango
|
10dc03919b1fcfc34d2ddc93b85989638399e3e9
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/ldap3/protocol/formatters/validators.py
|
crav7/ProjectDjango
|
10dc03919b1fcfc34d2ddc93b85989638399e3e9
|
[
"MIT"
] | null | null | null |
"""
"""
# Created on 2016.08.09
#
# Author: Giovanni Cannata
#
# Copyright 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
# Validators return True if value is valid, False if value is not valid,
# or a value different from True and False that is a valid value to substitute to the input value
def check_type(input_value, value_type):
if isinstance(input_value, value_type):
return True
if isinstance(input_value, SEQUENCE_TYPES):
for value in input_value:
if not isinstance(value, value_type):
return False
return True
return False
def always_valid(name, input_value):
return True
def validate_generic_single_value(name, input_value):
if not isinstance(input_value, SEQUENCE_TYPES):
return True
if len(input_value) == 1:
return True
return False
def validate_integer(name, input_value):
return check_type(input_value, int)
def validate_bytes(name, input_value):
return check_type(input_value, bytes)
def validate_boolean(name, input_value):
    # it could be a real bool or the string TRUE or FALSE, # only a single value is allowed
if validate_generic_single_value(name, input_value):
if isinstance(input_value, SEQUENCE_TYPES):
input_value = input_value[0]
if isinstance(input_value, bool):
if input_value:
return 'TRUE'
else:
return 'FALSE'
if isinstance(input_value, STRING_TYPES):
if input_value.lower() == 'true':
return 'TRUE'
elif input_value.lower() == 'false':
return 'FALSE'
return False
def validate_time(name, input_value):
# if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC
changed = False
sequence = True # indicates if a sequence must be returned
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
valid_values = []
for element in input_value:
if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time
if isinstance(format_time(element), datetime): # valid Generalized Time string
valid_values.append(element)
else:
return False
elif isinstance(element, datetime):
changed = True
if element.tzinfo: # a datetime with a timezone
valid_values.append(element.strftime('%Y%m%d%H%M%SZ%z'))
else: # datetime without timezone, assumed local and adjusted to UTC
offset = datetime.now() - datetime.utcnow()
valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
else:
return False
if changed:
if sequence:
return valid_values
else:
return valid_values[0]
else:
return True
| 32.714286
| 103
| 0.645261
|
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
def check_type(input_value, value_type):
if isinstance(input_value, value_type):
return True
if isinstance(input_value, SEQUENCE_TYPES):
for value in input_value:
if not isinstance(value, value_type):
return False
return True
return False
def always_valid(name, input_value):
return True
def validate_generic_single_value(name, input_value):
if not isinstance(input_value, SEQUENCE_TYPES):
return True
if len(input_value) == 1:
return True
return False
def validate_integer(name, input_value):
return check_type(input_value, int)
def validate_bytes(name, input_value):
return check_type(input_value, bytes)
def validate_boolean(name, input_value):
    if validate_generic_single_value(name, input_value):
if isinstance(input_value, SEQUENCE_TYPES):
input_value = input_value[0]
if isinstance(input_value, bool):
if input_value:
return 'TRUE'
else:
return 'FALSE'
if isinstance(input_value, STRING_TYPES):
if input_value.lower() == 'true':
return 'TRUE'
elif input_value.lower() == 'false':
return 'FALSE'
return False
def validate_time(name, input_value):
changed = False
sequence = True
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
valid_values = []
for element in input_value:
if isinstance(element, STRING_TYPES):
if isinstance(format_time(element), datetime):
valid_values.append(element)
else:
return False
elif isinstance(element, datetime):
changed = True
if element.tzinfo:
valid_values.append(element.strftime('%Y%m%d%H%M%SZ%z'))
else:
offset = datetime.now() - datetime.utcnow()
valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
else:
return False
if changed:
if sequence:
return valid_values
else:
return valid_values[0]
else:
return True
| true
| true
|
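A minimal, hedged usage sketch of the validator convention used in the ldap3 file above (return True to accept, False to reject, or a replacement value to substitute). The SEQUENCE_TYPES/STRING_TYPES constants and the apply_validator helper below are illustrative assumptions, not ldap3 API.

SEQUENCE_TYPES = (list, tuple, set)   # assumed stand-ins for the ldap3 constants
STRING_TYPES = (str,)

def validate_boolean(name, input_value):
    # simplified restatement of the validator above: normalize to 'TRUE'/'FALSE'
    if isinstance(input_value, SEQUENCE_TYPES):
        if len(input_value) != 1:
            return False
        input_value = input_value[0]
    if isinstance(input_value, bool):
        return 'TRUE' if input_value else 'FALSE'
    if isinstance(input_value, STRING_TYPES) and input_value.lower() in ('true', 'false'):
        return input_value.upper()
    return False

def apply_validator(validator, name, value):
    # hypothetical caller: reject on False, keep the value on True, otherwise substitute
    result = validator(name, value)
    if result is False:
        raise ValueError('invalid value for %r: %r' % (name, value))
    return value if result is True else result

print(apply_validator(validate_boolean, 'obsolete', True))      # -> TRUE
print(apply_validator(validate_boolean, 'obsolete', ['false'])) # -> FALSE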
790d0a33bb7179331ba3ddcbb1b97ece1075af92
| 1,085
|
py
|
Python
|
venv/Lib/site-packages/pathspec/__init__.py
|
gilbertekalea/booking.com_crawler
|
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
|
[
"MIT"
] | 92
|
2020-01-22T22:15:29.000Z
|
2022-03-31T05:19:16.000Z
|
venv/Lib/site-packages/pathspec/__init__.py
|
gilbertekalea/booking.com_crawler
|
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
|
[
"MIT"
] | 604
|
2020-01-25T17:13:27.000Z
|
2022-03-31T18:58:24.000Z
|
venv/Lib/site-packages/pathspec/__init__.py
|
gilbertekalea/booking.com_crawler
|
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
|
[
"MIT"
] | 39
|
2020-02-06T00:38:06.000Z
|
2022-03-15T06:14:19.000Z
|
# encoding: utf-8
"""
The *pathspec* package provides pattern matching for file paths. So far
this only includes Git's wildmatch pattern matching (the style used for
".gitignore" files).
The following classes are imported and made available from the root of
the `pathspec` package:
- :class:`pathspec.pathspec.PathSpec`
- :class:`pathspec.pattern.Pattern`
- :class:`pathspec.pattern.RegexPattern`
- :class:`pathspec.util.RecursionError`
The following functions are also imported:
- :func:`pathspec.util.iter_tree`
- :func:`pathspec.util.lookup_pattern`
- :func:`pathspec.util.match_files`
"""
from __future__ import unicode_literals
from .pathspec import PathSpec
from .pattern import Pattern, RegexPattern
from .util import iter_tree, lookup_pattern, match_files, RecursionError
from ._meta import (
__author__,
__copyright__,
__credits__,
__license__,
__version__,
)
# Load pattern implementations.
from . import patterns
# Expose `GitIgnorePattern` class in the root module for backward
# compatibility with v0.4.
from .patterns.gitwildmatch import GitIgnorePattern
| 24.659091
| 72
| 0.784332
|
from __future__ import unicode_literals
from .pathspec import PathSpec
from .pattern import Pattern, RegexPattern
from .util import iter_tree, lookup_pattern, match_files, RecursionError
from ._meta import (
__author__,
__copyright__,
__credits__,
__license__,
__version__,
)
from . import patterns
from .patterns.gitwildmatch import GitIgnorePattern
| true
| true
|
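An illustrative usage sketch for the package documented in the docstring above; the ignore rules and file paths are made up, and the registered pattern name ('gitwildmatch') should be checked against the installed pathspec version.

import pathspec

ignore_lines = [
    '*.pyc',            # hypothetical .gitignore-style rules
    'build/',
    '!build/keep.txt',
]
spec = pathspec.PathSpec.from_lines('gitwildmatch', ignore_lines)

for path in ['main.py', 'main.pyc', 'build/app.o', 'build/keep.txt']:
    # match_file returns True when the path matches the spec (i.e. would be ignored)
    print(path, '->', 'ignored' if spec.match_file(path) else 'kept')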
790d0a3cf8b88770e34396b24cfb8f7e4ed87451
| 15,369
|
py
|
Python
|
renderer_blender_src.py
|
laphisboy/mvsnerf
|
ea1aecd7d653b04a7f4bec27ad978f64a038bc92
|
[
"MIT"
] | null | null | null |
renderer_blender_src.py
|
laphisboy/mvsnerf
|
ea1aecd7d653b04a7f4bec27ad978f64a038bc92
|
[
"MIT"
] | null | null | null |
renderer_blender_src.py
|
laphisboy/mvsnerf
|
ea1aecd7d653b04a7f4bec27ad978f64a038bc92
|
[
"MIT"
] | null | null | null |
import argparse
import re
####
# # Box 1
####
import sys,os,imageio,lpips
root = '/home/youngsun/documents/mvs/mvsnerf_timing'
os.chdir(root)
sys.path.append(root)
from opt_src import config_parser
from data import dataset_dict
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# models
from models_src import *
from renderer_src import *
from data.ray_utils import get_rays
from tqdm import tqdm
from skimage.metrics import structural_similarity
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer, loggers
from data.ray_utils import ray_marcher
import torch
torch.cuda.set_device(0)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
####
# # Box 2
####
def decode_batch(batch):
rays = batch['rays'] # (B, 8)
rgbs = batch['rgbs'] # (B, 3)
return rays, rgbs
def unpreprocess(data, shape=(1,1,3,1,1)):
# to unnormalize image for visualization
# data N V C H W
device = data.device
mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)
std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)
return (data - mean) / std
def read_depth(filename):
depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800)
depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_NEAREST) # (600, 800)
depth_h = depth_h[44:556, 80:720] # (512, 640)
# depth = cv2.resize(depth_h, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_NEAREST)#!!!!!!!!!!!!!!!!!!!!!!!!!
    mask = depth_h>0
return depth_h,mask
loss_fn_vgg = lpips.LPIPS(net='vgg')
mse2psnr = lambda x : -10. * np.log(x) / np.log(10.)
####
# # Box 3
####
# create function for returning dense, sparse, far views
def get_source_imgs(source_dataset, target_position, N_views, device, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
pair_idx = get_pair_idx(source_dataset, target_position, N_views, view_type, fixed_idxs, is_source_target_overlap)
imgs_source, proj_mats, near_far_source, pose_source = source_dataset.read_source_views(pair_idx=pair_idx,device=device)
return imgs_source, proj_mats, near_far_source, pose_source
def get_pair_idx(source_dataset, target_position, N_views, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
positions = source_dataset.poses[:,:3,3]
dis = np.sum(np.abs(positions - target_position), axis=-1)
dis_sort = np.argsort(dis)
if is_source_target_overlap:
dis_sort = dis_sort[1:]
    if view_type == 'nearest': # or "as dense as possible", so to speak
pair_idx = dis_sort[:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'dense':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort()[0]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
    if view_type == 'random': # I know it's unnecessarily long...
idxs = torch.randperm(len(dis_sort))[:N_views]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'sparse':
idxs = torch.linspace(0, len(dis_sort), steps=N_views+1).round()
idxs = [np.random.choice(range(int(idxs[i]), int(idxs[i+1]))) for i in range(len(idxs)-1)]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'far':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort(descending=True)[0]
pair_idx = dis_sort[::-1][idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'farthest':
pair_idx = dis_sort[::-1][:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
# return index for the case of 'fixed'
if view_type == 'fixed':
pair_idx = fixed_idxs
return pair_idx
####
# # Box 4
####
def render_blender(view_type='nearest',
scenes=['ficus'],
num_src_views=3,
ckpt='base-3src-dense.tar',
source_split='train',
target_split='val',
select_index=None,
is_fixed=False,
is_source_target_overlap=False
):
psnr_all,ssim_all,LPIPS_vgg_all = [],[],[]
# for i_scene, scene in enumerate(['ship','mic','chair','lego','drums','ficus','materials','hotdog']):#
for i_scene, scene in enumerate(scenes):#
psnr,ssim,LPIPS_vgg = [],[],[]
cmd = f'--datadir /mnt/hdd/mvsnerf_data/nerf_synthetic/{scene} \
--dataset_name blender_src --white_bkgd \
--net_type v0 --ckpt ./ckpts/{ckpt} --num_src_views {num_src_views}'
save_dir = f'/mnt/hdd/youngsun/mvsnerf_timing/results/{ckpt[:-4]}/blender-{num_src_views}-'
if is_fixed:
save_dir += 'fixed-'
save_dir += f'{view_type}-'
save_dir += f'{source_split}-{target_split}/{scene}'
args = config_parser(cmd.split())
args.use_viewdirs = True
args.N_samples = 128
# args.feat_dim = 8+12
args.feat_dim = 8+4*num_src_views
# create models
if 0==i_scene:
render_kwargs_train, render_kwargs_test, start, grad_vars = create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)
filter_keys(render_kwargs_train)
MVSNet = render_kwargs_train['network_mvs']
render_kwargs_train.pop('network_mvs')
datadir = args.datadir
datatype = 'train'
pad = 16
args.chunk = 5120
print('============> rendering dataset <===================')
dataset_source = dataset_dict[args.dataset_name](args, split=source_split)
dataset_target = dataset_dict[args.dataset_name](args, split=target_split, select_index=select_index)
target_idx = dataset_target.img_idx
save_as_image = True
os.makedirs(save_dir, exist_ok=True)
MVSNet.train()
MVSNet = MVSNet.cuda()
with torch.no_grad():
try:
tqdm._instances.clear()
except Exception:
pass
for i, batch in enumerate(tqdm(dataset_target)):
torch.cuda.empty_cache()
rays, img = decode_batch(batch)
rays = rays.squeeze().to(device) # (H*W, 3)
img = img.squeeze().cpu().numpy() # (H, W, 3)
if is_fixed:
if i == 0:
if select_index is not None:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[len(select_index)//2],:3,3],
N_views=args.num_src_views,
view_type=view_type)
else:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[50],:3,3],
N_views=args.num_src_views,
view_type=view_type)
imgs_source, proj_mats, near_far_source, pose_source = dataset_source.read_source_views(pair_idx=pair_idx,
device=device)
else:
# created fixed image_source
imgs_source, proj_mats, near_far_source, pose_source = get_source_imgs(source_dataset=dataset_source,
target_position=dataset_target.poses[[i],:3,3],
N_views=args.num_src_views, device=device,
view_type=view_type)
volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad)
imgs_source = unpreprocess(imgs_source)
N_rays_all = rays.shape[0]
rgb_rays, depth_rays_preds = [],[]
for chunk_idx in range(N_rays_all//args.chunk + int(N_rays_all%args.chunk>0)):
xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(rays[chunk_idx*args.chunk:(chunk_idx+1)*args.chunk],
N_samples=args.N_samples)
# Converting world coordinate to ndc coordinate
H, W = img.shape[:2]
inv_scale = torch.tensor([W - 1, H - 1]).to(device)
w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()
intrinsic_ref[:2] *= args.imgScale_test/args.imgScale_train
xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,
near=near_far_source[0], far=near_far_source[1], pad=pad*args.imgScale_test)
# rendering
rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,
xyz_NDC, z_vals, rays_o, rays_d,
volume_feature,imgs_source, **render_kwargs_train)
rgb, depth_pred = torch.clamp(rgb.cpu(),0,1.0).numpy(), depth_pred.cpu().numpy()
rgb_rays.append(rgb)
depth_rays_preds.append(depth_pred)
depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)
depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)
rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)
img_vis = np.concatenate((img*255,rgb_rays*255,depth_rays_preds),axis=1)
img_vis = np.concatenate((torch.cat(torch.split(imgs_source*255, [1]*num_src_views, dim=1),-1).squeeze().permute(1,2,0).cpu().numpy(),img_vis),axis=1)
if save_as_image:
imageio.imwrite(f'{save_dir}/{scene}_{target_idx[i]:03d}.png', img_vis.astype('uint8'))
else:
rgbs.append(img_vis.astype('uint8'))
# quantity
# center crop 0.8 ratio
H_crop, W_crop = np.array(rgb_rays.shape[:2])//10
img = img[H_crop:-H_crop,W_crop:-W_crop]
rgb_rays = rgb_rays[H_crop:-H_crop,W_crop:-W_crop]
psnr.append( mse2psnr(np.mean((rgb_rays-img)**2)))
ssim.append( structural_similarity(rgb_rays, img, multichannel=True))
img_tensor = torch.from_numpy(rgb_rays)[None].permute(0,3,1,2).float()*2-1.0 # image should be RGB, IMPORTANT: normalized to [-1,1]
img_gt_tensor = torch.from_numpy(img)[None].permute(0,3,1,2).float()*2-1.0
LPIPS_vgg.append( loss_fn_vgg(img_tensor, img_gt_tensor).item())
print(f'=====> scene: {scene} mean psnr {np.mean(psnr)} ssim: {np.mean(ssim)} lpips: {np.mean(LPIPS_vgg)}')
psnr_all.append(psnr);ssim_all.append(ssim);LPIPS_vgg_all.append(LPIPS_vgg)
if not save_as_image:
imageio.mimwrite(f'{save_dir}/{scene}_spiral.mp4', np.stack(rgbs), fps=20, quality=10)
print(f'=====> all mean psnr {np.mean(psnr_all)} ssim: {np.mean(ssim_all)} lpips: {np.mean(LPIPS_vgg_all)}')
####
# # Box 5
####
def render_blender_all_settings(scenes=['lego'], num_src_views=3, ckpt='base-3src-dense.tar',source_split='train', target_split='val', select_index=[30,60,90], view_types=[1]):
if 1 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 2 in view_types:
render_blender('dense', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 3 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 4 in view_types:
render_blender('far', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 5 in view_types:
render_blender('random', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 6 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 7 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 8 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
if 9 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
return None
####
# # Box 6
####
####
# # Box 7
####
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--view_types', nargs="+", type=int,
help= 'Enter list of view types to render:' \
' 1 - nearest, 2 - dense, 3 - sparse, 4 - far, 5 - random, ' \
                        '6 - fixed nearest, 7 - fixed sparse, 8 - unseen nearest, 9 - unseen sparse')
parser.add_argument('--view_indexes', nargs="+", type=int, const=None, default=None,
help= 'default - all views (100)')
parser.add_argument('--scenes', nargs='+', default=[])
parser.add_argument('--ckpts', nargs='+', default=[])
parser.add_argument('--source', type=str, default='train')
parser.add_argument('--target', type=str, default='val')
args = parser.parse_args()
for ckpt in args.ckpts:
num_src_views = int(re.findall('[0-9]+', ckpt)[0])
render_blender_all_settings(scenes=args.scenes,
num_src_views=num_src_views,
ckpt=ckpt,
source_split=args.source,
target_split=args.target,
select_index=args.view_indexes,
view_types=args.view_types)
torch.cuda.empty_cache()
| 39.919481
| 176
| 0.569133
|
import argparse
import re
import sys,os,imageio,lpips
root = '/home/youngsun/documents/mvs/mvsnerf_timing'
os.chdir(root)
sys.path.append(root)
from opt_src import config_parser
from data import dataset_dict
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from models_src import *
from renderer_src import *
from data.ray_utils import get_rays
from tqdm import tqdm
from skimage.metrics import structural_similarity
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer, loggers
from data.ray_utils import ray_marcher
import torch
torch.cuda.set_device(0)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def decode_batch(batch):
rays = batch['rays']
rgbs = batch['rgbs']
return rays, rgbs
def unpreprocess(data, shape=(1,1,3,1,1)):
device = data.device
mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)
std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)
return (data - mean) / std
def read_depth(filename):
depth_h = np.array(read_pfm(filename)[0], dtype=np.float32)
depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_NEAREST)
depth_h = depth_h[44:556, 80:720]
    mask = depth_h>0
    return depth_h,mask
loss_fn_vgg = lpips.LPIPS(net='vgg')
mse2psnr = lambda x : -10. * np.log(x) / np.log(10.)
def get_source_imgs(source_dataset, target_position, N_views, device, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
pair_idx = get_pair_idx(source_dataset, target_position, N_views, view_type, fixed_idxs, is_source_target_overlap)
imgs_source, proj_mats, near_far_source, pose_source = source_dataset.read_source_views(pair_idx=pair_idx,device=device)
return imgs_source, proj_mats, near_far_source, pose_source
def get_pair_idx(source_dataset, target_position, N_views, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
positions = source_dataset.poses[:,:3,3]
dis = np.sum(np.abs(positions - target_position), axis=-1)
dis_sort = np.argsort(dis)
if is_source_target_overlap:
dis_sort = dis_sort[1:]
if view_type == 'nearest':
pair_idx = dis_sort[:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'dense':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort()[0]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'random':
idxs = torch.randperm(len(dis_sort))[:N_views]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'sparse':
idxs = torch.linspace(0, len(dis_sort), steps=N_views+1).round()
idxs = [np.random.choice(range(int(idxs[i]), int(idxs[i+1]))) for i in range(len(idxs)-1)]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'far':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort(descending=True)[0]
pair_idx = dis_sort[::-1][idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'farthest':
pair_idx = dis_sort[::-1][:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'fixed':
pair_idx = fixed_idxs
return pair_idx
def render_blender(view_type='nearest',
scenes=['ficus'],
num_src_views=3,
ckpt='base-3src-dense.tar',
source_split='train',
target_split='val',
select_index=None,
is_fixed=False,
is_source_target_overlap=False
):
psnr_all,ssim_all,LPIPS_vgg_all = [],[],[]
for i_scene, scene in enumerate(scenes):
psnr,ssim,LPIPS_vgg = [],[],[]
cmd = f'--datadir /mnt/hdd/mvsnerf_data/nerf_synthetic/{scene} \
--dataset_name blender_src --white_bkgd \
--net_type v0 --ckpt ./ckpts/{ckpt} --num_src_views {num_src_views}'
save_dir = f'/mnt/hdd/youngsun/mvsnerf_timing/results/{ckpt[:-4]}/blender-{num_src_views}-'
if is_fixed:
save_dir += 'fixed-'
save_dir += f'{view_type}-'
save_dir += f'{source_split}-{target_split}/{scene}'
args = config_parser(cmd.split())
args.use_viewdirs = True
args.N_samples = 128
args.feat_dim = 8+4*num_src_views
if 0==i_scene:
render_kwargs_train, render_kwargs_test, start, grad_vars = create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)
filter_keys(render_kwargs_train)
MVSNet = render_kwargs_train['network_mvs']
render_kwargs_train.pop('network_mvs')
datadir = args.datadir
datatype = 'train'
pad = 16
args.chunk = 5120
print('============> rendering dataset <===================')
dataset_source = dataset_dict[args.dataset_name](args, split=source_split)
dataset_target = dataset_dict[args.dataset_name](args, split=target_split, select_index=select_index)
target_idx = dataset_target.img_idx
save_as_image = True
os.makedirs(save_dir, exist_ok=True)
MVSNet.train()
MVSNet = MVSNet.cuda()
with torch.no_grad():
try:
tqdm._instances.clear()
except Exception:
pass
for i, batch in enumerate(tqdm(dataset_target)):
torch.cuda.empty_cache()
rays, img = decode_batch(batch)
rays = rays.squeeze().to(device)
img = img.squeeze().cpu().numpy()
if is_fixed:
if i == 0:
if select_index is not None:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[len(select_index)//2],:3,3],
N_views=args.num_src_views,
view_type=view_type)
else:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[50],:3,3],
N_views=args.num_src_views,
view_type=view_type)
imgs_source, proj_mats, near_far_source, pose_source = dataset_source.read_source_views(pair_idx=pair_idx,
device=device)
else:
imgs_source, proj_mats, near_far_source, pose_source = get_source_imgs(source_dataset=dataset_source,
target_position=dataset_target.poses[[i],:3,3],
N_views=args.num_src_views, device=device,
view_type=view_type)
volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad)
imgs_source = unpreprocess(imgs_source)
N_rays_all = rays.shape[0]
rgb_rays, depth_rays_preds = [],[]
for chunk_idx in range(N_rays_all//args.chunk + int(N_rays_all%args.chunk>0)):
xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(rays[chunk_idx*args.chunk:(chunk_idx+1)*args.chunk],
N_samples=args.N_samples)
H, W = img.shape[:2]
inv_scale = torch.tensor([W - 1, H - 1]).to(device)
w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()
intrinsic_ref[:2] *= args.imgScale_test/args.imgScale_train
xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,
near=near_far_source[0], far=near_far_source[1], pad=pad*args.imgScale_test)
rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,
xyz_NDC, z_vals, rays_o, rays_d,
volume_feature,imgs_source, **render_kwargs_train)
rgb, depth_pred = torch.clamp(rgb.cpu(),0,1.0).numpy(), depth_pred.cpu().numpy()
rgb_rays.append(rgb)
depth_rays_preds.append(depth_pred)
depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)
depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)
rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)
img_vis = np.concatenate((img*255,rgb_rays*255,depth_rays_preds),axis=1)
img_vis = np.concatenate((torch.cat(torch.split(imgs_source*255, [1]*num_src_views, dim=1),-1).squeeze().permute(1,2,0).cpu().numpy(),img_vis),axis=1)
if save_as_image:
imageio.imwrite(f'{save_dir}/{scene}_{target_idx[i]:03d}.png', img_vis.astype('uint8'))
else:
rgbs.append(img_vis.astype('uint8'))
H_crop, W_crop = np.array(rgb_rays.shape[:2])//10
img = img[H_crop:-H_crop,W_crop:-W_crop]
rgb_rays = rgb_rays[H_crop:-H_crop,W_crop:-W_crop]
psnr.append( mse2psnr(np.mean((rgb_rays-img)**2)))
ssim.append( structural_similarity(rgb_rays, img, multichannel=True))
img_tensor = torch.from_numpy(rgb_rays)[None].permute(0,3,1,2).float()*2-1.0
img_gt_tensor = torch.from_numpy(img)[None].permute(0,3,1,2).float()*2-1.0
LPIPS_vgg.append( loss_fn_vgg(img_tensor, img_gt_tensor).item())
print(f'=====> scene: {scene} mean psnr {np.mean(psnr)} ssim: {np.mean(ssim)} lpips: {np.mean(LPIPS_vgg)}')
psnr_all.append(psnr);ssim_all.append(ssim);LPIPS_vgg_all.append(LPIPS_vgg)
if not save_as_image:
imageio.mimwrite(f'{save_dir}/{scene}_spiral.mp4', np.stack(rgbs), fps=20, quality=10)
print(f'=====> all mean psnr {np.mean(psnr_all)} ssim: {np.mean(ssim_all)} lpips: {np.mean(LPIPS_vgg_all)}')
def render_blender_all_settings(scenes=['lego'], num_src_views=3, ckpt='base-3src-dense.tar',source_split='train', target_split='val', select_index=[30,60,90], view_types=[1]):
if 1 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 2 in view_types:
render_blender('dense', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 3 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 4 in view_types:
render_blender('far', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 5 in view_types:
render_blender('random', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 6 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 7 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 8 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
if 9 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
return None
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
parser.add_argument('--view_types', nargs="+", type=int,
help= 'Enter list of view types to render:' \
' 1 - nearest, 2 - dense, 3 - sparse, 4 - far, 5 - random, ' \
                        '6 - fixed nearest, 7 - fixed sparse, 8 - unseen nearest, 9 - unseen sparse')
parser.add_argument('--view_indexes', nargs="+", type=int, const=None, default=None,
help= 'default - all views (100)')
parser.add_argument('--scenes', nargs='+', default=[])
parser.add_argument('--ckpts', nargs='+', default=[])
parser.add_argument('--source', type=str, default='train')
parser.add_argument('--target', type=str, default='val')
args = parser.parse_args()
for ckpt in args.ckpts:
num_src_views = int(re.findall('[0-9]+', ckpt)[0])
render_blender_all_settings(scenes=args.scenes,
num_src_views=num_src_views,
ckpt=ckpt,
source_split=args.source,
target_split=args.target,
select_index=args.view_indexes,
view_types=args.view_types)
torch.cuda.empty_cache()
| true
| true
|
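A self-contained sketch of the source-view ranking idea implemented by get_pair_idx in the file above: source cameras are ordered by the L1 distance between their positions and the target camera position, and the nearest N indices are taken. The positions below are hypothetical.

import numpy as np

source_positions = np.array([[0.0, 0.0, 4.0],    # hypothetical camera centers
                             [1.0, 0.0, 4.0],
                             [3.0, 1.0, 4.0],
                             [0.0, 2.0, 4.0]])
target_position = np.array([[0.5, 0.0, 4.0]])

# L1 distance from every source camera to the target camera
dis = np.sum(np.abs(source_positions - target_position), axis=-1)
nearest = np.argsort(dis)[:3]    # indices of the three closest source views
print(nearest)                   # -> [0 1 3]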
790d0b17097b75a8c99d44990441c182fa50116e
| 256
|
py
|
Python
|
snippets_java/manage.py
|
edilio/snippets-javaos
|
e73d96876b98e021a4d6dd71582dad573a808931
|
[
"MIT"
] | null | null | null |
snippets_java/manage.py
|
edilio/snippets-javaos
|
e73d96876b98e021a4d6dd71582dad573a808931
|
[
"MIT"
] | null | null | null |
snippets_java/manage.py
|
edilio/snippets-javaos
|
e73d96876b98e021a4d6dd71582dad573a808931
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snippets_java.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.272727
| 77
| 0.777344
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snippets_java.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true
| true
|
790d0b4fb39541666568936c77711a563c883a0b
| 2,182
|
py
|
Python
|
data/p2DJ/New/program/cirq/startCirq347.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/cirq/startCirq347.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/cirq/startCirq347.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=17
c.append(cirq.Z.on(input_qubit[1])) # number=18
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=19
c.append(cirq.Y.on(input_qubit[1])) # number=2
c.append(cirq.Y.on(input_qubit[1])) # number=4
c.append(cirq.Y.on(input_qubit[1])) # number=3
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.X.on(input_qubit[0])) # number=8
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.X.on(input_qubit[0])) # number=11
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq347.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 31.171429
| 77
| 0.691567
|
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
from cirq.contrib.svg import SVGCircuit
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.Z.on(input_qubit[1]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.Y.on(input_qubit[1]))
c.append(cirq.Y.on(input_qubit[1]))
c.append(cirq.Y.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.X.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.X.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq347.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| true
| true
|
790d0bc8937f8d7233fdd3bde8c43090775d1bfb
| 250
|
py
|
Python
|
CodeChef/FCTRL2.py
|
tapaswenipathak/Competitive-Programming
|
97bba0f2ccdf587df93244a027050489f0905480
|
[
"MIT"
] | 2
|
2019-04-20T18:03:20.000Z
|
2019-08-17T21:20:47.000Z
|
CodeChef/FCTRL2.py
|
tapaswenipathak/Competitive-Programming
|
97bba0f2ccdf587df93244a027050489f0905480
|
[
"MIT"
] | null | null | null |
CodeChef/FCTRL2.py
|
tapaswenipathak/Competitive-Programming
|
97bba0f2ccdf587df93244a027050489f0905480
|
[
"MIT"
] | 1
|
2019-04-20T18:03:26.000Z
|
2019-04-20T18:03:26.000Z
|
import math
t = int(raw_input())
for i in range(t) :
n = int(raw_input())
print math.factorial(n)
'''Why is using math.factorial() faster?
Because many of the Python libraries are implemented in C or C++ and not in Python.
Hence the speed improves.'''
| 22.727273
| 74
| 0.684
|
import math
t = int(raw_input())
for i in range(t) :
n = int(raw_input())
print math.factorial(n)
'''Why is using math.factorial() faster?
Because many of the Python libraries are implemented in C or C++ and not in Python.
Hence the speed improves.'''
| false
| true
|
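A hedged illustration of the claim in the docstring above: math.factorial is implemented in C, so it typically outruns an equivalent pure-Python loop. Absolute numbers depend on the interpreter and machine.

import math
import timeit

def py_factorial(n):
    # straightforward pure-Python reference implementation
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result

n = 200
assert math.factorial(n) == py_factorial(n)
print('math.factorial:', timeit.timeit(lambda: math.factorial(n), number=10000))
print('pure Python   :', timeit.timeit(lambda: py_factorial(n), number=10000))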
790d0d154f090fac69313b75ff317cbf5ef6da28
| 450
|
py
|
Python
|
calculator/calculator.py
|
ShaharGotshtat/parse-and-calculate-with-rabbitmq
|
1cc781bd49f8b29596f773f18d640d4500ff9f70
|
[
"MIT"
] | null | null | null |
calculator/calculator.py
|
ShaharGotshtat/parse-and-calculate-with-rabbitmq
|
1cc781bd49f8b29596f773f18d640d4500ff9f70
|
[
"MIT"
] | null | null | null |
calculator/calculator.py
|
ShaharGotshtat/parse-and-calculate-with-rabbitmq
|
1cc781bd49f8b29596f773f18d640d4500ff9f70
|
[
"MIT"
] | null | null | null |
from rabbitmq_utils import read_messages
def solve_arithmetic_phrase(channel, method, properties, body):
with open('output.txt', 'a') as file:
try:
body_str = eval(body)
result = eval(body_str)
file.write(f'{body_str} = {result}\n')
return result
except Exception as e:
            print(f'Error while calculating "{body}": {str(e)}')
read_messages(solve_arithmetic_phrase)
| 28.125
| 68
| 0.624444
|
from rabbitmq_utils import read_messages
def solve_arithmetic_phrase(channel, method, properties, body):
with open('output.txt', 'a') as file:
try:
body_str = eval(body)
result = eval(body_str)
file.write(f'{body_str} = {result}\n')
return result
except Exception as e:
            print(f'Error while calculating "{body}": {str(e)}')
read_messages(solve_arithmetic_phrase)
| true
| true
|
790d0d44bbc0cf87007a21d70ee7872e067e2e4e
| 397
|
py
|
Python
|
apiproject/apiproject/asgi.py
|
vasulimited123/Django-Repository
|
0283dfd6396c58b52000c99667768145a8be3fd2
|
[
"MIT"
] | null | null | null |
apiproject/apiproject/asgi.py
|
vasulimited123/Django-Repository
|
0283dfd6396c58b52000c99667768145a8be3fd2
|
[
"MIT"
] | null | null | null |
apiproject/apiproject/asgi.py
|
vasulimited123/Django-Repository
|
0283dfd6396c58b52000c99667768145a8be3fd2
|
[
"MIT"
] | null | null | null |
"""
ASGI config for apiproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiproject.settings')
application = get_asgi_application()
| 23.352941
| 78
| 0.788413
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiproject.settings')
application = get_asgi_application()
| true
| true
|
790d0e3dc24ddaaa1a32a18ffd9f52882d01a2d3
| 1,176
|
py
|
Python
|
inlineplz/env/jenkins.py
|
CtrlZvi/inline-plz
|
208195372a8138dce78a165dd8410a8ce15aea80
|
[
"0BSD"
] | 30
|
2016-01-11T18:43:38.000Z
|
2022-01-29T19:09:53.000Z
|
inlineplz/env/jenkins.py
|
CtrlZvi/inline-plz
|
208195372a8138dce78a165dd8410a8ce15aea80
|
[
"0BSD"
] | 237
|
2016-01-09T23:01:19.000Z
|
2022-03-01T16:12:10.000Z
|
inlineplz/env/jenkins.py
|
CtrlZvi/inline-plz
|
208195372a8138dce78a165dd8410a8ce15aea80
|
[
"0BSD"
] | 14
|
2016-01-19T00:51:52.000Z
|
2022-01-12T20:49:31.000Z
|
# -*- coding: utf-8 -*-
import os
from ..env.base import EnvBase
try:
import urllib.parse as urlparse
except ImportError:
# pylint: disable=F0401
import urlparse
# https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project#Buildingasoftwareproject-JenkinsSetEnvironmentVariables
class Jenkins(EnvBase):
def __init__(self):
if os.environ.get("ghprbPullId") or os.environ.get("ghprbActualCommit"):
self.pull_request = os.environ.get("ghprbPullId")
self.owner = (
os.environ.get("GITHUB_REPO_OWNER")
or os.environ.get("ghprbPullLink").split("/")[-4]
)
self.repo = (
os.environ.get("GITHUB_REPO_NAME")
or os.environ.get("ghprbPullLink").split("/")[-3]
)
self.commit = os.environ.get("ghprbActualCommit")
self.interface = "github"
self.token = os.environ.get("GITHUB_TOKEN")
spliturl = urlparse.urlsplit(os.environ.get("ghprbPullLink"))
if spliturl.netloc != "github.com":
self.url = "{0}://{1}".format(spliturl.scheme, spliturl.netloc)
| 33.6
| 129
| 0.604592
|
import os
from ..env.base import EnvBase
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class Jenkins(EnvBase):
    def __init__(self):
        if os.environ.get("ghprbPullId") or os.environ.get("ghprbActualCommit"):
self.pull_request = os.environ.get("ghprbPullId")
self.owner = (
os.environ.get("GITHUB_REPO_OWNER")
or os.environ.get("ghprbPullLink").split("/")[-4]
)
self.repo = (
os.environ.get("GITHUB_REPO_NAME")
or os.environ.get("ghprbPullLink").split("/")[-3]
)
self.commit = os.environ.get("ghprbActualCommit")
self.interface = "github"
self.token = os.environ.get("GITHUB_TOKEN")
spliturl = urlparse.urlsplit(os.environ.get("ghprbPullLink"))
if spliturl.netloc != "github.com":
self.url = "{0}://{1}".format(spliturl.scheme, spliturl.netloc)
| true
| true
|
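A standalone sketch of the ghprbPullLink parsing done in the Jenkins class above: the repository owner and name are taken as the fourth- and third-from-last path segments of the pull-request URL. The URL is hypothetical.

link = 'https://github.com/octocat/hello-world/pull/42'   # hypothetical ghprbPullLink
parts = link.split('/')
owner, repo = parts[-4], parts[-3]
print(owner, repo)   # -> octocat hello-world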
790d0ea507fdc802318228f91936dfd5c9ccd77e
| 1,215
|
py
|
Python
|
backend/backend/views.py
|
mkorman9/python-build-system
|
6bbbdd6adc656a4e5b5e0bb375881fedc7a8303c
|
[
"MIT"
] | null | null | null |
backend/backend/views.py
|
mkorman9/python-build-system
|
6bbbdd6adc656a4e5b5e0bb375881fedc7a8303c
|
[
"MIT"
] | 8
|
2018-02-11T20:59:52.000Z
|
2018-02-12T12:39:46.000Z
|
backend/backend/views.py
|
mkorman9/django-url-shortener
|
6bbbdd6adc656a4e5b5e0bb375881fedc7a8303c
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse, HttpResponseRedirect
from rest_framework.decorators import api_view
from sdk.key_generation import generate_random_key
from sdk.storage import create_storage
from sdk.url import URL, ModelValidationError
storage = create_storage()
@api_view(['GET'])
def go_to(request, key, format=None):
url = storage.get(key)
if not url:
return JsonResponse(status=404, data={
'error': 'key not found'
})
return HttpResponseRedirect(redirect_to=url.address)
@api_view(['POST'])
def shorten(request, format=None):
raw_url = request.data.get('url')
if not raw_url:
return JsonResponse(status=400, data={
'error': 'missing url parameter'
})
try:
url = URL.parse(raw_url)
except ModelValidationError as e:
return JsonResponse(status=400, data={
'error': 'invalid URL',
'details': e.message
})
key = _store_url_and_get_key(url)
return JsonResponse(status=200, data={
'key': key
})
def _store_url_and_get_key(url):
while True:
key = generate_random_key()
if storage.set(key, url):
break
return key
| 23.823529
| 58
| 0.64856
|
from django.http import JsonResponse, HttpResponseRedirect
from rest_framework.decorators import api_view
from sdk.key_generation import generate_random_key
from sdk.storage import create_storage
from sdk.url import URL, ModelValidationError
storage = create_storage()
@api_view(['GET'])
def go_to(request, key, format=None):
url = storage.get(key)
if not url:
return JsonResponse(status=404, data={
'error': 'key not found'
})
return HttpResponseRedirect(redirect_to=url.address)
@api_view(['POST'])
def shorten(request, format=None):
raw_url = request.data.get('url')
if not raw_url:
return JsonResponse(status=400, data={
'error': 'missing url parameter'
})
try:
url = URL.parse(raw_url)
except ModelValidationError as e:
return JsonResponse(status=400, data={
'error': 'invalid URL',
'details': e.message
})
key = _store_url_and_get_key(url)
return JsonResponse(status=200, data={
'key': key
})
def _store_url_and_get_key(url):
while True:
key = generate_random_key()
if storage.set(key, url):
break
return key
| true
| true
|
790d0ed5e2a12bd3bbe7d14fda9458dd742f8023
| 27,751
|
gyp
|
Python
|
electron.gyp
|
frantic/electron
|
4ebe71655b1575f985ddde5760f8f5cde8f03f0d
|
[
"MIT"
] | null | null | null |
electron.gyp
|
frantic/electron
|
4ebe71655b1575f985ddde5760f8f5cde8f03f0d
|
[
"MIT"
] | null | null | null |
electron.gyp
|
frantic/electron
|
4ebe71655b1575f985ddde5760f8f5cde8f03f0d
|
[
"MIT"
] | 1
|
2018-10-05T17:29:23.000Z
|
2018-10-05T17:29:23.000Z
|
{
'variables': {
'project_name%': 'electron',
'product_name%': 'Electron',
'company_name%': 'GitHub, Inc',
'company_abbr%': 'github',
'version%': '0.0.0-dev',
'js2c_input_dir': '<(SHARED_INTERMEDIATE_DIR)/js2c',
},
'includes': [
'features.gypi',
'filenames.gypi',
'native_mate/native_mate_files.gypi',
],
'target_defaults': {
'defines': [
'ATOM_PRODUCT_NAME="<(product_name)"',
'ATOM_PROJECT_NAME="<(project_name)"',
],
'conditions': [
['OS=="mac"', {
'mac_framework_dirs': [
'<(source_root)/external_binaries',
],
}],
['enable_desktop_capturer==1', {
'defines': [
'ENABLE_DESKTOP_CAPTURER',
],
}], # enable_desktop_capturer==1
['enable_osr==1', {
'defines': [
'ENABLE_OSR',
],
}], # enable_osr==1
['enable_pdf_viewer==1', {
'defines': [
'ENABLE_PDF_VIEWER',
],
}], # enable_pdf_viewer
['enable_run_as_node==1', {
'defines': [
'ENABLE_RUN_AS_NODE',
],
}], # enable_run_as_node
['enable_view_api==1', {
'defines': [
'ENABLE_VIEW_API',
],
}], # enable_view_api
['enable_pepper_flash==1', {
'defines': [
'ENABLE_PEPPER_FLASH',
],
}], # enable_pepper_flash
],
},
'targets': [
{
'target_name': '<(project_name)',
'type': 'executable',
'dependencies': [
'js2asar',
'app2asar',
'<(project_name)_lib',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'conditions': [
['OS=="mac"', {
'product_name': '<(product_name)',
'mac_bundle': 1,
'dependencies!': [
'<(project_name)_lib',
],
'dependencies': [
'<(project_name)_framework',
'<(project_name)_helper',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name)',
'INFOPLIST_FILE': 'atom/browser/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../Frameworks',
],
},
'mac_bundle_resources': [
'<@(bundle_sources)',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'<(PRODUCT_DIR)/<(product_name) Helper.app',
'<(PRODUCT_DIR)/<(product_name) Framework.framework',
],
},
],
'postbuilds': [
{
              # This postbuild step is responsible for creating the following
# helpers:
#
# <(product_name) EH.app and <(product_name) NP.app are created
# from <(product_name).app.
#
# The EH helper is marked for an executable heap. The NP helper
# is marked for no PIE (ASLR).
'postbuild_name': 'Make More Helpers',
'action': [
'tools/mac/make_more_helpers.sh',
'Frameworks',
'<(product_name)',
],
},
# The application doesn't have real localizations, it just has
# empty .lproj directories, which is enough to convince Cocoa
# that Electron supports those languages.
{
'postbuild_name': 'Make Empty Localizations',
'variables': {
'apply_locales_cmd': ['python', 'tools/mac/apply_locales.py'],
'locale_dirs': [
'>!@(<(apply_locales_cmd) -d ZZLOCALE.lproj <(locales))',
],
},
'action': [
'tools/mac/make_locale_dirs.sh',
'<@(locale_dirs)',
],
},
],
'conditions': [
['mas_build==0', {
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
],
}],
['mas_build==1', {
'dependencies': [
'<(project_name)_login_helper',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Library/LoginItems',
'files': [
'<(PRODUCT_DIR)/<(product_name) Login Helper.app',
],
},
],
}],
],
        }], # OS=="mac"
['OS=="win"', {
'msvs_settings': {
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': 'atom/browser/resources/win/atom.manifest',
},
'VCLinkerTool': {
# Chrome builds with this minimum environment which makes e.g.
# GetSystemMetrics(SM_CXSIZEFRAME) return Windows XP/2003
# compatible metrics. See: https://crbug.com/361720
#
# The following two settings translate to a linker flag
# of /SUBSYSTEM:WINDOWS,5.02
'MinimumRequiredVersion': '5.02',
'SubSystem': '2',
'AdditionalDependencies': [
'wtsapi32.lib',
],
},
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(libchromiumcontent_dir)/ffmpeg.dll',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/libEGL.dll',
'<(libchromiumcontent_dir)/libGLESv2.dll',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/v8_context_snapshot.bin',
'external_binaries/d3dcompiler_47.dll',
],
},
],
}, {
'dependencies': [
'vendor/breakpad/breakpad.gyp:dump_syms#host',
],
}], # OS=="win"
['OS=="linux"', {
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<(libchromiumcontent_dir)/libffmpeg.so',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/v8_context_snapshot.bin',
],
},
],
}], # OS=="linux"
],
}, # target <(project_name)
{
'target_name': '<(project_name)_lib',
'type': 'static_library',
'dependencies': [
'atom_js2c',
'brightray/brightray.gyp:brightray',
'vendor/node/node.gyp:node_lib',
],
'defines': [
# We need to access internal implementations of Node.
'NODE_WANT_INTERNALS=1',
'NODE_SHARED_MODE',
'HAVE_OPENSSL=1',
'HAVE_INSPECTOR=1',
# Disable warnings for g_settings_list_schemas.
'GLIB_DISABLE_DEPRECATION_WARNINGS',
# Defined in Chromium but not exposed in its gyp file.
'V8_USE_EXTERNAL_STARTUP_DATA',
# Import V8 symbols from shared library (node.dll / libnode.so)
'USING_V8_SHARED',
'USING_V8_PLATFORM_SHARED',
'USING_V8_BASE_SHARED',
# See Chromium src/third_party/protobuf/BUILD.gn
'GOOGLE_PROTOBUF_NO_RTTI',
'GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER',
],
'sources': [
'<@(lib_sources)',
],
'include_dirs': [
'.',
'chromium_src',
'native_mate',
# Include atom_natives.h.
'<(SHARED_INTERMEDIATE_DIR)',
# Include directories for uv and node.
'vendor/node/src',
'vendor/node/deps/http_parser',
'vendor/node/deps/uv/include',
# The `node.h` is using `#include"v8.h"`.
'<(libchromiumcontent_src_dir)/v8/include',
# The `node.h` is using `#include"ares.h"`.
'vendor/node/deps/cares/include',
# The `third_party/WebKit/Source/platform/weborigin/SchemeRegistry.h` is using `platform/PlatformExport.h`.
'<(libchromiumcontent_src_dir)/third_party/WebKit/Source',
# The 'third_party/libyuv/include/libyuv/scale_argb.h' is using 'libyuv/basic_types.h'.
'<(libchromiumcontent_src_dir)/third_party/libyuv/include',
# The 'third_party/webrtc/modules/desktop_capture/desktop_frame.h' is using 'webrtc/base/scoped_ptr.h'.
'<(libchromiumcontent_src_dir)/third_party/',
'<(libchromiumcontent_src_dir)/components/cdm',
'<(libchromiumcontent_src_dir)/third_party/widevine',
'<(libchromiumcontent_src_dir)/third_party/widevine/cdm/stub',
'<(libchromiumcontent_src_dir)/third_party/protobuf/src',
# The 'third_party/webrtc/modules/desktop_capture/desktop_capture_options.h' is using 'rtc_base/constructormagic.h'.
'<(libchromiumcontent_src_dir)/third_party/webrtc',
# leveldb includes are required
'<(libchromiumcontent_src_dir)/third_party/leveldatabase/src',
'<(libchromiumcontent_src_dir)/third_party/leveldatabase/src/include',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
'export_dependent_settings': [
'brightray/brightray.gyp:brightray',
],
'conditions': [
['enable_pdf_viewer==1', {
'dependencies': [
'vendor/pdf_viewer/pdf_viewer.gyp:pdf_viewer',
],
}], # enable_pdf_viewer
['enable_pepper_flash==1', {
'include_dirs': [
'<(libchromiumcontent_src_dir)/chrome/browser/renderer_host/pepper',
'<(libchromiumcontent_src_dir)/chrome/renderer/pepper',
],
'link_settings': {
'conditions': [
['OS=="win"', {
'libraries': [
'<(libchromiumcontent_dir)/pepper_flash.lib',
]
}, {
'libraries': [
'<(libchromiumcontent_dir)/libpepper_flash.a',
]
}],
],
},
}], # enable_pepper_flash
['libchromiumcontent_component', {
'link_settings': {
'libraries': [ '<@(libchromiumcontent_v8_libraries)' ],
},
}],
['OS=="win"', {
'sources': [
'<@(lib_sources_win)',
],
'link_settings': {
'libraries': [
'-limm32.lib',
'-lgdi32.lib',
'-loleacc.lib',
'-lcomctl32.lib',
'-lcomdlg32.lib',
'-lwininet.lib',
'-lwinmm.lib',
'-lcrypt32.lib',
'-luiautomationcore.lib',
'-lPropsys.lib'
],
},
'dependencies': [
# Node is built as static_library on Windows, so we also need to
# include its dependencies here.
'vendor/node/deps/cares/cares.gyp:cares',
'vendor/node/deps/http_parser/http_parser.gyp:http_parser',
'vendor/node/deps/uv/uv.gyp:libuv',
'vendor/node/deps/zlib/zlib.gyp:zlib',
# Build with breakpad support.
'vendor/breakpad/breakpad.gyp:breakpad_handler',
'vendor/breakpad/breakpad.gyp:breakpad_sender',
],
}], # OS=="win"
['OS=="mac" and mas_build==0', {
'dependencies': [
'vendor/crashpad/client/client.gyp:crashpad_client',
'vendor/crashpad/handler/handler.gyp:crashpad_handler',
],
'link_settings': {
# Do not link with QTKit for mas build.
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
],
},
'xcode_settings': {
          # ReactiveCocoa, which is used by Squirrel, requires using __weak.
'CLANG_ENABLE_OBJC_WEAK': 'YES',
'OTHER_CFLAGS': [
'-Wunguarded-availability',
'-Wobjc-missing-property-synthesis',
],
},
}], # OS=="mac" and mas_build==0
['OS=="mac" and mas_build==1', {
'defines': [
'MAS_BUILD',
],
'sources!': [
'atom/browser/auto_updater_mac.mm',
'atom/common/crash_reporter/crash_reporter_mac.h',
'atom/common/crash_reporter/crash_reporter_mac.mm',
],
'dependencies': [
# Somehow we have code from Chromium using crashpad, very likely
# from components/crash.
# Since we do not actually invoke code from components/crash, this
# dependency should be eventually optimized out by linker.
'vendor/crashpad/client/client.gyp:crashpad_client',
],
}], # OS=="mac" and mas_build==1
['OS=="linux"', {
'sources': [
'<@(lib_sources_linux)',
'<@(lib_sources_nss)',
],
'link_settings': {
'ldflags': [
            # Make the binary search for libraries under the current directory,
            # so we don't have to manually set $LD_LIBRARY_PATH:
# http://serverfault.com/questions/279068/cant-find-so-in-the-same-directory-as-the-executable
'-Wl,-rpath=\$$ORIGIN',
# Make native module dynamic loading work.
'-rdynamic',
],
},
# Required settings of using breakpad.
'cflags_cc': [
'-Wno-empty-body',
],
'include_dirs': [
'vendor/breakpad/src',
],
'dependencies': [
'vendor/breakpad/breakpad.gyp:breakpad_client',
],
}], # OS=="linux"
['OS=="linux" and clang==1', {
# Required settings of using breakpad.
'cflags_cc': [
'-Wno-reserved-user-defined-literal',
],
}], # OS=="linux" and clang==1
],
}, # target <(product_name)_lib
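    # js2asar packs the JavaScript under lib/ into electron.asar inside the
    # application's resources directory.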
{
'target_name': 'js2asar',
'type': 'none',
'actions': [
{
'action_name': 'js2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(js_sources)',
],
'outputs': [
'<(resources_path)/electron.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'lib',
'<@(_inputs)',
],
}
],
}, # target js2asar
{
'target_name': 'app2asar',
'type': 'none',
'actions': [
{
'action_name': 'app2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(default_app_sources)',
],
'outputs': [
'<(resources_path)/default_app.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'default_app',
'<@(_inputs)',
],
}
],
}, # target app2asar
{
'target_name': 'atom_js2c_copy',
'type': 'none',
'copies': [
{
'destination': '<(js2c_input_dir)',
'files': [
'<@(js2c_sources)',
],
},
],
}, # target atom_js2c_copy
{
'target_name': 'atom_browserify',
'type': 'none',
'dependencies': [
# depend on this target to ensure the '<(js2c_input_dir)' is created
'atom_js2c_copy',
],
'variables': {
'sandbox_args': [
'./lib/sandboxed_renderer/init.js',
'-r',
'./lib/sandboxed_renderer/api/exports/electron.js:electron',
'-r',
'./lib/sandboxed_renderer/api/exports/fs.js:fs',
'-r',
'./lib/sandboxed_renderer/api/exports/os.js:os',
'-r',
'./lib/sandboxed_renderer/api/exports/path.js:path',
'-r',
'./lib/sandboxed_renderer/api/exports/child_process.js:child_process'
],
'isolated_args': [
'lib/isolated_renderer/init.js',
]
},
'actions': [
{
'action_name': 'atom_browserify_sandbox',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(sandbox_args))'
],
'outputs': [
'<(js2c_input_dir)/preload_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(sandbox_args)',
'-o',
'<@(_outputs)',
],
},
{
'action_name': 'atom_browserify_isolated_context',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(isolated_args))'
],
'outputs': [
'<(js2c_input_dir)/isolated_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(isolated_args)',
'-o',
'<@(_outputs)',
],
},
],
}, # target atom_browserify
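    # atom_js2c embeds the copied JS files and the browserify bundles into a
    # generated header (atom_natives.h) that <(project_name)_lib includes via
    # SHARED_INTERMEDIATE_DIR.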
{
'target_name': 'atom_js2c',
'type': 'none',
'dependencies': [
'atom_js2c_copy',
'atom_browserify',
],
'actions': [
{
'action_name': 'atom_js2c',
'inputs': [
# List all input files that should trigger a rebuild with js2c
'<@(js2c_sources)',
'<(js2c_input_dir)/preload_bundle.js',
'<(js2c_input_dir)/isolated_bundle.js',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/atom_natives.h',
],
'action': [
'python',
'tools/js2c.py',
'vendor/node',
'<@(_outputs)',
'<(js2c_input_dir)',
],
}
],
}, # target atom_js2c
],
'conditions': [
['OS=="mac"', {
'targets': [
{
'target_name': '<(project_name)_framework',
'product_name': '<(product_name) Framework',
'type': 'shared_library',
'dependencies': [
'<(project_name)_lib',
],
'sources': [
'<@(framework_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'export_dependent_settings': [
'<(project_name)_lib',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
'$(SDKROOT)/System/Library/Frameworks/Quartz.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SecurityInterface.framework',
'$(SDKROOT)/System/Library/Frameworks/ServiceManagement.framework',
'$(SDKROOT)/System/Library/Frameworks/StoreKit.framework',
],
},
'mac_bundle': 1,
'mac_bundle_resources': [
'atom/common/resources/mac/MainMenu.xib',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/v8_context_snapshot.bin',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).framework',
'INFOPLIST_FILE': 'atom/common/resources/mac/Info.plist',
'LD_DYLIB_INSTALL_NAME': '@rpath/<(product_name) Framework.framework/<(product_name) Framework',
'LD_RUNPATH_SEARCH_PATHS': [
'@loader_path/Libraries',
],
'OTHER_LDFLAGS': [
'-ObjC',
],
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<(libchromiumcontent_dir)/libffmpeg.dylib',
],
}],
],
},
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Libraries',
'files': [
'<@(copied_libraries)',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Fix path of libnode',
'action': [
'install_name_tool',
'-change',
'/usr/local/lib/libnode.dylib',
'@rpath/libnode.dylib',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Versions/A/<(product_name) Framework',
],
},
{
'postbuild_name': 'Add symlinks for framework subdirectories',
'action': [
'tools/mac/create-framework-subdir-symlinks.sh',
'<(product_name) Framework',
'Libraries',
],
},
{
'postbuild_name': 'Copy locales',
'action': [
'tools/mac/copy-locales.py',
'-d',
'<(libchromiumcontent_dir)/locales',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Resources',
'<@(locales)',
],
},
],
'conditions': [
['enable_pdf_viewer==1', {
'mac_bundle_resources': [
'<(PRODUCT_DIR)/pdf_viewer_resources.pak',
],
}], # enable_pdf_viewer
['mas_build==0', {
'link_settings': {
'libraries': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Resources',
'files': [
'<(PRODUCT_DIR)/crashpad_handler',
],
},
],
}],
],
}, # target framework
{
'target_name': '<(project_name)_helper',
'product_name': '<(product_name) Helper',
'type': 'executable',
'dependencies': [
'<(project_name)_framework',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).helper',
'INFOPLIST_FILE': 'atom/renderer/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../../..',
],
},
}, # target helper
{
'target_name': '<(project_name)_login_helper',
'product_name': '<(product_name) Login Helper',
'type': 'executable',
'sources': [
'<@(login_helper_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
],
},
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).loginhelper',
'INFOPLIST_FILE': 'atom/app/resources/mac/loginhelper-Info.plist',
'OTHER_LDFLAGS': [
'-ObjC',
],
},
}, # target login_helper
],
    }], # OS=="mac"
],
}
| 33.966952
| 124
| 0.474145
|
{
'variables': {
'project_name%': 'electron',
'product_name%': 'Electron',
'company_name%': 'GitHub, Inc',
'company_abbr%': 'github',
'version%': '0.0.0-dev',
'js2c_input_dir': '<(SHARED_INTERMEDIATE_DIR)/js2c',
},
'includes': [
'features.gypi',
'filenames.gypi',
'native_mate/native_mate_files.gypi',
],
'target_defaults': {
'defines': [
'ATOM_PRODUCT_NAME="<(product_name)"',
'ATOM_PROJECT_NAME="<(project_name)"',
],
'conditions': [
['OS=="mac"', {
'mac_framework_dirs': [
'<(source_root)/external_binaries',
],
}],
['enable_desktop_capturer==1', {
'defines': [
'ENABLE_DESKTOP_CAPTURER',
],
}],
['enable_osr==1', {
'defines': [
'ENABLE_OSR',
],
}],
['enable_pdf_viewer==1', {
'defines': [
'ENABLE_PDF_VIEWER',
],
}],
['enable_run_as_node==1', {
'defines': [
'ENABLE_RUN_AS_NODE',
],
}],
['enable_view_api==1', {
'defines': [
'ENABLE_VIEW_API',
],
}],
['enable_pepper_flash==1', {
'defines': [
'ENABLE_PEPPER_FLASH',
],
}],
],
},
'targets': [
{
'target_name': '<(project_name)',
'type': 'executable',
'dependencies': [
'js2asar',
'app2asar',
'<(project_name)_lib',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'conditions': [
['OS=="mac"', {
'product_name': '<(product_name)',
'mac_bundle': 1,
'dependencies!': [
'<(project_name)_lib',
],
'dependencies': [
'<(project_name)_framework',
'<(project_name)_helper',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name)',
'INFOPLIST_FILE': 'atom/browser/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../Frameworks',
],
},
'mac_bundle_resources': [
'<@(bundle_sources)',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'<(PRODUCT_DIR)/<(product_name) Helper.app',
'<(PRODUCT_DIR)/<(product_name) Framework.framework',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Make More Helpers',
'action': [
'tools/mac/make_more_helpers.sh',
'Frameworks',
'<(product_name)',
],
},
          # Create empty .lproj directories; this is enough to convince Cocoa
          # that Electron supports those languages.
{
'postbuild_name': 'Make Empty Localizations',
'variables': {
'apply_locales_cmd': ['python', 'tools/mac/apply_locales.py'],
'locale_dirs': [
'>!@(<(apply_locales_cmd) -d ZZLOCALE.lproj <(locales))',
],
},
'action': [
'tools/mac/make_locale_dirs.sh',
'<@(locale_dirs)',
],
},
],
'conditions': [
['mas_build==0', {
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
],
}],
['mas_build==1', {
'dependencies': [
'<(project_name)_login_helper',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Library/LoginItems',
'files': [
'<(PRODUCT_DIR)/<(product_name) Login Helper.app',
],
},
],
}],
],
      }], # OS=="mac"
['OS=="win"', {
'msvs_settings': {
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': 'atom/browser/resources/win/atom.manifest',
},
'VCLinkerTool': {
# Chrome builds with this minimum environment which makes e.g.
# GetSystemMetrics(SM_CXSIZEFRAME) return Windows XP/2003
# compatible metrics. See: https://crbug.com/361720
#
# The following two settings translate to a linker flag
# of /SUBSYSTEM:WINDOWS,5.02
'MinimumRequiredVersion': '5.02',
'SubSystem': '2',
'AdditionalDependencies': [
'wtsapi32.lib',
],
},
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(libchromiumcontent_dir)/ffmpeg.dll',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/libEGL.dll',
'<(libchromiumcontent_dir)/libGLESv2.dll',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/v8_context_snapshot.bin',
'external_binaries/d3dcompiler_47.dll',
],
},
],
}, {
'dependencies': [
          'vendor/breakpad/breakpad.gyp:dump_syms#host',
],
}], # OS=="win"
['OS=="linux"', {
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<(libchromiumcontent_dir)/libffmpeg.so',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/v8_context_snapshot.bin',
],
},
],
}], # OS=="linux"
],
}, # target <(project_name)
{
'target_name': '<(project_name)_lib',
'type': 'static_library',
'dependencies': [
'atom_js2c',
'brightray/brightray.gyp:brightray',
'vendor/node/node.gyp:node_lib',
],
'defines': [
# We need to access internal implementations of Node.
'NODE_WANT_INTERNALS=1',
'NODE_SHARED_MODE',
'HAVE_OPENSSL=1',
'HAVE_INSPECTOR=1',
# Disable warnings for g_settings_list_schemas.
'GLIB_DISABLE_DEPRECATION_WARNINGS',
# Defined in Chromium but not exposed in its gyp file.
'V8_USE_EXTERNAL_STARTUP_DATA',
# Import V8 symbols from shared library (node.dll / libnode.so)
'USING_V8_SHARED',
'USING_V8_PLATFORM_SHARED',
'USING_V8_BASE_SHARED',
# See Chromium src/third_party/protobuf/BUILD.gn
'GOOGLE_PROTOBUF_NO_RTTI',
'GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER',
],
'sources': [
'<@(lib_sources)',
],
'include_dirs': [
'.',
'chromium_src',
'native_mate',
# Include atom_natives.h.
'<(SHARED_INTERMEDIATE_DIR)',
# Include directories for uv and node.
'vendor/node/src',
'vendor/node/deps/http_parser',
'vendor/node/deps/uv/include',
# The `node.h` is using `#include"v8.h"`.
'<(libchromiumcontent_src_dir)/v8/include',
# The `node.h` is using `#include"ares.h"`.
'vendor/node/deps/cares/include',
# The `third_party/WebKit/Source/platform/weborigin/SchemeRegistry.h` is using `platform/PlatformExport.h`.
'<(libchromiumcontent_src_dir)/third_party/WebKit/Source',
# The 'third_party/libyuv/include/libyuv/scale_argb.h' is using 'libyuv/basic_types.h'.
'<(libchromiumcontent_src_dir)/third_party/libyuv/include',
# The 'third_party/webrtc/modules/desktop_capture/desktop_frame.h' is using 'webrtc/base/scoped_ptr.h'.
'<(libchromiumcontent_src_dir)/third_party/',
'<(libchromiumcontent_src_dir)/components/cdm',
'<(libchromiumcontent_src_dir)/third_party/widevine',
'<(libchromiumcontent_src_dir)/third_party/widevine/cdm/stub',
'<(libchromiumcontent_src_dir)/third_party/protobuf/src',
# The 'third_party/webrtc/modules/desktop_capture/desktop_capture_options.h' is using 'rtc_base/constructormagic.h'.
'<(libchromiumcontent_src_dir)/third_party/webrtc',
# leveldb includes are required
'<(libchromiumcontent_src_dir)/third_party/leveldatabase/src',
'<(libchromiumcontent_src_dir)/third_party/leveldatabase/src/include',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
'export_dependent_settings': [
'brightray/brightray.gyp:brightray',
],
'conditions': [
['enable_pdf_viewer==1', {
'dependencies': [
'vendor/pdf_viewer/pdf_viewer.gyp:pdf_viewer',
],
}], # enable_pdf_viewer
['enable_pepper_flash==1', {
'include_dirs': [
'<(libchromiumcontent_src_dir)/chrome/browser/renderer_host/pepper',
'<(libchromiumcontent_src_dir)/chrome/renderer/pepper',
],
'link_settings': {
'conditions': [
['OS=="win"', {
'libraries': [
'<(libchromiumcontent_dir)/pepper_flash.lib',
]
}, {
'libraries': [
'<(libchromiumcontent_dir)/libpepper_flash.a',
]
}],
],
},
}], # enable_pepper_flash
['libchromiumcontent_component', {
'link_settings': {
'libraries': [ '<@(libchromiumcontent_v8_libraries)' ],
},
}],
['OS=="win"', {
'sources': [
'<@(lib_sources_win)',
],
'link_settings': {
'libraries': [
'-limm32.lib',
'-lgdi32.lib',
'-loleacc.lib',
'-lcomctl32.lib',
'-lcomdlg32.lib',
'-lwininet.lib',
'-lwinmm.lib',
'-lcrypt32.lib',
'-luiautomationcore.lib',
'-lPropsys.lib'
],
},
'dependencies': [
# Node is built as static_library on Windows, so we also need to
# include its dependencies here.
'vendor/node/deps/cares/cares.gyp:cares',
'vendor/node/deps/http_parser/http_parser.gyp:http_parser',
'vendor/node/deps/uv/uv.gyp:libuv',
'vendor/node/deps/zlib/zlib.gyp:zlib',
# Build with breakpad support.
'vendor/breakpad/breakpad.gyp:breakpad_handler',
'vendor/breakpad/breakpad.gyp:breakpad_sender',
],
}], # OS=="win"
['OS=="mac" and mas_build==0', {
'dependencies': [
'vendor/crashpad/client/client.gyp:crashpad_client',
'vendor/crashpad/handler/handler.gyp:crashpad_handler',
],
'link_settings': {
# Do not link with QTKit for mas build.
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
],
},
'xcode_settings': {
          # ReactiveCocoa, which is used by Squirrel, requires using __weak.
'CLANG_ENABLE_OBJC_WEAK': 'YES',
'OTHER_CFLAGS': [
'-Wunguarded-availability',
'-Wobjc-missing-property-synthesis',
],
},
}], # OS=="mac" and mas_build==0
['OS=="mac" and mas_build==1', {
'defines': [
'MAS_BUILD',
],
'sources!': [
'atom/browser/auto_updater_mac.mm',
'atom/common/crash_reporter/crash_reporter_mac.h',
'atom/common/crash_reporter/crash_reporter_mac.mm',
],
'dependencies': [
# Somehow we have code from Chromium using crashpad, very likely
# from components/crash.
# Since we do not actually invoke code from components/crash, this
# dependency should be eventually optimized out by linker.
'vendor/crashpad/client/client.gyp:crashpad_client',
],
}], # OS=="mac" and mas_build==1
['OS=="linux"', {
'sources': [
'<@(lib_sources_linux)',
'<@(lib_sources_nss)',
],
'link_settings': {
'ldflags': [
            # Make the binary search for libraries under the current directory,
            # so we don't have to manually set $LD_LIBRARY_PATH:
'-Wl,-rpath=\$$ORIGIN',
'-rdynamic',
],
},
'cflags_cc': [
'-Wno-empty-body',
],
'include_dirs': [
'vendor/breakpad/src',
],
'dependencies': [
'vendor/breakpad/breakpad.gyp:breakpad_client',
],
}],
['OS=="linux" and clang==1', {
'cflags_cc': [
'-Wno-reserved-user-defined-literal',
],
}],
],
},
{
'target_name': 'js2asar',
'type': 'none',
'actions': [
{
'action_name': 'js2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(js_sources)',
],
'outputs': [
'<(resources_path)/electron.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'lib',
'<@(_inputs)',
],
}
],
},
{
'target_name': 'app2asar',
'type': 'none',
'actions': [
{
'action_name': 'app2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(default_app_sources)',
],
'outputs': [
'<(resources_path)/default_app.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'default_app',
'<@(_inputs)',
],
}
],
},
{
'target_name': 'atom_js2c_copy',
'type': 'none',
'copies': [
{
'destination': '<(js2c_input_dir)',
'files': [
'<@(js2c_sources)',
],
},
],
},
{
'target_name': 'atom_browserify',
'type': 'none',
'dependencies': [
'atom_js2c_copy',
],
'variables': {
'sandbox_args': [
'./lib/sandboxed_renderer/init.js',
'-r',
'./lib/sandboxed_renderer/api/exports/electron.js:electron',
'-r',
'./lib/sandboxed_renderer/api/exports/fs.js:fs',
'-r',
'./lib/sandboxed_renderer/api/exports/os.js:os',
'-r',
'./lib/sandboxed_renderer/api/exports/path.js:path',
'-r',
'./lib/sandboxed_renderer/api/exports/child_process.js:child_process'
],
'isolated_args': [
'lib/isolated_renderer/init.js',
]
},
'actions': [
{
'action_name': 'atom_browserify_sandbox',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(sandbox_args))'
],
'outputs': [
'<(js2c_input_dir)/preload_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(sandbox_args)',
'-o',
'<@(_outputs)',
],
},
{
'action_name': 'atom_browserify_isolated_context',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(isolated_args))'
],
'outputs': [
'<(js2c_input_dir)/isolated_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(isolated_args)',
'-o',
'<@(_outputs)',
],
},
],
},
{
'target_name': 'atom_js2c',
'type': 'none',
'dependencies': [
'atom_js2c_copy',
'atom_browserify',
],
'actions': [
{
'action_name': 'atom_js2c',
'inputs': [
'<@(js2c_sources)',
'<(js2c_input_dir)/preload_bundle.js',
'<(js2c_input_dir)/isolated_bundle.js',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/atom_natives.h',
],
'action': [
'python',
'tools/js2c.py',
'vendor/node',
'<@(_outputs)',
'<(js2c_input_dir)',
],
}
],
},
],
'conditions': [
['OS=="mac"', {
'targets': [
{
'target_name': '<(project_name)_framework',
'product_name': '<(product_name) Framework',
'type': 'shared_library',
'dependencies': [
'<(project_name)_lib',
],
'sources': [
'<@(framework_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'export_dependent_settings': [
'<(project_name)_lib',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
'$(SDKROOT)/System/Library/Frameworks/Quartz.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SecurityInterface.framework',
'$(SDKROOT)/System/Library/Frameworks/ServiceManagement.framework',
'$(SDKROOT)/System/Library/Frameworks/StoreKit.framework',
],
},
'mac_bundle': 1,
'mac_bundle_resources': [
'atom/common/resources/mac/MainMenu.xib',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/v8_context_snapshot.bin',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).framework',
'INFOPLIST_FILE': 'atom/common/resources/mac/Info.plist',
'LD_DYLIB_INSTALL_NAME': '@rpath/<(product_name) Framework.framework/<(product_name) Framework',
'LD_RUNPATH_SEARCH_PATHS': [
'@loader_path/Libraries',
],
'OTHER_LDFLAGS': [
'-ObjC',
],
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<(libchromiumcontent_dir)/libffmpeg.dylib',
],
}],
],
},
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Libraries',
'files': [
'<@(copied_libraries)',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Fix path of libnode',
'action': [
'install_name_tool',
'-change',
'/usr/local/lib/libnode.dylib',
'@rpath/libnode.dylib',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Versions/A/<(product_name) Framework',
],
},
{
'postbuild_name': 'Add symlinks for framework subdirectories',
'action': [
'tools/mac/create-framework-subdir-symlinks.sh',
'<(product_name) Framework',
'Libraries',
],
},
{
'postbuild_name': 'Copy locales',
'action': [
'tools/mac/copy-locales.py',
'-d',
'<(libchromiumcontent_dir)/locales',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Resources',
'<@(locales)',
],
},
],
'conditions': [
['enable_pdf_viewer==1', {
'mac_bundle_resources': [
'<(PRODUCT_DIR)/pdf_viewer_resources.pak',
],
}],
['mas_build==0', {
'link_settings': {
'libraries': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Resources',
'files': [
'<(PRODUCT_DIR)/crashpad_handler',
],
},
],
}],
],
},
{
'target_name': '<(project_name)_helper',
'product_name': '<(product_name) Helper',
'type': 'executable',
'dependencies': [
'<(project_name)_framework',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).helper',
'INFOPLIST_FILE': 'atom/renderer/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../../..',
],
},
},
{
'target_name': '<(project_name)_login_helper',
'product_name': '<(product_name) Login Helper',
'type': 'executable',
'sources': [
'<@(login_helper_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
],
},
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).loginhelper',
'INFOPLIST_FILE': 'atom/app/resources/mac/loginhelper-Info.plist',
'OTHER_LDFLAGS': [
'-ObjC',
],
},
},
],
}],
],
}
| true
| true
|
790d0f0d3713353db16a2853b94844f519067aee
| 3,458
|
py
|
Python
|
examples/mnist_elastic_docker/mnist_slp_estimator.py
|
Pandinosaurus/KungFu
|
80dfa463450330e920b413f65cc49d8e013b84a9
|
[
"Apache-2.0"
] | 291
|
2019-10-25T16:37:59.000Z
|
2022-03-17T21:47:09.000Z
|
examples/mnist_elastic_docker/mnist_slp_estimator.py
|
Pandinosaurus/KungFu
|
80dfa463450330e920b413f65cc49d8e013b84a9
|
[
"Apache-2.0"
] | 56
|
2019-10-26T08:25:33.000Z
|
2021-09-07T11:11:51.000Z
|
examples/mnist_elastic_docker/mnist_slp_estimator.py
|
Pandinosaurus/KungFu
|
80dfa463450330e920b413f65cc49d8e013b84a9
|
[
"Apache-2.0"
] | 53
|
2019-10-25T17:45:40.000Z
|
2022-02-08T13:09:39.000Z
|
import argparse
import functools
import operator
import os
import numpy as np
import tensorflow as tf
from kungfu.tensorflow.v1.helpers.mnist import load_datasets
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
def parse_args():
p = argparse.ArgumentParser(description='Example.')
p.add_argument('--data-dir', type=str, default='.', help='')
p.add_argument('--model-dir', type=str, default='.', help='')
p.add_argument('--kf-optimizer', type=str, default='sync_sgd', help='')
p.add_argument('--batch-size', type=int, default=100, help='')
p.add_argument('--num-epochs', type=int, default=1, help='')
p.add_argument('--learning-rate', type=float, default=0.01, help='')
return p.parse_args()
def slp(x, logits):
n = functools.reduce(operator.mul, [int(d) for d in x.shape[1:]], 1)
output = tf.layers.dense(inputs=tf.reshape(x, [-1, n]), units=logits)
return output, tf.argmax(output, axis=1)
def model_fn(features, labels, mode):
output, predictions = slp(features['x'], 10)
loss = tf.losses.sparse_softmax_cross_entropy(tf.cast(labels, tf.int32),
output)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions)
}
optimizer = tf.train.GradientDescentOptimizer(0.1)
from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer
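    # Wrap the base optimizer so gradients are averaged across all KungFu
    # workers (via all-reduce) before each apply step.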
optimizer = SynchronousSGDOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def input_fn(ds, batch_size, epochs=1, shuffle=True):
features = {'x': ds.images}
return tf.estimator.inputs.numpy_input_fn(x=features,
y=ds.labels,
batch_size=batch_size,
num_epochs=epochs,
shuffle=shuffle)
def get_model_dir(args):
from kungfu.python import uid
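    # uid() packs two 16-bit fields into a single integer: the high half is
    # used here as a port number and the low half as a cluster version, so
    # each worker and cluster version writes to its own model sub-directory.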
x = uid()
port = (x >> 16) & 0xffff
version = x & 0xffff
suffix = '%d.%d' % (port, version)
return os.path.join(args.model_dir, suffix)
MNIST_DATA_SIZE = 60000
def main(do_eval=True):
args = parse_args()
model_dir = get_model_dir(args)
data = load_datasets(args.data_dir, normalize=True)
classifier = tf.estimator.Estimator(model_fn, model_dir=model_dir)
from kungfu.tensorflow.experimental.hook import ElasticHook
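    # ElasticHook uses the batch size, epoch count and dataset size to track
    # training progress in samples, so the job can keep running (and finish
    # at the right point) while the set of workers is resized.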
hooks = [ElasticHook(args.batch_size, args.num_epochs, MNIST_DATA_SIZE)]
classifier.train(input_fn(data.train,
args.batch_size,
epochs=args.num_epochs),
hooks=hooks)
if not do_eval:
import time
time.sleep(1)
return
results = classifier.evaluate(input_fn(data.test,
args.batch_size,
shuffle=False),
hooks=[],
steps=1)
print('results: %s' % (results, ))
if __name__ == '__main__':
print('main started')
main(False)
print('main finished')
| 34.58
| 79
| 0.589358
|
import argparse
import functools
import operator
import os
import numpy as np
import tensorflow as tf
from kungfu.tensorflow.v1.helpers.mnist import load_datasets
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
def parse_args():
p = argparse.ArgumentParser(description='Example.')
p.add_argument('--data-dir', type=str, default='.', help='')
p.add_argument('--model-dir', type=str, default='.', help='')
p.add_argument('--kf-optimizer', type=str, default='sync_sgd', help='')
p.add_argument('--batch-size', type=int, default=100, help='')
p.add_argument('--num-epochs', type=int, default=1, help='')
p.add_argument('--learning-rate', type=float, default=0.01, help='')
return p.parse_args()
def slp(x, logits):
n = functools.reduce(operator.mul, [int(d) for d in x.shape[1:]], 1)
output = tf.layers.dense(inputs=tf.reshape(x, [-1, n]), units=logits)
return output, tf.argmax(output, axis=1)
def model_fn(features, labels, mode):
output, predictions = slp(features['x'], 10)
loss = tf.losses.sparse_softmax_cross_entropy(tf.cast(labels, tf.int32),
output)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions)
}
optimizer = tf.train.GradientDescentOptimizer(0.1)
from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer
optimizer = SynchronousSGDOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def input_fn(ds, batch_size, epochs=1, shuffle=True):
features = {'x': ds.images}
return tf.estimator.inputs.numpy_input_fn(x=features,
y=ds.labels,
batch_size=batch_size,
num_epochs=epochs,
shuffle=shuffle)
def get_model_dir(args):
from kungfu.python import uid
x = uid()
port = (x >> 16) & 0xffff
version = x & 0xffff
suffix = '%d.%d' % (port, version)
return os.path.join(args.model_dir, suffix)
MNIST_DATA_SIZE = 60000
def main(do_eval=True):
args = parse_args()
model_dir = get_model_dir(args)
data = load_datasets(args.data_dir, normalize=True)
classifier = tf.estimator.Estimator(model_fn, model_dir=model_dir)
from kungfu.tensorflow.experimental.hook import ElasticHook
hooks = [ElasticHook(args.batch_size, args.num_epochs, MNIST_DATA_SIZE)]
classifier.train(input_fn(data.train,
args.batch_size,
epochs=args.num_epochs),
hooks=hooks)
if not do_eval:
import time
time.sleep(1)
return
results = classifier.evaluate(input_fn(data.test,
args.batch_size,
shuffle=False),
hooks=[],
steps=1)
print('results: %s' % (results, ))
if __name__ == '__main__':
print('main started')
main(False)
print('main finished')
| true
| true
|
790d0f4cae6c04cee14371883992c6d6d7803164
| 131,099
|
py
|
Python
|
heat/tests/test_stack.py
|
stackriot/heat
|
9ed612906e388eda8bf850420cbceef54e05841c
|
[
"Apache-2.0"
] | 265
|
2015-01-02T09:33:22.000Z
|
2022-03-26T23:19:54.000Z
|
heat/tests/test_stack.py
|
stackriot/heat
|
9ed612906e388eda8bf850420cbceef54e05841c
|
[
"Apache-2.0"
] | 8
|
2015-09-01T15:43:19.000Z
|
2021-12-14T05:18:23.000Z
|
heat/tests/test_stack.py
|
stackriot/heat
|
9ed612906e388eda8bf850420cbceef54e05841c
|
[
"Apache-2.0"
] | 295
|
2015-01-06T07:00:40.000Z
|
2021-09-06T08:05:06.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import json
import logging
import time
from unittest import mock
import eventlet
import fixtures
from oslo_config import cfg
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.common import timeutils
from heat.db.sqlalchemy import api as db_api
from heat.engine.clients.os import keystone
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import function
from heat.engine import node_data
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import service
from heat.engine import stack
from heat.engine import stk_defn
from heat.engine import template
from heat.engine import update
from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
from heat.objects import stack as stack_object
from heat.objects import stack_tag as stack_tag_object
from heat.objects import user_creds as ucreds_object
from heat.tests import common
from heat.tests import fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
empty_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
}''')
class StackTest(common.HeatTestCase):
def setUp(self):
super(StackTest, self).setUp()
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
self.stub_auth()
def test_stack_reads_tenant(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id='bar')
self.assertEqual('bar', self.stack.tenant_id)
def test_stack_reads_tenant_from_context_if_empty(self):
self.ctx.tenant = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id=None)
self.assertEqual('foo', self.stack.tenant_id)
def test_stack_reads_username(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username='bar')
self.assertEqual('bar', self.stack.username)
def test_stack_reads_username_from_context_if_empty(self):
self.ctx.username = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username=None)
self.assertEqual('foo', self.stack.username)
def test_stack_string_repr(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
expected = 'Stack "%s" [%s]' % (self.stack.name, self.stack.id)
observed = str(self.stack)
self.assertEqual(expected, observed)
def test_state_defaults(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertEqual(('CREATE', 'IN_PROGRESS'), self.stack.state)
self.assertEqual('', self.stack.status_reason)
def test_timeout_secs_default(self):
cfg.CONF.set_override('stack_action_timeout', 1000)
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsNone(self.stack.timeout_mins)
self.assertEqual(1000, self.stack.timeout_secs())
def test_timeout_secs(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
timeout_mins=10)
self.assertEqual(600, self.stack.timeout_secs())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
# mock utcnow set to 10:10:00 (600s offset)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 10, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_negative(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
# mock utcnow set to 09:59:50 (-10s offset)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 9, 59, 50)
self.assertEqual(-10, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_ms(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 5, 0)
# mock utcnow set to microsecond offset
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27,
10, 4, 59, 750000)
self.assertEqual(-0.25, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_with_updated_time(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# dummy create time 10:00:00
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
        # dummy updated time 11:00:00; elapsed time should be measured from
        # this, not created_time
self.stack.updated_time = datetime.datetime(2015, 7, 27, 11, 0, 0)
# mock utcnow set to 11:10:00 (600s offset)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 11, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_time_remaining(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
# mock time elapsed; set to 600 seconds
mock_te.return_value = 600
# default stack timeout is 3600 seconds; remaining time 3000 secs
self.assertEqual(3000, self.stack.time_remaining())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_has_timed_out(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.status = self.stack.IN_PROGRESS
# test with timed out stack
mock_te.return_value = 3601
# default stack timeout is 3600 seconds; stack should time out
self.assertTrue(self.stack.has_timed_out())
# mock time elapsed; set to 600 seconds
mock_te.return_value = 600
# default stack timeout is 3600 seconds; remaining time 3000 secs
self.assertFalse(self.stack.has_timed_out())
# has_timed_out has no meaning when stack completes/fails;
# should return false
self.stack.status = self.stack.COMPLETE
self.assertFalse(self.stack.has_timed_out())
self.stack.status = self.stack.FAILED
self.assertFalse(self.stack.has_timed_out())
def test_no_auth_token(self):
ctx = utils.dummy_context()
ctx.auth_token = None
self.stack = stack.Stack(ctx, 'test_stack', self.tmpl)
self.assertEqual('abcd1234',
ctx.auth_plugin.auth_token)
def test_state_deleted(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
self.stack.id = '1234'
self.stack.delete()
self.assertIsNone(self.stack.state_set(stack.Stack.CREATE,
stack.Stack.COMPLETE,
'test'))
def test_load_nonexistant_id(self):
self.assertRaises(exception.NotFound, stack.Stack.load,
self.ctx, -1)
def test_total_resources_empty(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.stack.store()
self.assertEqual(0, self.stack.total_resources(self.stack.id))
self.assertEqual(0, self.stack.total_resources())
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_not_stored(self, sctr):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources())
sctr.assert_not_called()
def test_total_resources_not_found(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources('1234'))
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_generic(self, sctr):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
sctr.return_value = 1
self.assertEqual(1, self.stack.total_resources(self.stack.id))
self.assertEqual(1, self.stack.total_resources())
def test_resource_get(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
self.assertEqual('A', self.stack.resource_get('A').name)
self.assertEqual(self.stack['A'], self.stack.resource_get('A'))
self.assertIsNone(self.stack.resource_get('B'))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_resource_get_db_fallback(self, gabs):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
tpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
t2 = template.Template(tpl2)
t2.store(self.ctx)
db_resources = {
'A': mock.MagicMock(),
'B': mock.MagicMock(current_template_id=t2.id),
'C': mock.MagicMock(current_template_id=t2.id)
}
db_resources['A'].name = 'A'
db_resources['B'].name = 'B'
db_resources['C'].name = 'C'
gabs.return_value = db_resources
self.assertEqual('A', self.stack.resource_get('A').name)
self.assertEqual('B', self.stack.resource_get('B').name)
# Ignore the resource if only in db
self.assertIsNone(self.stack.resource_get('C'))
self.assertIsNone(self.stack.resource_get('D'))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
all_resources = list(self.stack.iter_resources())
        # Verify the DB query is called with the expected filter
mock_db_call.assert_called_once_with(self.ctx, self.stack.id)
# And returns the resources
names = sorted([r.name for r in all_resources])
self.assertEqual(['A', 'B'], names)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_nested(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
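        # Pretend the nested stack contains three resources: every call to
        # its iter_resources() yields the three placeholders below.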
def get_more(nested_depth=0, filters=None):
yield 'X'
yield 'Y'
yield 'Z'
mock_nested = self.patchobject(generic_rsrc.StackResourceType,
'nested')
mock_nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
resource_generator = self.stack.iter_resources()
self.assertIsNot(resource_generator, list)
first_level_resources = list(resource_generator)
self.assertEqual(2, len(first_level_resources))
all_resources = list(self.stack.iter_resources(1))
self.assertEqual(5, len(all_resources))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc = mock.MagicMock()
mock_rsc.name = 'A'
mock_rsc.current_template_id = self.stack.t.id
mock_db_call.return_value = {'A': mock_rsc}
all_resources = list(self.stack.iter_resources(
filters=dict(name=['A'])
))
        # Verify the DB query is called with the expected filter
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
])
# Make sure it returns only one resource.
self.assertEqual(1, len(all_resources))
# And returns the resource A
self.assertEqual('A', all_resources[0].name)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_nonexistent_template(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id + 1)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
all_resources = list(self.stack.iter_resources())
self.assertEqual(1, len(all_resources))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_nested_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
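        # The fake nested stack yields an extra resource only when filters
        # are propagated down to it.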
def get_more(nested_depth=0, filters=None):
if filters:
yield 'X'
mock_nested = self.patchobject(generic_rsrc.StackResourceType,
'nested')
mock_nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
all_resources = list(self.stack.iter_resources(
nested_depth=1,
filters=dict(name=['A'])
))
        # Verify the DB query is called with the expected filter
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
])
# Returns three resources (1 first level + 2 second level)
self.assertEqual(3, len(all_resources))
def test_load_parent_resource(self):
self.stack = stack.Stack(self.ctx, 'load_parent_resource', self.tmpl,
parent_resource='parent')
self.stack.store()
stk = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
t = template.Template.load(self.ctx, stk.raw_template_id)
self.patchobject(template.Template, 'load', return_value=t)
self.patchobject(stack.Stack, '__init__', return_value=None)
stack.Stack.load(self.ctx, stack_id=self.stack.id)
stack.Stack.__init__.assert_called_once_with(
self.ctx, stk.name, t, stack_id=stk.id,
action=stk.action, status=stk.status,
status_reason=stk.status_reason,
timeout_mins=stk.timeout,
disable_rollback=stk.disable_rollback,
parent_resource='parent', owner_id=None,
stack_user_project_id=None,
created_time=mock.ANY,
updated_time=None,
user_creds_id=stk.user_creds_id,
tenant_id='test_tenant_id',
use_stored_context=False,
username=mock.ANY,
convergence=False,
current_traversal=self.stack.current_traversal,
prev_raw_template_id=None,
current_deps=None, cache_data=None,
nested_depth=0,
deleted_time=None, refresh_cred=False)
template.Template.load.assert_called_once_with(
self.ctx, stk.raw_template_id, stk.raw_template)
def test_identifier(self):
self.stack = stack.Stack(self.ctx, 'identifier_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.tenant_id, identifier.tenant)
self.assertEqual('identifier_test', identifier.stack_name)
self.assertTrue(identifier.stack_id)
self.assertFalse(identifier.path)
def test_get_stack_abandon_data(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {'param1': {'Type': 'String'}},
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
resources = '''{"A": {"status": "COMPLETE", "name": "A",
"resource_data": {}, "resource_id": null, "action": "INIT",
"type": "GenericResourceType", "metadata": {}},
"B": {"status": "COMPLETE", "name": "B", "resource_data": {},
"resource_id": null, "action": "INIT", "type": "GenericResourceType",
"metadata": {}}}'''
env = environment.Environment({'parameters': {'param1': 'test'}})
self.ctx.tenant_id = '123'
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl, env=env),
tenant_id=self.ctx.tenant_id,
stack_user_project_id='234',
tags=['tag1', 'tag2'])
self.stack.store()
info = self.stack.prepare_abandon()
self.assertEqual('CREATE', info['action'])
self.assertIn('id', info)
self.assertEqual('stack_details_test', info['name'])
self.assertEqual(json.loads(resources), info['resources'])
self.assertEqual('IN_PROGRESS', info['status'])
self.assertEqual(tpl, info['template'])
self.assertEqual('123', info['project_id'])
self.assertEqual('234', info['stack_user_project_id'])
self.assertEqual(env.params, info['environment']['parameters'])
self.assertEqual(['tag1', 'tag2'], info['tags'])
def test_set_param_id(self):
self.stack = stack.Stack(self.ctx, 'param_arn_test', self.tmpl)
exp_prefix = ('arn:openstack:heat::test_tenant_id'
':stacks/param_arn_test/')
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + 'None')
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(exp_prefix + self.stack.id,
self.stack.parameters['AWS::StackId'])
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
def test_set_param_id_update(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Metadata': {'Bar': {'Ref': 'AWS::StackId'}},
'Properties': {'Foo': 'abc'}}}}
self.stack = stack.Stack(self.ctx, 'update_stack_arn_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
stack_arn = self.stack.parameters['AWS::StackId']
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Metadata': {'Bar':
{'Ref': 'AWS::StackId'}},
'Properties': {'Foo': 'xyz'}}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('xyz', self.stack['AResource'].properties['Foo'])
self.assertEqual(
stack_arn, self.stack['AResource'].metadata_get()['Bar'])
def test_load_param_id(self):
self.stack = stack.Stack(self.ctx, 'param_load_arn_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
newstack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(identifier.arn(), newstack.parameters['AWS::StackId'])
def test_load_reads_tenant_id(self):
self.ctx.tenant = 'foobar'
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.store()
stack_id = self.stack.id
self.ctx.tenant = None
self.stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', self.stack.tenant_id)
def test_load_reads_username_from_db(self):
self.ctx.username = 'foobar'
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.store()
stack_id = self.stack.id
self.ctx.username = None
stk = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', stk.username)
self.ctx.username = 'not foobar'
stk = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', stk.username)
def test_load_all(self):
stack1 = stack.Stack(self.ctx, 'stack1', self.tmpl)
stack1.store()
stack2 = stack.Stack(self.ctx, 'stack2', self.tmpl)
stack2.store()
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
# Add another, nested, stack
stack3 = stack.Stack(self.ctx, 'stack3', self.tmpl,
owner_id=stack2.id)
stack3.store()
# Should still be 2 without show_nested
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stacks = list(stack.Stack.load_all(self.ctx, show_nested=True))
self.assertEqual(3, len(stacks))
# A backup stack should not be returned
stack1._backup_stack()
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stacks = list(stack.Stack.load_all(self.ctx, show_nested=True))
self.assertEqual(3, len(stacks))
def test_load_all_not_found(self):
stack1 = stack.Stack(self.ctx, 'stack1', self.tmpl)
stack1.store()
tmpl2 = template.Template(copy.deepcopy(empty_template))
stack2 = stack.Stack(self.ctx, 'stack2', tmpl2)
stack2.store()
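# Simulate stack2's raw template having been deleted; load_all should skip that stack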
def fake_load(ctx, template_id, tmpl):
if template_id == stack2.t.id:
raise exception.NotFound()
else:
return tmpl2
with mock.patch.object(template.Template, 'load') as tmpl_load:
tmpl_load.side_effect = fake_load
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(1, len(stacks))
def test_created_time(self):
self.stack = stack.Stack(self.ctx, 'creation_time_test', self.tmpl)
self.assertIsNone(self.stack.created_time)
self.stack.store()
self.assertIsNotNone(self.stack.created_time)
def test_updated_time(self):
self.stack = stack.Stack(self.ctx, 'updated_time_test',
self.tmpl)
self.assertIsNone(self.stack.updated_time)
self.stack.store()
self.stack.create()
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
newstack = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl))
self.stack.update(newstack)
self.assertIsNotNone(self.stack.updated_time)
def test_update_prev_raw_template(self):
self.stack = stack.Stack(self.ctx, 'updated_time_test',
self.tmpl)
self.assertIsNone(self.stack.updated_time)
self.stack.store()
self.stack.create()
self.assertIsNone(self.stack.prev_raw_template_id)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
newstack = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl))
self.stack.update(newstack)
self.assertIsNotNone(self.stack.prev_raw_template_id)
prev_t = template.Template.load(self.ctx,
self.stack.prev_raw_template_id)
self.assertEqual(tmpl, prev_t.t)
prev_id = self.stack.prev_raw_template_id
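# A second update should record a new previous template and purge the older one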
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R2': {'Type': 'GenericResourceType'}}}
newstack2 = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl2))
self.stack.update(newstack2)
self.assertIsNotNone(self.stack.prev_raw_template_id)
self.assertNotEqual(prev_id, self.stack.prev_raw_template_id)
prev_t2 = template.Template.load(self.ctx,
self.stack.prev_raw_template_id)
self.assertEqual(tmpl2, prev_t2.t)
self.assertRaises(exception.NotFound,
template.Template.load, self.ctx, prev_id)
def test_access_policy_update(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'GenericResourceType'},
'Policy': {
'Type': 'OS::Heat::AccessPolicy',
'Properties': {
'AllowedResources': ['R1']
}}}}
self.stack = stack.Stack(self.ctx, 'update_stack_access_policy_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'GenericResourceType'},
'R2': {'Type': 'GenericResourceType'},
'Policy': {
'Type': 'OS::Heat::AccessPolicy',
'Properties': {
'AllowedResources': ['R1', 'R2'],
}}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
def test_abandon_nodelete_project(self):
self.stack = stack.Stack(self.ctx, 'delete_trust', self.tmpl)
stack_id = self.stack.store()
self.stack.set_stack_user_project_id(project_id='aproject456')
db_s = stack_object.Stack.get_by_id(self.ctx, stack_id)
self.assertIsNotNone(db_s)
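# Abandoning should remove the stack record without touching the stack user project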
self.stack.delete(abandon=True)
db_s = stack_object.Stack.get_by_id(self.ctx, stack_id)
self.assertIsNone(db_s)
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_suspend_resume(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.assertIsNone(self.stack.updated_time)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
stack_suspend_time = self.stack.updated_time
self.assertIsNotNone(stack_suspend_time)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
self.assertNotEqual(stack_suspend_time, self.stack.updated_time)
def test_suspend_stack_suspended_ok(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
# Resource.suspend should not be called again on an already suspended stack
self.patchobject(generic_rsrc.GenericResource, 'suspend')
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
generic_rsrc.GenericResource.suspend.assert_not_called()
def test_resume_stack_resumed_ok(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
# Resource.resume should not be called again on an already resumed stack
self.patchobject(generic_rsrc.GenericResource, 'resume')
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
generic_rsrc.GenericResource.resume.assert_not_called()
def test_suspend_fail(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
exc = Exception('foo')
self.patchobject(generic_rsrc.GenericResource, 'handle_suspend',
side_effect=exc)
self.stack = stack.Stack(self.ctx, 'suspend_test_fail',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource SUSPEND failed: Exception: '
'resources.AResource: foo',
self.stack.status_reason)
generic_rsrc.GenericResource.handle_suspend.assert_called_once_with()
def test_resume_fail(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.patchobject(generic_rsrc.GenericResource, 'handle_resume',
side_effect=Exception('foo'))
self.stack = stack.Stack(self.ctx, 'resume_test_fail',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource RESUME failed: Exception: '
'resources.AResource: foo',
self.stack.status_reason)
def test_suspend_timeout(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
exc = scheduler.Timeout('foo', 0)
self.patchobject(generic_rsrc.GenericResource, 'handle_suspend',
side_effect=exc)
self.stack = stack.Stack(self.ctx, 'suspend_test_fail_timeout',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.FAILED),
self.stack.state)
self.assertEqual('Suspend timed out', self.stack.status_reason)
generic_rsrc.GenericResource.handle_suspend.assert_called_once_with()
def test_resume_timeout(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
exc = scheduler.Timeout('foo', 0)
self.patchobject(generic_rsrc.GenericResource, 'handle_resume',
side_effect=exc)
self.stack = stack.Stack(self.ctx, 'resume_test_fail_timeout',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resume timed out', self.stack.status_reason)
generic_rsrc.GenericResource.handle_resume.assert_called_once_with()
def _get_stack_to_check(self, name):
tpl = {"HeatTemplateFormatVersion": "2012-12-12",
"Resources": {
"A": {"Type": "GenericResourceType"},
"B": {"Type": "GenericResourceType"}}}
self.stack = stack.Stack(self.ctx, name, template.Template(tpl),
status_reason=name)
self.stack.store()
def _mock_check(res):
res.handle_check = mock.Mock()
[_mock_check(res) for res in self.stack.resources.values()]
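# handle_check is stubbed on every resource so the tests can assert whether it was invoked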
return self.stack
def test_check_supported(self):
stack1 = self._get_stack_to_check('check-supported')
stack1['A'].state_set(stack1['A'].CREATE, stack1['A'].COMPLETE)
stack1['B'].state_set(stack1['B'].CREATE, stack1['B'].COMPLETE)
stack1.check()
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
[self.assertTrue(res.handle_check.called)
for res in stack1.resources.values()]
self.assertNotIn('not fully supported', stack1.status_reason)
def test_check_not_supported(self):
stack1 = self._get_stack_to_check('check-not-supported')
del stack1['B'].handle_check
stack1['A'].state_set(stack1['A'].CREATE, stack1['A'].COMPLETE)
stack1.check()
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
self.assertTrue(stack1['A'].handle_check.called)
self.assertIn('not fully supported', stack1.status_reason)
def test_check_fail(self):
stk = self._get_stack_to_check('check-fail')
# check() fails while the resources have not yet been created
stk.check()
self.assertEqual(stk.FAILED, stk.status)
self.assertEqual(stk.CHECK, stk.action)
self.assertFalse(stk['A'].handle_check.called)
self.assertFalse(stk['B'].handle_check.called)
self.assertIn('Resource A not created yet',
stk.status_reason)
self.assertIn('Resource B not created yet',
stk.status_reason)
# once the resources are created, failures raised by handle_check are reported
stk['A'].handle_check.side_effect = Exception('fail-A')
stk['B'].handle_check.side_effect = Exception('fail-B')
stk['A'].state_set(stk['A'].CREATE, stk['A'].COMPLETE)
stk['B'].state_set(stk['B'].CREATE, stk['B'].COMPLETE)
stk.check()
self.assertEqual(stk.FAILED, stk.status)
self.assertEqual(stk.CHECK, stk.action)
self.assertTrue(stk['A'].handle_check.called)
self.assertTrue(stk['B'].handle_check.called)
self.assertIn('fail-A', stk.status_reason)
self.assertIn('fail-B', stk.status_reason)
def test_adopt_stack(self):
adopt_data = '''{
"action": "CREATE",
"status": "COMPLETE",
"name": "my-test-stack-name",
"resources": {
"AResource": {
"status": "COMPLETE",
"name": "AResource",
"resource_data": {},
"metadata": {},
"resource_id": "test-res-id",
"action": "CREATE",
"type": "GenericResourceType"
}
}
}'''
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
template.Template(tmpl),
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
self.stack.adopt()
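# Adoption should take over the pre-existing resource instead of creating a new one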
res = self.stack['AResource']
self.assertEqual(u'test-res-id', res.resource_id)
self.assertEqual('AResource', res.name)
self.assertEqual('COMPLETE', res.status)
self.assertEqual('ADOPT', res.action)
self.assertEqual((self.stack.ADOPT, self.stack.COMPLETE),
self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, self.stack.id)
loaded_stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
loaded_stack.outputs['TestOutput'].get_value())
self.assertIsNone(loaded_stack['AResource']._stored_properties_data)
def test_adopt_stack_fails(self):
adopt_data = '''{
"action": "CREATE",
"status": "COMPLETE",
"name": "my-test-stack-name",
"resources": {}
}'''
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
}
})
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
tmpl,
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
self.stack.adopt()
self.assertEqual((self.stack.ADOPT, self.stack.FAILED),
self.stack.state)
expected = ('Resource ADOPT failed: Exception: resources.foo: '
'Resource ID was not provided.')
self.assertEqual(expected, self.stack.status_reason)
def test_adopt_stack_rollback(self):
adopt_data = '''{
"name": "my-test-stack-name",
"resources": {}
}'''
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack',
tmpl,
disable_rollback=False,
adopt_stack_data=json.loads(adopt_data))
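# With rollback enabled, the failed adopt (no resource data for 'foo') should trigger a delete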
self.stack.store()
with mock.patch.object(self.stack, 'delete',
side_effect=self.stack.delete) as mock_delete:
self.stack.adopt()
self.assertEqual((self.stack.ROLLBACK, self.stack.COMPLETE),
self.stack.state)
mock_delete.assert_called_once_with(action=self.stack.ROLLBACK,
abandon=True)
def test_resource_by_refid(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'resource_by_refid_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
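# The resource should be found by its reference ID in any of these non-DELETE states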
for action, status in (
(rsrc.INIT, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.CHECK, rsrc.COMPLETE)):
rsrc.state_set(action, status)
stk_defn.update_resource_data(self.stack.defn, rsrc.name,
rsrc.node_data())
self.assertEqual(rsrc, self.stack.resource_by_refid('aaaa'))
rsrc.state_set(rsrc.DELETE, rsrc.IN_PROGRESS)
stk_defn.update_resource_data(self.stack.defn, rsrc.name,
rsrc.node_data())
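# Once the resource is being deleted it is no longer found, and unknown IDs return None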
try:
self.assertIsNone(self.stack.resource_by_refid('aaaa'))
self.assertIsNone(self.stack.resource_by_refid('bbbb'))
finally:
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
def test_resource_name_ref_by_depends_on(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'AResource'},
'DependsOn': 'AResource'}}}
self.stack = stack.Stack(self.ctx, 'resource_by_name_ref_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
self.assertIn('BResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
b_rsrc = self.stack['BResource']
b_rsrc.resource_id_set('bbbb')
b_foo_ref = b_rsrc.properties.get('Foo')
for action, status in (
(rsrc.INIT, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
ref_rsrc = self.stack.resource_by_refid(b_foo_ref)
self.assertEqual(rsrc, ref_rsrc)
self.assertIn(b_rsrc.name, ref_rsrc.required_by())
def test_create_failure_recovery(self):
"""Check that rollback still works with dynamic metadata.
This test fails the second instance.
"""
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'OverwrittenFnGetRefIdType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
class FakeException(Exception):
# to avoid pep8 check
pass
mock_create = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType,
'handle_create',
side_effect=[FakeException, None])
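# handle_create fails on the first call and succeeds when retried during the update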
mock_delete = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType,
'handle_delete', return_value=None)
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(
'abc',
self.stack['AResource']._stored_properties_data['Foo'])
self.assertEqual(
'ID-AResource',
self.stack['BResource']._stored_properties_data['Foo'])
mock_delete.assert_called_once_with()
self.assertEqual(2, mock_create.call_count)
def test_create_bad_attribute(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['AResource',
'Foo']}}}}}
self.stack = stack.Stack(self.ctx, 'bad_attr_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.patchobject(generic_rsrc.ResourceWithProps,
'_update_stored_properties',
side_effect=exception.InvalidTemplateAttribute(
resource='a', key='foo'))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('Resource CREATE failed: The Referenced Attribute '
'(a foo) is incorrect.', self.stack.status_reason)
def test_stack_create_timeout(self):
def dummy_task():
while True:
yield
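# The task never completes, so the create can only finish by timing out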
self.patchobject(scheduler.DependencyTaskGroup, '__call__',
return_value=dummy_task())
stk = stack.Stack(self.ctx, 's', self.tmpl)
start_time = time.time()
self.patchobject(timeutils, 'wallclock',
side_effect=[start_time, start_time + 1,
start_time + stk.timeout_secs() + 1])
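# Two wallclock readings fall inside the timeout window; the third exceeds it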
stk.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED), stk.state)
self.assertEqual('Create timed out', stk.status_reason)
self.assertEqual(3, timeutils.wallclock.call_count)
def test_stack_name_valid(self):
stk = stack.Stack(self.ctx, 's', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'stack123', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test.stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'TEST', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test-stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
def test_stack_name_invalid(self):
gt_255_chars = ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuv')
stack_names = ['_foo', '1bad', '.kcats', 'test stack', ' teststack',
'^-^', '"stack"', '1234', 'cat|dog', '$(foo)',
'test/stack', 'test\\stack', 'test::stack',
'test;stack', 'test~stack', '#test', gt_255_chars]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s must contain" % stack_name,
str(ex))
def test_stack_name_invalid_type(self):
stack_names = [{"bad": 123}, ["no", "lists"]]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s, must be a string"
% stack_name, str(ex))
def test_resource_state_get_att(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(self.ctx, 'resource_state_get_att',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
self.assertEqual('AResource', rsrc.FnGetAtt('Foo'))
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.FAILED),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.FAILED),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.DELETE, rsrc.IN_PROGRESS),
(rsrc.DELETE, rsrc.FAILED),
(rsrc.DELETE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
self.stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
self.stack.outputs['TestOutput'].get_value())
def test_resource_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType',
'DependsOn': 'AResource'},
'CResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(['BResource'],
self.stack['AResource'].required_by())
self.assertEqual([],
self.stack['CResource'].required_by())
required_by = self.stack['BResource'].required_by()
self.assertEqual(2, len(required_by))
for r in ['CResource', 'DResource']:
self.assertIn(r, required_by)
def test_resource_multi_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': ['AResource',
'BResource',
'CResource']}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
for r in ['AResource', 'BResource', 'CResource']:
self.assertEqual(['DResource'],
self.stack[r].required_by())
def test_store_saves_owner(self):
"""owner_id attribute of Store is saved to the database when stored."""
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)
self.assertEqual(self.stack.id, db_stack.owner_id)
def test_init_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='my_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.stored_context().to_dict())
def test_tags_property_get_set(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack._tags)
self.assertEqual([], test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.tags = ['tag1', 'tag2']
self.assertEqual(['tag1', 'tag2'], self.stack._tags)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack._tags)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
self.assertEqual(['tag1', 'tag2'], test_stack._tags)
def test_load_reads_tags(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual([], test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
def test_store_saves_tags(self):
self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl)
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertIsNone(db_tags)
self.stack = stack.Stack(self.ctx, 'tags_stack2', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertEqual('tag1', db_tags[0].tag)
self.assertEqual('tag2', db_tags[1].tag)
def test_store_saves_creds(self):
"""A user_creds entry is created on first stack store."""
cfg.CONF.set_default('deferred_auth_method', 'password')
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
# The store should've created a user_creds row and set user_creds_id
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# the username/password from the context should have been stored
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertEqual(self.ctx.username, user_creds.get('username'))
self.assertEqual(self.ctx.password, user_creds.get('password'))
self.assertIsNone(user_creds.get('trust_id'))
self.assertIsNone(user_creds.get('trustor_user_id'))
# Check the stored_context is as expected
expected_context = context.RequestContext.from_dict(self.ctx.to_dict())
expected_context.auth_token = None
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context.to_dict(), stored_context)
# Store again, ID should not change
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
def test_store_saves_creds_trust(self):
"""A user_creds entry is created on first stack store."""
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=fake_ks.FakeKeystoneClient(
user_id='auser123'))
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
# The store should've created a user_creds row and set user_creds_id
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# should've stored the trust_id and trustor_user_id returned from
# FakeKeystoneClient.create_trust_context, username/password should
# not have been stored
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertIsNone(user_creds.get('username'))
self.assertIsNone(user_creds.get('password'))
self.assertEqual('atrust', user_creds.get('trust_id'))
self.assertEqual('auser123', user_creds.get('trustor_user_id'))
auth = self.patchobject(context.RequestContext,
'trusts_auth_plugin')
self.patchobject(auth, 'get_access',
return_value=fakes.FakeAccessInfo([], None, None))
# Check the stored_context is as expected
expected_context = context.RequestContext(
trust_id='atrust', trustor_user_id='auser123',
request_id=self.ctx.request_id, is_admin=False).to_dict()
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context, stored_context)
# Store again, ID should not change
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
keystone.KeystoneClientPlugin._create.assert_called_with()
def test_backup_copies_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='my_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
backup = self.stack._backup_stack()
self.assertEqual(creds.id, backup.user_creds_id)
def test_stored_context_err(self):
"""Test stored_context error path."""
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
self.assertEqual(expected_err, str(ex))
def test_store_gets_username_from_stack(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('foobar', db_stack.username)
def test_store_backup_true(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=True)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertTrue(db_stack.backup)
def test_store_backup_false(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=False)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertFalse(db_stack.backup)
def test_init_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store1', self.tmpl,
user_creds_id=creds.id,
use_stored_context=False)
ctx_expected = self.ctx.to_dict()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_init_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store2', self.tmpl,
user_creds_id=creds.id,
use_stored_context=True)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_load_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store3', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=False)
self.assertEqual(self.ctx.to_dict(), load_stack.context.to_dict())
def test_load_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store4', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=True)
self.assertEqual(ctx_expected, load_stack.context.to_dict())
def test_load_honors_owner(self):
"""Loading a stack from the database will set the owner_id.
Loading a stack from the database will set the owner_id of the
resultant stack appropriately.
"""
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
def _test_load_with_refresh_cred(self, refresh=True):
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.patchobject(self.ctx.auth_plugin, 'get_user_id',
return_value='old_trustor_user_id')
self.patchobject(self.ctx.auth_plugin, 'get_project_id',
return_value='test_tenant_id')
old_context = utils.dummy_context()
old_context.trust_id = 'atrust123'
old_context.trustor_user_id = (
'trustor_user_id' if refresh else 'old_trustor_user_id')
m_sc = self.patchobject(context, 'StoredContext')
m_sc.from_dict.return_value = old_context
self.stack = stack.Stack(self.ctx, 'test_regenerate_trust', self.tmpl)
self.stack.store()
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
check_refresh_cred=True)
self.assertEqual(refresh, load_stack.refresh_cred)
def test_load_with_refresh_cred(self):
self._test_load_with_refresh_cred()
def test_load_with_no_refresh_cred(self):
self._test_load_with_refresh_cred(refresh=False)
def test_requires_deferred_auth(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.assertFalse(self.stack.requires_deferred_auth())
self.stack['CResource'].requires_deferred_auth = True
self.assertTrue(self.stack.requires_deferred_auth())
def test_stack_user_project_id_default(self):
self.stack = stack.Stack(self.ctx, 'user_project_none', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertIsNone(db_stack.stack_user_project_id)
def test_stack_user_project_id_constructor(self):
self.stub_keystoneclient()
self.stack = stack.Stack(self.ctx, 'user_project_init',
self.tmpl,
stack_user_project_id='aproject1234')
self.stack.store()
self.assertEqual('aproject1234', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject1234', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_stack_user_project_id_setter(self):
self.stub_keystoneclient()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.set_stack_user_project_id(project_id='aproject456')
self.assertEqual('aproject456', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject456', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_stack_user_project_id_create(self):
self.stub_keystoneclient()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.create_stack_user_project_id()
self.assertEqual('aprojectid', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aprojectid', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_stack_eager_or_lazy_load_templ(self):
self.stack = stack.Stack(self.ctx, 'test_stack_eager_or_lazy_tmpl',
self.tmpl)
self.stack.store()
ctx1 = utils.dummy_context()
s1_db_result = db_api.stack_get(ctx1, self.stack.id, eager_load=True)
s1_obj = stack_object.Stack._from_db_object(ctx1, stack_object.Stack(),
s1_db_result)
self.assertIsNotNone(s1_obj._raw_template)
self.assertIsNotNone(s1_obj.raw_template)
ctx2 = utils.dummy_context()
s2_db_result = db_api.stack_get(ctx2, self.stack.id, eager_load=False)
s2_obj = stack_object.Stack._from_db_object(ctx2, stack_object.Stack(),
s2_db_result)
# _raw_template has not been set since it was not eagerly loaded
self.assertFalse(hasattr(s2_obj, "_raw_template"))
# accessing raw_template lazily loads it
self.assertIsNotNone(s2_obj.raw_template)
self.assertIsNotNone(s2_obj._raw_template)
def test_preview_resources_returns_list_of_resource_previews(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'preview_stack',
template.Template(tmpl))
res = mock.Mock()
res.preview.return_value = 'foo'
self.stack._resources = {'r1': res}
resources = self.stack.preview_resources()
self.assertEqual(['foo'], resources)
def test_correct_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'def'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
# According to the _resolve_attribute method in GenericResource, the
# output value will be equal to the resource name 'AResource'.
self.stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
self.stack.outputs['Resource_attr'].get_value())
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_incorrect_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
ex = self.assertRaises(exception.InvalidTemplateAttribute,
self.stack.outputs['Resource_attr'].get_value)
self.assertIn('The Referenced Attribute (AResource Bar) is '
'incorrect.',
str(ex))
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_stack_load_no_param_value_validation(self):
"""Test stack loading with disabled parameter value validation."""
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
flavor:
type: string
description: A flavor.
constraints:
- custom_constraint: nova.flavor
resources:
a_resource:
type: GenericResourceType
''')
# Mock objects so the query for flavors in server.FlavorConstraint
# works for stack creation
fc = fakes.FakeClient()
self.patchobject(nova.NovaClientPlugin, 'client', return_value=fc)
fc.flavors = mock.Mock()
flavor = collections.namedtuple("Flavor", ["id", "name"])
flavor.id = "1234"
flavor.name = "dummy"
fc.flavors.get.return_value = flavor
test_env = environment.Environment({'flavor': '1234'})
self.stack = stack.Stack(self.ctx, 'stack_with_custom_constraint',
template.Template(tmpl, env=test_env))
self.stack.validate()
self.stack.store()
self.stack.create()
stack_id = self.stack.id
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(stack_id, loaded_stack.parameters['OS::stack_id'])
fc.flavors.get.assert_called_once_with('1234')
def test_snapshot_delete(self):
snapshots = []
class ResourceDeleteSnapshot(generic_rsrc.ResourceWithProps):
def handle_delete_snapshot(self, data):
snapshots.append(data)
resource._register_class(
'ResourceDeleteSnapshot', ResourceDeleteSnapshot)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'ResourceDeleteSnapshot'}}}
self.stack = stack.Stack(self.ctx, 'snapshot_stack',
template.Template(tmpl))
data = self.stack.prepare_abandon()
fake_snapshot = collections.namedtuple('Snapshot', ('data',))(data)
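# delete_snapshot should pass each resource's snapshot data to handle_delete_snapshot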
self.stack.delete_snapshot(fake_snapshot)
self.assertEqual([data['resources']['AResource']], snapshots)
def test_delete_snapshot_without_data(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'snapshot_stack',
template.Template(tmpl))
fake_snapshot = collections.namedtuple('Snapshot', ('data',))(None)
self.assertIsNone(self.stack.delete_snapshot(fake_snapshot))
def test_incorrect_outputs_cfn_get_attr(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertRaisesRegex(
exception.StackValidationFailed,
('Outputs.Resource_attr.Value.Fn::GetAtt: The Referenced '
r'Attribute \(AResource Bar\) is incorrect.'),
self.stack.validate)
def test_incorrect_outputs_cfn_incorrect_reference(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Outputs:
Output:
Value:
Fn::GetAtt:
- Resource
- Foo
""")
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('The specified reference "Resource" '
'(in unknown) is incorrect.', str(ex))
def test_incorrect_outputs_incorrect_reference(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
outputs:
output:
value: { get_attr: [resource, foo] }
""")
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('The specified reference "resource" '
'(in unknown) is incorrect.', str(ex))
def test_incorrect_outputs_cfn_missing_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Description: the attr
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Each output definition must contain a Value key.',
str(ex))
self.assertIn('Outputs.Resource_attr', str(ex))
def test_incorrect_outputs_cfn_empty_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Value: ''
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_none_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Value:
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_string_data(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
This is wrong data
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Found a %s instead' % str.__name__,
str(ex))
self.assertIn('Outputs.Resource_attr', str(ex))
def test_prop_validate_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
FooInt: notanint
""")
self.stack = stack.Stack(self.ctx, 'stack_with_bad_property',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("'notanint' is not an integer",
str(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
def test_disable_validate_required_param(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
parameters:
aparam:
type: number
resources:
AResource:
type: ResourceWithPropsRefPropOnValidate
properties:
FooInt: {get_param: aparam}
""")
self.stack = stack.Stack(self.ctx, 'stack_with_reqd_param',
template.Template(tmpl))
ex = self.assertRaises(exception.UserParameterMissing,
self.stack.validate)
self.assertIn("The Parameter (aparam) was not provided",
str(ex))
self.stack.strict_validate = False
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("The Parameter (aparam) was not provided",
str(ex))
self.assertIsNone(self.stack.validate(validate_res_tmpl_only=True))
def test_nodisable_validate_tmpl_err(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
resources:
AResource:
type: ResourceWithPropsRefPropOnValidate
depends_on: noexist
properties:
FooInt: 123
""")
self.stack = stack.Stack(self.ctx, 'stack_with_tmpl_err',
template.Template(tmpl))
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
str(ex))
self.stack.strict_validate = False
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
str(ex))
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate,
validate_res_tmpl_only=True)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
str(ex))
def test_validate_property_getatt(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'ResourceWithPropsType'},
'R2': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': {'Fn::GetAtt': ['R1', 'Foo']}}}}
}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_param_validate_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
foo:
Type: Number
""")
env1 = environment.Environment({'parameters': {'foo': 'abc'}})
self.stack = stack.Stack(self.ctx, 'stack_with_bad_param',
template.Template(tmpl, env=env1))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("Parameter 'foo' is invalid: could not convert "
"string to float:", str(ex))
self.assertIn("abc", str(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_list_data(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
- Data is not what it seems
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Found a list', str(ex))
self.assertIn('Outputs.Resource_attr', str(ex))
def test_incorrect_deletion_policy(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
Deletion_Policy:
Type: String
Default: [1, 2]
Resources:
AResource:
Type: ResourceWithPropsType
DeletionPolicy: {Ref: Deletion_Policy}
Properties:
Foo: abc
""")
self.stack = stack.Stack(self.ctx, 'stack_bad_delpol',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Invalid deletion policy "[1, 2]"',
str(ex))
def test_deletion_policy_apply_ref(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
Deletion_Policy:
Type: String
Default: Delete
Resources:
AResource:
Type: ResourceWithPropsType
DeletionPolicy: wibble
Properties:
Foo: abc
DeletionPolicy: {Ref: Deletion_Policy}
""")
self.stack = stack.Stack(self.ctx, 'stack_delpol_get_param',
template.Template(tmpl))
self.stack.validate()
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
def test_deletion_policy_apply_get_param(self):
tmpl = template_format.parse("""
heat_template_version: 2016-04-08
parameters:
deletion_policy:
type: string
default: Delete
resources:
AResource:
type: ResourceWithPropsType
deletion_policy: {get_param: deletion_policy}
properties:
Foo: abc
""")
self.stack = stack.Stack(self.ctx, 'stack_delpol_get_param',
template.Template(tmpl))
self.stack.validate()
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
def test_incorrect_deletion_policy_hot(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
parameters:
deletion_policy:
type: string
default: [1, 2]
resources:
AResource:
type: ResourceWithPropsType
deletion_policy: {get_param: deletion_policy}
properties:
Foo: abc
""")
self.stack = stack.Stack(self.ctx, 'stack_bad_delpol',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Invalid deletion policy "[1, 2]',
str(ex))
def test_incorrect_outputs_hot_get_attr(self):
tmpl = {'heat_template_version': '2013-05-23',
'resources': {
'AResource': {'type': 'ResourceWithPropsType',
'properties': {'Foo': 'abc'}}},
'outputs': {
'resource_attr': {
'value': {
'get_attr': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertRaisesRegex(
exception.StackValidationFailed,
('outputs.resource_attr.value.get_attr: The Referenced Attribute '
r'\(AResource Bar\) is incorrect.'),
self.stack.validate)
def test_snapshot_save_called_first(self):
def snapshotting_called_first(stack, action, status, reason):
self.assertEqual(stack.status, stack.IN_PROGRESS)
self.assertEqual(stack.action, stack.SNAPSHOT)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.stack.snapshot(save_snapshot_func=snapshotting_called_first)
def test_restore(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
data = copy.deepcopy(self.stack.prepare_abandon())
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
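# The snapshot captures the original two-resource state before the update removes B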
new_tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'A': {'Type': 'GenericResourceType'}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(new_tmpl))
self.stack.update(updated_stack)
self.assertEqual(1, len(self.stack.resources))
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(2, len(self.stack.resources))
def test_restore_with_original_env(self):
tmpl = {
'heat_template_version': '2013-05-23',
'parameters': {
'foo': {'type': 'string'}
},
'resources': {
'A': {
'type': 'ResourceWithPropsType',
'properties': {'Foo': {'get_param': 'foo'}}
}
}
}
self.stack = stack.Stack(self.ctx, 'stack_restore_test',
template.Template(
tmpl,
env=environment.Environment(
{'foo': 'abc'})))
self.stack.store()
self.stack.create()
self.assertEqual('abc',
self.stack.resources['A'].properties['Foo'])
data = copy.deepcopy(self.stack.prepare_abandon())
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
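# Update to a different parameter value; restore should bring back the original environment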
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(
tmpl,
env=environment.Environment(
{'foo': 'xyz'})))
self.stack.update(updated_stack)
self.assertEqual('xyz',
self.stack.resources['A'].properties['Foo'])
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc',
self.stack.resources['A'].properties['Foo'])
def test_hot_restore(self):
tpl = {'heat_template_version': '2013-05-23',
'resources':
{'A': {'type': 'ResourceWithRestoreType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl))
self.stack.store()
self.stack.create()
data = self.stack.prepare_abandon()
data['resources']['A']['resource_data']['a_string'] = 'foo'
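# Seed the snapshot's resource data; restore should map it back onto the resource properties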
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(
'foo', self.stack.resources['A'].properties['a_string'])
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_lightweight_stack_getatt(self, mock_drg):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['foo', 'bar']},
}
}
}
})
rsrcs_data = {'foo': {'reference_id': 'foo-id',
'attrs': {'bar': 'baz'}, 'uuid': mock.ANY,
'id': mock.ANY, 'action': 'CREATE',
'status': 'COMPLETE'},
'bar': {'reference_id': 'bar-id', 'uuid': mock.ANY,
'id': mock.ANY, 'action': 'CREATE',
'status': 'COMPLETE'}}
cache_data = {n: node_data.NodeData.from_dict(d)
for n, d in rsrcs_data.items()}
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
lightweight_stack = stack.Stack.load(self.ctx, stack_id=tmpl_stack.id,
cache_data=cache_data)
# Check if the property has the appropriate resolved value.
bar = resource.Resource(
'bar',
lightweight_stack.defn.resource_definition('bar'),
lightweight_stack)
self.assertEqual('baz', bar.properties['Foo'])
# Make sure FnGetAtt returns the cached value.
attr_value = lightweight_stack.defn['foo'].FnGetAtt('bar')
self.assertEqual('baz', attr_value)
# Make sure calls are not made to the database to retrieve the
# resource state.
self.assertFalse(mock_drg.called)
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_lightweight_stack_getrefid(self, mock_drg):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
})
rsrcs_data = {'foo': {'reference_id': 'physical-resource-id',
'uuid': mock.ANY, 'id': mock.ANY,
'action': 'CREATE', 'status': 'COMPLETE'},
'bar': {'reference_id': 'bar-id', 'uuid': mock.ANY,
'id': mock.ANY, 'action': 'CREATE',
'status': 'COMPLETE'}}
cache_data = {n: node_data.NodeData.from_dict(d)
for n, d in rsrcs_data.items()}
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
lightweight_stack = stack.Stack.load(self.ctx, stack_id=tmpl_stack.id,
cache_data=cache_data)
# Check if the property has the appropriate resolved value.
bar = resource.Resource(
'bar',
lightweight_stack.defn.resource_definition('bar'),
lightweight_stack)
self.assertEqual('physical-resource-id', bar.properties['Foo'])
# Make sure FnGetRefId returns the cached value.
resource_id = lightweight_stack.defn['foo'].FnGetRefId()
self.assertEqual('physical-resource-id', resource_id)
# Make sure calls are not made to the database to retrieve the
# resource state.
self.assertFalse(mock_drg.called)
def test_encrypt_parameters_false_parameters_stored_plaintext(self):
"""Test stack loading with disabled parameter value validation."""
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
# Verify that hidden parameters are stored in plain text
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
params = db_stack.raw_template.environment['parameters']
self.assertEqual('foo', params['param1'])
self.assertEqual('bar', params['param2'])
def test_parameters_stored_encrypted_decrypted_on_load(self):
"""Test stack loading with disabled parameter value validation."""
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
# Verify that hidden parameters are stored encrypted
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
        # Verify that loaded stack has decrypted parameters
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
# test update the param2
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE,
'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update',
template.Template(tmpl, env=env2))
loaded_stack.update(new_stack)
self.assertEqual((loaded_stack.UPDATE, loaded_stack.COMPLETE),
loaded_stack.state)
db_tpl = db_api.raw_template_get(self.ctx, loaded_stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
loaded_stack1 = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack1.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('new_bar', params.get('param2'))
def test_parameters_created_encrypted_updated_decrypted(self):
"""Test stack loading with disabled parameter value validation."""
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
# Create the stack with encryption enabled
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
self.stack.store()
# Update the stack with encryption disabled
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE,
'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update',
template.Template(tmpl, env=env2))
self.assertEqual(['param2'], loaded_stack.env.encrypted_param_names)
# Without the fix for bug #1572294, loaded_stack.update() will
# blow up with "ValueError: too many values to unpack"
loaded_stack.update(new_stack)
self.assertEqual([], loaded_stack.env.encrypted_param_names)
def test_parameters_inconsistent_encrypted_param_names(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
warning_logger = self.useFixture(
fixtures.FakeLogger(level=logging.WARNING,
format="%(levelname)8s [%(name)s] "
"%(message)s"))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
self.stack.store()
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE,
'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
# Put inconsistent encrypted_param_names data in the environment
env2.encrypted_param_names = ['param1']
new_stack = stack.Stack(self.ctx, 'test_update',
template.Template(tmpl, env=env2))
self.assertIsNone(loaded_stack.update(new_stack))
self.assertIn('Encountered already-decrypted data',
warning_logger.output)
def test_parameters_stored_decrypted_successful_load(self):
"""Test stack loading with disabled parameter value validation."""
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
# Verify that hidden parameters are stored decrypted
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('bar', db_params['param2'])
# Verify that stack loads without error
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
def test_event_dispatch(self):
env = environment.Environment()
evt = eventlet.event.Event()
sink = fakes.FakeEventSink(evt)
env.register_event_sink('dummy', lambda: sink)
env.load({"event_sinks": [{"type": "dummy"}]})
stk = stack.Stack(self.ctx, 'test',
template.Template(empty_template, env=env))
stk.thread_group_mgr = service.ThreadGroupManager()
self.addCleanup(stk.thread_group_mgr.stop, stk.id)
stk.store()
stk._add_event('CREATE', 'IN_PROGRESS', '')
evt.wait()
expected = [{
'id': mock.ANY,
'timestamp': mock.ANY,
'type': 'os.heat.event',
'version': '0.1',
'payload': {
'physical_resource_id': stk.id,
'resource_action': 'CREATE',
'resource_name': 'test',
'resource_properties': {},
'resource_status': 'IN_PROGRESS',
'resource_status_reason': '',
'resource_type':
'OS::Heat::Stack',
'stack_id': stk.id,
'version': '0.1'}}]
self.assertEqual(expected, sink.events)
@mock.patch.object(stack_object.Stack, 'delete')
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
def test_mark_complete_create(self, mock_tmpl_delete, mock_stack_delete):
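        # An initial CREATE has no previous raw template, so mark_complete()
        # must not delete any template or stack rows.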
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl, convergence=True)
tmpl_stack.store()
tmpl_stack.action = tmpl_stack.CREATE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.mark_complete()
        self.assertIsNone(tmpl_stack.prev_raw_template_id)
self.assertFalse(mock_tmpl_delete.called)
self.assertFalse(mock_stack_delete.called)
        self.assertEqual(tmpl_stack.COMPLETE, tmpl_stack.status)
@mock.patch.object(stack.Stack, 'purge_db')
def test_mark_complete_update(self, mock_purge_db):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
cfg.CONF.set_default('convergence_engine', True)
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl, convergence=True)
tmpl_stack.prev_raw_template_id = 1
tmpl_stack.action = tmpl_stack.UPDATE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.store()
tmpl_stack.mark_complete()
self.assertTrue(mock_purge_db.called)
@mock.patch.object(stack.Stack, 'purge_db')
def test_mark_complete_update_delete(self, mock_purge_db):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Description': 'Empty Template'
})
cfg.CONF.set_default('convergence_engine', True)
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl, convergence=True)
tmpl_stack.prev_raw_template_id = 1
tmpl_stack.action = tmpl_stack.DELETE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.store()
tmpl_stack.mark_complete()
self.assertTrue(mock_purge_db.called)
@mock.patch.object(stack.Stack, 'purge_db')
def test_mark_complete_stale_traversal(self, mock_purge_db):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
# emulate stale traversal
tmpl_stack.current_traversal = 'old-traversal'
tmpl_stack.mark_complete()
self.assertFalse(mock_purge_db.called)
@mock.patch.object(function, 'validate')
def test_validate_assertion_exception_rethrow(self, func_val):
expected_msg = 'Expected Assertion Error'
with mock.patch('heat.engine.stack.dependencies',
new_callable=mock.PropertyMock) as mock_dependencies:
mock_dependency = mock.MagicMock()
mock_dependency.name = 'res'
mock_dependency.external_id = None
mock_dependency.validate.side_effect = AssertionError(expected_msg)
mock_dependencies.Dependencies.return_value = [mock_dependency]
stc = stack.Stack(self.ctx, utils.random_name(), self.tmpl)
mock_res = mock.Mock()
mock_res.name = mock_dependency.name
mock_res.t = mock.Mock()
mock_res.t.name = mock_res.name
stc._resources = {mock_res.name: mock_res}
expected_exception = self.assertRaises(AssertionError,
stc.validate)
self.assertEqual(expected_msg, str(expected_exception))
mock_dependency.validate.assert_called_once_with()
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Outputs:
foo:
Value: bar
""")
stc = stack.Stack(self.ctx, utils.random_name(),
template.Template(tmpl))
func_val.side_effect = AssertionError(expected_msg)
expected_exception = self.assertRaises(AssertionError, stc.validate)
self.assertEqual(expected_msg, str(expected_exception))
@mock.patch.object(update, 'StackUpdate')
def test_update_task_exception(self, mock_stack_update):
class RandomException(Exception):
pass
tmpl1 = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(), 'test_stack', tmpl1)
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
tmpl2 = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {'Type': 'GenericResourceType'}
}
})
updated_stack = stack.Stack(utils.dummy_context(), 'test_stack', tmpl2)
mock_stack_update.side_effect = RandomException()
self.assertRaises(RandomException, self.stack.update, updated_stack)
def update_exception_handler(self, exc, action=stack.Stack.UPDATE,
disable_rollback=False):
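        # Helper: build a one-resource stack and return whether
        # _update_exception_handler() asks for a rollback for the given exc.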
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack',
tmpl,
disable_rollback=disable_rollback)
self.stack.store()
rb = self.stack._update_exception_handler(exc=exc, action=action)
return rb
def test_update_exception_handler_resource_failure_no_rollback(self):
reason = 'something strange happened'
exc = exception.ResourceFailure(reason, None, action='UPDATE')
rb = self.update_exception_handler(exc, disable_rollback=True)
self.assertFalse(rb)
def test_update_exception_handler_resource_failure_rollback(self):
reason = 'something strange happened'
exc = exception.ResourceFailure(reason, None, action='UPDATE')
rb = self.update_exception_handler(exc, disable_rollback=False)
self.assertTrue(rb)
def test_update_exception_handler_force_cancel_with_rollback(self):
exc = stack.ForcedCancel(with_rollback=True)
rb = self.update_exception_handler(exc, disable_rollback=False)
self.assertTrue(rb)
def test_update_exception_handler_force_cancel_with_rollback_off(self):
# stack-cancel-update from user *always* rolls back
exc = stack.ForcedCancel(with_rollback=True)
rb = self.update_exception_handler(exc, disable_rollback=True)
self.assertTrue(rb)
def test_update_exception_handler_force_cancel_nested(self):
exc = stack.ForcedCancel(with_rollback=False)
rb = self.update_exception_handler(exc, disable_rollback=True)
self.assertFalse(rb)
def test_store_generates_new_traversal_id_for_new_stack(self):
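        # A convergence stack has no traversal ID until it is first stored.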
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
self.assertIsNone(self.stack.current_traversal)
self.stack.store()
self.assertIsNotNone(self.stack.current_traversal)
@mock.patch.object(stack_object.Stack, 'select_and_update')
def test_store_uses_traversal_id_for_updating_db(self, mock_sau):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
mock_sau.return_value = True
self.stack.id = 1
self.stack.current_traversal = 1
stack_id = self.stack.store()
mock_sau.assert_called_once_with(mock.ANY, 1, mock.ANY, exp_trvsl=1)
self.assertEqual(1, stack_id)
# ensure store uses given expected traversal ID
stack_id = self.stack.store(exp_trvsl=2)
self.assertEqual(1, stack_id)
mock_sau.assert_called_with(mock.ANY, 1, mock.ANY, exp_trvsl=2)
@mock.patch.object(stack_object.Stack, 'select_and_update')
def test_store_db_update_failure(self, mock_sau):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
mock_sau.return_value = False
self.stack.id = 1
stack_id = self.stack.store()
self.assertIsNone(stack_id)
@mock.patch.object(stack_object.Stack, 'select_and_update')
def test_state_set_uses_curr_traversal_for_updating_db(self, mock_sau):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
self.stack.id = 1
self.stack.current_traversal = 'curr-traversal'
self.stack.store()
self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
mock_sau.assert_called_once_with(mock.ANY, 1, mock.ANY,
exp_trvsl='curr-traversal')
class StackKwargsForCloningTest(common.HeatTestCase):
scenarios = [
('default', dict(keep_status=False, only_db=False, keep_tags=False,
not_included=['action', 'status', 'status_reason',
'tags'])),
('only_db', dict(keep_status=False, only_db=True, keep_tags=False,
not_included=['action', 'status', 'status_reason',
'strict_validate', 'tags'])),
('keep_status', dict(keep_status=True, only_db=False, keep_tags=False,
not_included=['tags'])),
('status_db', dict(keep_status=True, only_db=True, keep_tags=False,
not_included=['strict_validate', 'tags'])),
('keep_tags', dict(keep_status=False, only_db=False, keep_tags=True,
not_included=['action', 'status', 'status_reason']))
]
def test_kwargs(self):
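        # Each scenario checks which constructor kwargs survive cloning and
        # which are dropped (not_included), both as DB column names and as
        # plain keyword arguments.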
tmpl = template.Template(copy.deepcopy(empty_template))
ctx = utils.dummy_context()
test_data = dict(action='x', status='y',
status_reason='z', timeout_mins=33,
disable_rollback=True, parent_resource='fred',
owner_id=32, stack_user_project_id=569,
user_creds_id=123, tenant_id='some-uuid',
username='jo', nested_depth=3,
strict_validate=True, convergence=False,
current_traversal=45,
tags=['tag1', 'tag2'])
db_map = {'parent_resource': 'parent_resource_name',
'tenant_id': 'tenant', 'timeout_mins': 'timeout'}
test_db_data = {}
for key in test_data:
dbkey = db_map.get(key, key)
test_db_data[dbkey] = test_data[key]
self.stack = stack.Stack(ctx, utils.random_name(), tmpl,
**test_data)
res = self.stack.get_kwargs_for_cloning(keep_status=self.keep_status,
only_db=self.only_db,
keep_tags=self.keep_tags)
for key in self.not_included:
self.assertNotIn(key, res)
for key in test_data:
if key not in self.not_included:
dbkey = db_map.get(key, key)
if self.only_db:
self.assertEqual(test_data[key], res[dbkey])
else:
self.assertEqual(test_data[key], res[key])
if not self.only_db:
# just make sure that the kwargs are valid
# (no exception should be raised)
stack.Stack(ctx, utils.random_name(), tmpl, **res)
class ResetStateOnErrorTest(common.HeatTestCase):
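    # Exercises the stack.reset_state_on_error decorator: an exception that
    # escapes while the stack is IN_PROGRESS triggers mark_failed(), while a
    # normal return does not, even if the stack ends up FAILED.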
class DummyStack(object):
(COMPLETE, IN_PROGRESS, FAILED) = range(3)
action = 'something'
status = COMPLETE
def __init__(self):
self.mark_failed = mock.MagicMock()
self.convergence = False
@stack.reset_state_on_error
def raise_exception(self):
self.status = self.IN_PROGRESS
raise ValueError('oops')
@stack.reset_state_on_error
def raise_exit_exception(self):
self.status = self.IN_PROGRESS
raise BaseException('bye')
@stack.reset_state_on_error
def succeed(self):
return 'Hello world'
@stack.reset_state_on_error
def fail(self):
self.status = self.FAILED
return 'Hello world'
def test_success(self):
dummy = self.DummyStack()
self.assertEqual('Hello world', dummy.succeed())
self.assertFalse(dummy.mark_failed.called)
def test_failure(self):
dummy = self.DummyStack()
self.assertEqual('Hello world', dummy.fail())
self.assertFalse(dummy.mark_failed.called)
def test_reset_state_exception(self):
dummy = self.DummyStack()
exc = self.assertRaises(ValueError, dummy.raise_exception)
self.assertIn('oops', str(exc))
self.assertTrue(dummy.mark_failed.called)
def test_reset_state_exit_exception(self):
dummy = self.DummyStack()
exc = self.assertRaises(BaseException, dummy.raise_exit_exception)
self.assertIn('bye', str(exc))
self.assertTrue(dummy.mark_failed.called)
class StackStateSetTest(common.HeatTestCase):
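    # persist_count is how many times _persist_state() should run for the
    # given action/status transition; 'error' scenarios are invalid input.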
scenarios = [
('in_progress', dict(action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS,
persist_count=1, error=False)),
('create_complete', dict(action=stack.Stack.CREATE,
status=stack.Stack.COMPLETE,
persist_count=0, error=False)),
('create_failed', dict(action=stack.Stack.CREATE,
status=stack.Stack.FAILED,
persist_count=0, error=False)),
('update_complete', dict(action=stack.Stack.UPDATE,
status=stack.Stack.COMPLETE,
persist_count=1, error=False)),
('update_failed', dict(action=stack.Stack.UPDATE,
status=stack.Stack.FAILED,
persist_count=1, error=False)),
('delete_complete', dict(action=stack.Stack.DELETE,
status=stack.Stack.COMPLETE,
persist_count=1, error=False)),
('delete_failed', dict(action=stack.Stack.DELETE,
status=stack.Stack.FAILED,
persist_count=1, error=False)),
('adopt_complete', dict(action=stack.Stack.ADOPT,
status=stack.Stack.COMPLETE,
persist_count=0, error=False)),
('adopt_failed', dict(action=stack.Stack.ADOPT,
status=stack.Stack.FAILED,
persist_count=0, error=False)),
('rollback_complete', dict(action=stack.Stack.ROLLBACK,
status=stack.Stack.COMPLETE,
persist_count=1, error=False)),
('rollback_failed', dict(action=stack.Stack.ROLLBACK,
status=stack.Stack.FAILED,
persist_count=1, error=False)),
('invalid_action', dict(action='action',
status=stack.Stack.FAILED,
persist_count=0, error=True)),
('invalid_status', dict(action=stack.Stack.CREATE,
status='status',
persist_count=0, error=True)),
]
def test_state(self):
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
persist_state = self.patchobject(self.stack, '_persist_state')
self.assertEqual((stack.Stack.CREATE, stack.Stack.IN_PROGRESS),
self.stack.state)
if self.error:
self.assertRaises(ValueError, self.stack.state_set,
self.action, self.status, 'test')
else:
self.stack.state_set(self.action, self.status, 'test')
self.assertEqual((self.action, self.status), self.stack.state)
self.assertEqual('test', self.stack.status_reason)
self.assertEqual(self.persist_count, persist_state.call_count)
import collections
import copy
import datetime
import json
import logging
import time
from unittest import mock
import eventlet
import fixtures
from oslo_config import cfg
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.common import timeutils
from heat.db.sqlalchemy import api as db_api
from heat.engine.clients.os import keystone
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import function
from heat.engine import node_data
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import service
from heat.engine import stack
from heat.engine import stk_defn
from heat.engine import template
from heat.engine import update
from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
from heat.objects import stack as stack_object
from heat.objects import stack_tag as stack_tag_object
from heat.objects import user_creds as ucreds_object
from heat.tests import common
from heat.tests import fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
empty_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
}''')
class StackTest(common.HeatTestCase):
def setUp(self):
super(StackTest, self).setUp()
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
self.stub_auth()
def test_stack_reads_tenant(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id='bar')
self.assertEqual('bar', self.stack.tenant_id)
def test_stack_reads_tenant_from_context_if_empty(self):
self.ctx.tenant = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
tenant_id=None)
self.assertEqual('foo', self.stack.tenant_id)
def test_stack_reads_username(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username='bar')
self.assertEqual('bar', self.stack.username)
def test_stack_reads_username_from_context_if_empty(self):
self.ctx.username = 'foo'
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
username=None)
self.assertEqual('foo', self.stack.username)
def test_stack_string_repr(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
expected = 'Stack "%s" [%s]' % (self.stack.name, self.stack.id)
observed = str(self.stack)
self.assertEqual(expected, observed)
def test_state_defaults(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertEqual(('CREATE', 'IN_PROGRESS'), self.stack.state)
self.assertEqual('', self.stack.status_reason)
def test_timeout_secs_default(self):
cfg.CONF.set_override('stack_action_timeout', 1000)
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsNone(self.stack.timeout_mins)
self.assertEqual(1000, self.stack.timeout_secs())
def test_timeout_secs(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
timeout_mins=10)
self.assertEqual(600, self.stack.timeout_secs())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 10, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_negative(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 9, 59, 50)
self.assertEqual(-10, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_ms(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 5, 0)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27,
10, 4, 59, 750000)
self.assertEqual(-0.25, self.stack.time_elapsed())
@mock.patch.object(stack, 'oslo_timeutils')
def test_time_elapsed_with_updated_time(self, mock_tu):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.created_time = datetime.datetime(2015, 7, 27, 10, 0, 0)
self.stack.updated_time = datetime.datetime(2015, 7, 27, 11, 0, 0)
mock_tu.utcnow.return_value = datetime.datetime(2015, 7, 27, 11, 10, 0)
self.assertEqual(600, self.stack.time_elapsed())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_time_remaining(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
mock_te.return_value = 600
self.assertEqual(3000, self.stack.time_remaining())
@mock.patch.object(stack.Stack, 'time_elapsed')
def test_has_timed_out(self, mock_te):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.stack.status = self.stack.IN_PROGRESS
mock_te.return_value = 3601
self.assertTrue(self.stack.has_timed_out())
mock_te.return_value = 600
self.assertFalse(self.stack.has_timed_out())
self.stack.status = self.stack.COMPLETE
self.assertFalse(self.stack.has_timed_out())
self.stack.status = self.stack.FAILED
self.assertFalse(self.stack.has_timed_out())
def test_no_auth_token(self):
ctx = utils.dummy_context()
ctx.auth_token = None
self.stack = stack.Stack(ctx, 'test_stack', self.tmpl)
self.assertEqual('abcd1234',
ctx.auth_plugin.auth_token)
def test_state_deleted(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
self.stack.id = '1234'
self.stack.delete()
self.assertIsNone(self.stack.state_set(stack.Stack.CREATE,
stack.Stack.COMPLETE,
'test'))
    def test_load_nonexistent_id(self):
self.assertRaises(exception.NotFound, stack.Stack.load,
self.ctx, -1)
def test_total_resources_empty(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.stack.store()
self.assertEqual(0, self.stack.total_resources(self.stack.id))
self.assertEqual(0, self.stack.total_resources())
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_not_stored(self, sctr):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources())
sctr.assert_not_called()
def test_total_resources_not_found(self):
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
status_reason='flimflam')
self.assertEqual(0, self.stack.total_resources('1234'))
@mock.patch.object(db_api, 'stack_count_total_resources')
def test_total_resources_generic(self, sctr):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
sctr.return_value = 1
self.assertEqual(1, self.stack.total_resources(self.stack.id))
self.assertEqual(1, self.stack.total_resources())
def test_resource_get(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
self.assertEqual('A', self.stack.resource_get('A').name)
self.assertEqual(self.stack['A'], self.stack.resource_get('A'))
self.assertIsNone(self.stack.resource_get('B'))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_resource_get_db_fallback(self, gabs):
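        # 'A' comes from the current template, 'B' is rebuilt from its DB
        # record via the template it references, while 'C' (no definition in
        # that template) and 'D' (unknown) resolve to None.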
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
tpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
t2 = template.Template(tpl2)
t2.store(self.ctx)
db_resources = {
'A': mock.MagicMock(),
'B': mock.MagicMock(current_template_id=t2.id),
'C': mock.MagicMock(current_template_id=t2.id)
}
db_resources['A'].name = 'A'
db_resources['B'].name = 'B'
db_resources['C'].name = 'C'
gabs.return_value = db_resources
self.assertEqual('A', self.stack.resource_get('A').name)
self.assertEqual('B', self.stack.resource_get('B').name)
self.assertIsNone(self.stack.resource_get('C'))
self.assertIsNone(self.stack.resource_get('D'))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
all_resources = list(self.stack.iter_resources())
mock_db_call.assert_called_once_with(self.ctx, self.stack.id)
names = sorted([r.name for r in all_resources])
self.assertEqual(['A', 'B'], names)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_nested(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
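        # The nested stack resource reports three more resources (below), so
        # iter_resources(1) should yield 2 + 3 = 5 entries.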
def get_more(nested_depth=0, filters=None):
yield 'X'
yield 'Y'
yield 'Z'
mock_nested = self.patchobject(generic_rsrc.StackResourceType,
'nested')
mock_nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
resource_generator = self.stack.iter_resources()
        self.assertNotIsInstance(resource_generator, list)
first_level_resources = list(resource_generator)
self.assertEqual(2, len(first_level_resources))
all_resources = list(self.stack.iter_resources(1))
self.assertEqual(5, len(all_resources))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc = mock.MagicMock()
mock_rsc.name = 'A'
mock_rsc.current_template_id = self.stack.t.id
mock_db_call.return_value = {'A': mock_rsc}
all_resources = list(self.stack.iter_resources(
filters=dict(name=['A'])
))
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
])
self.assertEqual(1, len(all_resources))
self.assertEqual('A', all_resources[0].name)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_with_nonexistent_template(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id + 1)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
all_resources = list(self.stack.iter_resources())
self.assertEqual(1, len(all_resources))
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_nested_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
{'A': {'Type': 'StackResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tpl),
status_reason='blarg')
self.stack.store()
mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_a.name = 'A'
mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id)
mock_rsc_b.name = 'B'
mock_db_call.return_value = {
'A': mock_rsc_a,
'B': mock_rsc_b
}
def get_more(nested_depth=0, filters=None):
if filters:
yield 'X'
mock_nested = self.patchobject(generic_rsrc.StackResourceType,
'nested')
mock_nested.return_value.iter_resources = mock.MagicMock(
side_effect=get_more)
all_resources = list(self.stack.iter_resources(
nested_depth=1,
filters=dict(name=['A'])
))
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
])
self.assertEqual(3, len(all_resources))
def test_load_parent_resource(self):
self.stack = stack.Stack(self.ctx, 'load_parent_resource', self.tmpl,
parent_resource='parent')
self.stack.store()
stk = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
t = template.Template.load(self.ctx, stk.raw_template_id)
self.patchobject(template.Template, 'load', return_value=t)
self.patchobject(stack.Stack, '__init__', return_value=None)
stack.Stack.load(self.ctx, stack_id=self.stack.id)
stack.Stack.__init__.assert_called_once_with(
self.ctx, stk.name, t, stack_id=stk.id,
action=stk.action, status=stk.status,
status_reason=stk.status_reason,
timeout_mins=stk.timeout,
disable_rollback=stk.disable_rollback,
parent_resource='parent', owner_id=None,
stack_user_project_id=None,
created_time=mock.ANY,
updated_time=None,
user_creds_id=stk.user_creds_id,
tenant_id='test_tenant_id',
use_stored_context=False,
username=mock.ANY,
convergence=False,
current_traversal=self.stack.current_traversal,
prev_raw_template_id=None,
current_deps=None, cache_data=None,
nested_depth=0,
deleted_time=None, refresh_cred=False)
template.Template.load.assert_called_once_with(
self.ctx, stk.raw_template_id, stk.raw_template)
def test_identifier(self):
self.stack = stack.Stack(self.ctx, 'identifier_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.tenant_id, identifier.tenant)
self.assertEqual('identifier_test', identifier.stack_name)
self.assertTrue(identifier.stack_id)
self.assertFalse(identifier.path)
def test_get_stack_abandon_data(self):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {'param1': {'Type': 'String'}},
'Resources':
{'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
resources = '''{"A": {"status": "COMPLETE", "name": "A",
"resource_data": {}, "resource_id": null, "action": "INIT",
"type": "GenericResourceType", "metadata": {}},
"B": {"status": "COMPLETE", "name": "B", "resource_data": {},
"resource_id": null, "action": "INIT", "type": "GenericResourceType",
"metadata": {}}}'''
env = environment.Environment({'parameters': {'param1': 'test'}})
self.ctx.tenant_id = '123'
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl, env=env),
tenant_id=self.ctx.tenant_id,
stack_user_project_id='234',
tags=['tag1', 'tag2'])
self.stack.store()
info = self.stack.prepare_abandon()
self.assertEqual('CREATE', info['action'])
self.assertIn('id', info)
self.assertEqual('stack_details_test', info['name'])
self.assertEqual(json.loads(resources), info['resources'])
self.assertEqual('IN_PROGRESS', info['status'])
self.assertEqual(tpl, info['template'])
self.assertEqual('123', info['project_id'])
self.assertEqual('234', info['stack_user_project_id'])
self.assertEqual(env.params, info['environment']['parameters'])
self.assertEqual(['tag1', 'tag2'], info['tags'])
def test_set_param_id(self):
self.stack = stack.Stack(self.ctx, 'param_arn_test', self.tmpl)
exp_prefix = ('arn:openstack:heat::test_tenant_id'
':stacks/param_arn_test/')
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + 'None')
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(exp_prefix + self.stack.id,
self.stack.parameters['AWS::StackId'])
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
def test_set_param_id_update(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Metadata': {'Bar': {'Ref': 'AWS::StackId'}},
'Properties': {'Foo': 'abc'}}}}
self.stack = stack.Stack(self.ctx, 'update_stack_arn_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
stack_arn = self.stack.parameters['AWS::StackId']
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Metadata': {'Bar':
{'Ref': 'AWS::StackId'}},
'Properties': {'Foo': 'xyz'}}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('xyz', self.stack['AResource'].properties['Foo'])
self.assertEqual(
stack_arn, self.stack['AResource'].metadata_get()['Bar'])
def test_load_param_id(self):
self.stack = stack.Stack(self.ctx, 'param_load_arn_test', self.tmpl)
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
newstack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(identifier.arn(), newstack.parameters['AWS::StackId'])
def test_load_reads_tenant_id(self):
self.ctx.tenant = 'foobar'
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.store()
stack_id = self.stack.id
self.ctx.tenant = None
self.stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', self.stack.tenant_id)
def test_load_reads_username_from_db(self):
self.ctx.username = 'foobar'
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.store()
stack_id = self.stack.id
self.ctx.username = None
stk = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', stk.username)
self.ctx.username = 'not foobar'
stk = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual('foobar', stk.username)
def test_load_all(self):
stack1 = stack.Stack(self.ctx, 'stack1', self.tmpl)
stack1.store()
stack2 = stack.Stack(self.ctx, 'stack2', self.tmpl)
stack2.store()
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stack3 = stack.Stack(self.ctx, 'stack3', self.tmpl,
owner_id=stack2.id)
stack3.store()
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stacks = list(stack.Stack.load_all(self.ctx, show_nested=True))
self.assertEqual(3, len(stacks))
stack1._backup_stack()
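        # The backup stack created above must stay hidden from load_all(),
        # even with show_nested=True.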
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(2, len(stacks))
stacks = list(stack.Stack.load_all(self.ctx, show_nested=True))
self.assertEqual(3, len(stacks))
def test_load_all_not_found(self):
stack1 = stack.Stack(self.ctx, 'stack1', self.tmpl)
stack1.store()
tmpl2 = template.Template(copy.deepcopy(empty_template))
stack2 = stack.Stack(self.ctx, 'stack2', tmpl2)
stack2.store()
def fake_load(ctx, template_id, tmpl):
if template_id == stack2.t.id:
raise exception.NotFound()
else:
return tmpl2
with mock.patch.object(template.Template, 'load') as tmpl_load:
tmpl_load.side_effect = fake_load
stacks = list(stack.Stack.load_all(self.ctx))
self.assertEqual(1, len(stacks))
def test_created_time(self):
self.stack = stack.Stack(self.ctx, 'creation_time_test', self.tmpl)
self.assertIsNone(self.stack.created_time)
self.stack.store()
self.assertIsNotNone(self.stack.created_time)
def test_updated_time(self):
self.stack = stack.Stack(self.ctx, 'updated_time_test',
self.tmpl)
self.assertIsNone(self.stack.updated_time)
self.stack.store()
self.stack.create()
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
newstack = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl))
self.stack.update(newstack)
self.assertIsNotNone(self.stack.updated_time)
def test_update_prev_raw_template(self):
self.stack = stack.Stack(self.ctx, 'updated_time_test',
self.tmpl)
self.assertIsNone(self.stack.updated_time)
self.stack.store()
self.stack.create()
self.assertIsNone(self.stack.prev_raw_template_id)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
newstack = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl))
self.stack.update(newstack)
self.assertIsNotNone(self.stack.prev_raw_template_id)
prev_t = template.Template.load(self.ctx,
self.stack.prev_raw_template_id)
self.assertEqual(tmpl, prev_t.t)
prev_id = self.stack.prev_raw_template_id
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R2': {'Type': 'GenericResourceType'}}}
newstack2 = stack.Stack(self.ctx, 'updated_time_test',
template.Template(tmpl2))
self.stack.update(newstack2)
self.assertIsNotNone(self.stack.prev_raw_template_id)
self.assertNotEqual(prev_id, self.stack.prev_raw_template_id)
prev_t2 = template.Template.load(self.ctx,
self.stack.prev_raw_template_id)
self.assertEqual(tmpl2, prev_t2.t)
self.assertRaises(exception.NotFound,
template.Template.load, self.ctx, prev_id)
def test_access_policy_update(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'GenericResourceType'},
'Policy': {
'Type': 'OS::Heat::AccessPolicy',
'Properties': {
'AllowedResources': ['R1']
}}}}
self.stack = stack.Stack(self.ctx, 'update_stack_access_policy_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'GenericResourceType'},
'R2': {'Type': 'GenericResourceType'},
'Policy': {
'Type': 'OS::Heat::AccessPolicy',
'Properties': {
'AllowedResources': ['R1', 'R2'],
}}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
def test_abandon_nodelete_project(self):
self.stack = stack.Stack(self.ctx, 'delete_trust', self.tmpl)
stack_id = self.stack.store()
self.stack.set_stack_user_project_id(project_id='aproject456')
db_s = stack_object.Stack.get_by_id(self.ctx, stack_id)
self.assertIsNotNone(db_s)
self.stack.delete(abandon=True)
db_s = stack_object.Stack.get_by_id(self.ctx, stack_id)
self.assertIsNone(db_s)
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_suspend_resume(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.assertIsNone(self.stack.updated_time)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
stack_suspend_time = self.stack.updated_time
self.assertIsNotNone(stack_suspend_time)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
self.assertNotEqual(stack_suspend_time, self.stack.updated_time)
def test_suspend_stack_suspended_ok(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.patchobject(generic_rsrc.GenericResource, 'suspend')
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
generic_rsrc.GenericResource.suspend.assert_not_called()
    def test_resume_stack_resumed_ok(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'suspend_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
self.patchobject(generic_rsrc.GenericResource, 'resume')
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.COMPLETE),
self.stack.state)
generic_rsrc.GenericResource.resume.assert_not_called()
def test_suspend_fail(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
exc = Exception('foo')
self.patchobject(generic_rsrc.GenericResource, 'handle_suspend',
side_effect=exc)
self.stack = stack.Stack(self.ctx, 'suspend_test_fail',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource SUSPEND failed: Exception: '
'resources.AResource: foo',
self.stack.status_reason)
generic_rsrc.GenericResource.handle_suspend.assert_called_once_with()
def test_resume_fail(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.patchobject(generic_rsrc.GenericResource, 'handle_resume',
side_effect=Exception('foo'))
self.stack = stack.Stack(self.ctx, 'resume_test_fail',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource RESUME failed: Exception: '
'resources.AResource: foo',
self.stack.status_reason)
def test_suspend_timeout(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
exc = scheduler.Timeout('foo', 0)
self.patchobject(generic_rsrc.GenericResource, 'handle_suspend',
side_effect=exc)
self.stack = stack.Stack(self.ctx, 'suspend_test_fail_timeout',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.FAILED),
self.stack.state)
self.assertEqual('Suspend timed out', self.stack.status_reason)
generic_rsrc.GenericResource.handle_suspend.assert_called_once_with()
def test_resume_timeout(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
exc = scheduler.Timeout('foo', 0)
self.patchobject(generic_rsrc.GenericResource, 'handle_resume',
side_effect=exc)
self.stack = stack.Stack(self.ctx, 'resume_test_fail_timeout',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
self.stack.suspend()
self.assertEqual((self.stack.SUSPEND, self.stack.COMPLETE),
self.stack.state)
self.stack.resume()
self.assertEqual((self.stack.RESUME, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resume timed out', self.stack.status_reason)
generic_rsrc.GenericResource.handle_resume.assert_called_once_with()
def _get_stack_to_check(self, name):
tpl = {"HeatTemplateFormatVersion": "2012-12-12",
"Resources": {
"A": {"Type": "GenericResourceType"},
"B": {"Type": "GenericResourceType"}}}
self.stack = stack.Stack(self.ctx, name, template.Template(tpl),
status_reason=name)
self.stack.store()
def _mock_check(res):
res.handle_check = mock.Mock()
        for res in self.stack.resources.values():
            _mock_check(res)
return self.stack
def test_check_supported(self):
stack1 = self._get_stack_to_check('check-supported')
stack1['A'].state_set(stack1['A'].CREATE, stack1['A'].COMPLETE)
stack1['B'].state_set(stack1['B'].CREATE, stack1['B'].COMPLETE)
stack1.check()
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
        for res in stack1.resources.values():
            self.assertTrue(res.handle_check.called)
self.assertNotIn('not fully supported', stack1.status_reason)
def test_check_not_supported(self):
stack1 = self._get_stack_to_check('check-not-supported')
del stack1['B'].handle_check
stack1['A'].state_set(stack1['A'].CREATE, stack1['A'].COMPLETE)
stack1.check()
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
self.assertTrue(stack1['A'].handle_check.called)
self.assertIn('not fully supported', stack1.status_reason)
def test_check_fail(self):
stk = self._get_stack_to_check('check-fail')
stk.check()
self.assertEqual(stk.FAILED, stk.status)
self.assertEqual(stk.CHECK, stk.action)
self.assertFalse(stk['A'].handle_check.called)
self.assertFalse(stk['B'].handle_check.called)
self.assertIn('Resource A not created yet',
stk.status_reason)
self.assertIn('Resource B not created yet',
stk.status_reason)
stk['A'].handle_check.side_effect = Exception('fail-A')
stk['B'].handle_check.side_effect = Exception('fail-B')
stk['A'].state_set(stk['A'].CREATE, stk['A'].COMPLETE)
stk['B'].state_set(stk['B'].CREATE, stk['B'].COMPLETE)
stk.check()
self.assertEqual(stk.FAILED, stk.status)
self.assertEqual(stk.CHECK, stk.action)
self.assertTrue(stk['A'].handle_check.called)
self.assertTrue(stk['B'].handle_check.called)
self.assertIn('fail-A', stk.status_reason)
self.assertIn('fail-B', stk.status_reason)
def test_adopt_stack(self):
adopt_data = '''{
"action": "CREATE",
"status": "COMPLETE",
"name": "my-test-stack-name",
"resources": {
"AResource": {
"status": "COMPLETE",
"name": "AResource",
"resource_data": {},
"metadata": {},
"resource_id": "test-res-id",
"action": "CREATE",
"type": "GenericResourceType"
}
}
}'''
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
template.Template(tmpl),
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
self.stack.adopt()
res = self.stack['AResource']
        self.assertEqual('test-res-id', res.resource_id)
self.assertEqual('AResource', res.name)
self.assertEqual('COMPLETE', res.status)
self.assertEqual('ADOPT', res.action)
self.assertEqual((self.stack.ADOPT, self.stack.COMPLETE),
self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, self.stack.id)
loaded_stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
loaded_stack.outputs['TestOutput'].get_value())
self.assertIsNone(loaded_stack['AResource']._stored_properties_data)
def test_adopt_stack_fails(self):
adopt_data = '''{
"action": "CREATE",
"status": "COMPLETE",
"name": "my-test-stack-name",
"resources": {}
}'''
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
}
})
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
tmpl,
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
self.stack.adopt()
self.assertEqual((self.stack.ADOPT, self.stack.FAILED),
self.stack.state)
expected = ('Resource ADOPT failed: Exception: resources.foo: '
'Resource ID was not provided.')
self.assertEqual(expected, self.stack.status_reason)
def test_adopt_stack_rollback(self):
adopt_data = '''{
"name": "my-test-stack-name",
"resources": {}
}'''
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack',
tmpl,
disable_rollback=False,
adopt_stack_data=json.loads(adopt_data))
self.stack.store()
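        # The template's 'foo' resource has no adopt data, so the adopt fails
        # and, with rollback enabled, the stack is deleted with abandon=True.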
with mock.patch.object(self.stack, 'delete',
side_effect=self.stack.delete) as mock_delete:
self.stack.adopt()
self.assertEqual((self.stack.ROLLBACK, self.stack.COMPLETE),
self.stack.state)
mock_delete.assert_called_once_with(action=self.stack.ROLLBACK,
abandon=True)
def test_resource_by_refid(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'resource_by_refid_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
for action, status in (
(rsrc.INIT, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.CHECK, rsrc.COMPLETE)):
rsrc.state_set(action, status)
stk_defn.update_resource_data(self.stack.defn, rsrc.name,
rsrc.node_data())
self.assertEqual(rsrc, self.stack.resource_by_refid('aaaa'))
rsrc.state_set(rsrc.DELETE, rsrc.IN_PROGRESS)
stk_defn.update_resource_data(self.stack.defn, rsrc.name,
rsrc.node_data())
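        # Once the resource is DELETE IN_PROGRESS it must not be returned,
        # neither for its own refid nor for an unknown one.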
try:
self.assertIsNone(self.stack.resource_by_refid('aaaa'))
self.assertIsNone(self.stack.resource_by_refid('bbbb'))
finally:
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
def test_resource_name_ref_by_depends_on(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'AResource'},
'DependsOn': 'AResource'}}}
self.stack = stack.Stack(self.ctx, 'resource_by_name_ref_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
self.assertIn('BResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
b_rsrc = self.stack['BResource']
b_rsrc.resource_id_set('bbbb')
b_foo_ref = b_rsrc.properties.get('Foo')
for action, status in (
(rsrc.INIT, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
ref_rsrc = self.stack.resource_by_refid(b_foo_ref)
self.assertEqual(rsrc, ref_rsrc)
self.assertIn(b_rsrc.name, ref_rsrc.required_by())
def test_create_failure_recovery(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'OverwrittenFnGetRefIdType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
class FakeException(Exception):
pass
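        # handle_create fails on the first attempt and succeeds on the
        # second, so the update should recover the failed resource in place.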
mock_create = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType,
'handle_create',
side_effect=[FakeException, None])
mock_delete = self.patchobject(generic_rsrc.ResourceWithFnGetRefIdType,
'handle_delete', return_value=None)
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(
'abc',
self.stack['AResource']._stored_properties_data['Foo'])
self.assertEqual(
'ID-AResource',
self.stack['BResource']._stored_properties_data['Foo'])
mock_delete.assert_called_once_with()
self.assertEqual(2, mock_create.call_count)
def test_create_bad_attribute(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['AResource',
'Foo']}}}}}
self.stack = stack.Stack(self.ctx, 'bad_attr_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.patchobject(generic_rsrc.ResourceWithProps,
'_update_stored_properties',
side_effect=exception.InvalidTemplateAttribute(
resource='a', key='foo'))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('Resource CREATE failed: The Referenced Attribute '
'(a foo) is incorrect.', self.stack.status_reason)
def test_stack_create_timeout(self):
def dummy_task():
while True:
yield
self.patchobject(scheduler.DependencyTaskGroup, '__call__',
return_value=dummy_task())
stk = stack.Stack(self.ctx, 's', self.tmpl)
start_time = time.time()
self.patchobject(timeutils, 'wallclock',
side_effect=[start_time, start_time + 1,
start_time + stk.timeout_secs() + 1])
stk.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED), stk.state)
self.assertEqual('Create timed out', stk.status_reason)
self.assertEqual(3, timeutils.wallclock.call_count)
def test_stack_name_valid(self):
stk = stack.Stack(self.ctx, 's', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'stack123', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test.stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'TEST', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test-stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
def test_stack_name_invalid(self):
gt_255_chars = ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuv')
stack_names = ['_foo', '1bad', '.kcats', 'test stack', ' teststack',
'^-^', '"stack"', '1234', 'cat|dog', '$(foo)',
'test/stack', 'test\\stack', 'test::stack',
'test;stack', 'test~stack', '#test', gt_255_chars]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s must contain" % stack_name,
str(ex))
def test_stack_name_invalid_type(self):
stack_names = [{"bad": 123}, ["no", "lists"]]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s, must be a string"
% stack_name, str(ex))
def test_resource_state_get_att(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(self.ctx, 'resource_state_get_att',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
self.assertEqual('AResource', rsrc.FnGetAtt('Foo'))
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.FAILED),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.FAILED),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.DELETE, rsrc.IN_PROGRESS),
(rsrc.DELETE, rsrc.FAILED),
(rsrc.DELETE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
self.stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
self.stack.outputs['TestOutput'].get_value())
def test_resource_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType',
'DependsOn': 'AResource'},
'CResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(['BResource'],
self.stack['AResource'].required_by())
self.assertEqual([],
self.stack['CResource'].required_by())
required_by = self.stack['BResource'].required_by()
self.assertEqual(2, len(required_by))
for r in ['CResource', 'DResource']:
self.assertIn(r, required_by)
def test_resource_multi_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': ['AResource',
'BResource',
'CResource']}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
for r in ['AResource', 'BResource', 'CResource']:
self.assertEqual(['DResource'],
self.stack[r].required_by())
def test_store_saves_owner(self):
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)
self.assertEqual(self.stack.id, db_stack.owner_id)
def test_init_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='my_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.stored_context().to_dict())
def test_tags_property_get_set(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack._tags)
self.assertEqual([], test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.tags = ['tag1', 'tag2']
self.assertEqual(['tag1', 'tag2'], self.stack._tags)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack._tags)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
self.assertEqual(['tag1', 'tag2'], test_stack._tags)
def test_load_reads_tags(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual([], test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
def test_store_saves_tags(self):
self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl)
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertIsNone(db_tags)
self.stack = stack.Stack(self.ctx, 'tags_stack2', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertEqual('tag1', db_tags[0].tag)
self.assertEqual('tag2', db_tags[1].tag)
def test_store_saves_creds(self):
cfg.CONF.set_default('deferred_auth_method', 'password')
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# should've stored the username/password in the context
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertEqual(self.ctx.username, user_creds.get('username'))
self.assertEqual(self.ctx.password, user_creds.get('password'))
self.assertIsNone(user_creds.get('trust_id'))
self.assertIsNone(user_creds.get('trustor_user_id'))
expected_context = context.RequestContext.from_dict(self.ctx.to_dict())
expected_context.auth_token = None
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context.to_dict(), stored_context)
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
def test_store_saves_creds_trust(self):
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=fake_ks.FakeKeystoneClient(
user_id='auser123'))
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
        # should've stored the trust_id and trustor_user_id returned from
        # keystone; username/password should not have been stored
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertIsNone(user_creds.get('username'))
self.assertIsNone(user_creds.get('password'))
self.assertEqual('atrust', user_creds.get('trust_id'))
self.assertEqual('auser123', user_creds.get('trustor_user_id'))
auth = self.patchobject(context.RequestContext,
'trusts_auth_plugin')
self.patchobject(auth, 'get_access',
return_value=fakes.FakeAccessInfo([], None, None))
expected_context = context.RequestContext(
trust_id='atrust', trustor_user_id='auser123',
request_id=self.ctx.request_id, is_admin=False).to_dict()
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context, stored_context)
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
keystone.KeystoneClientPlugin._create.assert_called_with()
def test_backup_copies_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='my_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
backup = self.stack._backup_stack()
self.assertEqual(creds.id, backup.user_creds_id)
def test_stored_context_err(self):
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
self.assertEqual(expected_err, str(ex))
def test_store_gets_username_from_stack(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('foobar', db_stack.username)
def test_store_backup_true(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=True)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertTrue(db_stack.backup)
def test_store_backup_false(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=False)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertFalse(db_stack.backup)
def test_init_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store1', self.tmpl,
user_creds_id=creds.id,
use_stored_context=False)
ctx_expected = self.ctx.to_dict()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_init_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store2', self.tmpl,
user_creds_id=creds.id,
use_stored_context=True)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_load_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store3', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=False)
self.assertEqual(self.ctx.to_dict(), load_stack.context.to_dict())
def test_load_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='mystored_pass')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store4', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=True)
self.assertEqual(ctx_expected, load_stack.context.to_dict())
def test_load_honors_owner(self):
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
def _test_load_with_refresh_cred(self, refresh=True):
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.patchobject(self.ctx.auth_plugin, 'get_user_id',
return_value='old_trustor_user_id')
self.patchobject(self.ctx.auth_plugin, 'get_project_id',
return_value='test_tenant_id')
old_context = utils.dummy_context()
old_context.trust_id = 'atrust123'
old_context.trustor_user_id = (
'trustor_user_id' if refresh else 'old_trustor_user_id')
m_sc = self.patchobject(context, 'StoredContext')
m_sc.from_dict.return_value = old_context
self.stack = stack.Stack(self.ctx, 'test_regenerate_trust', self.tmpl)
self.stack.store()
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
check_refresh_cred=True)
self.assertEqual(refresh, load_stack.refresh_cred)
def test_load_with_refresh_cred(self):
self._test_load_with_refresh_cred()
def test_load_with_no_refresh_cred(self):
self._test_load_with_refresh_cred(refresh=False)
def test_requires_deferred_auth(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.assertFalse(self.stack.requires_deferred_auth())
self.stack['CResource'].requires_deferred_auth = True
self.assertTrue(self.stack.requires_deferred_auth())
def test_stack_user_project_id_default(self):
self.stack = stack.Stack(self.ctx, 'user_project_none', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertIsNone(db_stack.stack_user_project_id)
def test_stack_user_project_id_constructor(self):
self.stub_keystoneclient()
self.stack = stack.Stack(self.ctx, 'user_project_init',
self.tmpl,
stack_user_project_id='aproject1234')
self.stack.store()
self.assertEqual('aproject1234', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject1234', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_stack_user_project_id_setter(self):
self.stub_keystoneclient()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.set_stack_user_project_id(project_id='aproject456')
self.assertEqual('aproject456', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject456', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_stack_user_project_id_create(self):
self.stub_keystoneclient()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.create_stack_user_project_id()
self.assertEqual('aprojectid', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aprojectid', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
def test_stack_eager_or_lazy_load_templ(self):
self.stack = stack.Stack(self.ctx, 'test_stack_eager_or_lazy_tmpl',
self.tmpl)
self.stack.store()
ctx1 = utils.dummy_context()
s1_db_result = db_api.stack_get(ctx1, self.stack.id, eager_load=True)
s1_obj = stack_object.Stack._from_db_object(ctx1, stack_object.Stack(),
s1_db_result)
self.assertIsNotNone(s1_obj._raw_template)
self.assertIsNotNone(s1_obj.raw_template)
ctx2 = utils.dummy_context()
s2_db_result = db_api.stack_get(ctx2, self.stack.id, eager_load=False)
s2_obj = stack_object.Stack._from_db_object(ctx2, stack_object.Stack(),
s2_db_result)
self.assertFalse(hasattr(s2_obj, "_raw_template"))
self.assertIsNotNone(s2_obj.raw_template)
self.assertIsNotNone(s2_obj._raw_template)
def test_preview_resources_returns_list_of_resource_previews(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'preview_stack',
template.Template(tmpl))
res = mock.Mock()
res.preview.return_value = 'foo'
self.stack._resources = {'r1': res}
resources = self.stack.preview_resources()
self.assertEqual(['foo'], resources)
def test_correct_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'def'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
self.stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
self.stack.outputs['Resource_attr'].get_value())
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_incorrect_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
ex = self.assertRaises(exception.InvalidTemplateAttribute,
self.stack.outputs['Resource_attr'].get_value)
self.assertIn('The Referenced Attribute (AResource Bar) is '
'incorrect.',
str(ex))
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_stack_load_no_param_value_validation(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
flavor:
type: string
description: A flavor.
constraints:
- custom_constraint: nova.flavor
resources:
a_resource:
type: GenericResourceType
''')
fc = fakes.FakeClient()
self.patchobject(nova.NovaClientPlugin, 'client', return_value=fc)
fc.flavors = mock.Mock()
flavor = collections.namedtuple("Flavor", ["id", "name"])
flavor.id = "1234"
flavor.name = "dummy"
fc.flavors.get.return_value = flavor
test_env = environment.Environment({'flavor': '1234'})
self.stack = stack.Stack(self.ctx, 'stack_with_custom_constraint',
template.Template(tmpl, env=test_env))
self.stack.validate()
self.stack.store()
self.stack.create()
stack_id = self.stack.id
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(stack_id, loaded_stack.parameters['OS::stack_id'])
fc.flavors.get.assert_called_once_with('1234')
def test_snapshot_delete(self):
snapshots = []
class ResourceDeleteSnapshot(generic_rsrc.ResourceWithProps):
def handle_delete_snapshot(self, data):
snapshots.append(data)
resource._register_class(
'ResourceDeleteSnapshot', ResourceDeleteSnapshot)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'ResourceDeleteSnapshot'}}}
self.stack = stack.Stack(self.ctx, 'snapshot_stack',
template.Template(tmpl))
data = self.stack.prepare_abandon()
fake_snapshot = collections.namedtuple('Snapshot', ('data',))(data)
self.stack.delete_snapshot(fake_snapshot)
self.assertEqual([data['resources']['AResource']], snapshots)
def test_delete_snapshot_without_data(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'R1': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'snapshot_stack',
template.Template(tmpl))
fake_snapshot = collections.namedtuple('Snapshot', ('data',))(None)
self.assertIsNone(self.stack.delete_snapshot(fake_snapshot))
def test_incorrect_outputs_cfn_get_attr(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertRaisesRegex(
exception.StackValidationFailed,
('Outputs.Resource_attr.Value.Fn::GetAtt: The Referenced '
r'Attribute \(AResource Bar\) is incorrect.'),
self.stack.validate)
def test_incorrect_outputs_cfn_incorrect_reference(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Outputs:
Output:
Value:
Fn::GetAtt:
- Resource
- Foo
""")
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('The specified reference "Resource" '
'(in unknown) is incorrect.', str(ex))
def test_incorrect_outputs_incorrect_reference(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
outputs:
output:
value: { get_attr: [resource, foo] }
""")
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('The specified reference "resource" '
'(in unknown) is incorrect.', str(ex))
def test_incorrect_outputs_cfn_missing_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Description: the attr
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Each output definition must contain a Value key.',
str(ex))
self.assertIn('Outputs.Resource_attr', str(ex))
def test_incorrect_outputs_cfn_empty_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Value: ''
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_none_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
Value:
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_string_data(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
This is wrong data
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Found a %s instead' % str.__name__,
str(ex))
self.assertIn('Outputs.Resource_attr', str(ex))
def test_prop_validate_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
FooInt: notanint
""")
self.stack = stack.Stack(self.ctx, 'stack_with_bad_property',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("'notanint' is not an integer",
str(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
def test_disable_validate_required_param(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
parameters:
aparam:
type: number
resources:
AResource:
type: ResourceWithPropsRefPropOnValidate
properties:
FooInt: {get_param: aparam}
""")
self.stack = stack.Stack(self.ctx, 'stack_with_reqd_param',
template.Template(tmpl))
ex = self.assertRaises(exception.UserParameterMissing,
self.stack.validate)
self.assertIn("The Parameter (aparam) was not provided",
str(ex))
self.stack.strict_validate = False
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("The Parameter (aparam) was not provided",
str(ex))
self.assertIsNone(self.stack.validate(validate_res_tmpl_only=True))
def test_nodisable_validate_tmpl_err(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
resources:
AResource:
type: ResourceWithPropsRefPropOnValidate
depends_on: noexist
properties:
FooInt: 123
""")
self.stack = stack.Stack(self.ctx, 'stack_with_tmpl_err',
template.Template(tmpl))
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
str(ex))
self.stack.strict_validate = False
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
str(ex))
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate,
validate_res_tmpl_only=True)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
str(ex))
def test_validate_property_getatt(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'R1': {'Type': 'ResourceWithPropsType'},
'R2': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': {'Fn::GetAtt': ['R1', 'Foo']}}}}
}
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(tmpl))
self.assertIsNone(self.stack.validate())
def test_param_validate_value(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
foo:
Type: Number
""")
env1 = environment.Environment({'parameters': {'foo': 'abc'}})
self.stack = stack.Stack(self.ctx, 'stack_with_bad_param',
template.Template(tmpl, env=env1))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("Parameter 'foo' is invalid: could not convert "
"string to float:", str(ex))
self.assertIn("abc", str(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
def test_incorrect_outputs_cfn_list_data(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
AResource:
Type: ResourceWithPropsType
Properties:
Foo: abc
Outputs:
Resource_attr:
- Data is not what it seems
""")
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Found a list', str(ex))
self.assertIn('Outputs.Resource_attr', str(ex))
def test_incorrect_deletion_policy(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
Deletion_Policy:
Type: String
Default: [1, 2]
Resources:
AResource:
Type: ResourceWithPropsType
DeletionPolicy: {Ref: Deletion_Policy}
Properties:
Foo: abc
""")
self.stack = stack.Stack(self.ctx, 'stack_bad_delpol',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Invalid deletion policy "[1, 2]"',
str(ex))
def test_deletion_policy_apply_ref(self):
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
Deletion_Policy:
Type: String
Default: Delete
Resources:
AResource:
Type: ResourceWithPropsType
DeletionPolicy: wibble
Properties:
Foo: abc
DeletionPolicy: {Ref: Deletion_Policy}
""")
self.stack = stack.Stack(self.ctx, 'stack_delpol_get_param',
template.Template(tmpl))
self.stack.validate()
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
def test_deletion_policy_apply_get_param(self):
tmpl = template_format.parse("""
heat_template_version: 2016-04-08
parameters:
deletion_policy:
type: string
default: Delete
resources:
AResource:
type: ResourceWithPropsType
deletion_policy: {get_param: deletion_policy}
properties:
Foo: abc
""")
self.stack = stack.Stack(self.ctx, 'stack_delpol_get_param',
template.Template(tmpl))
self.stack.validate()
self.stack.store()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
def test_incorrect_deletion_policy_hot(self):
tmpl = template_format.parse("""
heat_template_version: 2013-05-23
parameters:
deletion_policy:
type: string
default: [1, 2]
resources:
AResource:
type: ResourceWithPropsType
deletion_policy: {get_param: deletion_policy}
properties:
Foo: abc
""")
self.stack = stack.Stack(self.ctx, 'stack_bad_delpol',
template.Template(tmpl))
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn('Invalid deletion policy "[1, 2]',
str(ex))
def test_incorrect_outputs_hot_get_attr(self):
tmpl = {'heat_template_version': '2013-05-23',
'resources': {
'AResource': {'type': 'ResourceWithPropsType',
'properties': {'Foo': 'abc'}}},
'outputs': {
'resource_attr': {
'value': {
'get_attr': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.assertRaisesRegex(
exception.StackValidationFailed,
('outputs.resource_attr.value.get_attr: The Referenced Attribute '
r'\(AResource Bar\) is incorrect.'),
self.stack.validate)
def test_snapshot_save_called_first(self):
def snapshotting_called_first(stack, action, status, reason):
self.assertEqual(stack.status, stack.IN_PROGRESS)
self.assertEqual(stack.action, stack.SNAPSHOT)
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.stack.snapshot(save_snapshot_func=snapshotting_called_first)
def test_restore(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tmpl))
self.stack.store()
self.stack.create()
data = copy.deepcopy(self.stack.prepare_abandon())
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
new_tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'A': {'Type': 'GenericResourceType'}}}
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(new_tmpl))
self.stack.update(updated_stack)
self.assertEqual(1, len(self.stack.resources))
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(2, len(self.stack.resources))
def test_restore_with_original_env(self):
tmpl = {
'heat_template_version': '2013-05-23',
'parameters': {
'foo': {'type': 'string'}
},
'resources': {
'A': {
'type': 'ResourceWithPropsType',
'properties': {'Foo': {'get_param': 'foo'}}
}
}
}
self.stack = stack.Stack(self.ctx, 'stack_restore_test',
template.Template(
tmpl,
env=environment.Environment(
{'foo': 'abc'})))
self.stack.store()
self.stack.create()
self.assertEqual('abc',
self.stack.resources['A'].properties['Foo'])
data = copy.deepcopy(self.stack.prepare_abandon())
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(
tmpl,
env=environment.Environment(
{'foo': 'xyz'})))
self.stack.update(updated_stack)
self.assertEqual('xyz',
self.stack.resources['A'].properties['Foo'])
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc',
self.stack.resources['A'].properties['Foo'])
def test_hot_restore(self):
tpl = {'heat_template_version': '2013-05-23',
'resources':
{'A': {'type': 'ResourceWithRestoreType'}}}
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl))
self.stack.store()
self.stack.create()
data = self.stack.prepare_abandon()
data['resources']['A']['resource_data']['a_string'] = 'foo'
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, self.stack.id)
self.stack.restore(fake_snapshot)
self.assertEqual((stack.Stack.RESTORE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(
'foo', self.stack.resources['A'].properties['a_string'])
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_lightweight_stack_getatt(self, mock_drg):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['foo', 'bar']},
}
}
}
})
rsrcs_data = {'foo': {'reference_id': 'foo-id',
'attrs': {'bar': 'baz'}, 'uuid': mock.ANY,
'id': mock.ANY, 'action': 'CREATE',
'status': 'COMPLETE'},
'bar': {'reference_id': 'bar-id', 'uuid': mock.ANY,
'id': mock.ANY, 'action': 'CREATE',
'status': 'COMPLETE'}}
cache_data = {n: node_data.NodeData.from_dict(d)
for n, d in rsrcs_data.items()}
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
lightweight_stack = stack.Stack.load(self.ctx, stack_id=tmpl_stack.id,
cache_data=cache_data)
# Check if the property has the appropriate resolved value.
bar = resource.Resource(
'bar',
lightweight_stack.defn.resource_definition('bar'),
lightweight_stack)
self.assertEqual('baz', bar.properties['Foo'])
# Make sure FnGetAtt returns the cached value.
attr_value = lightweight_stack.defn['foo'].FnGetAtt('bar')
self.assertEqual('baz', attr_value)
# Make sure calls are not made to the database to retrieve the
# resource state.
self.assertFalse(mock_drg.called)
@mock.patch.object(stack.Stack, 'db_resource_get')
def test_lightweight_stack_getrefid(self, mock_drg):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
})
rsrcs_data = {'foo': {'reference_id': 'physical-resource-id',
'uuid': mock.ANY, 'id': mock.ANY,
'action': 'CREATE', 'status': 'COMPLETE'},
'bar': {'reference_id': 'bar-id', 'uuid': mock.ANY,
'id': mock.ANY, 'action': 'CREATE',
'status': 'COMPLETE'}}
cache_data = {n: node_data.NodeData.from_dict(d)
for n, d in rsrcs_data.items()}
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
lightweight_stack = stack.Stack.load(self.ctx, stack_id=tmpl_stack.id,
cache_data=cache_data)
# Check if the property has the appropriate resolved value.
bar = resource.Resource(
'bar',
lightweight_stack.defn.resource_definition('bar'),
lightweight_stack)
self.assertEqual('physical-resource-id', bar.properties['Foo'])
# Make sure FnGetRefId returns the cached value.
resource_id = lightweight_stack.defn['foo'].FnGetRefId()
self.assertEqual('physical-resource-id', resource_id)
# Make sure calls are not made to the database to retrieve the
# resource state.
self.assertFalse(mock_drg.called)
def test_encrypt_parameters_false_parameters_stored_plaintext(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
        # Verify that hidden parameters are stored in plain text
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
params = db_stack.raw_template.environment['parameters']
self.assertEqual('foo', params['param1'])
self.assertEqual('bar', params['param2'])
def test_parameters_stored_encrypted_decrypted_on_load(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
# Verify that hidden parameters are stored encrypted
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
        # Verify that the loaded stack has decrypted parameters
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
        # Test updating param2
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE,
'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update',
template.Template(tmpl, env=env2))
loaded_stack.update(new_stack)
self.assertEqual((loaded_stack.UPDATE, loaded_stack.COMPLETE),
loaded_stack.state)
db_tpl = db_api.raw_template_get(self.ctx, loaded_stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
loaded_stack1 = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack1.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('new_bar', params.get('param2'))
def test_parameters_created_encrypted_updated_decrypted(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
# Create the stack with encryption enabled
cfg.CONF.set_override('encrypt_parameters_and_properties', True)
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
self.stack.store()
# Update the stack with encryption disabled
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE,
'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
new_stack = stack.Stack(self.ctx, 'test_update',
template.Template(tmpl, env=env2))
self.assertEqual(['param2'], loaded_stack.env.encrypted_param_names)
# Without the fix for bug #1572294, loaded_stack.update() will
# blow up with "ValueError: too many values to unpack"
loaded_stack.update(new_stack)
self.assertEqual([], loaded_stack.env.encrypted_param_names)
def test_parameters_inconsistent_encrypted_param_names(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
warning_logger = self.useFixture(
fixtures.FakeLogger(level=logging.WARNING,
format="%(levelname)8s [%(name)s] "
"%(message)s"))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
self.stack.store()
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
loaded_stack.state_set(self.stack.CREATE, self.stack.COMPLETE,
'for_update')
env2 = environment.Environment({'param1': 'foo', 'param2': 'new_bar'})
# Put inconsistent encrypted_param_names data in the environment
env2.encrypted_param_names = ['param1']
new_stack = stack.Stack(self.ctx, 'test_update',
template.Template(tmpl, env=env2))
self.assertIsNone(loaded_stack.update(new_stack))
self.assertIn('Encountered already-decrypted data',
warning_logger.output)
def test_parameters_stored_decrypted_successful_load(self):
tmpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
description: value1.
param2:
type: string
description: value2.
hidden: true
resources:
a_resource:
type: GenericResourceType
''')
env1 = environment.Environment({'param1': 'foo', 'param2': 'bar'})
self.stack = stack.Stack(self.ctx, 'test',
template.Template(tmpl, env=env1))
cfg.CONF.set_override('encrypt_parameters_and_properties', False)
# Verify that hidden parameters are stored decrypted
self.stack.store()
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
self.assertEqual('bar', db_params['param2'])
# Verify that stack loads without error
loaded_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id)
params = loaded_stack.t.env.params
self.assertEqual('foo', params.get('param1'))
self.assertEqual('bar', params.get('param2'))
def test_event_dispatch(self):
env = environment.Environment()
evt = eventlet.event.Event()
sink = fakes.FakeEventSink(evt)
env.register_event_sink('dummy', lambda: sink)
env.load({"event_sinks": [{"type": "dummy"}]})
stk = stack.Stack(self.ctx, 'test',
template.Template(empty_template, env=env))
stk.thread_group_mgr = service.ThreadGroupManager()
self.addCleanup(stk.thread_group_mgr.stop, stk.id)
stk.store()
stk._add_event('CREATE', 'IN_PROGRESS', '')
evt.wait()
expected = [{
'id': mock.ANY,
'timestamp': mock.ANY,
'type': 'os.heat.event',
'version': '0.1',
'payload': {
'physical_resource_id': stk.id,
'resource_action': 'CREATE',
'resource_name': 'test',
'resource_properties': {},
'resource_status': 'IN_PROGRESS',
'resource_status_reason': '',
'resource_type':
'OS::Heat::Stack',
'stack_id': stk.id,
'version': '0.1'}}]
self.assertEqual(expected, sink.events)
@mock.patch.object(stack_object.Stack, 'delete')
@mock.patch.object(raw_template_object.RawTemplate, 'delete')
def test_mark_complete_create(self, mock_tmpl_delete, mock_stack_delete):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl, convergence=True)
tmpl_stack.store()
tmpl_stack.action = tmpl_stack.CREATE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.mark_complete()
self.assertEqual(tmpl_stack.prev_raw_template_id,
None)
self.assertFalse(mock_tmpl_delete.called)
self.assertFalse(mock_stack_delete.called)
self.assertEqual(tmpl_stack.status, tmpl_stack.COMPLETE)
@mock.patch.object(stack.Stack, 'purge_db')
def test_mark_complete_update(self, mock_purge_db):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
cfg.CONF.set_default('convergence_engine', True)
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl, convergence=True)
tmpl_stack.prev_raw_template_id = 1
tmpl_stack.action = tmpl_stack.UPDATE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.store()
tmpl_stack.mark_complete()
self.assertTrue(mock_purge_db.called)
@mock.patch.object(stack.Stack, 'purge_db')
def test_mark_complete_update_delete(self, mock_purge_db):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Description': 'Empty Template'
})
cfg.CONF.set_default('convergence_engine', True)
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl, convergence=True)
tmpl_stack.prev_raw_template_id = 1
tmpl_stack.action = tmpl_stack.DELETE
tmpl_stack.status = tmpl_stack.IN_PROGRESS
tmpl_stack.current_traversal = 'some-traversal'
tmpl_stack.store()
tmpl_stack.mark_complete()
self.assertTrue(mock_purge_db.called)
@mock.patch.object(stack.Stack, 'purge_db')
def test_mark_complete_stale_traversal(self, mock_purge_db):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
tmpl_stack = stack.Stack(self.ctx, 'test', tmpl)
tmpl_stack.store()
# emulate stale traversal
tmpl_stack.current_traversal = 'old-traversal'
tmpl_stack.mark_complete()
self.assertFalse(mock_purge_db.called)
@mock.patch.object(function, 'validate')
def test_validate_assertion_exception_rethrow(self, func_val):
expected_msg = 'Expected Assertion Error'
with mock.patch('heat.engine.stack.dependencies',
new_callable=mock.PropertyMock) as mock_dependencies:
mock_dependency = mock.MagicMock()
mock_dependency.name = 'res'
mock_dependency.external_id = None
mock_dependency.validate.side_effect = AssertionError(expected_msg)
mock_dependencies.Dependencies.return_value = [mock_dependency]
stc = stack.Stack(self.ctx, utils.random_name(), self.tmpl)
mock_res = mock.Mock()
mock_res.name = mock_dependency.name
mock_res.t = mock.Mock()
mock_res.t.name = mock_res.name
stc._resources = {mock_res.name: mock_res}
expected_exception = self.assertRaises(AssertionError,
stc.validate)
self.assertEqual(expected_msg, str(expected_exception))
mock_dependency.validate.assert_called_once_with()
tmpl = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Outputs:
foo:
Value: bar
""")
stc = stack.Stack(self.ctx, utils.random_name(),
template.Template(tmpl))
func_val.side_effect = AssertionError(expected_msg)
expected_exception = self.assertRaises(AssertionError, stc.validate)
self.assertEqual(expected_msg, str(expected_exception))
@mock.patch.object(update, 'StackUpdate')
def test_update_task_exception(self, mock_stack_update):
class RandomException(Exception):
pass
tmpl1 = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(), 'test_stack', tmpl1)
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
tmpl2 = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {'Type': 'GenericResourceType'}
}
})
updated_stack = stack.Stack(utils.dummy_context(), 'test_stack', tmpl2)
mock_stack_update.side_effect = RandomException()
self.assertRaises(RandomException, self.stack.update, updated_stack)
def update_exception_handler(self, exc, action=stack.Stack.UPDATE,
disable_rollback=False):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack',
tmpl,
disable_rollback=disable_rollback)
self.stack.store()
rb = self.stack._update_exception_handler(exc=exc, action=action)
return rb
def test_update_exception_handler_resource_failure_no_rollback(self):
reason = 'something strange happened'
exc = exception.ResourceFailure(reason, None, action='UPDATE')
rb = self.update_exception_handler(exc, disable_rollback=True)
self.assertFalse(rb)
def test_update_exception_handler_resource_failure_rollback(self):
reason = 'something strange happened'
exc = exception.ResourceFailure(reason, None, action='UPDATE')
rb = self.update_exception_handler(exc, disable_rollback=False)
self.assertTrue(rb)
def test_update_exception_handler_force_cancel_with_rollback(self):
exc = stack.ForcedCancel(with_rollback=True)
rb = self.update_exception_handler(exc, disable_rollback=False)
self.assertTrue(rb)
def test_update_exception_handler_force_cancel_with_rollback_off(self):
# stack-cancel-update from user *always* rolls back
exc = stack.ForcedCancel(with_rollback=True)
rb = self.update_exception_handler(exc, disable_rollback=True)
self.assertTrue(rb)
def test_update_exception_handler_force_cancel_nested(self):
exc = stack.ForcedCancel(with_rollback=False)
rb = self.update_exception_handler(exc, disable_rollback=True)
self.assertFalse(rb)
def test_store_generates_new_traversal_id_for_new_stack(self):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
self.assertIsNone(self.stack.current_traversal)
self.stack.store()
self.assertIsNotNone(self.stack.current_traversal)
@mock.patch.object(stack_object.Stack, 'select_and_update')
def test_store_uses_traversal_id_for_updating_db(self, mock_sau):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
mock_sau.return_value = True
self.stack.id = 1
self.stack.current_traversal = 1
stack_id = self.stack.store()
mock_sau.assert_called_once_with(mock.ANY, 1, mock.ANY, exp_trvsl=1)
self.assertEqual(1, stack_id)
# ensure store uses given expected traversal ID
stack_id = self.stack.store(exp_trvsl=2)
self.assertEqual(1, stack_id)
mock_sau.assert_called_with(mock.ANY, 1, mock.ANY, exp_trvsl=2)
@mock.patch.object(stack_object.Stack, 'select_and_update')
def test_store_db_update_failure(self, mock_sau):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
mock_sau.return_value = False
self.stack.id = 1
stack_id = self.stack.store()
self.assertIsNone(stack_id)
@mock.patch.object(stack_object.Stack, 'select_and_update')
def test_state_set_uses_curr_traversal_for_updating_db(self, mock_sau):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'}
}
})
self.stack = stack.Stack(utils.dummy_context(),
'test_stack', tmpl, convergence=True)
self.stack.id = 1
self.stack.current_traversal = 'curr-traversal'
self.stack.store()
self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
mock_sau.assert_called_once_with(mock.ANY, 1, mock.ANY,
exp_trvsl='curr-traversal')
class StackKwargsForCloningTest(common.HeatTestCase):
scenarios = [
('default', dict(keep_status=False, only_db=False, keep_tags=False,
not_included=['action', 'status', 'status_reason',
'tags'])),
('only_db', dict(keep_status=False, only_db=True, keep_tags=False,
not_included=['action', 'status', 'status_reason',
'strict_validate', 'tags'])),
('keep_status', dict(keep_status=True, only_db=False, keep_tags=False,
not_included=['tags'])),
('status_db', dict(keep_status=True, only_db=True, keep_tags=False,
not_included=['strict_validate', 'tags'])),
('keep_tags', dict(keep_status=False, only_db=False, keep_tags=True,
not_included=['action', 'status', 'status_reason']))
]
def test_kwargs(self):
tmpl = template.Template(copy.deepcopy(empty_template))
ctx = utils.dummy_context()
test_data = dict(action='x', status='y',
status_reason='z', timeout_mins=33,
disable_rollback=True, parent_resource='fred',
owner_id=32, stack_user_project_id=569,
user_creds_id=123, tenant_id='some-uuid',
username='jo', nested_depth=3,
strict_validate=True, convergence=False,
current_traversal=45,
tags=['tag1', 'tag2'])
db_map = {'parent_resource': 'parent_resource_name',
'tenant_id': 'tenant', 'timeout_mins': 'timeout'}
test_db_data = {}
for key in test_data:
dbkey = db_map.get(key, key)
test_db_data[dbkey] = test_data[key]
self.stack = stack.Stack(ctx, utils.random_name(), tmpl,
**test_data)
res = self.stack.get_kwargs_for_cloning(keep_status=self.keep_status,
only_db=self.only_db,
keep_tags=self.keep_tags)
for key in self.not_included:
self.assertNotIn(key, res)
for key in test_data:
if key not in self.not_included:
dbkey = db_map.get(key, key)
if self.only_db:
self.assertEqual(test_data[key], res[dbkey])
else:
self.assertEqual(test_data[key], res[key])
if not self.only_db:
# just make sure that the kwargs are valid
# (no exception should be raised)
stack.Stack(ctx, utils.random_name(), tmpl, **res)
class ResetStateOnErrorTest(common.HeatTestCase):
class DummyStack(object):
(COMPLETE, IN_PROGRESS, FAILED) = range(3)
action = 'something'
status = COMPLETE
def __init__(self):
self.mark_failed = mock.MagicMock()
self.convergence = False
@stack.reset_state_on_error
def raise_exception(self):
self.status = self.IN_PROGRESS
raise ValueError('oops')
@stack.reset_state_on_error
def raise_exit_exception(self):
self.status = self.IN_PROGRESS
raise BaseException('bye')
@stack.reset_state_on_error
def succeed(self):
return 'Hello world'
@stack.reset_state_on_error
def fail(self):
self.status = self.FAILED
return 'Hello world'
def test_success(self):
dummy = self.DummyStack()
self.assertEqual('Hello world', dummy.succeed())
self.assertFalse(dummy.mark_failed.called)
def test_failure(self):
dummy = self.DummyStack()
self.assertEqual('Hello world', dummy.fail())
self.assertFalse(dummy.mark_failed.called)
def test_reset_state_exception(self):
dummy = self.DummyStack()
exc = self.assertRaises(ValueError, dummy.raise_exception)
self.assertIn('oops', str(exc))
self.assertTrue(dummy.mark_failed.called)
def test_reset_state_exit_exception(self):
dummy = self.DummyStack()
exc = self.assertRaises(BaseException, dummy.raise_exit_exception)
self.assertIn('bye', str(exc))
self.assertTrue(dummy.mark_failed.called)
class StackStateSetTest(common.HeatTestCase):
scenarios = [
('in_progress', dict(action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS,
persist_count=1, error=False)),
('create_complete', dict(action=stack.Stack.CREATE,
status=stack.Stack.COMPLETE,
persist_count=0, error=False)),
('create_failed', dict(action=stack.Stack.CREATE,
status=stack.Stack.FAILED,
persist_count=0, error=False)),
('update_complete', dict(action=stack.Stack.UPDATE,
status=stack.Stack.COMPLETE,
persist_count=1, error=False)),
('update_failed', dict(action=stack.Stack.UPDATE,
status=stack.Stack.FAILED,
persist_count=1, error=False)),
('delete_complete', dict(action=stack.Stack.DELETE,
status=stack.Stack.COMPLETE,
persist_count=1, error=False)),
('delete_failed', dict(action=stack.Stack.DELETE,
status=stack.Stack.FAILED,
persist_count=1, error=False)),
('adopt_complete', dict(action=stack.Stack.ADOPT,
status=stack.Stack.COMPLETE,
persist_count=0, error=False)),
('adopt_failed', dict(action=stack.Stack.ADOPT,
status=stack.Stack.FAILED,
persist_count=0, error=False)),
('rollback_complete', dict(action=stack.Stack.ROLLBACK,
status=stack.Stack.COMPLETE,
persist_count=1, error=False)),
('rollback_failed', dict(action=stack.Stack.ROLLBACK,
status=stack.Stack.FAILED,
persist_count=1, error=False)),
('invalid_action', dict(action='action',
status=stack.Stack.FAILED,
persist_count=0, error=True)),
('invalid_status', dict(action=stack.Stack.CREATE,
status='status',
persist_count=0, error=True)),
]
def test_state(self):
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
self.stack = stack.Stack(self.ctx, 'test_stack', self.tmpl,
action=stack.Stack.CREATE,
status=stack.Stack.IN_PROGRESS)
persist_state = self.patchobject(self.stack, '_persist_state')
self.assertEqual((stack.Stack.CREATE, stack.Stack.IN_PROGRESS),
self.stack.state)
if self.error:
self.assertRaises(ValueError, self.stack.state_set,
self.action, self.status, 'test')
else:
self.stack.state_set(self.action, self.status, 'test')
self.assertEqual((self.action, self.status), self.stack.state)
self.assertEqual('test', self.stack.status_reason)
self.assertEqual(self.persist_count, persist_state.call_count)
| true
| true
|
790d10830720ef4112e2fe611db409a0fdb26ef7
| 6,272
|
py
|
Python
|
pysrc/papers/analysis/topics.py
|
JetBrains-Research/pubtrends
|
5352bec2cca3321f8554d8e60728fe6d8494edcb
|
[
"Apache-2.0"
] | 7
|
2022-01-10T15:48:31.000Z
|
2022-02-28T11:42:15.000Z
|
pysrc/papers/analysis/topics.py
|
JetBrains-Research/pubtrends
|
5352bec2cca3321f8554d8e60728fe6d8494edcb
|
[
"Apache-2.0"
] | 12
|
2021-11-04T17:21:10.000Z
|
2022-02-23T15:01:10.000Z
|
pysrc/papers/analysis/topics.py
|
JetBrains-Research/pubtrends
|
5352bec2cca3321f8554d8e60728fe6d8494edcb
|
[
"Apache-2.0"
] | null | null | null |
import logging
from collections import Counter
from itertools import chain
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from pysrc.papers.analysis.text import get_frequent_tokens
logger = logging.getLogger(__name__)
def compute_topics_similarity_matrix(papers_vectors, comps):
logger.debug('Computing mean similarity between topics embeddings')
n_comps = len(set(comps))
distances = pairwise_distances(papers_vectors)
similarity_matrix = np.zeros(shape=(n_comps, n_comps))
indx = {i: np.flatnonzero([c == i for c in comps]).tolist() for i in range(n_comps)}
for i in range(n_comps):
for j in range(i, n_comps):
mean_distance = np.mean(distances[indx[i], :][:, indx[j]])
similarity_matrix[i, j] = similarity_matrix[j, i] = 1 / (1 + mean_distance)
return similarity_matrix
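# Editor's illustrative sketch (not part of the original module): because similarity
# is defined as 1 / (1 + mean pairwise distance), tightly packed topics score close
# to 1 against themselves and well-separated topics score close to 0 against each
# other. A hypothetical toy check (names and data are illustrative only):
def _demo_topics_similarity():
    demo_vectors = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    demo_comps = [0, 0, 1, 1]
    # Diagonal (within-topic) entries should be much larger than off-diagonal ones.
    print(compute_topics_similarity_matrix(demo_vectors, demo_comps))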
def cluster_and_sort(x, max_clusters, min_cluster_size):
"""
:param x: object representations (X x Features)
:param max_clusters:
:param min_cluster_size:
:return: List[cluster], Hierarchical dendrogram of splits.
"""
    logger.debug('Looking for an appropriate number of clusters, '
f'min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')
if x.shape[1] == 0:
return [0] * x.shape[0], None
r = min(int(x.shape[0] / min_cluster_size), max_clusters) + 1
l = 1
if l >= r - 2:
return [0] * x.shape[0], None
prev_min_size = None
while l < r - 1:
n_clusters = int((l + r) / 2)
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)
clusters_counter = Counter(model.labels_)
min_size = clusters_counter.most_common()[-1][1]
logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, '
f'prev_min_size={prev_min_size}, min_size={min_size}')
if min_size < min_cluster_size:
if prev_min_size is not None and min_size <= prev_min_size:
break
r = n_clusters + 1
else:
l = n_clusters
prev_min_size = min_size
logger.debug(f'Number of clusters = {n_clusters}')
logger.debug(f'Min cluster size = {prev_min_size}')
logger.debug('Reorder clusters by size descending')
reorder_map = {c: i for i, (c, _) in enumerate(clusters_counter.most_common())}
return [reorder_map[c] for c in model.labels_], model.children_
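# Editor's illustrative sketch (not part of the original module): cluster_and_sort
# binary-searches the number of clusters, aiming to keep the smallest cluster no
# smaller than min_cluster_size, then relabels clusters by descending size. A
# hypothetical demo on two synthetic blobs (names and data are illustrative only):
def _demo_cluster_and_sort():
    rng = np.random.RandomState(0)
    demo_x = np.vstack([rng.normal(0.0, 0.1, (20, 2)), rng.normal(5.0, 0.1, (20, 2))])
    labels, dendrogram_children = cluster_and_sort(demo_x, max_clusters=10, min_cluster_size=5)
    # Label 0 is always the largest cluster because of the size-descending reordering.
    print(Counter(labels))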
def get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"""
    Get words from the abstracts that describe each component best,
    using the tokens whose frequency vector is closest to the 'ideal' vector [0, ..., 0, 1, 0, ..., 0] in terms of cosine distance
"""
logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')
    # Since some of the components may be skipped, use this dict for continuous indexes
comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
# In cases with less than 2 components, return frequencies
if len(comp_idx) < 2:
comp = list(comp_idx.keys())[0]
if ignore_comp is None:
most_frequent = get_frequent_tokens(chain(*chain(*corpus)))
return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words]}
else:
most_frequent = get_frequent_tokens(
chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))]))
)
return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words],
ignore_comp: []}
# Pass paper indices (for corpus_tokens and corpus_counts) instead of paper ids
comps_ids = {comp: list(np.flatnonzero(df['id'].isin(comp_pids))) for comp, comp_pids in comps.items()}
result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)
kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs])) for comp, vs in result.items()]
logger.debug('Description\n' + '\n'.join(f'{comp}: {kwd}' for comp, kwd in kwds))
return result
def _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"""
Select words with the frequency vector that is the closest to the 'ideal' frequency vector
    ([0, ..., 0, 1, 0, ..., 0]) in terms of cosine distance
"""
logger.debug('Compute average tokens counts per components')
# Since some of the components may be skipped, use this dict for continuous indexes
comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
    tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]), dtype=float)  # builtin float: the np.float alias is removed in NumPy >= 1.24
for comp, comp_ids in comps.items():
if comp != ignore_comp: # Not ignored
tokens_freqs_per_comp[comp_idx[comp], :] = \
np.sum(corpus_counts[comp_ids, :], axis=0)
# Calculate total number of occurrences for each word
tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)
# Normalize frequency vector for each word to have length of 1
tokens_freqs_norm = np.sqrt(np.diag(tokens_freqs_per_comp.T @ tokens_freqs_per_comp))
tokens_freqs_per_comp = tokens_freqs_per_comp / tokens_freqs_norm
logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')
# Calculate cosine distance between the frequency vector and [0, ..., 0, 1, 0, ..., 0] for each cluster
cluster_mask = np.eye(len(comp_idx))
distance = tokens_freqs_per_comp.T @ cluster_mask
# Add some weight for more frequent tokens to get rid of extremely rare ones in the top
adjusted_distance = distance.T * np.log(tokens_freqs_total)
result = {}
for comp in comps.keys():
if comp == ignore_comp:
result[comp] = [] # Ignored component
continue
c = comp_idx[comp] # Get the continuous index
cluster_tokens_idx = np.argsort(-adjusted_distance[c, :])[:n_words].tolist()
result[comp] = [(corpus_tokens[i], adjusted_distance[c, i]) for i in cluster_tokens_idx]
return result
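# Editor's illustrative sketch (not part of the original module): tokens whose counts
# concentrate in a single component have a frequency vector close to the 'ideal'
# one-hot vector for that component, so they rank first; the log(total frequency)
# weight keeps very rare tokens from dominating. A hypothetical toy example:
def _demo_topics_description_scoring():
    demo_counts = np.array([[5, 0, 3],
                            [0, 5, 3],
                            [4, 1, 3]])
    demo_comps = {0: [0, 2], 1: [1]}  # component -> row indices into demo_counts
    demo_tokens = ['alpha', 'beta', 'gamma']
    # 'alpha' should top component 0 and 'beta' should top component 1.
    print(_get_topics_description_cosine(demo_comps, demo_tokens, demo_counts, n_words=2))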
| 46.117647
| 118
| 0.67331
|
import logging
from collections import Counter
from itertools import chain
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from pysrc.papers.analysis.text import get_frequent_tokens
logger = logging.getLogger(__name__)
def compute_topics_similarity_matrix(papers_vectors, comps):
logger.debug('Computing mean similarity between topics embeddings')
n_comps = len(set(comps))
distances = pairwise_distances(papers_vectors)
similarity_matrix = np.zeros(shape=(n_comps, n_comps))
indx = {i: np.flatnonzero([c == i for c in comps]).tolist() for i in range(n_comps)}
for i in range(n_comps):
for j in range(i, n_comps):
mean_distance = np.mean(distances[indx[i], :][:, indx[j]])
similarity_matrix[i, j] = similarity_matrix[j, i] = 1 / (1 + mean_distance)
return similarity_matrix
def cluster_and_sort(x, max_clusters, min_cluster_size):
logger.debug('Looking for an appropriate number of clusters,'
f'min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')
if x.shape[1] == 0:
return [0] * x.shape[0], None
r = min(int(x.shape[0] / min_cluster_size), max_clusters) + 1
l = 1
if l >= r - 2:
return [0] * x.shape[0], None
prev_min_size = None
while l < r - 1:
n_clusters = int((l + r) / 2)
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)
clusters_counter = Counter(model.labels_)
min_size = clusters_counter.most_common()[-1][1]
logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, '
f'prev_min_size={prev_min_size}, min_size={min_size}')
if min_size < min_cluster_size:
if prev_min_size is not None and min_size <= prev_min_size:
break
r = n_clusters + 1
else:
l = n_clusters
prev_min_size = min_size
logger.debug(f'Number of clusters = {n_clusters}')
logger.debug(f'Min cluster size = {prev_min_size}')
logger.debug('Reorder clusters by size descending')
reorder_map = {c: i for i, (c, _) in enumerate(clusters_counter.most_common())}
return [reorder_map[c] for c in model.labels_], model.children_
def get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')
comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
# In cases with less than 2 components, return frequencies
if len(comp_idx) < 2:
comp = list(comp_idx.keys())[0]
if ignore_comp is None:
most_frequent = get_frequent_tokens(chain(*chain(*corpus)))
return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words]}
else:
most_frequent = get_frequent_tokens(
chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))]))
)
return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words],
ignore_comp: []}
# Pass paper indices (for corpus_tokens and corpus_counts) instead of paper ids
comps_ids = {comp: list(np.flatnonzero(df['id'].isin(comp_pids))) for comp, comp_pids in comps.items()}
result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)
kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs])) for comp, vs in result.items()]
logger.debug('Description\n' + '\n'.join(f'{comp}: {kwd}' for comp, kwd in kwds))
return result
def _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
logger.debug('Compute average tokens counts per components')
# Since some of the components may be skipped, use this dict for continuous indexes
comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
    tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]), dtype=float)
for comp, comp_ids in comps.items():
if comp != ignore_comp: # Not ignored
tokens_freqs_per_comp[comp_idx[comp], :] = \
np.sum(corpus_counts[comp_ids, :], axis=0)
# Calculate total number of occurrences for each word
tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)
# Normalize frequency vector for each word to have length of 1
tokens_freqs_norm = np.sqrt(np.diag(tokens_freqs_per_comp.T @ tokens_freqs_per_comp))
tokens_freqs_per_comp = tokens_freqs_per_comp / tokens_freqs_norm
logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')
# Calculate cosine distance between the frequency vector and [0, ..., 0, 1, 0, ..., 0] for each cluster
cluster_mask = np.eye(len(comp_idx))
distance = tokens_freqs_per_comp.T @ cluster_mask
# Add some weight for more frequent tokens to get rid of extremely rare ones in the top
adjusted_distance = distance.T * np.log(tokens_freqs_total)
result = {}
for comp in comps.keys():
if comp == ignore_comp:
result[comp] = [] # Ignored component
continue
c = comp_idx[comp] # Get the continuous index
cluster_tokens_idx = np.argsort(-adjusted_distance[c, :])[:n_words].tolist()
result[comp] = [(corpus_tokens[i], adjusted_distance[c, i]) for i in cluster_tokens_idx]
return result
| true
| true
|
790d108a0cc7c6f00486c7e67db972bd9001b06e
| 2,637
|
py
|
Python
|
Programs/day_11_blackjack.py
|
Yunram/python_training
|
be3fbab05511716757ecdacef827a16329a85e90
|
[
"Apache-2.0"
] | null | null | null |
Programs/day_11_blackjack.py
|
Yunram/python_training
|
be3fbab05511716757ecdacef827a16329a85e90
|
[
"Apache-2.0"
] | null | null | null |
Programs/day_11_blackjack.py
|
Yunram/python_training
|
be3fbab05511716757ecdacef827a16329a85e90
|
[
"Apache-2.0"
] | null | null | null |
from art import logo_blackjack
from replit import clear
import random
def deal_card():
"""Return random card"""
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
card = random.choice(cards)
return card
def calculate_score(cards):
"""Take a list of cards and return the score"""
if sum(cards) == 21 and len(cards) == 2:
return 0
if 11 in cards and sum(cards) > 21:
cards.remove(11)
cards.append(1)
return sum(cards)
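# Editor's illustrative sketch (not part of the original program): calculate_score
# treats 11 as an Ace and downgrades one Ace to 1 when the hand would otherwise bust,
# and returns 0 as a sentinel value for a natural blackjack (two cards summing to 21).
def _demo_calculate_score():
    print(calculate_score([11, 10]))    # 0  -> blackjack sentinel
    print(calculate_score([11, 9, 5]))  # 15 -> the Ace is counted as 1
    print(calculate_score([10, 9, 5]))  # 24 -> bust, no Ace to downgrade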
def compare(current_score_of_user, current_score_of_computer):
if current_score_of_user > 21 and current_score_of_computer > 21:
return "You went over. You lose"
if current_score_of_user == current_score_of_computer:
return "DRAW"
elif current_score_of_computer == 0:
return "You lose. Opponent has a blackjack"
elif current_score_of_user == 0:
return "You win with blackjack"
elif current_score_of_user > 21:
return "You went over. You lose"
elif current_score_of_computer > 21:
return "Opponent went over. You win"
elif current_score_of_user > current_score_of_computer:
return "You win"
else:
return "You lose"
def play_game():
print(logo_blackjack)
user_cards = []
computer_cards = []
is_game_over = False
for i in range(2):
user_cards.append(deal_card())
computer_cards.append(deal_card())
while not is_game_over:
current_score_of_user = calculate_score(user_cards)
current_score_of_computer = calculate_score(computer_cards)
print(f"Your cards: {user_cards} and current score of yours: {current_score_of_user}")
print(f"Computer's first card: [{computer_cards[0]}]")
if current_score_of_user == 0 or current_score_of_computer == 0 or current_score_of_user > 21:
is_game_over = True
else:
want_card = input("To get another card type 'y', to pass type 'n': ")
if want_card == "y":
user_cards.append(deal_card())
else:
is_game_over = True
while current_score_of_computer != 0 and current_score_of_computer < 17:
computer_cards.append(deal_card())
current_score_of_computer = calculate_score(computer_cards)
print(f"Your final hand: {user_cards} and final score: {current_score_of_user}")
print(f"Computer's final hand: {computer_cards}, final score: {current_score_of_computer}")
print(compare(current_score_of_user, current_score_of_computer))
while input("Do you want to play a game of blackjack? Type 'y' or 'n': ") == "y":
clear()
play_game()
| 36.123288
| 102
| 0.665908
|
from art import logo_blackjack
from replit import clear
import random
def deal_card():
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
card = random.choice(cards)
return card
def calculate_score(cards):
if sum(cards) == 21 and len(cards) == 2:
return 0
if 11 in cards and sum(cards) > 21:
cards.remove(11)
cards.append(1)
return sum(cards)
def compare(current_score_of_user, current_score_of_computer):
if current_score_of_user > 21 and current_score_of_computer > 21:
return "You went over. You lose"
if current_score_of_user == current_score_of_computer:
return "DRAW"
elif current_score_of_computer == 0:
return "You lose. Opponent has a blackjack"
elif current_score_of_user == 0:
return "You win with blackjack"
elif current_score_of_user > 21:
return "You went over. You lose"
elif current_score_of_computer > 21:
return "Opponent went over. You win"
elif current_score_of_user > current_score_of_computer:
return "You win"
else:
return "You lose"
def play_game():
print(logo_blackjack)
user_cards = []
computer_cards = []
is_game_over = False
for i in range(2):
user_cards.append(deal_card())
computer_cards.append(deal_card())
while not is_game_over:
current_score_of_user = calculate_score(user_cards)
current_score_of_computer = calculate_score(computer_cards)
print(f"Your cards: {user_cards} and current score of yours: {current_score_of_user}")
print(f"Computer's first card: [{computer_cards[0]}]")
if current_score_of_user == 0 or current_score_of_computer == 0 or current_score_of_user > 21:
is_game_over = True
else:
want_card = input("To get another card type 'y', to pass type 'n': ")
if want_card == "y":
user_cards.append(deal_card())
else:
is_game_over = True
while current_score_of_computer != 0 and current_score_of_computer < 17:
computer_cards.append(deal_card())
current_score_of_computer = calculate_score(computer_cards)
print(f"Your final hand: {user_cards} and final score: {current_score_of_user}")
print(f"Computer's final hand: {computer_cards}, final score: {current_score_of_computer}")
print(compare(current_score_of_user, current_score_of_computer))
while input("Do you want to play a game of blackjack? Type 'y' or 'n': ") == "y":
clear()
play_game()
| true
| true
|
790d12206b3ebd7f14bfba18d0ff708645d4e054
| 1,478
|
py
|
Python
|
Python/pythonLevel1/python0811_file.py
|
PomTTcat/pythonPersonTips
|
adae81832211791342bcd3638d1aaa24796afea0
|
[
"MIT"
] | null | null | null |
Python/pythonLevel1/python0811_file.py
|
PomTTcat/pythonPersonTips
|
adae81832211791342bcd3638d1aaa24796afea0
|
[
"MIT"
] | null | null | null |
Python/pythonLevel1/python0811_file.py
|
PomTTcat/pythonPersonTips
|
adae81832211791342bcd3638d1aaa24796afea0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
print '-------操作文件和目录-------'
# 操作系统名字
print os.name + '\n'
print '\n' + '详细的系统信息'
print os.uname()
print '\n' + '环境变量'
print os.environ
print '\n' + '获取某个环境变量的值'
print os.getenv('PATH')
print '\n'
# 查看当前目录的绝对路径:
print os.path.abspath('.')
selfAbsPath = os.path.abspath('.')
# 在某个目录下创建一个新目录,
# 首先把新目录的完整路径表示出来:
filePathDir = os.path.join(selfAbsPath, 'testdir')
# '/Users/michael/testdir'
# # 然后创建一个目录:
os.mkdir(filePathDir)
# # 删掉一个目录:
os.rmdir(filePathDir)
print '-------os.path.join()函数-------'
# 这样可以正确处理不同操作系统的路径分隔符
print '-------os.path.split() 直接让你得到文件扩展名-------'
print os.path.split('/Users/michael/testdir/file.txt')
# 对文件重命名:
# os.rename('test.txt', 'test.py')
# 删掉文件:
# os.remove('test.py')
print '-------shutil-------'
# shutil模块提供了copyfile()的函数,你还可以在shutil模块中找到很多实用函数,它们可以看做是os模块的补充。
# 当前目录下的所有目录
print[x for x in os.listdir('.') if os.path.isdir(x)]
# # 当前文件夹下所有python文件
# print [x for x in os.listdir('.') if os.path.isfile(x) and
# os.path.splitext(x)[1]=='.py']
# print os.listdir('.')
# print dir(os.path)
# 编写一个search(s)的函数,能在当前目录以及当前目录的所有子目录下查找文件名包含指定字符串的文件,并打印出完整路径:
def search(fileName):
currentPath = os.path.abspath('.')
for x in os.listdir('.'):
if os.path.isfile(x) and fileName in os.path.splitext(x)[0]:
print x
if os.path.isdir(x):
newP = os.path.join(currentPath, x)
print newP
print '-------search start-------'
search('0810')
| 18.948718
| 68
| 0.627876
|
import os
print '-------操作文件和目录-------'
print os.name + '\n'
print '\n' + '详细的系统信息'
print os.uname()
print '\n' + '环境变量'
print os.environ
print '\n' + '获取某个环境变量的值'
print os.getenv('PATH')
print '\n'
print os.path.abspath('.')
selfAbsPath = os.path.abspath('.')
filePathDir = os.path.join(selfAbsPath, 'testdir')
os.mkdir(filePathDir)
os.rmdir(filePathDir)
print '-------os.path.join()函数-------'
print '-------os.path.split() 直接让你得到文件扩展名-------'
print os.path.split('/Users/michael/testdir/file.txt')
print '-------shutil-------'
print[x for x in os.listdir('.') if os.path.isdir(x)]
def search(fileName):
currentPath = os.path.abspath('.')
for x in os.listdir('.'):
if os.path.isfile(x) and fileName in os.path.splitext(x)[0]:
print x
if os.path.isdir(x):
newP = os.path.join(currentPath, x)
print newP
print '-------search start-------'
search('0810')
| false
| true
|
790d12c7411153b288ed8c80e7ca8a275e6ea043
| 352
|
py
|
Python
|
db/backends/postgresql/base.py
|
felliott/SHARE
|
8fd60ff4749349c9b867f6188650d71f4f0a1a56
|
[
"Apache-2.0"
] | 1
|
2019-10-12T20:51:06.000Z
|
2019-10-12T20:51:06.000Z
|
db/backends/postgresql/base.py
|
felliott/SHARE
|
8fd60ff4749349c9b867f6188650d71f4f0a1a56
|
[
"Apache-2.0"
] | 21
|
2020-06-01T13:59:32.000Z
|
2021-08-01T06:20:29.000Z
|
db/backends/postgresql/base.py
|
aaxelb/SHARE
|
896e4f0c0e119436c0aaea364ea19389e7099d59
|
[
"Apache-2.0"
] | null | null | null |
from django.db.backends.postgresql.base import DatabaseWrapper as PostgresqlDatabaseWrapper
from db.backends.postgresql.creation import DatabaseCreation
from db.backends.postgresql.schema import DatabaseSchemaEditor
class DatabaseWrapper(PostgresqlDatabaseWrapper):
creation_class = DatabaseCreation
SchemaEditorClass = DatabaseSchemaEditor
| 35.2
| 91
| 0.863636
|
from django.db.backends.postgresql.base import DatabaseWrapper as PostgresqlDatabaseWrapper
from db.backends.postgresql.creation import DatabaseCreation
from db.backends.postgresql.schema import DatabaseSchemaEditor
class DatabaseWrapper(PostgresqlDatabaseWrapper):
creation_class = DatabaseCreation
SchemaEditorClass = DatabaseSchemaEditor
| true
| true
|
790d1336d03e7a6c5fd71d2681a02a2c8f297cef
| 15,919
|
py
|
Python
|
src/m3_more_nested_loops_in_sequences.py
|
dalesil/19-MoreLoopsWithinLoops
|
008f0a24f1420135632472641ac4eb3718046e0b
|
[
"MIT"
] | null | null | null |
src/m3_more_nested_loops_in_sequences.py
|
dalesil/19-MoreLoopsWithinLoops
|
008f0a24f1420135632472641ac4eb3718046e0b
|
[
"MIT"
] | null | null | null |
src/m3_more_nested_loops_in_sequences.py
|
dalesil/19-MoreLoopsWithinLoops
|
008f0a24f1420135632472641ac4eb3718046e0b
|
[
"MIT"
] | null | null | null |
"""
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of SEQUENCES OF SUB-SEQUENCES.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Lucas D'Alesio.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the other functions to test them. """
#run_test_largest_number()
#run_test_largest_negative_number()
run_test_first_is_elsewhere_too()
def run_test_largest_number():
""" Tests the largest_number function. """
# -------------------------------------------------------------------------
# DONE: 2. Implement this TEST function.
# It TESTS the largest_number function defined below.
# Include at least ** 1 ** ADDITIONAL test beyond those we wrote.
# -------------------------------------------------------------------------
print()
print('-------------------------------------')
print('Testing the LARGEST_NUMBER function:')
print('-------------------------------------')
# Test 1:
expected = 13
answer = largest_number([(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
# Test 2:
expected = -1111111111111111
answer = largest_number(([], [-1111111111111111], []))
print('Expected and actual are:', expected, answer)
# Test 3:
expected = None
answer = largest_number(([], [], []))
print('Expected and actual are:', expected, answer)
# DONE 2 (continued): Add your ADDITIONAL test(s) here:
    # Test 4:
expected = 13
answer = largest_number([(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
def largest_number(seq_seq):
"""
Returns the largest number in the subsequences of the given
sequence of sequences. Returns None if there are NO numbers
in the subsequences.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]]
then this function returns 13.
As another example, if the given argument is:
([], [-1111111111111111], [])
then this function returns -1111111111111111.
As yet another example, if the given argument is:
([], [], [])
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# -------------------------------------------------------------------------
x = None
for j in range (len(seq_seq)):
for k in range(len(seq_seq[j])):
x = j
y = k
for l in range(len(seq_seq)):
for o in range(len(seq_seq[l])):
if seq_seq[l][o] > seq_seq[x][y]:
x = l
y = o
    if x is None:
return None
return seq_seq[x][y]
def run_test_largest_negative_number():
""" Tests the largest_negative_number function. """
# -------------------------------------------------------------------------
# DONE: 4. Implement this TEST function.
# It TESTS the largest_negative_number function defined below.
#
# Include enough tests to give you confidence that your solution
# to this challenging problem is indeed correct.
# -------------------------------------------------------------------------
print()
print('-------------------------------------------------')
print('Testing the LARGEST_NEGATIVE_NUMBER function:')
print('-------------------------------------------------')
    # Test 1:
    expected = -13
    answer = largest_negative_number([(3, 1, 4),
                                      (-13, 10, 11, 7, 10),
                                      [1, 2, 3, 4]])
    print('Expected and actual are:', expected, answer)
    # Test 2:
    expected = -2
    answer = largest_negative_number(([-10], [-1111111111111111], [-2]))
    print('Expected and actual are:', expected, answer)
    # Test 3:
    expected = None
    answer = largest_negative_number(([], [], []))
    print('Expected and actual are:', expected, answer)
def largest_negative_number(seq_seq):
"""
Returns the largest NEGATIVE number in the given sequence of
sequences of numbers. Returns None if there are no negative numbers
in the sequence of sequences.
For example, if the given argument is:
[(30, -5, 8, -20),
(100, -2.6, 88, -40, -5),
(400, 500)
]
then this function returns -2.6.
As another example, if the given argument is:
[(200, 2, 20), (500, 400)]
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# CHALLENGE: Try to solve this problem with no additional sequences
# being constructed (so the SPACE allowed is limited to the
# give sequence of sequences plus any non-list variables you want).
# -------------------------------------------------------------------------
    largest = None
    for k in range(len(seq_seq)):
        for j in range(len(seq_seq[k])):
            number = seq_seq[k][j]
            if number < 0 and (largest is None or number > largest):
                largest = number
    return largest
def run_test_first_is_elsewhere_too():
""" Tests the first_is_elsewhere_too function. """
# -------------------------------------------------------------------------
# We have supplied tests for you. No additional tests are required,
# although you are welcome to supply more tests if you choose.
# -------------------------------------------------------------------------
print()
print('-------------------------------------')
print('Testing the FIRST_IS_ELSEWHERE_TOO function:')
print('-------------------------------------')
# FYI: The notation below constructs what is called a DICTIONARY.
# It is like a list, but the indices can be any immutable
# objects (here, True or False), not just 0, 1, 2, ... as in lists.
message = {True: 'Your code PASSED this test.\n',
False: 'Your code FAILED this test.\n'}
no_failures = True
# Test 1:
expected = True
answer = first_is_elsewhere_too([(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 2:
expected = False
answer = first_is_elsewhere_too([(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 3:
expected = False
answer = first_is_elsewhere_too([[], [1, 2], [1, 2]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 4:
expected = True
answer = first_is_elsewhere_too([('a', 9),
(13, 10, 11, 7, 'a'),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected]) # Test 1:
no_failures = no_failures and (answer == expected)
# Test 5:
expected = False
answer = first_is_elsewhere_too([('a', 9),
(13, 10, 11, 7, 'aa'),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 6:
expected = False
answer = first_is_elsewhere_too([('a', 'a', 'b', 'b', 'a', 'b')])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 7:
expected = False
answer = first_is_elsewhere_too([()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 8:
expected = True
answer = first_is_elsewhere_too([('a'), (), (), (), ('a')])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 9:
expected = True
answer = first_is_elsewhere_too([('a'), (), (), (), ('a'), ()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 10:
expected = False
answer = first_is_elsewhere_too([('a'), (), (), (), ('b'), ()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 11:
expected = True
answer = first_is_elsewhere_too(['hello', 'goodbye'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 12:
expected = False
answer = first_is_elsewhere_too(['hello', 'xxxxxxxxxxx'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 13:
expected = False
answer = first_is_elsewhere_too(['1234567890',
'one two three',
'i am free',
'four five six',
'get my sticks',
'seven eight nine',
'i am fine'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 14:
expected = True
answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
(800 * 'c') + 'd' + 1200 * 'c',
'b'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 15:
expected = True
answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
(800 * 'c') + 'd' + 1200 * 'c',
(700 * 'eee') + 'b' + (90 * 'd'),
(800 * 'c') + 'd' + 1200 * 'c'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 16:
expected = True
answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
(800 * '1') + '234a',
'eeee'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 17:
expected = True
answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
'a' + (800 * '1') + '234',
'123'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 18:
test1 = [(1000 * 'b') + 'acd' + (500 * 'f'),
(800 * '1') + '234',
'123']
for k in range(95):
test1.append(k * chr(k))
test2 = []
for k in range(30):
test2.append(k * chr(k))
expected = True
answer = first_is_elsewhere_too(test1 + ['a'] + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 19 (continues test 18):
expected = False
answer = first_is_elsewhere_too(test1 + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
# Test 20 (continues test 18):
expected = True
a_inside = (100 * 'b') + 'a' + (100 * 'b')
answer = first_is_elsewhere_too(test1 + [a_inside] + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
if no_failures:
print('*** Your code PASSED all')
else:
print('!!! Your code FAILED some')
print(' of the tests for first_is_elsewhere_too')
def first_is_elsewhere_too(seq_seq):
"""
Given a sequence of subsequences:
-- Returns True if any element of the first (initial) subsequence
appears in any of the other subsequences.
-- Returns False otherwise.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]]
then this function returns True because 3 appears
in the first subsequence and also in the third subsequence.
As another example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]]
then this function returns False because 3 does not appear in
any subsequence except the first, 1 does not appear in any
subsequence except the first, and 4 does not appear in any
subsequence except the first.
As yet another example, if the given argument is:
([], [1, 2], [1, 2])
then this function returns False since no element of the first
subsequence appears elsewhere.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences.
"""
# -------------------------------------------------------------------------
# DONE: 6. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use anything but comparison (==) in judging
# membership. In particular, you may NOT use:
# -- the IN operator
# (example: 7 in [9, 6, 7, 9] returns True)
# -- the COUNT method
# (example: [9, 6, 7, 9].count(9) returns 2)
# -- the INDEX method
# (example: [9, 6, 7, 9, 6, 1].index(6) returns 1)
# in this problem, as doing so would defeat the goal of providing
# practice at loops within loops (within loops within ...)
# -------------------------------------------------------------------------
for j in range(len(seq_seq[0])):
for k in range(1, len(seq_seq)):
for i in range(len(seq_seq[k])):
if seq_seq[k][i] == seq_seq[0][j]:
return True
return False
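# Editor's note (illustrative only, not part of the exercise solution): without the
# restriction that forbids IN / count / index, the same membership check could be
# expressed with a set. This hypothetical sketch is for comparison only; the graded
# solution above must keep using plain == comparisons inside nested loops.
def _first_is_elsewhere_too_with_sets(seq_seq):
    elsewhere = set()
    for subsequence in seq_seq[1:]:
        for element in subsequence:
            elsewhere.add(element)
    for element in seq_seq[0]:
        if element in elsewhere:
            return True
    return False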
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 36.935035
| 79
| 0.519002
|
def main():
run_test_first_is_elsewhere_too()
def run_test_largest_number():
print()
print('-------------------------------------')
print('Testing the LARGEST_NUMBER function:')
print('-------------------------------------')
expected = 13
answer = largest_number([(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
expected = -1111111111111111
answer = largest_number(([], [-1111111111111111], []))
print('Expected and actual are:', expected, answer)
expected = None
answer = largest_number(([], [], []))
print('Expected and actual are:', expected, answer)
expected = 13
answer = largest_number([(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
def largest_number(seq_seq):
x = None
for j in range (len(seq_seq)):
for k in range(len(seq_seq[j])):
x = j
y = k
for l in range(len(seq_seq)):
for o in range(len(seq_seq[l])):
if seq_seq[l][o] > seq_seq[x][y]:
x = l
y = o
    if x is None:
return None
return seq_seq[x][y]
def run_test_largest_negative_number():
print()
print('-------------------------------------------------')
print('Testing the LARGEST_NEGATIVE_NUMBER function:')
print('-------------------------------------------------')
    expected = -13
    answer = largest_negative_number([(3, 1, 4),
                                      (-13, 10, 11, 7, 10),
                                      [1, 2, 3, 4]])
    print('Expected and actual are:', expected, answer)
    expected = -2
    answer = largest_negative_number(([-10], [-1111111111111111], [-2]))
    print('Expected and actual are:', expected, answer)
    expected = None
    answer = largest_negative_number(([], [], []))
    print('Expected and actual are:', expected, answer)
def largest_negative_number(seq_seq):
    largest = None
    for k in range(len(seq_seq)):
        for j in range(len(seq_seq[k])):
            number = seq_seq[k][j]
            if number < 0 and (largest is None or number > largest):
                largest = number
    return largest
def run_test_first_is_elsewhere_too():
print()
print('-------------------------------------')
print('Testing the FIRST_IS_ELSEWHERE_TOO function:')
print('-------------------------------------')
message = {True: 'Your code PASSED this test.\n',
False: 'Your code FAILED this test.\n'}
no_failures = True
expected = True
answer = first_is_elsewhere_too([(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too([(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too([[], [1, 2], [1, 2]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([('a', 9),
(13, 10, 11, 7, 'a'),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too([('a', 9),
(13, 10, 11, 7, 'aa'),
[11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too([('a', 'a', 'b', 'b', 'a', 'b')])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too([()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([('a'), (), (), (), ('a')])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([('a'), (), (), (), ('a'), ()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too([('a'), (), (), (), ('b'), ()])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too(['hello', 'goodbye'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too(['hello', 'xxxxxxxxxxx'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too(['1234567890',
'one two three',
'i am free',
'four five six',
'get my sticks',
'seven eight nine',
'i am fine'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
(800 * 'c') + 'd' + 1200 * 'c',
'b'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
(800 * 'c') + 'd' + 1200 * 'c',
(700 * 'eee') + 'b' + (90 * 'd'),
(800 * 'c') + 'd' + 1200 * 'c'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
(800 * '1') + '234a',
'eeee'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
'a' + (800 * '1') + '234',
'123'])
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
test1 = [(1000 * 'b') + 'acd' + (500 * 'f'),
(800 * '1') + '234',
'123']
for k in range(95):
test1.append(k * chr(k))
test2 = []
for k in range(30):
test2.append(k * chr(k))
expected = True
answer = first_is_elsewhere_too(test1 + ['a'] + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = False
answer = first_is_elsewhere_too(test1 + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
expected = True
a_inside = (100 * 'b') + 'a' + (100 * 'b')
answer = first_is_elsewhere_too(test1 + [a_inside] + test2)
print('Expected and actual are:', expected, answer)
print(message[answer == expected])
no_failures = no_failures and (answer == expected)
if no_failures:
print('*** Your code PASSED all')
else:
print('!!! Your code FAILED some')
print(' of the tests for first_is_elsewhere_too')
def first_is_elsewhere_too(seq_seq):
for j in range(len(seq_seq[0])):
for k in range(1, len(seq_seq)):
for i in range(len(seq_seq[k])):
if seq_seq[k][i] == seq_seq[0][j]:
return True
return False
main()
| true
| true
|
790d1502e864285b9fca52303a5657729be5e026
| 4,462
|
py
|
Python
|
tools/data/textdet/funsd_converter.py
|
nuveo/mmocr
|
f134421c628b87b03bd36f564626225ee6af966b
|
[
"Apache-2.0"
] | 1
|
2022-03-02T14:34:53.000Z
|
2022-03-02T14:34:53.000Z
|
tools/data/textdet/funsd_converter.py
|
nuveo/mmocr
|
f134421c628b87b03bd36f564626225ee6af966b
|
[
"Apache-2.0"
] | null | null | null |
tools/data/textdet/funsd_converter.py
|
nuveo/mmocr
|
f134421c628b87b03bd36f564626225ee6af966b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
import mmcv
from mmocr.utils import convert_annotations
def collect_files(img_dir, gt_dir):
"""Collect all images and their corresponding groundtruth files.
Args:
img_dir (str): The image directory
gt_dir (str): The groundtruth directory
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
"""
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
ann_list, imgs_list = [], []
for gt_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, gt_file))
imgs_list.append(osp.join(img_dir, gt_file.replace('.json', '.png')))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
"""Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
"""
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
"""Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
"""
assert isinstance(files, tuple)
img_file, gt_file = files
assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split(
'.')[0]
# read imgs while ignoring orientations
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
if osp.splitext(gt_file)[1] == '.json':
img_info = load_json_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_json_info(gt_file, img_info):
"""Collect the annotation information.
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
"""
annotation = mmcv.load(gt_file)
anno_info = []
for form in annotation['form']:
for ann in form['words']:
iscrowd = 1 if len(ann['text']) == 0 else 0
x1, y1, x2, y2 = ann['box']
x = max(0, min(math.floor(x1), math.floor(x2)))
y = max(0, min(math.floor(y1), math.floor(y2)))
w, h = math.ceil(abs(x2 - x1)), math.ceil(abs(y2 - y1))
bbox = [x, y, w, h]
segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]
anno = dict(
iscrowd=iscrowd,
category_id=1,
bbox=bbox,
area=w * h,
segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
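# Editor's illustrative sketch (not part of the converter): load_json_info turns each
# FUNSD word box [x1, y1, x2, y2] into a COCO-style bbox [x, y, w, h] plus a 4-point
# rectangular segmentation polygon. A hypothetical standalone version of that
# conversion, handy for spot-checking a single box:
def _demo_box_to_coco(box):
    x1, y1, x2, y2 = box
    x = max(0, min(math.floor(x1), math.floor(x2)))
    y = max(0, min(math.floor(y1), math.floor(y2)))
    w, h = math.ceil(abs(x2 - x1)), math.ceil(abs(y2 - y1))
    return [x, y, w, h], [x, y, x + w, y, x + w, y + h, x, y + h]
# Example: _demo_box_to_coco([10.2, 20.8, 30.1, 40.0])
# -> ([10, 20, 20, 20], [10, 20, 30, 20, 30, 40, 10, 40])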
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and test set of FUNSD ')
parser.add_argument('root_path', help='Root dir path of FUNSD')
parser.add_argument(
'--nproc', default=1, type=int, help='Number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
root_path = args.root_path
for split in ['training', 'test']:
print(f'Processing {split} set...')
with mmcv.Timer(print_tmpl='It takes {}s to convert FUNSD annotation'):
files = collect_files(
osp.join(root_path, 'imgs'),
osp.join(root_path, 'annotations', split))
image_infos = collect_annotations(files, nproc=args.nproc)
convert_annotations(
image_infos, osp.join(root_path,
'instances_' + split + '.json'))
if __name__ == '__main__':
main()
| 28.240506
| 79
| 0.613178
|
import argparse
import math
import os
import os.path as osp
import mmcv
from mmocr.utils import convert_annotations
def collect_files(img_dir, gt_dir):
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
ann_list, imgs_list = [], []
for gt_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, gt_file))
imgs_list.append(osp.join(img_dir, gt_file.replace('.json', '.png')))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
assert isinstance(files, tuple)
img_file, gt_file = files
assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split(
'.')[0]
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
if osp.splitext(gt_file)[1] == '.json':
img_info = load_json_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_json_info(gt_file, img_info):
annotation = mmcv.load(gt_file)
anno_info = []
for form in annotation['form']:
for ann in form['words']:
iscrowd = 1 if len(ann['text']) == 0 else 0
x1, y1, x2, y2 = ann['box']
x = max(0, min(math.floor(x1), math.floor(x2)))
y = max(0, min(math.floor(y1), math.floor(y2)))
w, h = math.ceil(abs(x2 - x1)), math.ceil(abs(y2 - y1))
bbox = [x, y, w, h]
segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]
anno = dict(
iscrowd=iscrowd,
category_id=1,
bbox=bbox,
area=w * h,
segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and test set of FUNSD ')
parser.add_argument('root_path', help='Root dir path of FUNSD')
parser.add_argument(
'--nproc', default=1, type=int, help='Number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
root_path = args.root_path
for split in ['training', 'test']:
print(f'Processing {split} set...')
with mmcv.Timer(print_tmpl='It takes {}s to convert FUNSD annotation'):
files = collect_files(
osp.join(root_path, 'imgs'),
osp.join(root_path, 'annotations', split))
image_infos = collect_annotations(files, nproc=args.nproc)
convert_annotations(
image_infos, osp.join(root_path,
'instances_' + split + '.json'))
if __name__ == '__main__':
main()
| true
| true
|
790d1557204d00d143353268325cbc0450d35ffd
| 388
|
py
|
Python
|
nkdsu/apps/vote/migrations/0003_track_metadata_locked.py
|
theshillito/nkd.su
|
9d1166454dd909c755206e27a35c51391a12c588
|
[
"BSD-3-Clause"
] | 1
|
2015-09-16T19:27:14.000Z
|
2015-09-16T19:27:14.000Z
|
nkdsu/apps/vote/migrations/0003_track_metadata_locked.py
|
theshillito/nkd.su
|
9d1166454dd909c755206e27a35c51391a12c588
|
[
"BSD-3-Clause"
] | 55
|
2015-02-28T21:47:57.000Z
|
2020-06-11T14:48:54.000Z
|
nkdsu/apps/vote/migrations/0003_track_metadata_locked.py
|
theshillito/nkd.su
|
9d1166454dd909c755206e27a35c51391a12c588
|
[
"BSD-3-Clause"
] | 1
|
2017-12-16T20:56:49.000Z
|
2017-12-16T20:56:49.000Z
|
# Generated by Django 2.2.12 on 2020-07-05 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vote', '0002_request_track'),
]
operations = [
migrations.AddField(
model_name='track',
name='metadata_locked',
field=models.BooleanField(default=False),
),
]
| 20.421053
| 53
| 0.600515
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vote', '0002_request_track'),
]
operations = [
migrations.AddField(
model_name='track',
name='metadata_locked',
field=models.BooleanField(default=False),
),
]
| true
| true
|
790d155c083336d246ea68e1ea36ef42ffc98b10
| 3,731
|
py
|
Python
|
test/IECore/Turbulence.py
|
gcodebackups/cortex-vfx
|
72fa6c6eb3327fce4faf01361c8fcc2e1e892672
|
[
"BSD-3-Clause"
] | 5
|
2016-07-26T06:09:28.000Z
|
2022-03-07T03:58:51.000Z
|
test/IECore/Turbulence.py
|
turbosun/cortex
|
4bdc01a692652cd562f3bfa85f3dae99d07c0b15
|
[
"BSD-3-Clause"
] | null | null | null |
test/IECore/Turbulence.py
|
turbosun/cortex
|
4bdc01a692652cd562f3bfa85f3dae99d07c0b15
|
[
"BSD-3-Clause"
] | 3
|
2015-03-25T18:45:24.000Z
|
2020-02-15T15:37:18.000Z
|
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
class TestTurbulence( unittest.TestCase ) :
def testConstructors( self ) :
t = IECore.TurbulenceV2ff()
self.assertEqual( t.octaves, 4 )
self.assertEqual( t.gain, 0.5 )
self.assertEqual( t.lacunarity, 2 )
self.assertEqual( t.turbulent, True )
t = IECore.TurbulenceV2ff( 2, 1, 3, False )
self.assertEqual( t.octaves, 2 )
self.assertEqual( t.gain, 1 )
self.assertEqual( t.lacunarity, 3 )
self.assertEqual( t.turbulent, False )
t = IECore.TurbulenceV2ff(
octaves = 3,
gain = 1.4,
lacunarity = 3,
turbulent = False
)
self.assertEqual( t.octaves, 3 )
self.assertAlmostEqual( t.gain, 1.4 )
self.assertEqual( t.lacunarity, 3 )
self.assertEqual( t.turbulent, False )
def test2d( self ) :
t = IECore.TurbulenceV2ff(
octaves = 4,
gain = 0.35,
lacunarity = 2,
turbulent = False
)
width = 400
height = 400
f = IECore.FloatVectorData( width * height )
o = 0
for i in range( 0, height ) :
for j in range( 0, width ) :
f[o] = 0.5 + t.turbulence( IECore.V2f( i/50.0, j/50.0 ) )
o += 1
b = IECore.Box2i( IECore.V2i( 0, 0 ), IECore.V2i( width-1, height-1 ) )
i = IECore.ImagePrimitive( b, b )
i["r"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f )
i["g"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f )
i["b"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, f )
e = IECore.Reader.create( "test/IECore/data/expectedResults/turbulence2d.exr" ).read()
op = IECore.ImageDiffOp()
res = op(
imageA = i,
imageB = e,
maxError = 0.0005
)
self.failIf( res.value )
def testNaN( self ) :
t = IECore.TurbulenceV2ff(
octaves = 28,
gain = 0.35,
lacunarity = 2,
turbulent = True
)
f = t.turbulence( IECore.V2f( 21.3, 51.2 ) )
self.assert_( f == f )
if __name__ == "__main__":
unittest.main()
| 31.352941
| 88
| 0.670598
| true
| true
|
|
790d15997c402709a16402938cdb7e7c649a1bff
| 3,628
|
py
|
Python
|
python/GafferUI/WidgetAlgo.py
|
mattigruener/gaffer
|
8216ba1a884712575a0acae747c51b02f7a99a5d
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/WidgetAlgo.py
|
mattigruener/gaffer
|
8216ba1a884712575a0acae747c51b02f7a99a5d
|
[
"BSD-3-Clause"
] | 2
|
2017-08-23T21:35:45.000Z
|
2018-01-29T08:59:33.000Z
|
python/GafferUI/WidgetAlgo.py
|
mattigruener/gaffer
|
8216ba1a884712575a0acae747c51b02f7a99a5d
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import GafferUI
import Qt
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
def joinEdges( listContainer ) :
if listContainer.orientation() == listContainer.Orientation.Horizontal :
lowProperty = "gafferFlatLeft"
highProperty = "gafferFlatRight"
else :
lowProperty = "gafferFlatTop"
highProperty = "gafferFlatBottom"
visibleWidgets = [ w for w in listContainer if w.getVisible() ]
l = len( visibleWidgets )
for i in range( 0, l ) :
visibleWidgets[i]._qtWidget().setProperty( lowProperty, i > 0 )
visibleWidgets[i]._qtWidget().setProperty( highProperty, i < l - 1 )
def grab( widget, imagePath ) :
GafferUI.EventLoop.waitForIdle()
imageDir = os.path.dirname( imagePath )
if imageDir and not os.path.isdir( imageDir ) :
os.makedirs( imageDir )
if Qt.__binding__ in ( "PySide2", "PyQt5" ) :
# Qt 5
screen = QtWidgets.QApplication.primaryScreen()
windowHandle = widget._qtWidget().windowHandle()
if windowHandle :
screen = windowHandle.screen()
pixmap = screen.grabWindow( long( widget._qtWidget().winId() ) )
if sys.platform == "darwin" and pixmap.size() == screen.size() * screen.devicePixelRatio() :
# A bug means that the entire screen will have been captured,
# not just the widget we requested. Copy out just the widget.
topLeft = widget._qtWidget().mapToGlobal( QtCore.QPoint( 0, 0 ) )
bottomRight = widget._qtWidget().mapToGlobal( QtCore.QPoint( widget._qtWidget().width(), widget._qtWidget().height() ) )
size = bottomRight - topLeft
pixmap = pixmap.copy(
QtCore.QRect(
topLeft * screen.devicePixelRatio(),
QtCore.QSize( size.x(), size.y() ) * screen.devicePixelRatio()
)
)
else :
# Qt 4
pixmap = QtGui.QPixmap.grabWindow( long( widget._qtWidget().winId() ) )
pixmap.save( imagePath )
| 37.402062
| 123
| 0.693219
| true
| true
|
|
790d16644a299f1fcc3deed64fe02f419411ed00
| 621
|
py
|
Python
|
jaseci_core/jaseci/attr/item.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 6
|
2021-10-30T03:35:36.000Z
|
2022-02-10T02:06:18.000Z
|
jaseci_core/jaseci/attr/item.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 85
|
2021-10-29T22:47:39.000Z
|
2022-03-31T06:11:52.000Z
|
jaseci_core/jaseci/attr/item.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 12
|
2021-11-03T17:29:22.000Z
|
2022-03-30T16:01:53.000Z
|
"""
Item class for Jaseci
Each item has an id, name, timestamp.
"""
from jaseci.element.element import element
class item(element):
"""Item class for Jaseci"""
def __init__(self, value=None, *args, **kwargs):
self.item_value = value
super().__init__(*args, **kwargs)
@property
def value(self):
return self.item_value
@value.setter
def value(self, val):
self.item_value = val
self.save()
def __str__(self):
if self.value:
return super().__str__() + f":{self.value}"
else:
return super().__str__() + ":None"
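# Hypothetical usage sketch (assumes a configured jaseci backend so that save() can persist):
#   it = item(value=42)
#   it.value = 43      # the setter also persists the change via self.save()
#   print(str(it))     # element repr suffixed with ":43"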
| 20.7
| 55
| 0.584541
|
from jaseci.element.element import element
class item(element):
def __init__(self, value=None, *args, **kwargs):
self.item_value = value
super().__init__(*args, **kwargs)
@property
def value(self):
return self.item_value
@value.setter
def value(self, val):
self.item_value = val
self.save()
def __str__(self):
if self.value:
return super().__str__() + f":{self.value}"
else:
return super().__str__() + ":None"
| true
| true
|
790d16ec2e8cb18374ad78b563151c1228056379
| 233
|
py
|
Python
|
modules/process.py
|
Steve132/loquis
|
49c9efcdcd8e29ceec662e11cb89d7e00db7d1d7
|
[
"MIT"
] | null | null | null |
modules/process.py
|
Steve132/loquis
|
49c9efcdcd8e29ceec662e11cb89d7e00db7d1d7
|
[
"MIT"
] | null | null | null |
modules/process.py
|
Steve132/loquis
|
49c9efcdcd8e29ceec662e11cb89d7e00db7d1d7
|
[
"MIT"
] | null | null | null |
import loquis
import subprocess
@loquis.command
def run(query,*args):
try:
L=[query.lower()]+list(args)
print(L)
return [subprocess.check_output(L)]
except:
return ["Failed to run command"]
languages={'en':{'run':run}}
| 15.533333
| 37
| 0.686695
|
import loquis
import subprocess
@loquis.command
def run(query,*args):
try:
L=[query.lower()]+list(args)
print(L)
return [subprocess.check_output(L)]
except:
return ["Failed to run command"]
languages={'en':{'run':run}}
| true
| true
|
790d16f5b63406cdab58504717a604eae8b2e149
| 4,919
|
py
|
Python
|
seisflows/tools/graphics.py
|
fanwu8/sf
|
8ce5671a3f8c2e8f3425aabc373fc58954f5bdbf
|
[
"BSD-2-Clause"
] | 1
|
2021-09-17T18:25:55.000Z
|
2021-09-17T18:25:55.000Z
|
seisflows/tools/graphics.py
|
fanwu8/sf
|
8ce5671a3f8c2e8f3425aabc373fc58954f5bdbf
|
[
"BSD-2-Clause"
] | null | null | null |
seisflows/tools/graphics.py
|
fanwu8/sf
|
8ce5671a3f8c2e8f3425aabc373fc58954f5bdbf
|
[
"BSD-2-Clause"
] | 1
|
2019-06-27T19:16:30.000Z
|
2019-06-27T19:16:30.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from obspy.core.stream import Stream
def plot_gll(x, y, z):
""" Plots values on 2D unstructured GLL mesh
"""
r = (max(x) - min(x))/(max(y) - min(y))
rx = r/np.sqrt(1 + r**2)
ry = 1/np.sqrt(1 + r**2)
f = plt.figure(figsize=(10*rx, 10*ry))
p = plt.tricontourf(x, y, z, 125)
plt.axis('image')
return f, p
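# Hypothetical usage sketch (x, y, z are flat arrays of GLL node coordinates and values):
#   f, p = plot_gll(x, y, z)
#   plt.colorbar(p)
#   plt.show()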
def plot_vector(t, v, xlabel='', ylabel='', title=''):
""" Plots a vector or time series.
Parameters
----------
    t: ndarray
        Sample points (e.g. time values) for the horizontal axis
    v: ndarray, ndims = 1/2
        Vector or time series to plot
xlabel: str
x axis label
ylabel: str
y axis label
title: str
plot title
Raises
------
ValueError
If dimensions of v are greater than 2
"""
# check input dimension
if v.ndim > 2:
raise ValueError('v must be a vector or a time series')
if v.ndim == 1:
x = list(range(len(v)))
y = v
else:
x = v[:, 0]
y = v[:, 1]
# plot
plt.plot(t, v)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
def plot_section(stream, ax=None, cmap='seismic', clip=100, title='', x_interval=1.0, y_interval=1.0):
""" Plots a seismic section from an obspy stream.
Parameters
----------
stream: Obspy stream object
Obspy stream object created from a SU data file
ax: Matplotlib Axes object
Optional axis object
cmap: str
Matplotlib colormap option.
clip: float
Percentage value (0-100) for amplitude clipping
title: str
plot title
x_interval: float
Offset axis tick interval in km
y_interval: float
        Time axis tick interval in s
Raises
------
NotImplementedError
If stream object does not have SU format
"""
# check format of stream
if stream[0].stats._format != 'SU':
        raise NotImplementedError('plot_section currently only supports streams for SU data files.')
# get dimensions
nr = len(stream)
nt = len(stream[0].data)
dt = stream[0].stats.delta
d_aspect = nr / float(nt)
# convert stream to image array
data = _convert_to_array(stream)
# default values
fsize = 6
scale_factor = 1.5
if ax is None:
fig, ax = plt.subplots(figsize=(fsize, scale_factor*fsize))
im = ax.imshow(data, aspect=scale_factor*d_aspect, clim=_cscale(data, clip=clip))
im.set_cmap(cmap)
# labels
ax.set_title(title)
ax.set_xlabel('Offset [km]')
ax.set_ylabel('Time [s]')
#set ticks
t = _get_time(stream)
yticks, ytick_labels = get_regular_ticks(t, y_interval)
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
offsets =_get_offsets(stream)
xticks, xtick_labels = get_regular_ticks(offsets, x_interval)
ax.set_xticks(xticks)
ax.set_xticklabels(xtick_labels)
return ax
def _convert_to_array(stream):
""" Extracts trace data from an obspy stream and returns a 2D array.
Parameters
----------
stream: Obspy stream object
Stream storing trace data
Returns
-------
output: ndarray, ndim=2
Returns an (nt*nr) array. nt and nr are the number of sample points
and number of traces respectively. Assumes trace lengths are equal
for all traces.
Raises
------
TypeError
If stream is not an obspy stream
"""
if not isinstance(stream, Stream):
raise TypeError('Input object should be an obspy stream.')
nt = len(stream.traces[0].data)
nr = len(stream)
output = np.zeros((nt, nr))
for i, trace in enumerate(stream):
output[:, i] = trace.data[:]
return output
def _cscale(v, clip=100):
""" Return limits for colormap.
"""
perc = clip / 100.
return -perc * abs(v).max(), perc * abs(v).max()
def _get_time(stream):
""" Get fixed time vector for stream object.
"""
dt = stream[0].stats.delta
nt = len(stream[0].data)
return np.arange(0, nt*dt, dt)
def _get_offsets(stream):
""" Return offsets.
"""
nr = len(stream)
offsets = np.zeros(nr)
scalco = stream[0].stats.su.trace_header.scalar_to_be_applied_to_all_coordinates
# set scale to km
if scalco == 0:
scalco = 1e-3 # assume coords are in m
else:
scalco = 1.0e-3 / scalco
for i, tr in enumerate(stream):
offsets[i] = (tr.stats.su.trace_header.group_coordinate_x -
tr.stats.su.trace_header.source_coordinate_x) * scalco
return offsets
def get_regular_ticks(v, interval):
""" Returns regular tick intervals.
"""
f = interp1d(v, list(range(len(v))))
begin = int(v[0] / interval) * interval
end = v[-1]
tick_labels = np.arange(begin, end, interval)
ticks = f(tick_labels)
return ticks, tick_labels
| 23.878641
| 102
| 0.61049
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from obspy.core.stream import Stream
def plot_gll(x, y, z):
r = (max(x) - min(x))/(max(y) - min(y))
rx = r/np.sqrt(1 + r**2)
ry = 1/np.sqrt(1 + r**2)
f = plt.figure(figsize=(10*rx, 10*ry))
p = plt.tricontourf(x, y, z, 125)
plt.axis('image')
return f, p
def plot_vector(t, v, xlabel='', ylabel='', title=''):
if v.ndim > 2:
raise ValueError('v must be a vector or a time series')
if v.ndim == 1:
x = list(range(len(v)))
y = v
else:
x = v[:, 0]
y = v[:, 1]
plt.plot(t, v)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
def plot_section(stream, ax=None, cmap='seismic', clip=100, title='', x_interval=1.0, y_interval=1.0):
if stream[0].stats._format != 'SU':
        raise NotImplementedError('plot_section currently only supports streams for SU data files.')
nr = len(stream)
nt = len(stream[0].data)
dt = stream[0].stats.delta
d_aspect = nr / float(nt)
data = _convert_to_array(stream)
fsize = 6
scale_factor = 1.5
if ax is None:
fig, ax = plt.subplots(figsize=(fsize, scale_factor*fsize))
im = ax.imshow(data, aspect=scale_factor*d_aspect, clim=_cscale(data, clip=clip))
im.set_cmap(cmap)
ax.set_title(title)
ax.set_xlabel('Offset [km]')
ax.set_ylabel('Time [s]')
t = _get_time(stream)
yticks, ytick_labels = get_regular_ticks(t, y_interval)
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
offsets =_get_offsets(stream)
xticks, xtick_labels = get_regular_ticks(offsets, x_interval)
ax.set_xticks(xticks)
ax.set_xticklabels(xtick_labels)
return ax
def _convert_to_array(stream):
if not isinstance(stream, Stream):
raise TypeError('Input object should be an obspy stream.')
nt = len(stream.traces[0].data)
nr = len(stream)
output = np.zeros((nt, nr))
for i, trace in enumerate(stream):
output[:, i] = trace.data[:]
return output
def _cscale(v, clip=100):
perc = clip / 100.
return -perc * abs(v).max(), perc * abs(v).max()
def _get_time(stream):
dt = stream[0].stats.delta
nt = len(stream[0].data)
return np.arange(0, nt*dt, dt)
def _get_offsets(stream):
nr = len(stream)
offsets = np.zeros(nr)
scalco = stream[0].stats.su.trace_header.scalar_to_be_applied_to_all_coordinates
if scalco == 0:
scalco = 1e-3
else:
scalco = 1.0e-3 / scalco
for i, tr in enumerate(stream):
offsets[i] = (tr.stats.su.trace_header.group_coordinate_x -
tr.stats.su.trace_header.source_coordinate_x) * scalco
return offsets
def get_regular_ticks(v, interval):
f = interp1d(v, list(range(len(v))))
begin = int(v[0] / interval) * interval
end = v[-1]
tick_labels = np.arange(begin, end, interval)
ticks = f(tick_labels)
return ticks, tick_labels
| true
| true
|
790d171ff6fc66d68231509c1a8420a5f0905f52
| 327
|
py
|
Python
|
CTD_controller/gps_test1.py
|
Raniita/Accuatic-Probe
|
fc0054b5c1a3a9be979379d8c7838cf1406c473f
|
[
"MIT"
] | 1
|
2021-11-13T14:55:21.000Z
|
2021-11-13T14:55:21.000Z
|
CTD_controller/gps_test1.py
|
Raniita/Ocean-CTD
|
fc0054b5c1a3a9be979379d8c7838cf1406c473f
|
[
"MIT"
] | null | null | null |
CTD_controller/gps_test1.py
|
Raniita/Ocean-CTD
|
fc0054b5c1a3a9be979379d8c7838cf1406c473f
|
[
"MIT"
] | null | null | null |
import serial
import pynmea2
# Testing with the blue USB dongle
ser = serial.Serial('/dev/ttyUSB0',4800)
while 1:
try:
data = ser.readline().decode('utf-8')
if(data.startswith("$GPGGA")):
parse = pynmea2.parse(data)
print(repr(parse))
except UnicodeDecodeError:
continue
| 23.357143
| 45
| 0.617737
|
import serial
import pynmea2
ser = serial.Serial('/dev/ttyUSB0',4800)
while 1:
try:
data = ser.readline().decode('utf-8')
if(data.startswith("$GPGGA")):
parse = pynmea2.parse(data)
print(repr(parse))
except UnicodeDecodeError:
continue
| true
| true
|
790d17afceb80fa17d6809240e3a4e5529a0f458
| 762
|
py
|
Python
|
data/stackoverflow/dataset.py
|
xuwanwei/FedML
|
c049a30d9839c4554e7e14b0c18275e96fea8130
|
[
"Apache-2.0"
] | 1,120
|
2020-07-22T02:30:52.000Z
|
2022-03-31T08:10:44.000Z
|
data/stackoverflow/dataset.py
|
xuwanwei/FedML
|
c049a30d9839c4554e7e14b0c18275e96fea8130
|
[
"Apache-2.0"
] | 113
|
2020-07-27T03:48:09.000Z
|
2022-03-30T03:25:56.000Z
|
data/stackoverflow/dataset.py
|
xuwanwei/FedML
|
c049a30d9839c4554e7e14b0c18275e96fea8130
|
[
"Apache-2.0"
] | 381
|
2020-07-22T06:12:57.000Z
|
2022-03-30T18:38:35.000Z
|
import tensorflow_federated as tff
def download_and_save_stackoverflow():
tff.simulation.datasets.stackoverflow.load_data(cache_dir='./')
def download_and_save_word_counts():
tff.simulation.datasets.stackoverflow.load_word_counts(cache_dir='./')
def download_and_save_tag_counts():
tff.simulation.datasets.stackoverflow.load_tag_counts(cache_dir='./')
"""
With the TensorFlow dependencies installed, you can run this Python script to download and cache the data from TensorFlow Federated locally:
python dataset.py
Before downloading, please install TFF following its official instructions:
pip install --upgrade tensorflow_federated
"""
if __name__ == "__main__":
download_and_save_stackoverflow()
download_and_save_word_counts()
download_and_save_tag_counts()
| 29.307692
| 116
| 0.799213
|
import tensorflow_federated as tff
def download_and_save_stackoverflow():
tff.simulation.datasets.stackoverflow.load_data(cache_dir='./')
def download_and_save_word_counts():
tff.simulation.datasets.stackoverflow.load_word_counts(cache_dir='./')
def download_and_save_tag_counts():
tff.simulation.datasets.stackoverflow.load_tag_counts(cache_dir='./')
if __name__ == "__main__":
download_and_save_stackoverflow()
download_and_save_word_counts()
download_and_save_tag_counts()
| true
| true
|
790d18c000cbd34272ce5e58feb3eb2b358ab314
| 223
|
py
|
Python
|
models/layer/__init__.py
|
LegenDong/IQIYI_VID_FACE_2019
|
258ff9282206e7b7074ed9ada5ef928bc9305ec6
|
[
"MIT"
] | 17
|
2019-07-11T02:41:01.000Z
|
2022-01-13T05:13:24.000Z
|
models/layer/__init__.py
|
xmpy/IQIYI_VID_FACE_2019
|
258ff9282206e7b7074ed9ada5ef928bc9305ec6
|
[
"MIT"
] | 1
|
2021-04-16T15:37:12.000Z
|
2021-04-17T13:46:57.000Z
|
models/layer/__init__.py
|
LegenDong/IQIYI_VID_FACE_2019
|
258ff9282206e7b7074ed9ada5ef928bc9305ec6
|
[
"MIT"
] | 5
|
2019-07-23T02:18:04.000Z
|
2021-07-14T03:42:32.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/5/11 15:12
# @Author : LegenDong
# @User : legendong
# @File : __init__.py.py
# @Software: PyCharm
from .channel_attention_layer import *
from .nan_attention_layer import *
| 22.3
| 38
| 0.654709
|
from .channel_attention_layer import *
from .nan_attention_layer import *
| true
| true
|
790d19613a477fabf2d42a3423b461b97ae79ed8
| 9,812
|
py
|
Python
|
openpyxl/packaging/tests/test_manifest.py
|
chenc2/openpyxl
|
0f9044a55ccf1b738f66195444a83a88a1cfb854
|
[
"MIT"
] | null | null | null |
openpyxl/packaging/tests/test_manifest.py
|
chenc2/openpyxl
|
0f9044a55ccf1b738f66195444a83a88a1cfb854
|
[
"MIT"
] | null | null | null |
openpyxl/packaging/tests/test_manifest.py
|
chenc2/openpyxl
|
0f9044a55ccf1b738f66195444a83a88a1cfb854
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from ..manifest import WORKSHEET_TYPE
@pytest.fixture
def FileExtension():
from ..manifest import FileExtension
return FileExtension
class TestFileExtension:
def test_ctor(self, FileExtension):
ext = FileExtension(
ContentType="application/xml",
Extension="xml"
)
xml = tostring(ext.to_tree())
expected = """
<Default ContentType="application/xml" Extension="xml"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, FileExtension):
src = """
<Default ContentType="application/xml" Extension="xml"/>
"""
node = fromstring(src)
ext = FileExtension.from_tree(node)
assert ext == FileExtension(ContentType="application/xml", Extension="xml")
@pytest.fixture
def Override():
from ..manifest import Override
return Override
class TestOverride:
def test_ctor(self, Override):
override = Override(
ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
PartName="/xl/workbook.xml"
)
xml = tostring(override.to_tree())
expected = """
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Override):
src = """
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
"""
node = fromstring(src)
override = Override.from_tree(node)
assert override == Override(
ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
PartName="/xl/workbook.xml"
)
@pytest.fixture
def Manifest():
from ..manifest import Manifest
return Manifest
class TestManifest:
def test_ctor(self, Manifest):
manifest = Manifest()
xml = tostring(manifest.to_tree())
expected = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels" />
<Default ContentType="application/xml" Extension="xml" />
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"
PartName="/xl/styles.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml"
PartName="/xl/theme/theme1.xml"/>
<Override ContentType="application/vnd.openxmlformats-package.core-properties+xml"
PartName="/docProps/core.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"
PartName="/docProps/app.xml"/>
</Types>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert len(manifest.Default) == 2
defaults = [
("application/xml", 'xml'),
("application/vnd.openxmlformats-package.relationships+xml", 'rels'),
]
assert [(ct.ContentType, ct.Extension) for ct in manifest.Default] == defaults
overrides = [
('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml',
'/xl/workbook.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml',
'/xl/worksheets/sheet1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml',
'/xl/chartsheets/sheet1.xml'),
('application/vnd.openxmlformats-officedocument.theme+xml',
'/xl/theme/theme1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml',
'/xl/styles.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml',
'/xl/sharedStrings.xml'),
('application/vnd.openxmlformats-officedocument.drawing+xml',
'/xl/drawings/drawing1.xml'),
('application/vnd.openxmlformats-officedocument.drawingml.chart+xml',
'/xl/charts/chart1.xml'),
('application/vnd.openxmlformats-package.core-properties+xml',
'/docProps/core.xml'),
('application/vnd.openxmlformats-officedocument.extended-properties+xml',
'/docProps/app.xml')
]
assert [(ct.ContentType, ct.PartName) for ct in manifest.Override] == overrides
def test_filenames(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert manifest.filenames == [
'/xl/workbook.xml',
'/xl/worksheets/sheet1.xml',
'/xl/chartsheets/sheet1.xml',
'/xl/theme/theme1.xml',
'/xl/styles.xml',
'/xl/sharedStrings.xml',
'/xl/drawings/drawing1.xml',
'/xl/charts/chart1.xml',
'/docProps/core.xml',
'/docProps/app.xml',
]
def test_exts(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert manifest.extensions == [
('xml', 'application/xml'),
]
def test_no_dupe_overrides(self, Manifest):
manifest = Manifest()
assert len(manifest.Override) == 4
manifest.Override.append("a")
manifest.Override.append("a")
assert len(manifest.Override) == 5
def test_no_dupe_types(self, Manifest):
manifest = Manifest()
assert len(manifest.Default) == 2
manifest.Default.append("a")
manifest.Default.append("a")
assert len(manifest.Default) == 3
def test_append(self, Manifest):
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
manifest = Manifest()
manifest.append(ws)
assert len(manifest.Override) == 5
def test_write(self, Manifest):
mf = Manifest()
from openpyxl import Workbook
wb = Workbook()
archive = ZipFile(BytesIO(), "w")
mf._write(archive, wb)
assert "/xl/workbook.xml" in mf.filenames
@pytest.mark.parametrize("file, registration",
[
('xl/media/image1.png',
'<Default ContentType="image/png" Extension="png" />'),
('xl/drawings/commentsDrawing.vml',
'<Default ContentType="application/vnd.openxmlformats-officedocument.vmlDrawing" Extension="vml" />'),
]
)
def test_media(self, Manifest, file, registration):
from openpyxl import Workbook
wb = Workbook()
manifest = Manifest()
manifest._register_mimetypes([file])
xml = tostring(manifest.Default[-1].to_tree())
diff = compare_xml(xml, registration)
assert diff is None, diff
def test_vba(self, datadir, Manifest):
datadir.chdir()
from openpyxl import load_workbook
wb = load_workbook('sample.xlsm', keep_vba=True)
manifest = Manifest()
manifest._write_vba(wb)
partnames = set([t.PartName for t in manifest.Override])
expected = set([
'/xl/workbook.xml',
'/xl/worksheets/sheet1.xml',
'/xl/worksheets/sheet2.xml',
'/xl/worksheets/sheet3.xml',
'/xl/theme/theme1.xml',
'/xl/styles.xml',
'/docProps/core.xml',
'/docProps/app.xml',
])
assert partnames == expected
def test_no_defaults(self, Manifest):
"""
LibreOffice does not use the Default element
"""
xml = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>
</Types>
"""
node = fromstring(xml)
manifest = Manifest.from_tree(node)
exts = manifest.extensions
assert exts == []
def test_find(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml", "rb") as src:
xml = src.read()
tree = fromstring(xml)
manifest = Manifest.from_tree(tree)
ws = manifest.find(WORKSHEET_TYPE)
assert ws.PartName == "/xl/worksheets/sheet1.xml"
def test_find_none(self, Manifest):
manifest = Manifest()
assert manifest.find(WORKSHEET_TYPE) is None
def test_findall(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml", "rb") as src:
xml = src.read()
tree = fromstring(xml)
manifest = Manifest.from_tree(tree)
sheets = manifest.findall(WORKSHEET_TYPE)
assert len(list(sheets)) == 1
| 34.188153
| 135
| 0.603139
|
from __future__ import absolute_import
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from ..manifest import WORKSHEET_TYPE
@pytest.fixture
def FileExtension():
from ..manifest import FileExtension
return FileExtension
class TestFileExtension:
def test_ctor(self, FileExtension):
ext = FileExtension(
ContentType="application/xml",
Extension="xml"
)
xml = tostring(ext.to_tree())
expected = """
<Default ContentType="application/xml" Extension="xml"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, FileExtension):
src = """
<Default ContentType="application/xml" Extension="xml"/>
"""
node = fromstring(src)
ext = FileExtension.from_tree(node)
assert ext == FileExtension(ContentType="application/xml", Extension="xml")
@pytest.fixture
def Override():
from ..manifest import Override
return Override
class TestOverride:
def test_ctor(self, Override):
override = Override(
ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
PartName="/xl/workbook.xml"
)
xml = tostring(override.to_tree())
expected = """
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Override):
src = """
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
"""
node = fromstring(src)
override = Override.from_tree(node)
assert override == Override(
ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
PartName="/xl/workbook.xml"
)
@pytest.fixture
def Manifest():
from ..manifest import Manifest
return Manifest
class TestManifest:
def test_ctor(self, Manifest):
manifest = Manifest()
xml = tostring(manifest.to_tree())
expected = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels" />
<Default ContentType="application/xml" Extension="xml" />
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"
PartName="/xl/styles.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml"
PartName="/xl/theme/theme1.xml"/>
<Override ContentType="application/vnd.openxmlformats-package.core-properties+xml"
PartName="/docProps/core.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"
PartName="/docProps/app.xml"/>
</Types>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert len(manifest.Default) == 2
defaults = [
("application/xml", 'xml'),
("application/vnd.openxmlformats-package.relationships+xml", 'rels'),
]
assert [(ct.ContentType, ct.Extension) for ct in manifest.Default] == defaults
overrides = [
('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml',
'/xl/workbook.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml',
'/xl/worksheets/sheet1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml',
'/xl/chartsheets/sheet1.xml'),
('application/vnd.openxmlformats-officedocument.theme+xml',
'/xl/theme/theme1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml',
'/xl/styles.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml',
'/xl/sharedStrings.xml'),
('application/vnd.openxmlformats-officedocument.drawing+xml',
'/xl/drawings/drawing1.xml'),
('application/vnd.openxmlformats-officedocument.drawingml.chart+xml',
'/xl/charts/chart1.xml'),
('application/vnd.openxmlformats-package.core-properties+xml',
'/docProps/core.xml'),
('application/vnd.openxmlformats-officedocument.extended-properties+xml',
'/docProps/app.xml')
]
assert [(ct.ContentType, ct.PartName) for ct in manifest.Override] == overrides
def test_filenames(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert manifest.filenames == [
'/xl/workbook.xml',
'/xl/worksheets/sheet1.xml',
'/xl/chartsheets/sheet1.xml',
'/xl/theme/theme1.xml',
'/xl/styles.xml',
'/xl/sharedStrings.xml',
'/xl/drawings/drawing1.xml',
'/xl/charts/chart1.xml',
'/docProps/core.xml',
'/docProps/app.xml',
]
def test_exts(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert manifest.extensions == [
('xml', 'application/xml'),
]
def test_no_dupe_overrides(self, Manifest):
manifest = Manifest()
assert len(manifest.Override) == 4
manifest.Override.append("a")
manifest.Override.append("a")
assert len(manifest.Override) == 5
def test_no_dupe_types(self, Manifest):
manifest = Manifest()
assert len(manifest.Default) == 2
manifest.Default.append("a")
manifest.Default.append("a")
assert len(manifest.Default) == 3
def test_append(self, Manifest):
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
manifest = Manifest()
manifest.append(ws)
assert len(manifest.Override) == 5
def test_write(self, Manifest):
mf = Manifest()
from openpyxl import Workbook
wb = Workbook()
archive = ZipFile(BytesIO(), "w")
mf._write(archive, wb)
assert "/xl/workbook.xml" in mf.filenames
@pytest.mark.parametrize("file, registration",
[
('xl/media/image1.png',
'<Default ContentType="image/png" Extension="png" />'),
('xl/drawings/commentsDrawing.vml',
'<Default ContentType="application/vnd.openxmlformats-officedocument.vmlDrawing" Extension="vml" />'),
]
)
def test_media(self, Manifest, file, registration):
from openpyxl import Workbook
wb = Workbook()
manifest = Manifest()
manifest._register_mimetypes([file])
xml = tostring(manifest.Default[-1].to_tree())
diff = compare_xml(xml, registration)
assert diff is None, diff
def test_vba(self, datadir, Manifest):
datadir.chdir()
from openpyxl import load_workbook
wb = load_workbook('sample.xlsm', keep_vba=True)
manifest = Manifest()
manifest._write_vba(wb)
partnames = set([t.PartName for t in manifest.Override])
expected = set([
'/xl/workbook.xml',
'/xl/worksheets/sheet1.xml',
'/xl/worksheets/sheet2.xml',
'/xl/worksheets/sheet3.xml',
'/xl/theme/theme1.xml',
'/xl/styles.xml',
'/docProps/core.xml',
'/docProps/app.xml',
])
assert partnames == expected
def test_no_defaults(self, Manifest):
xml = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>
</Types>
"""
node = fromstring(xml)
manifest = Manifest.from_tree(node)
exts = manifest.extensions
assert exts == []
def test_find(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml", "rb") as src:
xml = src.read()
tree = fromstring(xml)
manifest = Manifest.from_tree(tree)
ws = manifest.find(WORKSHEET_TYPE)
assert ws.PartName == "/xl/worksheets/sheet1.xml"
def test_find_none(self, Manifest):
manifest = Manifest()
assert manifest.find(WORKSHEET_TYPE) is None
def test_findall(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml", "rb") as src:
xml = src.read()
tree = fromstring(xml)
manifest = Manifest.from_tree(tree)
sheets = manifest.findall(WORKSHEET_TYPE)
assert len(list(sheets)) == 1
| true
| true
|
790d19ac33725a0573f7bf558dfd94112c839fe9
| 1,655
|
py
|
Python
|
araig_calculators/src/comparators/comp_param.py
|
ipa-kut/araig_test_stack
|
9b8f0b4ed7fffc052e52de04a8e1b27db521d0b4
|
[
"Apache-2.0"
] | null | null | null |
araig_calculators/src/comparators/comp_param.py
|
ipa-kut/araig_test_stack
|
9b8f0b4ed7fffc052e52de04a8e1b27db521d0b4
|
[
"Apache-2.0"
] | null | null | null |
araig_calculators/src/comparators/comp_param.py
|
ipa-kut/araig_test_stack
|
9b8f0b4ed7fffc052e52de04a8e1b27db521d0b4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from multipledispatch import dispatch as Override
import rospy
import threading
from std_msgs.msg import Float64
from araig_msgs.msg import BoolStamped
from base_classes.base_calculator import BaseCalculator
"""Compare data from one topic with one param
pub_list = {"out_bool": "BoolStamped"}
sub_list = {"in_float": "Float64"}
rosparam
inherit Base, only modify compare function"""
class compParam(BaseCalculator):
_pub_topic = "/out_bool"
_sub_topic = "/in_float"
def __init__(self,
sub_dict = {_sub_topic: Float64},
pub_dict = {_pub_topic: BoolStamped},
rosparam = None,
tolerance = 0,
rate = None):
if rosparam == None:
rospy.logerr(rospy.get_name() + ": Please provide rosparam")
else:
self.compare_param = rosparam
self.tolerance = tolerance
super(compParam, self).__init__(
sub_dict = sub_dict,
pub_dict = pub_dict,
rate = rate)
@Override()
def calculate(self):
with BaseCalculator.LOCK[self._sub_topic]:
current_vel = BaseCalculator.MSG[self._sub_topic]
flag_test_ready = True
if current_vel == None:
flag_test_ready = False
if flag_test_ready == True:
msg = self.PubDict[self._pub_topic]()
msg.header.stamp = rospy.Time.now()
if abs(self.compare_param - current_vel.data) <= self.tolerance:
msg.data = True
else:
msg.data = False
self.PubDiag[self._pub_topic].publish(msg)
| 29.553571
| 76
| 0.610876
|
from multipledispatch import dispatch as Override
import rospy
import threading
from std_msgs.msg import Float64
from araig_msgs.msg import BoolStamped
from base_classes.base_calculator import BaseCalculator
class compParam(BaseCalculator):
_pub_topic = "/out_bool"
_sub_topic = "/in_float"
def __init__(self,
sub_dict = {_sub_topic: Float64},
pub_dict = {_pub_topic: BoolStamped},
rosparam = None,
tolerance = 0,
rate = None):
if rosparam == None:
rospy.logerr(rospy.get_name() + ": Please provide rosparam")
else:
self.compare_param = rosparam
self.tolerance = tolerance
super(compParam, self).__init__(
sub_dict = sub_dict,
pub_dict = pub_dict,
rate = rate)
@Override()
def calculate(self):
with BaseCalculator.LOCK[self._sub_topic]:
current_vel = BaseCalculator.MSG[self._sub_topic]
flag_test_ready = True
if current_vel == None:
flag_test_ready = False
if flag_test_ready == True:
msg = self.PubDict[self._pub_topic]()
msg.header.stamp = rospy.Time.now()
if abs(self.compare_param - current_vel.data) <= self.tolerance:
msg.data = True
else:
msg.data = False
self.PubDiag[self._pub_topic].publish(msg)
| true
| true
|
790d19c0597d4813beb5c2998926c1d171ef9736
| 4,625
|
py
|
Python
|
georss_ign_sismologia_client/__init__.py
|
exxamalte/python-georss-ign-sismologia-client
|
0927f474159b466b43c75d8b8df0c9dd9e6c1084
|
[
"Apache-2.0"
] | null | null | null |
georss_ign_sismologia_client/__init__.py
|
exxamalte/python-georss-ign-sismologia-client
|
0927f474159b466b43c75d8b8df0c9dd9e6c1084
|
[
"Apache-2.0"
] | 2
|
2021-06-12T15:12:22.000Z
|
2021-07-03T09:34:24.000Z
|
georss_ign_sismologia_client/__init__.py
|
exxamalte/python-georss-ign-sismologia-client
|
0927f474159b466b43c75d8b8df0c9dd9e6c1084
|
[
"Apache-2.0"
] | 2
|
2019-09-24T09:20:06.000Z
|
2021-07-02T15:54:21.000Z
|
"""
IGN Instituto Geográfico Nacional Sismología Feed.
Fetches GeoRSS feed from IGN Instituto Geográfico Nacional Sismología.
"""
from datetime import datetime
from typing import Optional
import dateparser as dateparser
from georss_client import FeedEntry, GeoRssFeed
from georss_client.consts import CUSTOM_ATTRIBUTE
from georss_client.feed_manager import FeedManagerBase
ATTRIBUTION = "Instituto Geográfico Nacional"
IMAGE_URL_PATTERN = (
"http://www.ign.es/web/resources/sismologia/www/"
"dir_images_terremotos/detalle/{}.gif"
)
REGEXP_ATTR_MAGNITUDE = r"magnitud (?P<{}>[^ ]+) ".format(CUSTOM_ATTRIBUTE)
REGEXP_ATTR_REGION = r"magnitud [^ ]+ en (?P<{}>[A-ZÁÉÓÜÑ0-9 \-\.]+) en".format(
CUSTOM_ATTRIBUTE
)
REGEXP_ATTR_PUBLISHED_DATE = r"-Info.terremoto: (?P<{}>.+)$".format(CUSTOM_ATTRIBUTE)
REGEXP_ATTR_SHORT_ID = (
r"http:\/\/www\.ign\.es\/web\/ign\/portal\/"
r"sis-catalogo-terremotos\/-\/catalogo-terremotos\/"
r"detailTerremoto\?evid=(?P<{}>\w+)$".format(CUSTOM_ATTRIBUTE)
)
URL = "http://www.ign.es/ign/RssTools/sismologia.xml"
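# Illustrative sketch of how the patterns above are applied (not part of the library API):
#   import re
#   m = re.search(REGEXP_ATTR_MAGNITUDE, "terremoto de magnitud 3.4 en GOLFO DE CADIZ en ...")
#   if m:
#       magnitude = float(m.group(CUSTOM_ATTRIBUTE))  # -> 3.4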
class IgnSismologiaFeedManager(FeedManagerBase):
"""Feed Manager for IGN Sismología feed."""
def __init__(
self,
generate_callback,
update_callback,
remove_callback,
coordinates,
filter_radius=None,
filter_minimum_magnitude=None,
):
"""Initialize the IGN Sismología Feed Manager."""
feed = IgnSismologiaFeed(
coordinates,
filter_radius=filter_radius,
filter_minimum_magnitude=filter_minimum_magnitude,
)
super().__init__(feed, generate_callback, update_callback, remove_callback)
class IgnSismologiaFeed(GeoRssFeed):
"""IGN Sismología feed."""
def __init__(
self, home_coordinates, filter_radius=None, filter_minimum_magnitude=None
):
"""Initialise this service."""
super().__init__(home_coordinates, URL, filter_radius=filter_radius)
self._filter_minimum_magnitude = filter_minimum_magnitude
def __repr__(self):
"""Return string representation of this feed."""
return "<{}(home={}, url={}, radius={}, magnitude={})>".format(
self.__class__.__name__,
self._home_coordinates,
self._url,
self._filter_radius,
self._filter_minimum_magnitude,
)
def _new_entry(self, home_coordinates, rss_entry, global_data):
"""Generate a new entry."""
return IgnSismologiaFeedEntry(home_coordinates, rss_entry)
def _filter_entries(self, entries):
"""Filter the provided entries."""
entries = super()._filter_entries(entries)
if self._filter_minimum_magnitude:
# Return only entries that have an actual magnitude value, and
# the value is equal or above the defined threshold.
return list(
filter(
lambda entry: entry.magnitude
and entry.magnitude >= self._filter_minimum_magnitude,
entries,
)
)
return entries
class IgnSismologiaFeedEntry(FeedEntry):
"""IGN Sismología feed entry."""
def __init__(self, home_coordinates, rss_entry):
"""Initialise this service."""
super().__init__(home_coordinates, rss_entry)
@property
def attribution(self) -> str:
"""Return the attribution of this entry."""
return ATTRIBUTION
@property
def published(self) -> Optional[datetime]:
"""Return the published date of this entry."""
published_date = self._search_in_title(REGEXP_ATTR_PUBLISHED_DATE)
if published_date:
published_date = dateparser.parse(published_date)
return published_date
@property
def magnitude(self) -> Optional[float]:
"""Return the magnitude of this entry."""
magnitude = self._search_in_description(REGEXP_ATTR_MAGNITUDE)
if magnitude:
magnitude = float(magnitude)
return magnitude
@property
    def region(self) -> Optional[str]:
"""Return the region of this entry."""
return self._search_in_description(REGEXP_ATTR_REGION)
def _short_id(self) -> Optional[str]:
"""Return the short id of this entry."""
return self._search_in_external_id(REGEXP_ATTR_SHORT_ID)
@property
def image_url(self) -> Optional[str]:
"""Return the image url of this entry."""
short_id = self._short_id()
if short_id:
return IMAGE_URL_PATTERN.format(short_id)
return None
| 33.035714
| 85
| 0.65773
|
from datetime import datetime
from typing import Optional
import dateparser as dateparser
from georss_client import FeedEntry, GeoRssFeed
from georss_client.consts import CUSTOM_ATTRIBUTE
from georss_client.feed_manager import FeedManagerBase
ATTRIBUTION = "Instituto Geográfico Nacional"
IMAGE_URL_PATTERN = (
"http://www.ign.es/web/resources/sismologia/www/"
"dir_images_terremotos/detalle/{}.gif"
)
REGEXP_ATTR_MAGNITUDE = r"magnitud (?P<{}>[^ ]+) ".format(CUSTOM_ATTRIBUTE)
REGEXP_ATTR_REGION = r"magnitud [^ ]+ en (?P<{}>[A-ZÁÉÓÜÑ0-9 \-\.]+) en".format(
CUSTOM_ATTRIBUTE
)
REGEXP_ATTR_PUBLISHED_DATE = r"-Info.terremoto: (?P<{}>.+)$".format(CUSTOM_ATTRIBUTE)
REGEXP_ATTR_SHORT_ID = (
r"http:\/\/www\.ign\.es\/web\/ign\/portal\/"
r"sis-catalogo-terremotos\/-\/catalogo-terremotos\/"
r"detailTerremoto\?evid=(?P<{}>\w+)$".format(CUSTOM_ATTRIBUTE)
)
URL = "http://www.ign.es/ign/RssTools/sismologia.xml"
class IgnSismologiaFeedManager(FeedManagerBase):
def __init__(
self,
generate_callback,
update_callback,
remove_callback,
coordinates,
filter_radius=None,
filter_minimum_magnitude=None,
):
feed = IgnSismologiaFeed(
coordinates,
filter_radius=filter_radius,
filter_minimum_magnitude=filter_minimum_magnitude,
)
super().__init__(feed, generate_callback, update_callback, remove_callback)
class IgnSismologiaFeed(GeoRssFeed):
def __init__(
self, home_coordinates, filter_radius=None, filter_minimum_magnitude=None
):
super().__init__(home_coordinates, URL, filter_radius=filter_radius)
self._filter_minimum_magnitude = filter_minimum_magnitude
def __repr__(self):
return "<{}(home={}, url={}, radius={}, magnitude={})>".format(
self.__class__.__name__,
self._home_coordinates,
self._url,
self._filter_radius,
self._filter_minimum_magnitude,
)
def _new_entry(self, home_coordinates, rss_entry, global_data):
return IgnSismologiaFeedEntry(home_coordinates, rss_entry)
def _filter_entries(self, entries):
entries = super()._filter_entries(entries)
if self._filter_minimum_magnitude:
return list(
filter(
lambda entry: entry.magnitude
and entry.magnitude >= self._filter_minimum_magnitude,
entries,
)
)
return entries
class IgnSismologiaFeedEntry(FeedEntry):
def __init__(self, home_coordinates, rss_entry):
super().__init__(home_coordinates, rss_entry)
@property
def attribution(self) -> str:
return ATTRIBUTION
@property
def published(self) -> Optional[datetime]:
published_date = self._search_in_title(REGEXP_ATTR_PUBLISHED_DATE)
if published_date:
published_date = dateparser.parse(published_date)
return published_date
@property
def magnitude(self) -> Optional[float]:
magnitude = self._search_in_description(REGEXP_ATTR_MAGNITUDE)
if magnitude:
magnitude = float(magnitude)
return magnitude
@property
    def region(self) -> Optional[str]:
return self._search_in_description(REGEXP_ATTR_REGION)
def _short_id(self) -> Optional[str]:
return self._search_in_external_id(REGEXP_ATTR_SHORT_ID)
@property
def image_url(self) -> Optional[str]:
short_id = self._short_id()
if short_id:
return IMAGE_URL_PATTERN.format(short_id)
return None
| true
| true
|
790d1bbabbebaa639d68b3ec8915702c7dd56273
| 32,391
|
py
|
Python
|
cumulusci/tasks/bulkdata.py
|
davidmreed/CumulusCI
|
933159305e9fc0448087366b5f69484cc01a7a12
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/tasks/bulkdata.py
|
davidmreed/CumulusCI
|
933159305e9fc0448087366b5f69484cc01a7a12
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/tasks/bulkdata.py
|
davidmreed/CumulusCI
|
933159305e9fc0448087366b5f69484cc01a7a12
|
[
"BSD-3-Clause"
] | null | null | null |
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from contextlib import contextmanager
import datetime
import io
import os
import time
import tempfile
import xml.etree.ElementTree as ET
from salesforce_bulk.util import IteratorBytesIO
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy import text
from sqlalchemy import types
from sqlalchemy import event
import requests
import unicodecsv
from cumulusci.core.utils import process_bool_arg, ordered_yaml_load
from cumulusci.core.exceptions import BulkDataException
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import convert_to_snake_case, log_progress, os_friendly_path
# TODO: UserID Catcher
# TODO: Dater
# Create a custom sqlalchemy field type for sqlite datetime fields which are stored as integer of epoch time
class EpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
def process_bind_param(self, value, dialect):
return int((value - self.epoch).total_seconds()) * 1000
def process_result_value(self, value, dialect):
if value is not None:
return self.epoch + datetime.timedelta(seconds=value / 1000)
# Listen for sqlalchemy column_reflect event and map datetime fields to EpochType
@event.listens_for(Table, "column_reflect")
def setup_epoch(inspector, table, column_info):
if isinstance(column_info["type"], types.DateTime):
column_info["type"] = EpochType()
class BulkJobTaskMixin(object):
def _job_state_from_batches(self, job_id):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job_id)
response = requests.get(uri, headers=self.bulk.headers())
return self._parse_job_state(response.content)
def _parse_job_state(self, xml):
tree = ET.fromstring(xml)
completed = 0
pending = 0
failed = 0
for el in tree.iterfind(".//{%s}state" % self.bulk.jobNS):
state = el.text
if state == "Not Processed":
return "Aborted"
elif state == "Failed":
failed += 1
elif state == "Completed":
completed += 1
else: # Queued, InProgress
pending += 1
if pending:
return "InProgress"
elif failed:
return "Failed"
else:
return "Completed"
def _wait_for_job(self, job_id):
while True:
job_status = self.bulk.job_status(job_id)
self.logger.info(
" Waiting for job {} ({}/{})".format(
job_id,
job_status["numberBatchesCompleted"],
job_status["numberBatchesTotal"],
)
)
result = self._job_state_from_batches(job_id)
if result != "InProgress":
break
time.sleep(10)
self.logger.info("Job {} finished with result: {}".format(job_id, result))
return result
def _sql_bulk_insert_from_csv(self, conn, table, columns, data_file):
if conn.dialect.name in ("postgresql", "psycopg2"):
# psycopg2 (the postgres driver) supports COPY FROM
# to efficiently bulk insert rows in CSV format
with conn.connection.cursor() as cursor:
cursor.copy_expert(
"COPY {} ({}) FROM STDIN WITH (FORMAT CSV)".format(
table, ",".join(columns)
),
data_file,
)
else:
# For other db drivers we need to use standard SQL
# -- this is optimized for ease of implementation
# rather than performance and may need more work.
reader = unicodecsv.DictReader(data_file, columns)
table = self.metadata.tables[table]
rows = list(reader)
if rows:
conn.execute(table.insert().values(rows))
self.session.flush()
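    # With a postgres connection the statement issued above looks like, e.g.
    # (hypothetical table and column names):
    #   COPY contacts_sf_ids (id,sf_id) FROM STDIN WITH (FORMAT CSV)
    # while other drivers fall back to a regular multi-row INSERT built from the CSV rows.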
class DeleteData(BaseSalesforceApiTask, BulkJobTaskMixin):
task_options = {
"objects": {
"description": "A list of objects to delete records from in order of deletion. If passed via command line, use a comma separated string",
"required": True,
},
"hardDelete": {
"description": "If True, perform a hard delete, bypassing the recycle bin. Default: False"
},
}
def _init_options(self, kwargs):
super(DeleteData, self)._init_options(kwargs)
# Split and trim objects string into a list if not already a list
if not isinstance(self.options["objects"], list):
self.options["objects"] = [
obj.strip() for obj in self.options["objects"].split(",")
]
self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
def _run_task(self):
for obj in self.options["objects"]:
self.logger.info("Deleting all {} records".format(obj))
delete_job = self._create_job(obj)
if delete_job is not None:
self._wait_for_job(delete_job)
def _create_job(self, obj):
# Query for rows to delete
delete_rows = self._query_salesforce_for_records_to_delete(obj)
if not delete_rows:
self.logger.info(" No {} objects found, skipping delete".format(obj))
return
# Upload all the batches
operation = "hardDelete" if self.options["hardDelete"] else "delete"
delete_job = self.bulk.create_job(obj, operation)
self.logger.info(" Deleting {} {} records".format(len(delete_rows), obj))
batch_num = 1
for batch in self._upload_batches(delete_job, delete_rows):
self.logger.info(" Uploaded batch {}".format(batch))
batch_num += 1
self.bulk.close_job(delete_job)
return delete_job
def _query_salesforce_for_records_to_delete(self, obj):
# Query for all record ids
self.logger.info(" Querying for all {} objects".format(obj))
query_job = self.bulk.create_query_job(obj, contentType="CSV")
batch = self.bulk.query(query_job, "select Id from {}".format(obj))
while not self.bulk.is_batch_done(batch, query_job):
time.sleep(10)
self.bulk.close_job(query_job)
delete_rows = []
for result in self.bulk.get_all_results_for_query_batch(batch, query_job):
reader = unicodecsv.DictReader(result, encoding="utf-8")
for row in reader:
delete_rows.append(row)
return delete_rows
def _split_batches(self, data, batch_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(data), batch_size):
yield data[i : i + batch_size]
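    # For example (illustrative only): _split_batches([1, 2, 3, 4, 5], 2)
    # yields [1, 2], then [3, 4], then [5].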
def _upload_batches(self, job, data):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job)
headers = self.bulk.headers({"Content-Type": "text/csv"})
for batch in self._split_batches(data, 10000):
rows = ['"Id"']
rows += ['"{}"'.format(record["Id"]) for record in batch]
resp = requests.post(uri, data="\n".join(rows), headers=headers)
content = resp.content
if resp.status_code >= 400:
self.bulk.raise_error(content, resp.status_code)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.bulk.jobNS)
yield batch_id
class LoadData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "The database url to a database containing the test data to load",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"start_step": {
"description": "If specified, skip steps before this one in the mapping",
"required": False,
},
"sql_path": {
"description": "If specified, a database will be created from an SQL script at the provided path"
},
}
def _init_options(self, kwargs):
super(LoadData, self)._init_options(kwargs)
if self.options.get("sql_path"):
if self.options.get("database_url"):
raise TaskOptionsError(
"The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
)
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
if not os.path.isfile(self.options["sql_path"]):
raise TaskOptionsError(
"File {} does not exist".format(self.options["sql_path"])
)
self.logger.info("Using in-memory sqlite database")
self.options["database_url"] = "sqlite://"
def _run_task(self):
self._init_mapping()
self._init_db()
start_step = self.options.get("start_step")
started = False
for name, mapping in self.mapping.items():
# Skip steps until start_step
if not started and start_step and name != start_step:
self.logger.info("Skipping step: {}".format(name))
continue
started = True
self.logger.info("Running Job: {}".format(name))
result = self._load_mapping(mapping)
if result != "Completed":
break
def _load_mapping(self, mapping):
"""Load data for a single step."""
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
job_id, local_ids_for_batch = self._create_job(mapping)
result = self._wait_for_job(job_id)
# We store inserted ids even if some batches failed
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
def _create_job(self, mapping):
"""Initiate a bulk insert and upload batches to run in parallel."""
job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
self.logger.info(" Created bulk job {}".format(job_id))
# Upload batches
local_ids_for_batch = {}
for batch_file, local_ids in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(" Uploaded batch {}".format(batch_id))
self.bulk.close_job(job_id)
return job_id, local_ids_for_batch
def _get_batches(self, mapping, batch_size=10000):
"""Get data from the local db"""
action = mapping.get("action", "insert")
fields = mapping.get("fields", {}).copy()
static = mapping.get("static", {})
lookups = mapping.get("lookups", {})
record_type = mapping.get("record_type")
# Skip Id field on insert
if action == "insert" and "Id" in fields:
del fields["Id"]
# Build the list of fields to import
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append("RecordTypeId")
# default to the profile assigned recordtype if we can't find any
# query for the RT by developer name
query = (
"SELECT Id FROM RecordType WHERE SObjectType='{0}'"
"AND DeveloperName = '{1}' LIMIT 1"
)
record_type_id = self.sf.query(
query.format(mapping.get("sf_object"), record_type)
)["records"][0]["Id"]
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return batch_file, writer, batch_ids
batch_file, writer, batch_ids = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + list(static.values())
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
# Yield and start a new file every [batch_size] rows
if not total_rows % batch_size:
batch_file.seek(0)
self.logger.info(" Processing batch {}".format(batch_num))
yield batch_file, batch_ids
batch_file, writer, batch_ids = start_batch()
batch_num += 1
# Yield result file for final batch
if batch_ids:
batch_file.seek(0)
yield batch_file, batch_ids
self.logger.info(
" Prepared {} rows for import to {}".format(
total_rows, mapping["sf_object"]
)
)
def _query_db(self, mapping):
"""Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
"""
model = self.models[mapping.get("table")]
# Use primary key instead of the field mapped to SF Id
fields = mapping.get("fields", {}).copy()
if mapping["oid_as_pk"]:
del fields["Id"]
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get("lookups", {}).copy()
for lookup in lookups.values():
lookup["aliased_table"] = aliased(
self.metadata.tables["{}_sf_ids".format(lookup["table"])]
)
columns.append(lookup["aliased_table"].columns.sf_id)
query = self.session.query(*columns)
if "record_type" in mapping and hasattr(model, "record_type"):
query = query.filter(model.record_type == mapping["record_type"])
if "filters" in mapping:
filter_args = []
for f in mapping["filters"]:
filter_args.append(text(f))
query = query.filter(*filter_args)
for sf_field, lookup in lookups.items():
# Outer join with lookup ids table:
# returns main obj even if lookup is null
key_field = get_lookup_key_field(lookup, sf_field)
value_column = getattr(model, key_field)
query = query.outerjoin(
lookup["aliased_table"],
lookup["aliased_table"].columns.id == value_column,
)
# Order by foreign key to minimize lock contention
# by trying to keep lookup targets in the same batch
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
def _convert(self, value):
if value:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
"""Get the job results and store inserted SF Ids in a new table"""
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for batch_id, local_ids in local_ids_for_batch.items():
try:
results_url = "{}/job/{}/batch/{}/result".format(
self.bulk.endpoint, job_id, batch_id
)
# Download entire result file to a temporary file first
# to avoid the server dropping connections
with _download_file(results_url, self.bulk) as f:
self.logger.info(
" Downloaded results for batch {}".format(batch_id)
)
self._store_inserted_ids_for_batch(
f, local_ids, id_table_name, conn
)
self.logger.info(
" Updated {} for batch {}".format(id_table_name, batch_id)
)
except Exception: # pragma: nocover
# If we can't download one result file,
# don't let that stop us from downloading the others
self.logger.error(
"Could not download batch results: {}".format(batch_id)
)
continue
self.session.commit()
def _reset_id_table(self, mapping):
"""Create an empty table to hold the inserted SF Ids"""
if not hasattr(self, "_initialized_id_tables"):
self._initialized_id_tables = set()
id_table_name = "{}_sf_ids".format(mapping["table"])
if id_table_name not in self._initialized_id_tables:
if id_table_name in self.metadata.tables:
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(
id_table_name,
self.metadata,
Column("id", Unicode(255), primary_key=True),
Column("sf_id", Unicode(18)),
)
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
def _store_inserted_ids_for_batch(
self, result_file, local_ids, id_table_name, conn
):
# Set up a function to generate rows based on this result file
def produce_csv():
"""Iterate over job results and prepare rows for id table"""
reader = unicodecsv.reader(result_file)
next(reader) # skip header
i = 0
for row, local_id in zip(reader, local_ids):
if row[1] == "true": # Success
sf_id = row[0]
yield "{},{}\n".format(local_id, sf_id).encode("utf-8")
else:
self.logger.warning(" Error on row {}: {}".format(i, row[3]))
i += 1
# Bulk insert rows into id table
columns = ("id", "sf_id")
data_file = IteratorBytesIO(produce_csv())
self._sql_bulk_insert_from_csv(conn, id_table_name, columns, data_file)
def _sqlite_load(self):
conn = self.session.connection()
cursor = conn.connection.cursor()
with open(self.options["sql_path"], "r") as f:
try:
cursor.executescript(f.read())
finally:
cursor.close()
# self.session.flush()
def _init_db(self):
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize the DB session
self.session = Session(self.engine)
if self.options.get("sql_path"):
self._sqlite_load()
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# Loop through mappings and reflect each referenced table
self.models = {}
for name, mapping in self.mapping.items():
if "table" in mapping and mapping["table"] not in self.models:
self.models[mapping["table"]] = self.base.classes[mapping["table"]]
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mapping = ordered_yaml_load(f)
class QueryData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "A DATABASE_URL where the query output should be written",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"sql_path": {
"description": "If set, an SQL script will be generated at the path provided "
+ "This is useful for keeping data in the repository and allowing diffs."
},
}
def _init_options(self, kwargs):
super(QueryData, self)._init_options(kwargs)
if self.options.get("sql_path"):
if self.options.get("database_url"):
raise TaskOptionsError(
"The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
)
self.logger.info("Using in-memory sqlite database")
self.options["database_url"] = "sqlite://"
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
def _run_task(self):
self._init_mapping()
self._init_db()
for mapping in self.mappings.values():
soql = self._soql_for_mapping(mapping)
self._run_query(soql, mapping)
self._drop_sf_id_columns()
if self.options.get("sql_path"):
self._sqlite_dump()
def _init_db(self):
self.models = {}
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# Create the tables
self._create_tables()
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# initialize session
self.session = create_session(bind=self.engine, autocommit=False)
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mappings = ordered_yaml_load(f)
def _soql_for_mapping(self, mapping):
sf_object = mapping["sf_object"]
fields = []
if not mapping["oid_as_pk"]:
fields.append("Id")
fields += [field["sf"] for field in self._fields_for_mapping(mapping)]
soql = "SELECT {fields} FROM {sf_object}".format(
**{"fields": ", ".join(fields), "sf_object": sf_object}
)
if "record_type" in mapping:
soql += " WHERE RecordType.DeveloperName = '{}'".format(
mapping["record_type"]
)
return soql
def _run_query(self, soql, mapping):
self.logger.info("Creating bulk job for: {sf_object}".format(**mapping))
job = self.bulk.create_query_job(mapping["sf_object"], contentType="CSV")
self.logger.info("Job id: {0}".format(job))
self.logger.info("Submitting query: {}".format(soql))
batch = self.bulk.query(job, soql)
self.logger.info("Batch id: {0}".format(batch))
self.bulk.wait_for_batch(job, batch)
self.logger.info("Batch {0} finished".format(batch))
self.bulk.close_job(job)
self.logger.info("Job {0} closed".format(job))
conn = self.session.connection()
for result_file in self._get_results(batch, job):
self._import_results(mapping, result_file, conn)
def _get_results(self, batch_id, job_id):
result_ids = self.bulk.get_query_batch_result_ids(batch_id, job_id=job_id)
for result_id in result_ids:
self.logger.info("Result id: {}".format(result_id))
uri = "{}/job/{}/batch/{}/result/{}".format(
self.bulk.endpoint, job_id, batch_id, result_id
)
with _download_file(uri, self.bulk) as f:
self.logger.info("Result {} downloaded".format(result_id))
yield f
def _import_results(self, mapping, result_file, conn):
# Map SF field names to local db column names
sf_header = [
name.strip('"')
for name in result_file.readline().strip().decode("utf-8").split(",")
]
columns = []
lookup_keys = []
for sf in sf_header:
if sf == "Records not found for this query":
return
if sf:
column = mapping.get("fields", {}).get(sf)
if not column:
lookup = mapping.get("lookups", {}).get(sf, {})
if lookup:
lookup_keys.append(sf)
column = get_lookup_key_field(lookup, sf)
if column:
columns.append(column)
if not columns:
return
record_type = mapping.get("record_type")
if record_type:
columns.append("record_type")
processor = log_progress(
process_incoming_rows(result_file, record_type), self.logger
)
data_file = IteratorBytesIO(processor)
if mapping["oid_as_pk"]:
self._sql_bulk_insert_from_csv(conn, mapping["table"], columns, data_file)
else:
# If using the autogenerated id field, split out the CSV file from the Bulk API
# into two separate files and load into the main table and the sf_id_table
with tempfile.TemporaryFile("w+b") as f_values:
with tempfile.TemporaryFile("w+b") as f_ids:
data_file_values, data_file_ids = self._split_batch_csv(
data_file, f_values, f_ids
)
self._sql_bulk_insert_from_csv(
conn, mapping["table"], columns, data_file_values
)
self._sql_bulk_insert_from_csv(
conn, mapping["sf_id_table"], ["sf_id"], data_file_ids
)
self.session.commit()
if lookup_keys and not mapping["oid_as_pk"]:
self._convert_lookups_to_id(mapping, lookup_keys)
def _get_mapping_for_table(self, table):
""" Returns the first mapping for a table name """
for mapping in self.mappings.values():
if mapping["table"] == table:
return mapping
def _split_batch_csv(self, data_file, f_values, f_ids):
writer_values = unicodecsv.writer(f_values)
writer_ids = unicodecsv.writer(f_ids)
for row in unicodecsv.reader(data_file):
writer_values.writerow(row[1:])
writer_ids.writerow([row[:1]])
f_values.seek(0)
f_ids.seek(0)
return f_values, f_ids
def _convert_lookups_to_id(self, mapping, lookup_keys):
for lookup_key in lookup_keys:
lookup_dict = mapping["lookups"][lookup_key]
model = self.models[mapping["table"]]
lookup_mapping = self._get_mapping_for_table(lookup_dict["table"])
lookup_model = self.models[lookup_mapping["sf_id_table"]]
key_field = get_lookup_key_field(lookup_dict, lookup_key)
key_attr = getattr(model, key_field)
try:
self.session.query(model).filter(
key_attr.isnot(None), key_attr == lookup_model.sf_id
).update({key_attr: lookup_model.id}, synchronize_session=False)
except NotImplementedError:
# Some databases such as sqlite don't support multitable update
mappings = []
for row, lookup_id in self.session.query(model, lookup_model.id).join(
lookup_model, key_attr == lookup_model.sf_id
):
mappings.append({"id": row.id, key_field: lookup_id})
self.session.bulk_update_mappings(model, mappings)
self.session.commit()
def _create_tables(self):
for mapping in self.mappings.values():
self._create_table(mapping)
self.metadata.create_all()
def _create_table(self, mapping):
model_name = "{}Model".format(mapping["table"])
mapper_kwargs = {}
table_kwargs = {}
self.models[mapping["table"]] = type(model_name, (object,), {})
# Provide support for legacy mappings which used the OID as the pk but
# default to using an autoincrementing int pk and a separate sf_id column
fields = []
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
if mapping["oid_as_pk"]:
id_column = mapping["fields"]["Id"]
fields.append(Column(id_column, Unicode(255), primary_key=True))
else:
fields.append(Column("id", Integer(), primary_key=True, autoincrement=True))
for field in self._fields_for_mapping(mapping):
if mapping["oid_as_pk"] and field["sf"] == "Id":
continue
fields.append(Column(field["db"], Unicode(255)))
if "record_type" in mapping:
fields.append(Column("record_type", Unicode(255)))
t = Table(mapping["table"], self.metadata, *fields, **table_kwargs)
if t.exists():
raise BulkDataException("Table already exists: {}".format(mapping["table"]))
if not mapping["oid_as_pk"]:
mapping["sf_id_table"] = mapping["table"] + "_sf_id"
# If multiple mappings point to the same table, don't recreate the table
if mapping["sf_id_table"] not in self.models:
sf_id_model_name = "{}Model".format(mapping["sf_id_table"])
self.models[mapping["sf_id_table"]] = type(
sf_id_model_name, (object,), {}
)
sf_id_fields = [
Column("id", Integer(), primary_key=True, autoincrement=True),
Column("sf_id", Unicode(24)),
]
id_t = Table(mapping["sf_id_table"], self.metadata, *sf_id_fields)
mapper(self.models[mapping["sf_id_table"]], id_t)
mapper(self.models[mapping["table"]], t, **mapper_kwargs)
def _fields_for_mapping(self, mapping):
fields = []
for sf_field, db_field in mapping.get("fields", {}).items():
fields.append({"sf": sf_field, "db": db_field})
for sf_field, lookup in mapping.get("lookups", {}).items():
fields.append(
{"sf": sf_field, "db": get_lookup_key_field(lookup, sf_field)}
)
return fields
def _drop_sf_id_columns(self):
for mapping in self.mappings.values():
if mapping.get("oid_as_pk"):
continue
self.metadata.tables[mapping["sf_id_table"]].drop()
def _sqlite_dump(self):
path = self.options["sql_path"]
if os.path.exists(path):
os.remove(path)
with open(path, "w") as f:
for line in self.session.connection().connection.iterdump():
f.write(line + "\n")
@contextmanager
def _download_file(uri, bulk_api):
"""Download the bulk API result file for a single batch"""
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile("w+b") as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
yield f
def process_incoming_rows(f, record_type=None):
if record_type and not isinstance(record_type, bytes):
record_type = record_type.encode("utf-8")
for line in f:
if record_type:
yield line.rstrip() + b"," + record_type + b"\n"
else:
yield line
def get_lookup_key_field(lookup, sf_field):
return lookup.get("key_field", convert_to_snake_case(sf_field))
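# --- Editor's illustrative sketch (not part of the original module) ---------------
# Quick demonstration of the two helpers above; the mapping values are invented.
def _example_lookup_and_record_type():  # pragma: no cover
    # With no explicit key_field the Salesforce field name is snake_cased
    # (assuming convert_to_snake_case("AccountId") returns "account_id").
    assert get_lookup_key_field({"table": "accounts"}, "AccountId") == "account_id"
    # An explicit key_field always takes precedence.
    assert get_lookup_key_field({"key_field": "acct"}, "AccountId") == "acct"
    # process_incoming_rows appends the mapping's record type to every CSV line.
    assert list(process_incoming_rows([b"1,Foo\n"], "Household")) == [b"1,Foo,Household\n"]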
| 39.35723
| 150
| 0.59038
|
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from contextlib import contextmanager
import datetime
import io
import os
import time
import tempfile
import xml.etree.ElementTree as ET
from salesforce_bulk.util import IteratorBytesIO
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy import text
from sqlalchemy import types
from sqlalchemy import event
import requests
import unicodecsv
from cumulusci.core.utils import process_bool_arg, ordered_yaml_load
from cumulusci.core.exceptions import BulkDataException
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import convert_to_snake_case, log_progress, os_friendly_path
class EpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
def process_bind_param(self, value, dialect):
return int((value - self.epoch).total_seconds()) * 1000
def process_result_value(self, value, dialect):
if value is not None:
return self.epoch + datetime.timedelta(seconds=value / 1000)
@event.listens_for(Table, "column_reflect")
def setup_epoch(inspector, table, column_info):
if isinstance(column_info["type"], types.DateTime):
column_info["type"] = EpochType()
class BulkJobTaskMixin(object):
def _job_state_from_batches(self, job_id):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job_id)
response = requests.get(uri, headers=self.bulk.headers())
return self._parse_job_state(response.content)
def _parse_job_state(self, xml):
tree = ET.fromstring(xml)
completed = 0
pending = 0
failed = 0
for el in tree.iterfind(".//{%s}state" % self.bulk.jobNS):
state = el.text
if state == "Not Processed":
return "Aborted"
elif state == "Failed":
failed += 1
elif state == "Completed":
completed += 1
else:
pending += 1
if pending:
return "InProgress"
elif failed:
return "Failed"
else:
return "Completed"
def _wait_for_job(self, job_id):
while True:
job_status = self.bulk.job_status(job_id)
self.logger.info(
" Waiting for job {} ({}/{})".format(
job_id,
job_status["numberBatchesCompleted"],
job_status["numberBatchesTotal"],
)
)
result = self._job_state_from_batches(job_id)
if result != "InProgress":
break
time.sleep(10)
self.logger.info("Job {} finished with result: {}".format(job_id, result))
return result
def _sql_bulk_insert_from_csv(self, conn, table, columns, data_file):
if conn.dialect.name in ("postgresql", "psycopg2"):
with conn.connection.cursor() as cursor:
cursor.copy_expert(
"COPY {} ({}) FROM STDIN WITH (FORMAT CSV)".format(
table, ",".join(columns)
),
data_file,
)
else:
reader = unicodecsv.DictReader(data_file, columns)
table = self.metadata.tables[table]
rows = list(reader)
if rows:
conn.execute(table.insert().values(rows))
self.session.flush()
class DeleteData(BaseSalesforceApiTask, BulkJobTaskMixin):
task_options = {
"objects": {
"description": "A list of objects to delete records from in order of deletion. If passed via command line, use a comma separated string",
"required": True,
},
"hardDelete": {
"description": "If True, perform a hard delete, bypassing the recycle bin. Default: False"
},
}
def _init_options(self, kwargs):
super(DeleteData, self)._init_options(kwargs)
if not isinstance(self.options["objects"], list):
self.options["objects"] = [
obj.strip() for obj in self.options["objects"].split(",")
]
self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
def _run_task(self):
for obj in self.options["objects"]:
self.logger.info("Deleting all {} records".format(obj))
delete_job = self._create_job(obj)
if delete_job is not None:
self._wait_for_job(delete_job)
def _create_job(self, obj):
delete_rows = self._query_salesforce_for_records_to_delete(obj)
if not delete_rows:
self.logger.info(" No {} objects found, skipping delete".format(obj))
return
operation = "hardDelete" if self.options["hardDelete"] else "delete"
delete_job = self.bulk.create_job(obj, operation)
self.logger.info(" Deleting {} {} records".format(len(delete_rows), obj))
batch_num = 1
for batch in self._upload_batches(delete_job, delete_rows):
self.logger.info(" Uploaded batch {}".format(batch))
batch_num += 1
self.bulk.close_job(delete_job)
return delete_job
def _query_salesforce_for_records_to_delete(self, obj):
self.logger.info(" Querying for all {} objects".format(obj))
query_job = self.bulk.create_query_job(obj, contentType="CSV")
batch = self.bulk.query(query_job, "select Id from {}".format(obj))
while not self.bulk.is_batch_done(batch, query_job):
time.sleep(10)
self.bulk.close_job(query_job)
delete_rows = []
for result in self.bulk.get_all_results_for_query_batch(batch, query_job):
reader = unicodecsv.DictReader(result, encoding="utf-8")
for row in reader:
delete_rows.append(row)
return delete_rows
def _split_batches(self, data, batch_size):
for i in range(0, len(data), batch_size):
yield data[i : i + batch_size]
def _upload_batches(self, job, data):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job)
headers = self.bulk.headers({"Content-Type": "text/csv"})
for batch in self._split_batches(data, 10000):
rows = ['"Id"']
rows += ['"{}"'.format(record["Id"]) for record in batch]
resp = requests.post(uri, data="\n".join(rows), headers=headers)
content = resp.content
if resp.status_code >= 400:
self.bulk.raise_error(content, resp.status_code)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.bulk.jobNS)
yield batch_id
class LoadData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "The database url to a database containing the test data to load",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"start_step": {
"description": "If specified, skip steps before this one in the mapping",
"required": False,
},
"sql_path": {
"description": "If specified, a database will be created from an SQL script at the provided path"
},
}
def _init_options(self, kwargs):
super(LoadData, self)._init_options(kwargs)
if self.options.get("sql_path"):
if self.options.get("database_url"):
raise TaskOptionsError(
"The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
)
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
if not os.path.isfile(self.options["sql_path"]):
raise TaskOptionsError(
"File {} does not exist".format(self.options["sql_path"])
)
self.logger.info("Using in-memory sqlite database")
self.options["database_url"] = "sqlite://"
def _run_task(self):
self._init_mapping()
self._init_db()
start_step = self.options.get("start_step")
started = False
for name, mapping in self.mapping.items():
if not started and start_step and name != start_step:
self.logger.info("Skipping step: {}".format(name))
continue
started = True
self.logger.info("Running Job: {}".format(name))
result = self._load_mapping(mapping)
if result != "Completed":
break
def _load_mapping(self, mapping):
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
job_id, local_ids_for_batch = self._create_job(mapping)
result = self._wait_for_job(job_id)
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
def _create_job(self, mapping):
job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
self.logger.info(" Created bulk job {}".format(job_id))
local_ids_for_batch = {}
for batch_file, local_ids in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(" Uploaded batch {}".format(batch_id))
self.bulk.close_job(job_id)
return job_id, local_ids_for_batch
def _get_batches(self, mapping, batch_size=10000):
action = mapping.get("action", "insert")
fields = mapping.get("fields", {}).copy()
static = mapping.get("static", {})
lookups = mapping.get("lookups", {})
record_type = mapping.get("record_type")
if action == "insert" and "Id" in fields:
del fields["Id"]
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append("RecordTypeId")
# query for the RT by developer name
query = (
"SELECT Id FROM RecordType WHERE SObjectType='{0}'"
"AND DeveloperName = '{1}' LIMIT 1"
)
record_type_id = self.sf.query(
query.format(mapping.get("sf_object"), record_type)
)["records"][0]["Id"]
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return batch_file, writer, batch_ids
batch_file, writer, batch_ids = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + list(static.values())
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
# Yield and start a new file every [batch_size] rows
if not total_rows % batch_size:
batch_file.seek(0)
self.logger.info(" Processing batch {}".format(batch_num))
yield batch_file, batch_ids
batch_file, writer, batch_ids = start_batch()
batch_num += 1
# Yield result file for final batch
if batch_ids:
batch_file.seek(0)
yield batch_file, batch_ids
self.logger.info(
" Prepared {} rows for import to {}".format(
total_rows, mapping["sf_object"]
)
)
def _query_db(self, mapping):
model = self.models[mapping.get("table")]
# Use primary key instead of the field mapped to SF Id
fields = mapping.get("fields", {}).copy()
if mapping["oid_as_pk"]:
del fields["Id"]
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get("lookups", {}).copy()
for lookup in lookups.values():
lookup["aliased_table"] = aliased(
self.metadata.tables["{}_sf_ids".format(lookup["table"])]
)
columns.append(lookup["aliased_table"].columns.sf_id)
query = self.session.query(*columns)
if "record_type" in mapping and hasattr(model, "record_type"):
query = query.filter(model.record_type == mapping["record_type"])
if "filters" in mapping:
filter_args = []
for f in mapping["filters"]:
filter_args.append(text(f))
query = query.filter(*filter_args)
for sf_field, lookup in lookups.items():
# Outer join with lookup ids table:
# returns main obj even if lookup is null
key_field = get_lookup_key_field(lookup, sf_field)
value_column = getattr(model, key_field)
query = query.outerjoin(
lookup["aliased_table"],
lookup["aliased_table"].columns.id == value_column,
)
# Order by foreign key to minimize lock contention
# by trying to keep lookup targets in the same batch
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
def _convert(self, value):
if value:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for batch_id, local_ids in local_ids_for_batch.items():
try:
results_url = "{}/job/{}/batch/{}/result".format(
self.bulk.endpoint, job_id, batch_id
)
# Download entire result file to a temporary file first
# to avoid the server dropping connections
with _download_file(results_url, self.bulk) as f:
self.logger.info(
" Downloaded results for batch {}".format(batch_id)
)
self._store_inserted_ids_for_batch(
f, local_ids, id_table_name, conn
)
self.logger.info(
" Updated {} for batch {}".format(id_table_name, batch_id)
)
except Exception: # pragma: nocover
# If we can't download one result file,
self.logger.error(
"Could not download batch results: {}".format(batch_id)
)
continue
self.session.commit()
def _reset_id_table(self, mapping):
if not hasattr(self, "_initialized_id_tables"):
self._initialized_id_tables = set()
id_table_name = "{}_sf_ids".format(mapping["table"])
if id_table_name not in self._initialized_id_tables:
if id_table_name in self.metadata.tables:
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(
id_table_name,
self.metadata,
Column("id", Unicode(255), primary_key=True),
Column("sf_id", Unicode(18)),
)
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
def _store_inserted_ids_for_batch(
self, result_file, local_ids, id_table_name, conn
):
# Set up a function to generate rows based on this result file
def produce_csv():
reader = unicodecsv.reader(result_file)
next(reader) # skip header
i = 0
for row, local_id in zip(reader, local_ids):
if row[1] == "true": # Success
sf_id = row[0]
yield "{},{}\n".format(local_id, sf_id).encode("utf-8")
else:
self.logger.warning(" Error on row {}: {}".format(i, row[3]))
i += 1
# Bulk insert rows into id table
columns = ("id", "sf_id")
data_file = IteratorBytesIO(produce_csv())
self._sql_bulk_insert_from_csv(conn, id_table_name, columns, data_file)
def _sqlite_load(self):
conn = self.session.connection()
cursor = conn.connection.cursor()
with open(self.options["sql_path"], "r") as f:
try:
cursor.executescript(f.read())
finally:
cursor.close()
# self.session.flush()
def _init_db(self):
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize the DB session
self.session = Session(self.engine)
if self.options.get("sql_path"):
self._sqlite_load()
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# Loop through mappings and reflect each referenced table
self.models = {}
for name, mapping in self.mapping.items():
if "table" in mapping and mapping["table"] not in self.models:
self.models[mapping["table"]] = self.base.classes[mapping["table"]]
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mapping = ordered_yaml_load(f)
class QueryData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "A DATABASE_URL where the query output should be written",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"sql_path": {
"description": "If set, an SQL script will be generated at the path provided "
+ "This is useful for keeping data in the repository and allowing diffs."
},
}
def _init_options(self, kwargs):
super(QueryData, self)._init_options(kwargs)
if self.options.get("sql_path"):
if self.options.get("database_url"):
raise TaskOptionsError(
"The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
)
self.logger.info("Using in-memory sqlite database")
self.options["database_url"] = "sqlite://"
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
def _run_task(self):
self._init_mapping()
self._init_db()
for mapping in self.mappings.values():
soql = self._soql_for_mapping(mapping)
self._run_query(soql, mapping)
self._drop_sf_id_columns()
if self.options.get("sql_path"):
self._sqlite_dump()
def _init_db(self):
self.models = {}
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# Create the tables
self._create_tables()
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# initialize session
self.session = create_session(bind=self.engine, autocommit=False)
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mappings = ordered_yaml_load(f)
def _soql_for_mapping(self, mapping):
sf_object = mapping["sf_object"]
fields = []
if not mapping["oid_as_pk"]:
fields.append("Id")
fields += [field["sf"] for field in self._fields_for_mapping(mapping)]
soql = "SELECT {fields} FROM {sf_object}".format(
**{"fields": ", ".join(fields), "sf_object": sf_object}
)
if "record_type" in mapping:
soql += " WHERE RecordType.DeveloperName = '{}'".format(
mapping["record_type"]
)
return soql
def _run_query(self, soql, mapping):
self.logger.info("Creating bulk job for: {sf_object}".format(**mapping))
job = self.bulk.create_query_job(mapping["sf_object"], contentType="CSV")
self.logger.info("Job id: {0}".format(job))
self.logger.info("Submitting query: {}".format(soql))
batch = self.bulk.query(job, soql)
self.logger.info("Batch id: {0}".format(batch))
self.bulk.wait_for_batch(job, batch)
self.logger.info("Batch {0} finished".format(batch))
self.bulk.close_job(job)
self.logger.info("Job {0} closed".format(job))
conn = self.session.connection()
for result_file in self._get_results(batch, job):
self._import_results(mapping, result_file, conn)
def _get_results(self, batch_id, job_id):
result_ids = self.bulk.get_query_batch_result_ids(batch_id, job_id=job_id)
for result_id in result_ids:
self.logger.info("Result id: {}".format(result_id))
uri = "{}/job/{}/batch/{}/result/{}".format(
self.bulk.endpoint, job_id, batch_id, result_id
)
with _download_file(uri, self.bulk) as f:
self.logger.info("Result {} downloaded".format(result_id))
yield f
def _import_results(self, mapping, result_file, conn):
# Map SF field names to local db column names
sf_header = [
name.strip('"')
for name in result_file.readline().strip().decode("utf-8").split(",")
]
columns = []
lookup_keys = []
for sf in sf_header:
if sf == "Records not found for this query":
return
if sf:
column = mapping.get("fields", {}).get(sf)
if not column:
lookup = mapping.get("lookups", {}).get(sf, {})
if lookup:
lookup_keys.append(sf)
column = get_lookup_key_field(lookup, sf)
if column:
columns.append(column)
if not columns:
return
record_type = mapping.get("record_type")
if record_type:
columns.append("record_type")
processor = log_progress(
process_incoming_rows(result_file, record_type), self.logger
)
data_file = IteratorBytesIO(processor)
if mapping["oid_as_pk"]:
self._sql_bulk_insert_from_csv(conn, mapping["table"], columns, data_file)
else:
# If using the autogenerated id field, split out the CSV file from the Bulk API
# into two separate files and load into the main table and the sf_id_table
with tempfile.TemporaryFile("w+b") as f_values:
with tempfile.TemporaryFile("w+b") as f_ids:
data_file_values, data_file_ids = self._split_batch_csv(
data_file, f_values, f_ids
)
self._sql_bulk_insert_from_csv(
conn, mapping["table"], columns, data_file_values
)
self._sql_bulk_insert_from_csv(
conn, mapping["sf_id_table"], ["sf_id"], data_file_ids
)
self.session.commit()
if lookup_keys and not mapping["oid_as_pk"]:
self._convert_lookups_to_id(mapping, lookup_keys)
def _get_mapping_for_table(self, table):
for mapping in self.mappings.values():
if mapping["table"] == table:
return mapping
def _split_batch_csv(self, data_file, f_values, f_ids):
writer_values = unicodecsv.writer(f_values)
writer_ids = unicodecsv.writer(f_ids)
for row in unicodecsv.reader(data_file):
writer_values.writerow(row[1:])
writer_ids.writerow([row[:1]])
f_values.seek(0)
f_ids.seek(0)
return f_values, f_ids
def _convert_lookups_to_id(self, mapping, lookup_keys):
for lookup_key in lookup_keys:
lookup_dict = mapping["lookups"][lookup_key]
model = self.models[mapping["table"]]
lookup_mapping = self._get_mapping_for_table(lookup_dict["table"])
lookup_model = self.models[lookup_mapping["sf_id_table"]]
key_field = get_lookup_key_field(lookup_dict, lookup_key)
key_attr = getattr(model, key_field)
try:
self.session.query(model).filter(
key_attr.isnot(None), key_attr == lookup_model.sf_id
).update({key_attr: lookup_model.id}, synchronize_session=False)
except NotImplementedError:
# Some databases such as sqlite don't support multitable update
mappings = []
for row, lookup_id in self.session.query(model, lookup_model.id).join(
lookup_model, key_attr == lookup_model.sf_id
):
mappings.append({"id": row.id, key_field: lookup_id})
self.session.bulk_update_mappings(model, mappings)
self.session.commit()
def _create_tables(self):
for mapping in self.mappings.values():
self._create_table(mapping)
self.metadata.create_all()
def _create_table(self, mapping):
model_name = "{}Model".format(mapping["table"])
mapper_kwargs = {}
table_kwargs = {}
self.models[mapping["table"]] = type(model_name, (object,), {})
# Provide support for legacy mappings which used the OID as the pk but
# default to using an autoincrementing int pk and a separate sf_id column
fields = []
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
if mapping["oid_as_pk"]:
id_column = mapping["fields"]["Id"]
fields.append(Column(id_column, Unicode(255), primary_key=True))
else:
fields.append(Column("id", Integer(), primary_key=True, autoincrement=True))
for field in self._fields_for_mapping(mapping):
if mapping["oid_as_pk"] and field["sf"] == "Id":
continue
fields.append(Column(field["db"], Unicode(255)))
if "record_type" in mapping:
fields.append(Column("record_type", Unicode(255)))
t = Table(mapping["table"], self.metadata, *fields, **table_kwargs)
if t.exists():
raise BulkDataException("Table already exists: {}".format(mapping["table"]))
if not mapping["oid_as_pk"]:
mapping["sf_id_table"] = mapping["table"] + "_sf_id"
# If multiple mappings point to the same table, don't recreate the table
if mapping["sf_id_table"] not in self.models:
sf_id_model_name = "{}Model".format(mapping["sf_id_table"])
self.models[mapping["sf_id_table"]] = type(
sf_id_model_name, (object,), {}
)
sf_id_fields = [
Column("id", Integer(), primary_key=True, autoincrement=True),
Column("sf_id", Unicode(24)),
]
id_t = Table(mapping["sf_id_table"], self.metadata, *sf_id_fields)
mapper(self.models[mapping["sf_id_table"]], id_t)
mapper(self.models[mapping["table"]], t, **mapper_kwargs)
def _fields_for_mapping(self, mapping):
fields = []
for sf_field, db_field in mapping.get("fields", {}).items():
fields.append({"sf": sf_field, "db": db_field})
for sf_field, lookup in mapping.get("lookups", {}).items():
fields.append(
{"sf": sf_field, "db": get_lookup_key_field(lookup, sf_field)}
)
return fields
def _drop_sf_id_columns(self):
for mapping in self.mappings.values():
if mapping.get("oid_as_pk"):
continue
self.metadata.tables[mapping["sf_id_table"]].drop()
def _sqlite_dump(self):
path = self.options["sql_path"]
if os.path.exists(path):
os.remove(path)
with open(path, "w") as f:
for line in self.session.connection().connection.iterdump():
f.write(line + "\n")
@contextmanager
def _download_file(uri, bulk_api):
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile("w+b") as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
yield f
def process_incoming_rows(f, record_type=None):
if record_type and not isinstance(record_type, bytes):
record_type = record_type.encode("utf-8")
for line in f:
if record_type:
yield line.rstrip() + b"," + record_type + b"\n"
else:
yield line
def get_lookup_key_field(lookup, sf_field):
return lookup.get("key_field", convert_to_snake_case(sf_field))
| true
| true
|
790d1c0e82f31a38771f3294625c990afbc23d97
| 5,141
|
py
|
Python
|
paper_results/PLEDGE/tax/data_transformation_scripts/create_tax_csv.py
|
john-doe-3141592653/XXX
|
d8840663fa73cc78281e7bd3a6df980e7440a3cc
|
[
"CECILL-B"
] | null | null | null |
paper_results/PLEDGE/tax/data_transformation_scripts/create_tax_csv.py
|
john-doe-3141592653/XXX
|
d8840663fa73cc78281e7bd3a6df980e7440a3cc
|
[
"CECILL-B"
] | null | null | null |
paper_results/PLEDGE/tax/data_transformation_scripts/create_tax_csv.py
|
john-doe-3141592653/XXX
|
d8840663fa73cc78281e7bd3a6df980e7440a3cc
|
[
"CECILL-B"
] | null | null | null |
import statistics as stat
import os
def array_to_string(array):
res = ""
for a in array:
res += str(a) + ";"
return res[:-1] + "\n"
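# Editor's illustrative sketch (not part of the original script): array_to_string
# joins the values with ';' and terminates the record with a newline.
def _example_array_to_string():  # pragma: no cover
    assert array_to_string([1, 2, 3]) == "1;2;3\n"
    assert array_to_string(["FR", "LU"]) == "FR;LU\n"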
for i in range(10):
for j in range(100):
none_id = ""
vision_id = ""
a_id = ""
fr_id = ""
lu_id = ""
de_id = ""
be_id = ""
other_id = ""
nb_tax_payer = -1
disability_type = []
is_resident = []
address = []
income = []
country = {}
is_local = {}
nb_none = -1
nb_vision = -1
nb_a = -1
line_counter = 0
previous_lines = []
save_line_address = 0
save_line_income = 0
tmp_address = []
tmp_income = []
with open("./" + str(i) + "/test_case_" + str(j) + "/tax.uml", "r") as f:
for line in f:
line_counter += 1
if "Tax_payer" in line:
nb_tax_payer += 1
if "None" in line and "ownedLiteral" in line:
none_id = line.split(" ")[5][8:][:-1]
elif "Vision" in line and "ownedLiteral" in line:
vision_id = line.split(" ")[5][8:][:-1]
elif "\"A\"" in line and "ownedLiteral" in line:
a_id = line.split(" ")[5][8:][:-1]
elif "\"FR\"" in line and "ownedLiteral" in line:
fr_id = line.split(" ")[5][8:][:-1]
elif "\"LU\"" in line and "ownedLiteral" in line:
lu_id = line.split(" ")[5][8:][:-1]
elif "\"DE\"" in line and "ownedLiteral" in line:
de_id = line.split(" ")[5][8:][:-1]
elif "\"BE\"" in line and "ownedLiteral" in line:
be_id = line.split(" ")[5][8:][:-1]
elif "\"Other\"" in line and "ownedLiteral" in line:
other_id = line.split(" ")[5][8:][:-1]
elif none_id != "" and none_id in line:
nb_none += 1
if nb_none >= 0:
disability_type.append("none")
elif vision_id != "" and vision_id in line:
nb_vision += 1
if nb_vision >= 0:
disability_type.append("vision")
elif a_id != "" and a_id in line:
nb_a += 1
if nb_a >= 0:
disability_type.append("a")
elif "is_resident" in line and not "ownedAttribute" in line:
if "value=" in line:
is_resident.append(True)
else:
is_resident.append(False)
elif "address" in line and "instance" in line:
tmp = line.split(" ")[10][10:][:-4]
if save_line_address == 0:
save_line_address = line_counter
if line_counter < save_line_address + 3:
tmp_address.append(tmp)
else:
address.append(tmp_address)
tmp_address = []
tmp_address.append(tmp)
save_line_address = line_counter
elif "country" in line:
if not " type" in line:
tmp = line.split(" ")[10][10:][:-4]
if tmp == fr_id:
tmp = "FR"
elif tmp == lu_id:
tmp = "LU"
elif tmp == de_id:
tmp = "DE"
elif tmp == be_id:
tmp = "BE"
else:  # other
tmp = "OTHER"
country[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "income" in line and "instance" in line and not "Tax_card" in previous_lines[1]:
tmp = line.split(" ")[10][10:][:-4]
if save_line_income == 0:
save_line_income = line_counter
if line_counter < save_line_income + 3:
tmp_income.append(tmp)
else:
income.append(tmp_income)
tmp_income = []
tmp_income.append(tmp)
save_line_income = line_counter
elif "is_local" in line and "Pension" in previous_lines[1]:
if "value=" in line:
tmp = ("P", line.split(" ")[-1][7:][:-4])
else:
tmp = ("P", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "is_local" in line and "Employment" in previous_lines[1]:
if "value=" in line:
tmp = ("E", line.split(" ")[-1][7:][:-4])
else:
tmp = ("E", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "is_local" in line and "Other" in previous_lines[1]:
if "value=" in line:
tmp = ("O", line.split(" ")[-1][7:][:-4])
else:
tmp = ("O", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
if len(previous_lines) > 2:
previous_lines = previous_lines[1:]
previous_lines.append(line)
address.append(tmp_address)
income.append(tmp_income)
with open("./" + str(i) + "/test_case_" + str(j) + "/tax.csv", "w") as f:
f.write(str(nb_tax_payer) + "\n")
for k in range(nb_tax_payer):
tmp = ""
tmp += "1920;"
if disability_type[k] == "none":
tmp += "0.0;"
else:
tmp += "1.0;"
tmp += disability_type[k] + ";"
tmp += str(is_resident[k]) + "\n"
for add in address[k]:
tmp += country[add] + ";"
tmp = tmp[:-1] + "\n0\n"
p = []
e = []
o = []
for inc in income[k]:
if is_local[inc][0] == "P":
p.append(is_local[inc][1])
elif is_local[inc][0] == "E":
e.append(is_local[inc][1])
else:
o.append(is_local[inc][1])
tmp += str(len(p)) + "\n"
if len(p) > 0:
for isloc in p:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
tmp += str(len(e)) + "\n"
if len(e) > 0:
for isloc in e:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
tmp += str(len(o)) + "\n"
if len(o) > 0:
for isloc in o:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
f.write(tmp)
| 27.491979
| 89
| 0.536082
|
import statistics as stat
import os
def array_to_string(array):
res = ""
for a in array:
res += str(a) + ";"
return res[:-1] + "\n"
for i in range(10):
for j in range(100):
none_id = ""
vision_id = ""
a_id = ""
fr_id = ""
lu_id = ""
de_id = ""
be_id = ""
other_id = ""
nb_tax_payer = -1
disability_type = []
is_resident = []
address = []
income = []
country = {}
is_local = {}
nb_none = -1
nb_vision = -1
nb_a = -1
line_counter = 0
previous_lines = []
save_line_address = 0
save_line_income = 0
tmp_address = []
tmp_income = []
with open("./" + str(i) + "/test_case_" + str(j) + "/tax.uml", "r") as f:
for line in f:
line_counter += 1
if "Tax_payer" in line:
nb_tax_payer += 1
if "None" in line and "ownedLiteral" in line:
none_id = line.split(" ")[5][8:][:-1]
elif "Vision" in line and "ownedLiteral" in line:
vision_id = line.split(" ")[5][8:][:-1]
elif "\"A\"" in line and "ownedLiteral" in line:
a_id = line.split(" ")[5][8:][:-1]
elif "\"FR\"" in line and "ownedLiteral" in line:
fr_id = line.split(" ")[5][8:][:-1]
elif "\"LU\"" in line and "ownedLiteral" in line:
lu_id = line.split(" ")[5][8:][:-1]
elif "\"DE\"" in line and "ownedLiteral" in line:
de_id = line.split(" ")[5][8:][:-1]
elif "\"BE\"" in line and "ownedLiteral" in line:
be_id = line.split(" ")[5][8:][:-1]
elif "\"Other\"" in line and "ownedLiteral" in line:
other_id = line.split(" ")[5][8:][:-1]
elif none_id != "" and none_id in line:
nb_none += 1
if nb_none >= 0:
disability_type.append("none")
elif vision_id != "" and vision_id in line:
nb_vision += 1
if nb_vision >= 0:
disability_type.append("vision")
elif a_id != "" and a_id in line:
nb_a += 1
if nb_a >= 0:
disability_type.append("a")
elif "is_resident" in line and not "ownedAttribute" in line:
if "value=" in line:
is_resident.append(True)
else:
is_resident.append(False)
elif "address" in line and "instance" in line:
tmp = line.split(" ")[10][10:][:-4]
if save_line_address == 0:
save_line_address = line_counter
if line_counter < save_line_address + 3:
tmp_address.append(tmp)
else:
address.append(tmp_address)
tmp_address = []
tmp_address.append(tmp)
save_line_address = line_counter
elif "country" in line:
if not " type" in line:
tmp = line.split(" ")[10][10:][:-4]
if tmp == fr_id:
tmp = "FR"
elif tmp == lu_id:
tmp = "LU"
elif tmp == de_id:
tmp = "DE"
elif tmp == be_id:
tmp = "BE"
else:
tmp = "OTHER"
country[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "income" in line and "instance" in line and not "Tax_card" in previous_lines[1]:
tmp = line.split(" ")[10][10:][:-4]
if save_line_income == 0:
save_line_income = line_counter
if line_counter < save_line_income + 3:
tmp_income.append(tmp)
else:
income.append(tmp_income)
tmp_income = []
tmp_income.append(tmp)
save_line_income = line_counter
elif "is_local" in line and "Pension" in previous_lines[1]:
if "value=" in line:
tmp = ("P", line.split(" ")[-1][7:][:-4])
else:
tmp = ("P", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "is_local" in line and "Employment" in previous_lines[1]:
if "value=" in line:
tmp = ("E", line.split(" ")[-1][7:][:-4])
else:
tmp = ("E", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "is_local" in line and "Other" in previous_lines[1]:
if "value=" in line:
tmp = ("O", line.split(" ")[-1][7:][:-4])
else:
tmp = ("O", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
if len(previous_lines) > 2:
previous_lines = previous_lines[1:]
previous_lines.append(line)
address.append(tmp_address)
income.append(tmp_income)
with open("./" + str(i) + "/test_case_" + str(j) + "/tax.csv", "w") as f:
f.write(str(nb_tax_payer) + "\n")
for k in range(nb_tax_payer):
tmp = ""
tmp += "1920;"
if disability_type[k] == "none":
tmp += "0.0;"
else:
tmp += "1.0;"
tmp += disability_type[k] + ";"
tmp += str(is_resident[k]) + "\n"
for add in address[k]:
tmp += country[add] + ";"
tmp = tmp[:-1] + "\n0\n"
p = []
e = []
o = []
for inc in income[k]:
if is_local[inc][0] == "P":
p.append(is_local[inc][1])
elif is_local[inc][0] == "E":
e.append(is_local[inc][1])
else:
o.append(is_local[inc][1])
tmp += str(len(p)) + "\n"
if len(p) > 0:
for isloc in p:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
tmp += str(len(e)) + "\n"
if len(e) > 0:
for isloc in e:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
tmp += str(len(o)) + "\n"
if len(o) > 0:
for isloc in o:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
f.write(tmp)
| false
| true
|
790d1c7d35bc3f38aa1958cb1f817e9d71b575aa
| 43,253
|
py
|
Python
|
pipenv/utils.py
|
bryant1410/pipenv
|
5cdf493dbae431fc486b953c4279b04b0837c95b
|
[
"MIT"
] | null | null | null |
pipenv/utils.py
|
bryant1410/pipenv
|
5cdf493dbae431fc486b953c4279b04b0837c95b
|
[
"MIT"
] | null | null | null |
pipenv/utils.py
|
bryant1410/pipenv
|
5cdf493dbae431fc486b953c4279b04b0837c95b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import errno
import os
import re
import hashlib
import tempfile
import sys
import shutil
import logging
import click
import crayons
import delegator
import parse
import requests
import six
import stat
import warnings
try:
from weakref import finalize
except ImportError:
try:
from .vendor.backports.weakref import finalize
except ImportError:
class finalize(object):
def __init__(self, *args, **kwargs):
logging.warn('weakref.finalize unavailable, not cleaning...')
def detach(self):
return False
from time import time
logging.basicConfig(level=logging.ERROR)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
except ImportError:
try:
from .vendor.pathlib2 import Path
except ImportError:
pass
from distutils.spawn import find_executable
from contextlib import contextmanager
from .patched.piptools.resolver import Resolver
from .patched.piptools.repositories.pypi import PyPIRepository
from .patched.piptools.scripts.compile import get_pip_command
from .patched.piptools import logging as piptools_logging
from .patched.piptools.exceptions import NoCandidateFound
from .vendor.pip9.download import is_archive_file
from .vendor.pip9.exceptions import DistributionNotFound
from .vendor.pip9.index import Link
from .vendor.pip9._vendor.requests.exceptions import HTTPError, ConnectionError
from .pep508checker import lookup
from .environments import PIPENV_MAX_ROUNDS, PIPENV_CACHE_DIR
if six.PY2:
class ResourceWarning(Warning):
pass
specifiers = [k for k in lookup.keys()]
# List of version control systems we support.
VCS_LIST = ('git', 'svn', 'hg', 'bzr')
SCHEME_LIST = ('http://', 'https://', 'ftp://', 'ftps://', 'file://')
requests = requests.Session()
def get_requirement(dep):
from .vendor.pip9.req.req_install import _strip_extras, Wheel
from .vendor import requirements
"""Pre-clean requirement strings passed to the requirements parser.
Ensures that we can accept both local and relative paths, file and VCS URIs,
remote URIs, and package names, and that we pass only valid requirement strings
to the requirements parser. Performs necessary modifications to requirements
object if the user input was a local relative path.
:param str dep: A requirement line
:returns: :class:`requirements.Requirement` object
"""
path = None
uri = None
cleaned_uri = None
editable = False
dep_link = None
# check for editable dep / vcs dep
if dep.startswith('-e '):
editable = True
# Use the user supplied path as the written dependency
dep = dep.split(' ', 1)[1]
# Split out markers if they are present - similar to how pip does it
# See pip9.req.req_install.InstallRequirement.from_line
if not any(dep.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
marker_sep = ';'
else:
marker_sep = '; '
if marker_sep in dep:
dep, markers = dep.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
# Strip extras from the requirement so we can make a properly parseable req
dep, extras = _strip_extras(dep)
# Only operate on local, existing, non-URI formatted paths which are installable
if is_installable_file(dep):
dep_path = Path(dep)
dep_link = Link(dep_path.absolute().as_uri())
if dep_path.is_absolute() or dep_path.as_posix() == '.':
path = dep_path.as_posix()
else:
path = get_converted_relative_path(dep)
dep = dep_link.egg_fragment if dep_link.egg_fragment else dep_link.url_without_fragment
elif is_vcs(dep):
# Generate a Link object for parsing egg fragments
dep_link = Link(dep)
# Save the original path to store in the pipfile
uri = dep_link.url
# Construct the requirement using proper git+ssh:// replaced uris or names if available
cleaned_uri = clean_git_uri(dep)
dep = cleaned_uri
if editable:
dep = '-e {0}'.format(dep)
req = [r for r in requirements.parse(dep)][0]
# if all we built was the requirement name and still need everything else
if req.name and not any([req.uri, req.path]):
if dep_link:
if dep_link.scheme.startswith('file') and path and not req.path:
req.path = path
req.local_file = True
req.uri = None
else:
req.uri = dep_link.url_without_fragment
# If the result is a local file with a URI and we have a local path, unset the URI
# and set the path instead -- note that local files may have 'path' set by accident
elif req.local_file and path and not req.vcs:
req.path = path
req.uri = None
if dep_link and dep_link.is_wheel and not req.name:
req.name = os.path.basename(Wheel(dep_link.path).name)
elif req.vcs and req.uri and cleaned_uri and cleaned_uri != uri:
req.uri = strip_ssh_from_git_uri(req.uri)
req.line = strip_ssh_from_git_uri(req.line)
req.editable = editable
if markers:
req.markers = markers
if extras:
# Bizarrely this is also what pip does...
req.extras = [
r for r in requirements.parse('fakepkg{0}'.format(extras))
][
0
].extras
return req
def cleanup_toml(tml):
toml = tml.split('\n')
new_toml = []
# Remove all empty lines from TOML.
for line in toml:
if line.strip():
new_toml.append(line)
toml = '\n'.join(new_toml)
new_toml = []
# Add newlines between TOML sections.
for i, line in enumerate(toml.split('\n')):
# Skip the first line.
if line.startswith('['):
if i > 0:
# Insert a newline before the heading.
new_toml.append('')
new_toml.append(line)
# adding new line at the end of the TOML file
new_toml.append('')
toml = '\n'.join(new_toml)
return toml
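# Editor's illustrative sketch (not part of the original module): cleanup_toml drops
# blank lines and then reinserts exactly one blank line before each section header.
def _example_cleanup_toml():  # pragma: no cover
    messy = '[packages]\n\nrequests = "*"\n[dev-packages]\npytest = "*"\n'
    tidy = '[packages]\nrequests = "*"\n\n[dev-packages]\npytest = "*"\n'
    assert cleanup_toml(messy) == tidy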
def parse_python_version(output):
"""Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
"""
version_pattern = re.compile(r'''
^ # Beginning of line.
Python # Literally "Python".
\s # Space.
(?P<major>\d+) # Major = one or more digits.
\. # Dot.
(?P<minor>\d+) # Minor = one or more digits.
(?: # Unnamed group for dot-micro.
\. # Dot.
(?P<micro>\d+) # Micro = one or more digit.
)? # Micro is optional because pypa/pipenv#1893.
.* # Trailing garbage.
$ # End of line.
''', re.VERBOSE)
match = version_pattern.match(output)
if not match:
return None
return match.groupdict(default='0')
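# Editor's illustrative sketch (not part of the original module): the parser returns
# string version parts, filling in '0' for a missing micro part, or None on no match.
def _example_parse_python_version():  # pragma: no cover
    assert parse_python_version("Python 3.6.2") == {"major": "3", "minor": "6", "micro": "2"}
    assert parse_python_version("Python 2.7") == {"major": "2", "minor": "7", "micro": "0"}
    assert parse_python_version("not a version") is None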
def python_version(path_to_python):
if not path_to_python:
return None
try:
c = delegator.run([path_to_python, '--version'], block=False)
except Exception:
return None
c.block()
version = parse_python_version(c.out.strip() or c.err.strip())
try:
version = u'{major}.{minor}.{micro}'.format(**version)
except TypeError:
return None
return version
def escape_grouped_arguments(s):
"""Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
"""
if s is None:
return None
# Additional escaping for windows paths
if os.name == 'nt':
s = "{}".format(s.replace("\\", "\\\\"))
return '"' + s.replace("'", "'\\''") + '"'
def clean_pkg_version(version):
"""Uses pip to prepare a package version string, from our internal version."""
return six.u(pep440_version(str(version).replace('==', '')))
class HackedPythonVersion(object):
"""A Beautiful hack, which allows us to tell pip which version of Python we're using."""
def __init__(self, python_version, python_path):
self.python_version = python_version
self.python_path = python_path
def __enter__(self):
os.environ['PIP_PYTHON_VERSION'] = str(self.python_version)
os.environ['PIP_PYTHON_PATH'] = str(self.python_path)
def __exit__(self, *args):
# Restore original Python version information.
del os.environ['PIP_PYTHON_VERSION']
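# Editor's illustrative sketch (not part of the original module): the context manager
# publishes the target Python via environment variables for pip/pip-tools to read; the
# version and path below are invented. Note that __exit__ above only removes
# PIP_PYTHON_VERSION, not PIP_PYTHON_PATH.
def _example_hacked_python_version():  # pragma: no cover
    with HackedPythonVersion(python_version="3.6.5", python_path="/usr/bin/python3.6"):
        assert os.environ["PIP_PYTHON_VERSION"] == "3.6.5"
        assert os.environ["PIP_PYTHON_PATH"] == "/usr/bin/python3.6"
    assert "PIP_PYTHON_VERSION" not in os.environ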
def prepare_pip_source_args(sources, pip_args=None):
if pip_args is None:
pip_args = []
if sources:
# Add the source to pip9.
pip_args.extend(['-i', sources[0]['url']])
# Trust the host if it's not verified.
if not sources[0].get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(sources[0]['url']).netloc.split(':')[0],
]
)
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
pip_args.extend(['--extra-index-url', source['url']])
# Trust the host if it's not verified.
if not source.get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(source['url']).hostname,
]
)
return pip_args
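# Editor's illustrative sketch (not part of the original module): the first source
# becomes the primary index and every additional source an extra index, with
# --trusted-host added for sources that disable SSL verification (URLs invented).
def _example_prepare_pip_source_args():  # pragma: no cover
    sources = [
        {"url": "https://pypi.org/simple", "verify_ssl": True, "name": "pypi"},
        {"url": "http://pypi.example.local:8080/simple", "verify_ssl": False, "name": "internal"},
    ]
    assert prepare_pip_source_args(sources) == [
        "-i", "https://pypi.org/simple",
        "--extra-index-url", "http://pypi.example.local:8080/simple",
        "--trusted-host", "pypi.example.local",
    ]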
def actually_resolve_reps(
deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre
):
from pip9 import basecommand, req
from pip9._vendor import requests as pip_requests
class PipCommand(basecommand.Command):
"""Needed for pip-tools."""
name = 'PipCommand'
constraints = []
req_dir = tempfile.mkdtemp(prefix='pipenv-', suffix='-requirements')
for dep in deps:
if dep:
if dep.startswith('-e '):
constraint = req.InstallRequirement.from_editable(
dep[len('-e '):]
)
else:
fd, t = tempfile.mkstemp(
prefix='pipenv-', suffix='-requirement.txt', dir=req_dir
)
with os.fdopen(fd, 'w') as f:
f.write(dep)
constraint = [
c for c in req.parse_requirements(t, session=pip_requests)
][
0
]
# extra_constraints = []
if ' -i ' in dep:
index_lookup[constraint.name] = project.get_source(
url=dep.split(' -i ')[1]
).get(
'name'
)
if constraint.markers:
markers_lookup[constraint.name] = str(
constraint.markers
).replace(
'"', "'"
)
constraints.append(constraint)
rmtree(req_dir)
pip_command = get_pip_command()
pip_args = []
if sources:
pip_args = prepare_pip_source_args(sources, pip_args)
if verbose:
print('Using pip: {0}'.format(' '.join(pip_args)))
pip_options, _ = pip_command.parse_args(pip_args)
session = pip_command._build_session(pip_options)
pypi = PyPIRepository(
pip_options=pip_options, use_json=False, session=session
)
if verbose:
logging.log.verbose = True
piptools_logging.log.verbose = True
resolved_tree = set()
resolver = Resolver(
constraints=constraints,
repository=pypi,
clear_caches=clear,
prereleases=pre,
)
# pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages
try:
resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS))
except (NoCandidateFound, DistributionNotFound, HTTPError) as e:
click.echo(
'{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\n '
'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.'
''.format(
crayons.red('Warning', bold=True),
crayons.red('$ pipenv install --skip-lock'),
crayons.red('$ pipenv graph'),
),
err=True,
)
click.echo(crayons.blue(str(e)), err=True)
if 'no version found at all' in str(e):
click.echo(
crayons.blue(
'Please check your version specifier and version number. See PEP440 for more information.'
)
)
raise RuntimeError
return resolved_tree, resolver
def venv_resolve_deps(
deps, which, project, pre=False, verbose=False, clear=False, allow_global=False
):
from . import resolver
import json
resolver = escape_grouped_arguments(resolver.__file__.rstrip('co'))
cmd = '{0} {1} {2} {3} {4} {5}'.format(
escape_grouped_arguments(which('python')),
resolver,
'--pre' if pre else '',
'--verbose' if verbose else '',
'--clear' if clear else '',
'--system' if allow_global else '',
)
os.environ['PIPENV_PACKAGES'] = '\n'.join(deps)
c = delegator.run(cmd, block=True)
del os.environ['PIPENV_PACKAGES']
try:
assert c.return_code == 0
except AssertionError:
if verbose:
click.echo(c.out, err=True)
click.echo(c.err, err=True)
else:
click.echo(c.err[int(len(c.err) / 2) - 1:], err=True)
sys.exit(c.return_code)
if verbose:
click.echo(c.out.split('RESULTS:')[0], err=True)
try:
return json.loads(c.out.split('RESULTS:')[1].strip())
except IndexError:
raise RuntimeError('There was a problem with locking.')
def resolve_deps(
deps,
which,
project,
sources=None,
verbose=False,
python=False,
clear=False,
pre=False,
allow_global=False,
):
"""Given a list of dependencies, return a resolved list of dependencies,
using pip-tools -- and their hashes, using the warehouse API / pip9.
"""
index_lookup = {}
markers_lookup = {}
python_path = which('python', allow_global=allow_global)
backup_python_path = sys.executable
results = []
# First (proper) attempt:
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
# Don't exit here, like usual.
resolved_tree = None
# Second (last-resort) attempt:
if resolved_tree is None:
with HackedPythonVersion(
python_version='.'.join([str(s) for s in sys.version_info[:3]]),
python_path=backup_python_path,
):
try:
# Attempt to resolve again, with different Python version information,
# particularly for particularly particular packages.
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
sys.exit(1)
for result in resolved_tree:
if not result.editable:
name = pep423_name(result.name)
version = clean_pkg_version(result.specifier)
index = index_lookup.get(result.name)
if not markers_lookup.get(result.name):
markers = str(
result.markers
) if result.markers and 'extra' not in str(
result.markers
) else None
else:
markers = markers_lookup.get(result.name)
collected_hashes = []
if any('python.org' in source['url'] or 'pypi.org' in source['url']
for source in sources):
try:
# Grab the hashes from the new warehouse API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(name),
timeout=10,
)
api_releases = r.json()['releases']
cleaned_releases = {}
for api_version, api_info in api_releases.items():
cleaned_releases[
clean_pkg_version(api_version)
] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release['digests']['sha256'])
collected_hashes = [
'sha256:' + s for s in collected_hashes
]
except (ValueError, KeyError, ConnectionError):
if verbose:
click.echo(
'{0}: Error generating hash for {1}'.format(
crayons.red('Warning', bold=True), name
)
)
# Collect un-collectable hashes (should work with devpi).
try:
collected_hashes = collected_hashes + list(
list(resolver.resolve_hashes([result]).items())[0][1]
)
except (ValueError, KeyError, ConnectionError, IndexError):
if verbose:
print('Error generating hash for {}'.format(name))
collected_hashes = sorted(set(collected_hashes))
d = {'name': name, 'version': version, 'hashes': collected_hashes}
if index:
d.update({'index': index})
if markers:
d.update({'markers': markers.replace('"', "'")})
results.append(d)
return results
def multi_split(s, split):
"""Splits on multiple given separators."""
for r in split:
s = s.replace(r, '|')
return [i for i in s.split('|') if len(i) > 0]
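# Illustrative example: splitting a pip-style specifier on its comparison
# operators, e.g. multi_split('requests>=2.18.4', '!=<>~') is expected to
# return ['requests', '2.18.4'].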
def convert_deps_from_pip(dep):
""""Converts a pip-formatted dependency to a Pipfile-formatted one."""
dependency = {}
req = get_requirement(dep)
extras = {'extras': req.extras}
# File installs.
if (req.uri or req.path or is_installable_file(req.name)) and not req.vcs:
        # Assign a package name to the file: the last 7 characters of its sha256 hex digest.
if not req.uri and not req.path:
req.path = os.path.abspath(req.name)
hashable_path = req.uri if req.uri else req.path
if not req.name:
req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
req.name = req.name[len(req.name) - 7:]
# {path: uri} TOML (spec 4 I guess...)
if req.uri:
dependency[req.name] = {'file': hashable_path}
else:
dependency[req.name] = {'path': hashable_path}
if req.extras:
dependency[req.name].update(extras)
# Add --editable if applicable
if req.editable:
dependency[req.name].update({'editable': True})
# VCS Installs.
elif req.vcs:
if req.name is None:
raise ValueError(
'pipenv requires an #egg fragment for version controlled '
'dependencies. Please install remote dependency '
'in the form {0}#egg=<package-name>.'.format(req.uri)
)
# Crop off the git+, etc part.
if req.uri.startswith('{0}+'.format(req.vcs)):
req.uri = req.uri[len(req.vcs) + 1:]
dependency.setdefault(req.name, {}).update({req.vcs: req.uri})
# Add --editable, if it's there.
if req.editable:
dependency[req.name].update({'editable': True})
# Add subdirectory, if it's there
if req.subdirectory:
dependency[req.name].update({'subdirectory': req.subdirectory})
# Add the specifier, if it was provided.
if req.revision:
dependency[req.name].update({'ref': req.revision})
# Extras: e.g. #egg=requests[security]
if req.extras:
dependency[req.name].update({'extras': req.extras})
elif req.extras or req.specs or hasattr(req, 'markers'):
specs = None
# Comparison operators: e.g. Django>1.10
if req.specs:
r = multi_split(dep, '!=<>~')
specs = dep[len(r[0]):]
dependency[req.name] = specs
# Extras: e.g. requests[socks]
if req.extras:
dependency[req.name] = extras
if specs:
dependency[req.name].update({'version': specs})
if hasattr(req, 'markers'):
if isinstance(dependency[req.name], six.string_types):
dependency[req.name] = {'version': specs}
dependency[req.name].update({'markers': req.markers})
# Bare dependencies: e.g. requests
else:
dependency[dep] = '*'
# Cleanup when there's multiple values, e.g. -e.
if len(dependency) > 1:
for key in dependency.copy():
if not hasattr(dependency[key], 'keys'):
del dependency[key]
return dependency
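# Illustrative (assumed) behaviour: a pinned requirement such as 'records==0.5.0'
# is expected to map to a Pipfile-style entry roughly like {'records': '==0.5.0'},
# while an editable VCS requirement like '-e git+<uri>#egg=requests' keeps its vcs
# key, roughly {'requests': {'git': '<uri>', 'editable': True}}.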
def is_star(val):
return isinstance(val, six.string_types) and val == '*'
def is_pinned(val):
return isinstance(val, six.string_types) and val.startswith('==')
def convert_deps_to_pip(deps, project=None, r=True, include_index=False):
""""Converts a Pipfile-formatted dependency to a pip-formatted one."""
dependencies = []
for dep in deps.keys():
# Default (e.g. '>1.10').
extra = deps[dep] if isinstance(deps[dep], six.string_types) else ''
version = ''
index = ''
# Get rid of '*'.
if is_star(deps[dep]) or str(extra) == '{}':
extra = ''
hash = ''
# Support for single hash (spec 1).
if 'hash' in deps[dep]:
hash = ' --hash={0}'.format(deps[dep]['hash'])
# Support for multiple hashes (spec 2).
if 'hashes' in deps[dep]:
hash = '{0} '.format(
''.join(
[' --hash={0} '.format(h) for h in deps[dep]['hashes']]
)
)
# Support for extras (e.g. requests[socks])
if 'extras' in deps[dep]:
extra = '[{0}]'.format(','.join(deps[dep]['extras']))
if 'version' in deps[dep]:
if not is_star(deps[dep]['version']):
version = deps[dep]['version']
# For lockfile format.
if 'markers' in deps[dep]:
specs = '; {0}'.format(deps[dep]['markers'])
else:
# For pipfile format.
specs = []
for specifier in specifiers:
if specifier in deps[dep]:
if not is_star(deps[dep][specifier]):
specs.append(
'{0} {1}'.format(specifier, deps[dep][specifier])
)
if specs:
specs = '; {0}'.format(' and '.join(specs))
else:
specs = ''
if include_index and not is_file(deps[dep]) and not is_vcs(deps[dep]):
pip_src_args = []
if 'index' in deps[dep]:
pip_src_args = [project.get_source(deps[dep]['index'])]
else:
pip_src_args = project.sources
pip_args = prepare_pip_source_args(pip_src_args)
index = ' '.join(pip_args)
# Support for version control
maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]]
vcs = maybe_vcs[0] if maybe_vcs else None
# Support for files.
if 'file' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
# Support for paths.
elif 'path' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
if vcs:
extra = '{0}+{1}'.format(vcs, deps[dep][vcs])
# Support for @refs.
if 'ref' in deps[dep]:
extra += '@{0}'.format(deps[dep]['ref'])
extra += '#egg={0}'.format(dep)
# Support for subdirectory
if 'subdirectory' in deps[dep]:
extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])
# Support for editable.
if 'editable' in deps[dep]:
# Support for --egg.
dep = '-e '
else:
dep = ''
s = '{0}{1}{2}{3}{4} {5}'.format(
dep, extra, version, specs, hash, index
).strip()
dependencies.append(s)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)
f.write('\n'.join(dependencies).encode('utf-8'))
f.close()
return f.name
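# Illustrative (assumed) behaviour: with r=False, an entry such as
# {'requests': {'extras': ['socks'], 'version': '>=2.18.4'}} is expected to be
# rendered as the single pip line 'requests[socks]>=2.18.4'.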
def mkdir_p(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError(
"a file with the same name as the desired dir, '{0}', already exists.".format(
newdir
)
)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir_p(head)
if tail:
os.mkdir(newdir)
def is_required_version(version, specified_version):
"""Check to see if there's a hard requirement for version
number provided in the Pipfile.
"""
# Certain packages may be defined with multiple values.
if isinstance(specified_version, dict):
specified_version = specified_version.get('version', '')
if specified_version.startswith('=='):
return version.strip() == specified_version.split('==')[1].strip()
return True
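# Illustrative examples: is_required_version('1.11.0', '==1.11.0') is expected to be
# True, is_required_version('1.10.0', '==1.11.0') False, and any non-pinned
# specifier (e.g. '>=1.10') is treated as imposing no hard requirement (True).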
def strip_ssh_from_git_uri(uri):
"""Return git+ssh:// formatted URI to git+git@ format"""
if isinstance(uri, six.string_types):
uri = uri.replace('git+ssh://', 'git+')
return uri
def clean_git_uri(uri):
"""Cleans VCS uris from pip9 format"""
if isinstance(uri, six.string_types):
# Add scheme for parsing purposes, this is also what pip does
if uri.startswith('git+') and '://' not in uri:
uri = uri.replace('git+', 'git+ssh://')
return uri
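# Illustrative round trip: clean_git_uri('git+git@github.com:kennethreitz/requests.git')
# is expected to yield 'git+ssh://git@github.com:kennethreitz/requests.git', and
# strip_ssh_from_git_uri() reverses that substitution.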
def is_editable(pipfile_entry):
if hasattr(pipfile_entry, 'get'):
return pipfile_entry.get('editable', False) and any(
pipfile_entry.get(key) for key in ('file', 'path') + VCS_LIST
)
return False
def is_vcs(pipfile_entry):
from .vendor import requirements
"""Determine if dictionary entry from Pipfile is for a vcs dependency."""
if hasattr(pipfile_entry, 'keys'):
return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
elif isinstance(pipfile_entry, six.string_types):
return bool(
requirements.requirement.VCS_REGEX.match(
clean_git_uri(pipfile_entry)
)
)
return False
def is_installable_file(path):
"""Determine if a path can potentially be installed"""
from .vendor.pip9.utils import is_installable_dir
from .vendor.pip9.utils.packaging import specifiers
if hasattr(path, 'keys') and any(
key for key in path.keys() if key in ['file', 'path']
):
path = urlparse(path['file']).path if 'file' in path else path['path']
if not isinstance(path, six.string_types) or path == '*':
return False
# If the string starts with a valid specifier operator, test if it is a valid
# specifier set before making a path object (to avoid breaking windows)
if any(path.startswith(spec) for spec in '!=<>~'):
try:
specifiers.SpecifierSet(path)
# If this is not a valid specifier, just move on and try it as a path
except specifiers.InvalidSpecifier:
pass
else:
return False
if not os.path.exists(os.path.abspath(path)):
return False
lookup_path = Path(path)
absolute_path = '{0}'.format(lookup_path.absolute())
if lookup_path.is_dir() and is_installable_dir(absolute_path):
return True
elif lookup_path.is_file() and is_archive_file(absolute_path):
return True
return False
def is_file(package):
"""Determine if a package name is for a File dependency."""
if hasattr(package, 'keys'):
return any(key for key in package.keys() if key in ['file', 'path'])
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
def pep440_version(version):
"""Normalize version to PEP 440 standards"""
from .vendor.pip9.index import parse_version
# Use pip built-in version parser.
return str(parse_version(version))
def pep423_name(name):
"""Normalize package name to PEP 423 style standard."""
name = name.lower()
if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
return name.replace('_', '-')
else:
return name
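# Illustrative example: pep423_name('Flask_SQLAlchemy') is expected to return
# 'flask-sqlalchemy' (lower-cased, with underscores replaced by hyphens).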
def proper_case(package_name):
"""Properly case project name from pypi.org."""
# Hit the simple API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(package_name),
timeout=0.3,
stream=True,
)
if not r.ok:
raise IOError(
'Unable to find package {0} in PyPI repository.'.format(
package_name
)
)
r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)
good_name = r['name']
return good_name
def split_section(input_file, section_suffix, test_function):
"""
Split a pipfile or a lockfile section out by section name and test function
:param dict input_file: A dictionary containing either a pipfile or lockfile
:param str section_suffix: A string of the name of the section
:param func test_function: A test function to test against the value in the key/value pair
>>> split_section(my_lockfile, 'vcs', is_vcs)
{
'default': {
"six": {
"hashes": [
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
],
"version": "==1.11.0"
}
},
'default-vcs': {
"e1839a8": {
"editable": true,
"path": "."
}
}
}
"""
pipfile_sections = ('packages', 'dev-packages')
lockfile_sections = ('default', 'develop')
if any(section in input_file for section in pipfile_sections):
sections = pipfile_sections
elif any(section in input_file for section in lockfile_sections):
sections = lockfile_sections
else:
# return the original file if we can't find any pipfile or lockfile sections
return input_file
for section in sections:
split_dict = {}
entries = input_file.get(section, {})
for k in list(entries.keys()):
if test_function(entries.get(k)):
split_dict[k] = entries.pop(k)
input_file['-'.join([section, section_suffix])] = split_dict
return input_file
def split_file(file_dict):
"""Split VCS and editable dependencies out from file."""
sections = {
'vcs': is_vcs,
'editable': lambda x: hasattr(x, 'keys') and x.get('editable'),
}
for k, func in sections.items():
file_dict = split_section(file_dict, k, func)
return file_dict
def merge_deps(
file_dict,
project,
dev=False,
requirements=False,
ignore_hashes=False,
blocking=False,
only=False,
):
"""
Given a file_dict, merges dependencies and converts them to pip dependency lists.
:param dict file_dict: The result of calling :func:`pipenv.utils.split_file`
:param :class:`pipenv.project.Project` project: Pipenv project
:param bool dev=False: Flag indicating whether dev dependencies are to be installed
:param bool requirements=False: Flag indicating whether to use a requirements file
:param bool ignore_hashes=False:
:param bool blocking=False:
:param bool only=False:
    :return: A pair of lists of pip-converted 3-tuples: (deps, requirements_deps)
"""
deps = []
requirements_deps = []
for section in list(file_dict.keys()):
# Turn develop-vcs into ['develop', 'vcs']
section_name, suffix = section.rsplit(
'-', 1
) if '-' in section and not section == 'dev-packages' else (
section, None
)
if not file_dict[section] or section_name not in (
'dev-packages', 'packages', 'default', 'develop'
):
continue
is_dev = section_name in ('dev-packages', 'develop')
if is_dev and not dev:
continue
if ignore_hashes:
            for k, v in file_dict[section].items():
if 'hash' in v:
del v['hash']
# Block and ignore hashes for all suffixed sections (vcs/editable)
no_hashes = True if suffix else ignore_hashes
block = True if suffix else blocking
include_index = True if not suffix else False
converted = convert_deps_to_pip(
file_dict[section], project, r=False, include_index=include_index
)
deps.extend((d, no_hashes, block) for d in converted)
if dev and is_dev and requirements:
requirements_deps.extend((d, no_hashes, block) for d in converted)
return deps, requirements_deps
def recase_file(file_dict):
"""Recase file before writing to output."""
if 'packages' in file_dict or 'dev-packages' in file_dict:
sections = ('packages', 'dev-packages')
elif 'default' in file_dict or 'develop' in file_dict:
sections = ('default', 'develop')
for section in sections:
file_section = file_dict.get(section, {})
# Try to properly case each key if we can.
for key in list(file_section.keys()):
try:
cased_key = proper_case(key)
except IOError:
cased_key = key
file_section[cased_key] = file_section.pop(key)
return file_dict
def get_windows_path(*args):
"""Sanitize a path for windows environments
Accepts an arbitrary list of arguments and makes a clean windows path"""
return os.path.normpath(os.path.join(*args))
def find_windows_executable(bin_path, exe_name):
"""Given an executable name, search the given location for an executable"""
requested_path = get_windows_path(bin_path, exe_name)
if os.path.exists(requested_path):
return requested_path
# Ensure we aren't adding two layers of file extensions
exe_name = os.path.splitext(exe_name)[0]
files = [
'{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']
]
exec_paths = [get_windows_path(bin_path, f) for f in files]
exec_files = [
filename for filename in exec_paths if os.path.isfile(filename)
]
if exec_files:
return exec_files[0]
return find_executable(exe_name)
def path_to_url(path):
return Path(normalize_drive(os.path.abspath(path))).as_uri()
def get_converted_relative_path(path, relative_to=os.curdir):
"""Given a vague relative path, return the path relative to the given location"""
return os.path.join('.', os.path.relpath(path, start=relative_to))
def walk_up(bottom):
"""Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
"""
bottom = os.path.realpath(bottom)
# Get files in current dir.
try:
names = os.listdir(bottom)
except Exception:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = os.path.realpath(os.path.join(bottom, '..'))
# See if we are at the top.
if new_path == bottom:
return
for x in walk_up(new_path):
yield x
def find_requirements(max_depth=3):
"""Returns the path of a Pipfile in parent directories."""
i = 0
for c, d, f in walk_up(os.getcwd()):
i += 1
if i < max_depth:
            if 'requirements.txt' in f:
r = os.path.join(c, 'requirements.txt')
if os.path.isfile(r):
return r
raise RuntimeError('No requirements.txt found!')
# Borrowed from pew to avoid importing pew which imports psutil
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
def temp_environ():
"""Allow the ability to set os.environ temporarily"""
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
def is_valid_url(url):
"""Checks if a given string is an url"""
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
def download_file(url, filename):
"""Downloads file from url to a path with filename"""
r = requests.get(url, stream=True)
if not r.ok:
raise IOError('Unable to download file')
with open(filename, 'wb') as f:
f.write(r.content)
def need_update_check():
"""Determines whether we need to check for updates."""
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
if not os.path.exists(p):
return True
out_of_date_time = time() - (24 * 60 * 60)
if os.path.isfile(p) and os.path.getmtime(p) <= out_of_date_time:
return True
else:
return False
def touch_update_stamp():
"""Touches PIPENV_CACHE_DIR/.pipenv_update_check"""
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
try:
os.utime(p, None)
except OSError:
with open(p, 'w') as fh:
fh.write('')
def normalize_drive(path):
"""Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
See: <https://github.com/pypa/pipenv/issues/1218>
"""
if os.name != 'nt' or not isinstance(path, six.string_types):
return path
drive, tail = os.path.splitdrive(path)
# Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
if drive.islower() and len(drive) == 2 and drive[1] == ':':
return '{}{}'.format(drive.upper(), tail)
return path
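# Illustrative example: on Windows, normalize_drive('c:\\project\\venv') is expected
# to return 'C:\\project\\venv'; on other platforms the path is returned unchanged.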
def is_readonly_path(fn):
"""Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
"""
if os.path.exists(fn):
return (os.stat(fn).st_mode & stat.S_IREAD) or not os.access(
fn, os.W_OK
)
return False
def set_write_bit(fn):
if os.path.exists(fn):
os.chmod(fn, stat.S_IWRITE | stat.S_IWUSR)
return
def rmtree(directory, ignore_errors=False):
shutil.rmtree(
directory, ignore_errors=ignore_errors, onerror=handle_remove_readonly
)
def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion."""
# Check for read-only attribute
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if e.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(
default_warning_message.format(path), ResourceWarning
)
return
if exc_exception.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix, prefix, dir=None):
if 'RAM_DISK' in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ['RAM_DISK'].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
self.name = tempfile.mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _cleanup(cls, name, warn_message):
rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
rmtree(self.name)
| 33.712393
| 116
| 0.586711
|
import errno
import os
import re
import hashlib
import tempfile
import sys
import shutil
import logging
import click
import crayons
import delegator
import parse
import requests
import six
import stat
import warnings
try:
from weakref import finalize
except ImportError:
try:
from .vendor.backports.weakref import finalize
except ImportError:
class finalize(object):
def __init__(self, *args, **kwargs):
logging.warn('weakref.finalize unavailable, not cleaning...')
def detach(self):
return False
from time import time
logging.basicConfig(level=logging.ERROR)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
except ImportError:
try:
from .vendor.pathlib2 import Path
except ImportError:
pass
from distutils.spawn import find_executable
from contextlib import contextmanager
from .patched.piptools.resolver import Resolver
from .patched.piptools.repositories.pypi import PyPIRepository
from .patched.piptools.scripts.compile import get_pip_command
from .patched.piptools import logging as piptools_logging
from .patched.piptools.exceptions import NoCandidateFound
from .vendor.pip9.download import is_archive_file
from .vendor.pip9.exceptions import DistributionNotFound
from .vendor.pip9.index import Link
from .vendor.pip9._vendor.requests.exceptions import HTTPError, ConnectionError
from .pep508checker import lookup
from .environments import PIPENV_MAX_ROUNDS, PIPENV_CACHE_DIR
if six.PY2:
class ResourceWarning(Warning):
pass
specifiers = [k for k in lookup.keys()]
VCS_LIST = ('git', 'svn', 'hg', 'bzr')
SCHEME_LIST = ('http://', 'https://', 'ftp://', 'ftps://', 'file://')
requests = requests.Session()
def get_requirement(dep):
from .vendor.pip9.req.req_install import _strip_extras, Wheel
from .vendor import requirements
path = None
uri = None
cleaned_uri = None
editable = False
dep_link = None
if dep.startswith('-e '):
editable = True
dep = dep.split(' ', 1)[1]
if not any(dep.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
marker_sep = ';'
else:
marker_sep = '; '
if marker_sep in dep:
dep, markers = dep.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
dep, extras = _strip_extras(dep)
if is_installable_file(dep):
dep_path = Path(dep)
dep_link = Link(dep_path.absolute().as_uri())
if dep_path.is_absolute() or dep_path.as_posix() == '.':
path = dep_path.as_posix()
else:
path = get_converted_relative_path(dep)
dep = dep_link.egg_fragment if dep_link.egg_fragment else dep_link.url_without_fragment
elif is_vcs(dep):
dep_link = Link(dep)
uri = dep_link.url
cleaned_uri = clean_git_uri(dep)
dep = cleaned_uri
if editable:
dep = '-e {0}'.format(dep)
req = [r for r in requirements.parse(dep)][0]
if req.name and not any([req.uri, req.path]):
if dep_link:
if dep_link.scheme.startswith('file') and path and not req.path:
req.path = path
req.local_file = True
req.uri = None
else:
req.uri = dep_link.url_without_fragment
elif req.local_file and path and not req.vcs:
req.path = path
req.uri = None
if dep_link and dep_link.is_wheel and not req.name:
req.name = os.path.basename(Wheel(dep_link.path).name)
elif req.vcs and req.uri and cleaned_uri and cleaned_uri != uri:
req.uri = strip_ssh_from_git_uri(req.uri)
req.line = strip_ssh_from_git_uri(req.line)
req.editable = editable
if markers:
req.markers = markers
if extras:
req.extras = [
r for r in requirements.parse('fakepkg{0}'.format(extras))
][
0
].extras
return req
def cleanup_toml(tml):
toml = tml.split('\n')
new_toml = []
for line in toml:
if line.strip():
new_toml.append(line)
toml = '\n'.join(new_toml)
new_toml = []
for i, line in enumerate(toml.split('\n')):
if line.startswith('['):
if i > 0:
new_toml.append('')
new_toml.append(line)
new_toml.append('')
toml = '\n'.join(new_toml)
return toml
def parse_python_version(output):
version_pattern = re.compile(r'''
^ # Beginning of line.
Python # Literally "Python".
\s # Space.
(?P<major>\d+) # Major = one or more digits.
\. # Dot.
(?P<minor>\d+) # Minor = one or more digits.
(?: # Unnamed group for dot-micro.
\. # Dot.
(?P<micro>\d+) # Micro = one or more digit.
)? # Micro is optional because pypa/pipenv#1893.
.* # Trailing garbage.
$ # End of line.
''', re.VERBOSE)
match = version_pattern.match(output)
if not match:
return None
return match.groupdict(default='0')
def python_version(path_to_python):
if not path_to_python:
return None
try:
c = delegator.run([path_to_python, '--version'], block=False)
except Exception:
return None
c.block()
version = parse_python_version(c.out.strip() or c.err.strip())
try:
version = u'{major}.{minor}.{micro}'.format(**version)
except TypeError:
return None
return version
def escape_grouped_arguments(s):
if s is None:
return None
if os.name == 'nt':
s = "{}".format(s.replace("\\", "\\\\"))
return '"' + s.replace("'", "'\\''") + '"'
def clean_pkg_version(version):
return six.u(pep440_version(str(version).replace('==', '')))
class HackedPythonVersion(object):
def __init__(self, python_version, python_path):
self.python_version = python_version
self.python_path = python_path
def __enter__(self):
os.environ['PIP_PYTHON_VERSION'] = str(self.python_version)
os.environ['PIP_PYTHON_PATH'] = str(self.python_path)
def __exit__(self, *args):
del os.environ['PIP_PYTHON_VERSION']
def prepare_pip_source_args(sources, pip_args=None):
if pip_args is None:
pip_args = []
if sources:
pip_args.extend(['-i', sources[0]['url']])
if not sources[0].get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(sources[0]['url']).netloc.split(':')[0],
]
)
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
pip_args.extend(['--extra-index-url', source['url']])
# Trust the host if it's not verified.
if not source.get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(source['url']).hostname,
]
)
return pip_args
def actually_resolve_reps(
deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre
):
from pip9 import basecommand, req
from pip9._vendor import requests as pip_requests
class PipCommand(basecommand.Command):
name = 'PipCommand'
constraints = []
req_dir = tempfile.mkdtemp(prefix='pipenv-', suffix='-requirements')
for dep in deps:
if dep:
if dep.startswith('-e '):
constraint = req.InstallRequirement.from_editable(
dep[len('-e '):]
)
else:
fd, t = tempfile.mkstemp(
prefix='pipenv-', suffix='-requirement.txt', dir=req_dir
)
with os.fdopen(fd, 'w') as f:
f.write(dep)
constraint = [
c for c in req.parse_requirements(t, session=pip_requests)
][
0
]
if ' -i ' in dep:
index_lookup[constraint.name] = project.get_source(
url=dep.split(' -i ')[1]
).get(
'name'
)
if constraint.markers:
markers_lookup[constraint.name] = str(
constraint.markers
).replace(
'"', "'"
)
constraints.append(constraint)
rmtree(req_dir)
pip_command = get_pip_command()
pip_args = []
if sources:
pip_args = prepare_pip_source_args(sources, pip_args)
if verbose:
print('Using pip: {0}'.format(' '.join(pip_args)))
pip_options, _ = pip_command.parse_args(pip_args)
session = pip_command._build_session(pip_options)
pypi = PyPIRepository(
pip_options=pip_options, use_json=False, session=session
)
if verbose:
logging.log.verbose = True
piptools_logging.log.verbose = True
resolved_tree = set()
resolver = Resolver(
constraints=constraints,
repository=pypi,
clear_caches=clear,
prereleases=pre,
)
# pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages
try:
resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS))
except (NoCandidateFound, DistributionNotFound, HTTPError) as e:
click.echo(
'{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\n '
'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.'
''.format(
crayons.red('Warning', bold=True),
crayons.red('$ pipenv install --skip-lock'),
crayons.red('$ pipenv graph'),
),
err=True,
)
click.echo(crayons.blue(str(e)), err=True)
if 'no version found at all' in str(e):
click.echo(
crayons.blue(
'Please check your version specifier and version number. See PEP440 for more information.'
)
)
raise RuntimeError
return resolved_tree, resolver
def venv_resolve_deps(
deps, which, project, pre=False, verbose=False, clear=False, allow_global=False
):
from . import resolver
import json
resolver = escape_grouped_arguments(resolver.__file__.rstrip('co'))
cmd = '{0} {1} {2} {3} {4} {5}'.format(
escape_grouped_arguments(which('python')),
resolver,
'--pre' if pre else '',
'--verbose' if verbose else '',
'--clear' if clear else '',
'--system' if allow_global else '',
)
os.environ['PIPENV_PACKAGES'] = '\n'.join(deps)
c = delegator.run(cmd, block=True)
del os.environ['PIPENV_PACKAGES']
try:
assert c.return_code == 0
except AssertionError:
if verbose:
click.echo(c.out, err=True)
click.echo(c.err, err=True)
else:
click.echo(c.err[int(len(c.err) / 2) - 1:], err=True)
sys.exit(c.return_code)
if verbose:
click.echo(c.out.split('RESULTS:')[0], err=True)
try:
return json.loads(c.out.split('RESULTS:')[1].strip())
except IndexError:
raise RuntimeError('There was a problem with locking.')
def resolve_deps(
deps,
which,
project,
sources=None,
verbose=False,
python=False,
clear=False,
pre=False,
allow_global=False,
):
index_lookup = {}
markers_lookup = {}
python_path = which('python', allow_global=allow_global)
backup_python_path = sys.executable
results = []
# First (proper) attempt:
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
# Don't exit here, like usual.
resolved_tree = None
# Second (last-resort) attempt:
if resolved_tree is None:
with HackedPythonVersion(
python_version='.'.join([str(s) for s in sys.version_info[:3]]),
python_path=backup_python_path,
):
try:
# Attempt to resolve again, with different Python version information,
# particularly for particularly particular packages.
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
sys.exit(1)
for result in resolved_tree:
if not result.editable:
name = pep423_name(result.name)
version = clean_pkg_version(result.specifier)
index = index_lookup.get(result.name)
if not markers_lookup.get(result.name):
markers = str(
result.markers
) if result.markers and 'extra' not in str(
result.markers
) else None
else:
markers = markers_lookup.get(result.name)
collected_hashes = []
if any('python.org' in source['url'] or 'pypi.org' in source['url']
for source in sources):
try:
# Grab the hashes from the new warehouse API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(name),
timeout=10,
)
api_releases = r.json()['releases']
cleaned_releases = {}
for api_version, api_info in api_releases.items():
cleaned_releases[
clean_pkg_version(api_version)
] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release['digests']['sha256'])
collected_hashes = [
'sha256:' + s for s in collected_hashes
]
except (ValueError, KeyError, ConnectionError):
if verbose:
click.echo(
'{0}: Error generating hash for {1}'.format(
crayons.red('Warning', bold=True), name
)
)
# Collect un-collectable hashes (should work with devpi).
try:
collected_hashes = collected_hashes + list(
list(resolver.resolve_hashes([result]).items())[0][1]
)
except (ValueError, KeyError, ConnectionError, IndexError):
if verbose:
print('Error generating hash for {}'.format(name))
collected_hashes = sorted(set(collected_hashes))
d = {'name': name, 'version': version, 'hashes': collected_hashes}
if index:
d.update({'index': index})
if markers:
d.update({'markers': markers.replace('"', "'")})
results.append(d)
return results
def multi_split(s, split):
for r in split:
s = s.replace(r, '|')
return [i for i in s.split('|') if len(i) > 0]
def convert_deps_from_pip(dep):
dependency = {}
req = get_requirement(dep)
extras = {'extras': req.extras}
# File installs.
if (req.uri or req.path or is_installable_file(req.name)) and not req.vcs:
        # Assign a package name to the file: the last 7 characters of its sha256 hex digest.
if not req.uri and not req.path:
req.path = os.path.abspath(req.name)
hashable_path = req.uri if req.uri else req.path
if not req.name:
req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
req.name = req.name[len(req.name) - 7:]
if req.uri:
dependency[req.name] = {'file': hashable_path}
else:
dependency[req.name] = {'path': hashable_path}
if req.extras:
dependency[req.name].update(extras)
if req.editable:
dependency[req.name].update({'editable': True})
elif req.vcs:
if req.name is None:
raise ValueError(
'pipenv requires an #egg fragment for version controlled '
'dependencies. Please install remote dependency '
'in the form {0}#egg=<package-name>.'.format(req.uri)
)
if req.uri.startswith('{0}+'.format(req.vcs)):
req.uri = req.uri[len(req.vcs) + 1:]
dependency.setdefault(req.name, {}).update({req.vcs: req.uri})
if req.editable:
dependency[req.name].update({'editable': True})
# Add subdirectory, if it's there
if req.subdirectory:
dependency[req.name].update({'subdirectory': req.subdirectory})
if req.revision:
dependency[req.name].update({'ref': req.revision})
dependency[req.name].update({'extras': req.extras})
elif req.extras or req.specs or hasattr(req, 'markers'):
specs = None
if req.specs:
r = multi_split(dep, '!=<>~')
specs = dep[len(r[0]):]
dependency[req.name] = specs
if req.extras:
dependency[req.name] = extras
if specs:
dependency[req.name].update({'version': specs})
if hasattr(req, 'markers'):
if isinstance(dependency[req.name], six.string_types):
dependency[req.name] = {'version': specs}
dependency[req.name].update({'markers': req.markers})
else:
dependency[dep] = '*'
if len(dependency) > 1:
for key in dependency.copy():
if not hasattr(dependency[key], 'keys'):
del dependency[key]
return dependency
def is_star(val):
return isinstance(val, six.string_types) and val == '*'
def is_pinned(val):
return isinstance(val, six.string_types) and val.startswith('==')
def convert_deps_to_pip(deps, project=None, r=True, include_index=False):
dependencies = []
for dep in deps.keys():
# Default (e.g. '>1.10').
extra = deps[dep] if isinstance(deps[dep], six.string_types) else ''
version = ''
index = ''
# Get rid of '*'.
if is_star(deps[dep]) or str(extra) == '{}':
extra = ''
hash = ''
# Support for single hash (spec 1).
if 'hash' in deps[dep]:
hash = ' --hash={0}'.format(deps[dep]['hash'])
# Support for multiple hashes (spec 2).
if 'hashes' in deps[dep]:
hash = '{0} '.format(
''.join(
[' --hash={0} '.format(h) for h in deps[dep]['hashes']]
)
)
# Support for extras (e.g. requests[socks])
if 'extras' in deps[dep]:
extra = '[{0}]'.format(','.join(deps[dep]['extras']))
if 'version' in deps[dep]:
if not is_star(deps[dep]['version']):
version = deps[dep]['version']
# For lockfile format.
if 'markers' in deps[dep]:
specs = '; {0}'.format(deps[dep]['markers'])
else:
# For pipfile format.
specs = []
for specifier in specifiers:
if specifier in deps[dep]:
if not is_star(deps[dep][specifier]):
specs.append(
'{0} {1}'.format(specifier, deps[dep][specifier])
)
if specs:
specs = '; {0}'.format(' and '.join(specs))
else:
specs = ''
if include_index and not is_file(deps[dep]) and not is_vcs(deps[dep]):
pip_src_args = []
if 'index' in deps[dep]:
pip_src_args = [project.get_source(deps[dep]['index'])]
else:
pip_src_args = project.sources
pip_args = prepare_pip_source_args(pip_src_args)
index = ' '.join(pip_args)
# Support for version control
maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]]
vcs = maybe_vcs[0] if maybe_vcs else None
# Support for files.
if 'file' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
# Support for paths.
elif 'path' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
if vcs:
extra = '{0}+{1}'.format(vcs, deps[dep][vcs])
# Support for @refs.
if 'ref' in deps[dep]:
extra += '@{0}'.format(deps[dep]['ref'])
            extra += '#egg={0}'.format(dep)
# Support for subdirectory
if 'subdirectory' in deps[dep]:
extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])
# Support for editable.
if 'editable' in deps[dep]:
# Support for --egg.
dep = '-e '
else:
dep = ''
s = '{0}{1}{2}{3}{4} {5}'.format(
dep, extra, version, specs, hash, index
).strip()
dependencies.append(s)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)
f.write('\n'.join(dependencies).encode('utf-8'))
f.close()
return f.name
def mkdir_p(newdir):
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError(
"a file with the same name as the desired dir, '{0}', already exists.".format(
newdir
)
)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir_p(head)
if tail:
os.mkdir(newdir)
def is_required_version(version, specified_version):
# Certain packages may be defined with multiple values.
if isinstance(specified_version, dict):
specified_version = specified_version.get('version', '')
if specified_version.startswith('=='):
return version.strip() == specified_version.split('==')[1].strip()
return True
def strip_ssh_from_git_uri(uri):
if isinstance(uri, six.string_types):
uri = uri.replace('git+ssh://', 'git+')
return uri
def clean_git_uri(uri):
if isinstance(uri, six.string_types):
# Add scheme for parsing purposes, this is also what pip does
if uri.startswith('git+') and '://' not in uri:
uri = uri.replace('git+', 'git+ssh://')
return uri
def is_editable(pipfile_entry):
if hasattr(pipfile_entry, 'get'):
return pipfile_entry.get('editable', False) and any(
pipfile_entry.get(key) for key in ('file', 'path') + VCS_LIST
)
return False
def is_vcs(pipfile_entry):
from .vendor import requirements
if hasattr(pipfile_entry, 'keys'):
return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
elif isinstance(pipfile_entry, six.string_types):
return bool(
requirements.requirement.VCS_REGEX.match(
clean_git_uri(pipfile_entry)
)
)
return False
def is_installable_file(path):
from .vendor.pip9.utils import is_installable_dir
from .vendor.pip9.utils.packaging import specifiers
if hasattr(path, 'keys') and any(
key for key in path.keys() if key in ['file', 'path']
):
path = urlparse(path['file']).path if 'file' in path else path['path']
if not isinstance(path, six.string_types) or path == '*':
return False
# If the string starts with a valid specifier operator, test if it is a valid
# specifier set before making a path object (to avoid breaking windows)
if any(path.startswith(spec) for spec in '!=<>~'):
try:
specifiers.SpecifierSet(path)
# If this is not a valid specifier, just move on and try it as a path
except specifiers.InvalidSpecifier:
pass
else:
return False
if not os.path.exists(os.path.abspath(path)):
return False
lookup_path = Path(path)
absolute_path = '{0}'.format(lookup_path.absolute())
if lookup_path.is_dir() and is_installable_dir(absolute_path):
return True
elif lookup_path.is_file() and is_archive_file(absolute_path):
return True
return False
def is_file(package):
if hasattr(package, 'keys'):
return any(key for key in package.keys() if key in ['file', 'path'])
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
def pep440_version(version):
from .vendor.pip9.index import parse_version
# Use pip built-in version parser.
return str(parse_version(version))
def pep423_name(name):
name = name.lower()
if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
return name.replace('_', '-')
else:
return name
def proper_case(package_name):
# Hit the simple API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(package_name),
timeout=0.3,
stream=True,
)
if not r.ok:
raise IOError(
'Unable to find package {0} in PyPI repository.'.format(
package_name
)
)
r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)
good_name = r['name']
return good_name
def split_section(input_file, section_suffix, test_function):
pipfile_sections = ('packages', 'dev-packages')
lockfile_sections = ('default', 'develop')
if any(section in input_file for section in pipfile_sections):
sections = pipfile_sections
elif any(section in input_file for section in lockfile_sections):
sections = lockfile_sections
else:
# return the original file if we can't find any pipfile or lockfile sections
return input_file
for section in sections:
split_dict = {}
entries = input_file.get(section, {})
for k in list(entries.keys()):
if test_function(entries.get(k)):
split_dict[k] = entries.pop(k)
input_file['-'.join([section, section_suffix])] = split_dict
return input_file
def split_file(file_dict):
sections = {
'vcs': is_vcs,
'editable': lambda x: hasattr(x, 'keys') and x.get('editable'),
}
for k, func in sections.items():
file_dict = split_section(file_dict, k, func)
return file_dict
def merge_deps(
file_dict,
project,
dev=False,
requirements=False,
ignore_hashes=False,
blocking=False,
only=False,
):
deps = []
requirements_deps = []
for section in list(file_dict.keys()):
section_name, suffix = section.rsplit(
'-', 1
) if '-' in section and not section == 'dev-packages' else (
section, None
)
if not file_dict[section] or section_name not in (
'dev-packages', 'packages', 'default', 'develop'
):
continue
is_dev = section_name in ('dev-packages', 'develop')
if is_dev and not dev:
continue
if ignore_hashes:
            for k, v in file_dict[section].items():
if 'hash' in v:
del v['hash']
no_hashes = True if suffix else ignore_hashes
block = True if suffix else blocking
include_index = True if not suffix else False
converted = convert_deps_to_pip(
file_dict[section], project, r=False, include_index=include_index
)
deps.extend((d, no_hashes, block) for d in converted)
if dev and is_dev and requirements:
requirements_deps.extend((d, no_hashes, block) for d in converted)
return deps, requirements_deps
def recase_file(file_dict):
if 'packages' in file_dict or 'dev-packages' in file_dict:
sections = ('packages', 'dev-packages')
elif 'default' in file_dict or 'develop' in file_dict:
sections = ('default', 'develop')
for section in sections:
file_section = file_dict.get(section, {})
for key in list(file_section.keys()):
try:
cased_key = proper_case(key)
except IOError:
cased_key = key
file_section[cased_key] = file_section.pop(key)
return file_dict
def get_windows_path(*args):
return os.path.normpath(os.path.join(*args))
def find_windows_executable(bin_path, exe_name):
requested_path = get_windows_path(bin_path, exe_name)
if os.path.exists(requested_path):
return requested_path
exe_name = os.path.splitext(exe_name)[0]
files = [
'{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']
]
exec_paths = [get_windows_path(bin_path, f) for f in files]
exec_files = [
filename for filename in exec_paths if os.path.isfile(filename)
]
if exec_files:
return exec_files[0]
return find_executable(exe_name)
def path_to_url(path):
return Path(normalize_drive(os.path.abspath(path))).as_uri()
def get_converted_relative_path(path, relative_to=os.curdir):
return os.path.join('.', os.path.relpath(path, start=relative_to))
def walk_up(bottom):
bottom = os.path.realpath(bottom)
# Get files in current dir.
try:
names = os.listdir(bottom)
except Exception:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = os.path.realpath(os.path.join(bottom, '..'))
# See if we are at the top.
if new_path == bottom:
return
for x in walk_up(new_path):
yield x
def find_requirements(max_depth=3):
i = 0
for c, d, f in walk_up(os.getcwd()):
i += 1
if i < max_depth:
            if 'requirements.txt' in f:
r = os.path.join(c, 'requirements.txt')
if os.path.isfile(r):
return r
raise RuntimeError('No requirements.txt found!')
# Borrowed from pew to avoid importing pew which imports psutil
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
def temp_environ():
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
def is_valid_url(url):
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
def download_file(url, filename):
r = requests.get(url, stream=True)
if not r.ok:
raise IOError('Unable to download file')
with open(filename, 'wb') as f:
f.write(r.content)
def need_update_check():
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
if not os.path.exists(p):
return True
out_of_date_time = time() - (24 * 60 * 60)
if os.path.isfile(p) and os.path.getmtime(p) <= out_of_date_time:
return True
else:
return False
def touch_update_stamp():
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
try:
os.utime(p, None)
except OSError:
with open(p, 'w') as fh:
fh.write('')
def normalize_drive(path):
if os.name != 'nt' or not isinstance(path, six.string_types):
return path
drive, tail = os.path.splitdrive(path)
# Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
if drive.islower() and len(drive) == 2 and drive[1] == ':':
return '{}{}'.format(drive.upper(), tail)
return path
def is_readonly_path(fn):
if os.path.exists(fn):
return (os.stat(fn).st_mode & stat.S_IREAD) or not os.access(
fn, os.W_OK
)
return False
def set_write_bit(fn):
if os.path.exists(fn):
os.chmod(fn, stat.S_IWRITE | stat.S_IWUSR)
return
def rmtree(directory, ignore_errors=False):
shutil.rmtree(
directory, ignore_errors=ignore_errors, onerror=handle_remove_readonly
)
def handle_remove_readonly(func, path, exc):
# Check for read-only attribute
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if e.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(
default_warning_message.format(path), ResourceWarning
)
return
if exc_exception.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
class TemporaryDirectory(object):
def __init__(self, suffix, prefix, dir=None):
if 'RAM_DISK' in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ['RAM_DISK'].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
self.name = tempfile.mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _cleanup(cls, name, warn_message):
rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
rmtree(self.name)
| true
| true
|
790d1d156c14fb67cadee7b13ae38aaf71bb5702
| 3,172
|
py
|
Python
|
sgdr_callback.py
|
Callidior/semantic-embeddings
|
0d4177422bafbba685fb6a0f976675864f31e09f
|
[
"MIT"
] | 238
|
2018-11-12T03:37:10.000Z
|
2022-01-31T19:11:39.000Z
|
sgdr_callback.py
|
juancprzs/semantic-embeddings
|
6d826c314b41e67f1cdc0158279d15b7b5063a5f
|
[
"MIT"
] | 6
|
2019-04-27T20:41:57.000Z
|
2021-04-26T09:10:36.000Z
|
sgdr_callback.py
|
Callidior/semantic-embeddings
|
0d4177422bafbba685fb6a0f976675864f31e09f
|
[
"MIT"
] | 48
|
2018-11-22T14:49:53.000Z
|
2022-03-14T10:48:18.000Z
|
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
class SGDR(Callback):
"""This callback implements the learning rate schedule for
Stochastic Gradient Descent with warm Restarts (SGDR),
as proposed by Loshchilov & Hutter (https://arxiv.org/abs/1608.03983).
The learning rate at each epoch is computed as:
lr(i) = min_lr + 0.5 * (max_lr - min_lr) * (1 + cos(pi * i/num_epochs))
Here, num_epochs is the number of epochs in the current cycle, which starts
with base_epochs initially and is multiplied by mul_epochs after each cycle.
# Example
```python
sgdr = SGDR(min_lr=0.0, max_lr=0.05,
base_epochs=10, mul_epochs=2)
model.compile(optimizer=keras.optimizers.SGD(decay=1e-4, momentum=0.9),
loss=loss)
model.fit(X_train, Y_train, callbacks=[sgdr])
```
# Arguments
min_lr: minimum learning rate reached at the end of each cycle.
max_lr: maximum learning rate used at the beginning of each cycle.
base_epochs: number of epochs in the first cycle.
mul_epochs: factor with which the number of epochs is multiplied
after each cycle.
"""
def __init__(self, min_lr=0.0, max_lr=0.05, base_epochs=10, mul_epochs=2):
super(SGDR, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.base_epochs = base_epochs
self.mul_epochs = mul_epochs
self.cycles = 0.
self.cycle_iterations = 0.
self.trn_iterations = 0.
self._reset()
def _reset(self, new_min_lr=None, new_max_lr=None,
new_base_epochs=None, new_mul_epochs=None):
"""Resets cycle iterations."""
if new_min_lr != None:
self.min_lr = new_min_lr
if new_max_lr != None:
self.max_lr = new_max_lr
if new_base_epochs != None:
self.base_epochs = new_base_epochs
if new_mul_epochs != None:
self.mul_epochs = new_mul_epochs
self.cycles = 0.
self.cycle_iterations = 0.
def sgdr(self):
cycle_epochs = self.base_epochs * (self.mul_epochs ** self.cycles)
return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(np.pi * (self.cycle_iterations + 1) / cycle_epochs))
def on_train_begin(self, logs=None):
if self.cycle_iterations == 0:
K.set_value(self.model.optimizer.lr, self.max_lr)
else:
K.set_value(self.model.optimizer.lr, self.sgdr())
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
self.trn_iterations += 1
self.cycle_iterations += 1
if self.cycle_iterations >= self.base_epochs * (self.mul_epochs ** self.cycles):
self.cycles += 1
self.cycle_iterations = 0
K.set_value(self.model.optimizer.lr, self.max_lr)
else:
K.set_value(self.model.optimizer.lr, self.sgdr())
| 36.045455
| 129
| 0.60372
|
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
class SGDR(Callback):
def __init__(self, min_lr=0.0, max_lr=0.05, base_epochs=10, mul_epochs=2):
super(SGDR, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.base_epochs = base_epochs
self.mul_epochs = mul_epochs
self.cycles = 0.
self.cycle_iterations = 0.
self.trn_iterations = 0.
self._reset()
def _reset(self, new_min_lr=None, new_max_lr=None,
new_base_epochs=None, new_mul_epochs=None):
if new_min_lr != None:
self.min_lr = new_min_lr
if new_max_lr != None:
self.max_lr = new_max_lr
if new_base_epochs != None:
self.base_epochs = new_base_epochs
if new_mul_epochs != None:
self.mul_epochs = new_mul_epochs
self.cycles = 0.
self.cycle_iterations = 0.
def sgdr(self):
cycle_epochs = self.base_epochs * (self.mul_epochs ** self.cycles)
return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(np.pi * (self.cycle_iterations + 1) / cycle_epochs))
def on_train_begin(self, logs=None):
if self.cycle_iterations == 0:
K.set_value(self.model.optimizer.lr, self.max_lr)
else:
K.set_value(self.model.optimizer.lr, self.sgdr())
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
self.trn_iterations += 1
self.cycle_iterations += 1
if self.cycle_iterations >= self.base_epochs * (self.mul_epochs ** self.cycles):
self.cycles += 1
self.cycle_iterations = 0
K.set_value(self.model.optimizer.lr, self.max_lr)
else:
K.set_value(self.model.optimizer.lr, self.sgdr())
| true
| true
|
790d1d1bb17c17868a163ec4edcc232711174652
| 1,877
|
py
|
Python
|
tests/sentry/web/frontend/test_group_tag_export.py
|
noscripter/sentry
|
1c5b1b53e740ffd2747afb7f0995e026be9468d0
|
[
"BSD-3-Clause"
] | 1
|
2021-08-10T06:07:13.000Z
|
2021-08-10T06:07:13.000Z
|
tests/sentry/web/frontend/test_group_tag_export.py
|
fotinakis/sentry
|
c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c
|
[
"BSD-3-Clause"
] | 5
|
2019-12-28T18:13:59.000Z
|
2022-03-02T04:32:45.000Z
|
tests/sentry/web/frontend/test_group_tag_export.py
|
fotinakis/sentry
|
c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c
|
[
"BSD-3-Clause"
] | 1
|
2017-04-08T04:09:18.000Z
|
2017-04-08T04:09:18.000Z
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import GroupTagValue, TagKey, TagValue
from sentry.testutils import TestCase
class GroupTagExportTest(TestCase):
def test_simple(self):
key, value = 'foo', 'bar'
# Drop microsecond value for MySQL
now = timezone.now().replace(microsecond=0)
project = self.create_project()
group = self.create_group(project=project)
TagKey.objects.create(project=project, key=key)
TagValue.objects.create(
project=project,
key=key,
value=value,
)
group_tag_value = GroupTagValue.objects.create(
project=project,
group=group,
key=key,
value=value,
times_seen=1,
first_seen=now - timedelta(hours=1),
last_seen=now,
)
self.login_as(user=self.user)
url = '/{}/{}/issues/{}/tags/{}/export/'.format(
project.organization.slug, project.slug, group.id, key
)
response = self.client.get(url)
assert response.status_code == 200
assert response.streaming
assert response['Content-Type'] == 'text/csv'
rows = list(response.streaming_content)
for idx, row in enumerate(rows):
row = row.decode('utf-8')
assert row.endswith(u'\r\n')
bits = row[:-2].split(',')
if idx == 0:
assert bits == ['value', 'times_seen', 'last_seen', 'first_seen']
else:
assert bits[0] == value
assert bits[1] == '1'
assert bits[2] == group_tag_value.last_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
assert bits[3] == group_tag_value.first_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
| 32.362069
| 94
| 0.571124
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import GroupTagValue, TagKey, TagValue
from sentry.testutils import TestCase
class GroupTagExportTest(TestCase):
def test_simple(self):
key, value = 'foo', 'bar'
now = timezone.now().replace(microsecond=0)
project = self.create_project()
group = self.create_group(project=project)
TagKey.objects.create(project=project, key=key)
TagValue.objects.create(
project=project,
key=key,
value=value,
)
group_tag_value = GroupTagValue.objects.create(
project=project,
group=group,
key=key,
value=value,
times_seen=1,
first_seen=now - timedelta(hours=1),
last_seen=now,
)
self.login_as(user=self.user)
url = '/{}/{}/issues/{}/tags/{}/export/'.format(
project.organization.slug, project.slug, group.id, key
)
response = self.client.get(url)
assert response.status_code == 200
assert response.streaming
assert response['Content-Type'] == 'text/csv'
rows = list(response.streaming_content)
for idx, row in enumerate(rows):
row = row.decode('utf-8')
assert row.endswith(u'\r\n')
bits = row[:-2].split(',')
if idx == 0:
assert bits == ['value', 'times_seen', 'last_seen', 'first_seen']
else:
assert bits[0] == value
assert bits[1] == '1'
assert bits[2] == group_tag_value.last_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
assert bits[3] == group_tag_value.first_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
| true
| true
|