hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf8218852c28fff59a7a8df11466bdb480b3696 | 60 | py | Python | python/testData/multipleArgumentsCompletion/notSuggestIfNotEnoughArguments.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/multipleArgumentsCompletion/notSuggestIfNotEnoughArguments.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/multipleArgumentsCompletion/notSuggestIfNotEnoughArguments.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | def foo(x, y, z):
pass
x = 42
y = 100500
foo(<caret>)
| 7.5 | 17 | 0.516667 |
acf82225bebc697d280f330cb72eb48d30f90e68 | 8,144 | py | Python | dev/local/optimizers/radam.py | KeremTurgutlu/fast-kaggle | 0ea341b44a58da2dfb606a0ae32bac166985b49e | [
"Apache-2.0"
] | 8 | 2019-10-02T05:52:10.000Z | 2021-01-15T13:51:06.000Z | dev/local/optimizers/radam.py | KeremTurgutlu/fast-kaggle | 0ea341b44a58da2dfb606a0ae32bac166985b49e | [
"Apache-2.0"
] | 4 | 2019-10-02T06:13:13.000Z | 2019-10-28T18:21:10.000Z | dev/local/optimizers/radam.py | KeremTurgutlu/fast-kaggle | 0ea341b44a58da2dfb606a0ae32bac166985b49e | [
"Apache-2.0"
] | 2 | 2019-12-07T16:59:01.000Z | 2021-08-30T01:00:06.000Z | # from https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam.py
import math
import torch
from torch.optim.optimizer import Optimizer, required
__all__ = ["RAdam", "PlainRAdam", "AdamW"]
class RAdam(Optimizer):
    """Rectified Adam (RAdam) optimizer.

    Implements the variance-rectification scheme from Liu et al.,
    "On the Variance of the Adaptive Learning Rate and Beyond". While the
    approximated simple moving average length is small (N_sma < 5) the
    adaptive denominator is unreliable, so the update falls back to a
    plain (unrectified) momentum step.

    Args:
        params: iterable of parameters to optimize or dicts of parameter groups.
        lr: learning rate (default: 1e-3).
        betas: coefficients for the running averages of the gradient and
            its square (default: (0.9, 0.999)).
        eps: term added to the denominator for numerical stability (default: 1e-8).
        weight_decay: L2 penalty coefficient (default: 0).
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # (step, N_sma, step_size) cache shared by all parameters: the
        # rectification term depends only on the step counter, so it is
        # memoized modulo 10 to avoid recomputing it per parameter.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns the loss.

        Returns:
            The loss returned by *closure*, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in fp32 even for half-precision parameters.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Keyword overloads: the positional (scalar, tensor) forms
                # are deprecated and removed in recent PyTorch releases.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    # Decoupled-style decay: p <- p * (1 - wd * lr)
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                p.data.copy_(p_data_fp32)
        return loss
class PlainRAdam(Optimizer):
    """RAdam without the per-step memoization buffer.

    Identical update rule to :class:`RAdam`, but the rectification term is
    recomputed every step for every parameter instead of being cached.

    Args:
        params: iterable of parameters to optimize or dicts of parameter groups.
        lr: learning rate (default: 1e-3).
        betas: coefficients for the running averages of the gradient and
            its square (default: (0.9, 0.999)).
        eps: term added to the denominator for numerical stability (default: 1e-8).
        weight_decay: L2 penalty coefficient (default: 0).
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns the loss.

        Returns:
            The loss returned by *closure*, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Keyword overloads: the positional (scalar, tensor) forms
                # are deprecated and removed in recent PyTorch releases.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # Variance not yet tractable: plain bias-corrected momentum step.
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                p.data.copy_(p_data_fp32)
        return loss
class AdamW(Optimizer):
    """Adam with decoupled weight decay and optional linear LR warmup.

    Args:
        params: iterable of parameters to optimize or dicts of parameter groups.
        lr: learning rate (default: 1e-3).
        betas: coefficients for the running averages of the gradient and
            its square (default: (0.9, 0.999)).
        eps: term added to the denominator for numerical stability (default: 1e-8).
        weight_decay: decoupled weight decay coefficient (default: 0).
        warmup: number of steps over which the learning rate ramps
            linearly from ~0 to *lr* (default: 0, i.e. no warmup).
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, warmup = warmup)
        super(AdamW, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns the loss.

        Returns:
            The loss returned by *closure*, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Keyword overloads: the positional (scalar, tensor) forms
                # are deprecated and removed in recent PyTorch releases.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Linear warmup of the learning rate over the first
                # group['warmup'] steps; scheduled_lr == lr afterwards.
                if group['warmup'] > state['step']:
                    scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
                else:
                    scheduled_lr = group['lr']
                # BUGFIX: the step size must use the warmed-up learning rate.
                # The original used group['lr'] here, which made the warmup
                # parameter affect only the weight-decay term.
                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # Decoupled decay: p <- p * (1 - wd * scheduled_lr)
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * scheduled_lr)
                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                p.data.copy_(p_data_fp32)
        return loss
| 38.415094 | 185 | 0.510314 |
acf8226ad93bb86407700bded44567fee382bebc | 44,203 | py | Python | persistence.py | fghso/camps-crawler | 8d49dfc4905ea0c50397c7c390baf8e130c7474f | [
"MIT"
] | 1 | 2017-03-11T15:02:14.000Z | 2017-03-11T15:02:14.000Z | persistence.py | fghso/camps-crawler | 8d49dfc4905ea0c50397c7c390baf8e130c7474f | [
"MIT"
] | null | null | null | persistence.py | fghso/camps-crawler | 8d49dfc4905ea0c50397c7c390baf8e130c7474f | [
"MIT"
] | 1 | 2017-12-05T14:34:41.000Z | 2017-12-05T14:34:41.000Z | # -*- coding: iso-8859-1 -*-
"""Module to store persistence handler classes.
Persistence handlers take care of all implementation details related to resource storage. They expose a common interface (defined in :class:`BasePersistenceHandler`) through which the server (and/or filters/crawlers) can load, save and perform other operations over resources independently from where and how the resources are actually stored. At any point in time, the collection status of each resource must be one of those defined in the struct-like class :class:`StatusCodes`.
"""
import os
import threading
import tempfile
import cStringIO
import glob
import re
import json
import csv
import Queue
import common
import mysql.connector
from datetime import datetime
from copy import deepcopy
from collections import deque
class StatusCodes():
    """Namespace of collection status codes for resources.

    The numeric values may be remapped to match whatever codes the final
    storage location uses, but the attribute names (``SUCCEEDED``,
    ``INPROGRESS``, ``AVAILABLE``, ``FAILED``, ``ERROR``) are part of the
    handler interface and must stay as they are.
    """
    SUCCEEDED, INPROGRESS, AVAILABLE = 2, 1, 0
    FAILED, ERROR = -1, -2
class BasePersistenceHandler():
    """Abstract base class for persistence handlers.

    Concrete handlers hide the storage-specific details behind this common
    interface so the server (and filters/crawlers) can load, save and
    inspect resources without knowing where or how they are persisted.
    """
    def __init__(self, configurationsDictionary):
        """Receive the handler section of the XML configuration file whole,
        as *configurationsDictionary*, and bind the status-code constants."""
        self._extractConfig(configurationsDictionary)
        self.status = StatusCodes()
    def _extractConfig(self, configurationsDictionary):
        """Store configurations, applying any pre-processing they need.

        Extend this method to pre-process custom configuration options.
        """
        self.config = configurationsDictionary
        # Guarantee an "echo" sub-section exists for EchoHandler consumers.
        self.config.setdefault("echo", {})
    def setup(self):
        """Run per-client initialization.

        Called whenever a connection to a new client is opened, unlike
        __init__, which runs once when the server instantiates the handler.
        """
        pass
    def select(self):
        """Retrieve an AVAILABLE resource.

        Returns:
            (resourceKey, resourceID, resourceInfo) where *resourceKey*
            uniquely identifies the resource internally (like a primary
            key, allowing duplicate IDs if needed), *resourceID* is the
            ID sent to a client, and *resourceInfo* is a dict of any
            extra data attached to the resource.
        """
        return None, None, None
    def update(self, resourceKey, status, resourceInfo):
        """Set the status and info of the resource identified by *resourceKey*."""
        pass
    def insert(self, resourcesList):
        """Persist new resources.

        Args:
            resourcesList: list of (resourceID, resourceInfo) tuples.
        """
        pass
    def count(self):
        """Count resources per status category.

        Returns:
            (total, succeeded, inprogress, available, failed, error),
            each an integer count.
        """
        return (0,) * 6
    def reset(self, status):
        """Mark every resource currently in *status* as AVAILABLE.

        Returns:
            The number of resources reset.
        """
        return 0
    def finish(self):
        """Run per-client finalization; counterpart of setup()."""
        pass
    def shutdown(self):
        """Run global finalization when the server shuts down.

        Counterpart of __init__ at the level of the server's whole
        execution span (not tied to this object's lifetime like __del__).
        """
        pass
# IMPORTANT NOTE: MemoryPersistenceHandler class was built as basis for FilePersistenceHandler and its extensions,
# and for test purposes. Altough it can be set in the configuration file, it is not intended for direct use in a
# production enviroment. In this case, choose one of the file based handlers instead
class MemoryPersistenceHandler(BasePersistenceHandler):
    """Keep all resources in memory only.

    Resources live in ``self.resources`` as dicts of the form
    ``{"id": ..., "status": ..., "info": ...}``; ``self.statusRecords``
    maps each status code to the collection of resource keys (indexes
    into ``self.resources``) currently in that status. The AVAILABLE
    bucket is a deque so selection pops from the left in O(1).
    Serves as the basis for the file-backed handlers and for tests; not
    intended for direct production use.
    """
    def __init__(self, configurationsDictionary):
        BasePersistenceHandler.__init__(self, configurationsDictionary)
        # Serializes appends to self.resources during insert().
        self.insertLock = threading.Lock()
        self.resources = []
        # Maps resource ID -> resource key; only maintained when
        # config["uniqueresourceid"] is enabled.
        self.IDsHash = {}
        self.statusRecords = {self.status.SUCCEEDED: [],
                              self.status.INPROGRESS: [],
                              self.status.AVAILABLE: deque(),
                              self.status.FAILED: [],
                              self.status.ERROR: []}
        #self._loadTestData()
    def _extractConfig(self, configurationsDictionary):
        # Boolean options default to False when absent from the configuration.
        BasePersistenceHandler._extractConfig(self, configurationsDictionary)
        if ("uniqueresourceid" not in self.config): self.config["uniqueresourceid"] = False
        else: self.config["uniqueresourceid"] = common.str2bool(self.config["uniqueresourceid"])
        if ("onduplicateupdate" not in self.config): self.config["onduplicateupdate"] = False
        else: self.config["onduplicateupdate"] = common.str2bool(self.config["onduplicateupdate"])
    def _save(self, pk, id, status, info, changeInfo = True):
        """Create (pk is None) or update (pk given) a resource entry.

        When updating, existing info dicts are merged with *info*;
        passing changeInfo = False leaves the stored info untouched.
        """
        if (pk is not None):
            if (status is not None): self.resources[pk]["status"] = status
            if (changeInfo):
                if (self.resources[pk]["info"] is not None) and (info is not None): self.resources[pk]["info"].update(info)
                else: self.resources[pk]["info"] = info
        else:
            self.resources.append({"id": id, "status": status, "info": info})
    def _loadTestData(self):
        # Sample fixtures for manual testing (see commented-out call in __init__).
        self.resources.extend([
            {"id": 1, "status": 0, "info": {"crawler_name": "c1", "response_code": 3}},
            {"id": 2, "status": 0, "info": {"crawler_name": "c2", "response_code": 3}},
            {"id": 3, "status": 0, "info": None},
            {"id": 4, "status": 0, "info": None}
        ])
        for pk, resource in enumerate(self.resources):
            self.statusRecords[resource["status"]].append(pk)
            if (self.config["uniqueresourceid"]):
                if (resource["id"] not in self.IDsHash): self.IDsHash[resource["id"]] = pk
                else: raise KeyError("Duplicated ID found in resources list: %s." % resource["id"])
    def select(self):
        # Pop the oldest AVAILABLE resource and mark it INPROGRESS.
        try: pk = self.statusRecords[self.status.AVAILABLE].popleft()
        except IndexError: return (None, None, None)  # no resource available
        self._save(pk, None, self.status.INPROGRESS, None, False)
        self.statusRecords[self.status.INPROGRESS].append(pk)
        # info is deep-copied so callers cannot mutate the stored dict.
        return (pk, self.resources[pk]["id"], deepcopy(self.resources[pk]["info"]))
    def update(self, resourceKey, status, resourceInfo):
        # Move the key from its current status bucket to the new one.
        currentStatus = self.resources[resourceKey]["status"]
        self.statusRecords[currentStatus].remove(resourceKey)
        if (resourceInfo): self._save(resourceKey, None, status, resourceInfo)
        else: self._save(resourceKey, None, status, resourceInfo, False)
        self.statusRecords[status].append(resourceKey)
    def insert(self, resourcesList):
        for resourceID, resourceInfo in resourcesList:
            if (self.config["uniqueresourceid"]) and (resourceID in self.IDsHash):
                # Duplicate ID: merge info when allowed, otherwise fail loudly.
                if (self.config["onduplicateupdate"]):
                    self._save(self.IDsHash[resourceID], None, None, resourceInfo)
                    continue
                else: raise KeyError("Cannot insert resource, ID %s already exists." % resourceID)
            with self.insertLock:
                self.statusRecords[self.status.AVAILABLE].append(len(self.resources))
                if (self.config["uniqueresourceid"]): self.IDsHash[resourceID] = len(self.resources)
                self._save(None, resourceID, self.status.AVAILABLE, resourceInfo)
    def count(self):
        # Returns (total, succeeded, inprogress, available, failed, error).
        return (len(self.resources),
                len(self.statusRecords[self.status.SUCCEEDED]),
                len(self.statusRecords[self.status.INPROGRESS]),
                len(self.statusRecords[self.status.AVAILABLE]),
                len(self.statusRecords[self.status.FAILED]),
                len(self.statusRecords[self.status.ERROR]))
    def reset(self, status):
        # Move every resource with the given status back to the FRONT of
        # the AVAILABLE queue so they are retried before new resources.
        # NOTE(review): statusRecords[AVAILABLE] is a deque, which does not
        # support slicing — calling reset(AVAILABLE) would raise; verify
        # callers only pass non-AVAILABLE statuses.
        resetList = self.statusRecords[status][:]
        for pk in resetList:
            self.statusRecords[status].remove(pk)
            self._save(pk, None, self.status.AVAILABLE, None, False)
            self.statusRecords[self.status.AVAILABLE].appendleft(pk)
        return len(resetList)
class FilePersistenceHandler(MemoryPersistenceHandler):
    """Load and dump resources from/to a file.

    All resources in the file are loaded into memory before the server operations begin. So, this handler is recomended for small to medium size datasets that can be completely fitted into machine's memory. For larger datasets, consider using another persistence handler. Another option for large datasets is to divide the resources in more than one file, collecting the resources of one file at a time.

    The default version of this handler supports CSV and JSON files. It is possible to add support to other file types by subclassing :class:`BaseFileColumns` and :class:`BaseFileHandler`. The new file type must also be included in the :attr:`supportedFileTypes` dictionary.
    """
    class BaseFileColumns():
        """Hold column names of data in the file, allowing fast access to names of ID, status and info columns."""
        def __init__(self, fileName, idColumn, statusColumn):
            self.names = self._extractColNames(fileName)
            self.idName = idColumn
            self.statusName = statusColumn
            # Info columns are every column that is not the ID or status column.
            self.infoNames = [name for name in self.names if (name not in (self.idName, self.statusName))]
        def _extractColNames(self, fileName):
            """Extract column names from the file.

            Must be overridden, as column names extraction depends on the file type.

            Returns:
                A list of all column names in the file.
            """
            return []
    class BaseFileHandler():
        """Handle low level details about persistence in a specific file type.

        Each resource loaded from a file is stored in memory in a dictionary in the format ``{"id": X, "status": X, "info": {...}}``, which is the resource internal representation format. This handler is responsible for translating resources in the internal representation format to the format used in a specific file type and vice-versa.
        """
        def __init__(self): self.status = StatusCodes()
        def parse(self, resource, columns):
            """Transform resource from file format to internal representation format.

            Args:
                * *resource* (file specific type): Resource given in file format.
                * *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.

            Returns:
                A resource in internal representation format.
            """
            return {"id": None, "status": None, "info": None}
        def unparse(self, resource, columns):
            """Transform resource from internal representation format to file format.

            Args:
                * *resource* (dict): Resource given in internal representation format.
                * *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.

            Returns:
                A resource in file format.
            """
            return None
        def load(self, file, columns):
            """Load resources in file format and yield them in internal representation format.

            Args:
                * *file* (:ref:`file object<python:bltin-file-objects>`): File object bounded to the physical file where resources are stored.
                * *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.

            Yields:
                A resource in internal representation format.
            """
            yield {"id": None, "status": None, "info": None}
        def dump(self, resources, file, columns):
            """Save resources in internal representation format to file format.

            Args:
                * *resources* (list): List of resources in internal representation format.
                * *file* (:ref:`file object<python:bltin-file-objects>`): File object bounded to the physical file where resources will be stored.
                * *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.
            """
            pass
    class CSVColumns(BaseFileColumns):
        """Hold column names of data in CSV files, allowing fast access to names of ID, status and info columns."""
        def _extractColNames(self, fileName):
            # Column names come from the CSV header row.
            with open(fileName, "r") as file:
                reader = csv.DictReader(file, quoting = csv.QUOTE_MINIMAL, quotechar = "'", skipinitialspace = True)
                columns = reader.fieldnames
            return [col.strip("\"") for col in columns]
    class CSVHandler(BaseFileHandler):
        """Handle low level details about persistence in CSV files.

        .. note::
            This class and :class:`CSVColumns <FilePersistenceHandler.CSVColumns>` class uses Python's built-in :mod:`python:csv` module internally.
        """
        def _parseValue(self, value):
            # Unquoted cells are interpreted as bool/None/float/int;
            # double-quoted cells are kept as strings (quotes stripped).
            if (not value): return None
            if (not value.startswith("\"")):
                if value.upper() in ("TRUE", "T"): return True
                if value.upper() in ("FALSE", "F"): return False
                if value.upper() in ("NONE", "NULL"): return None
                if ("." in value): return float(value)
                return int(value)
            return value.strip("\"")
        def _unparseValue(self, value):
            # Inverse of _parseValue: strings are double-quoted, booleans
            # become "T"/"F", everything else is written as-is.
            if isinstance(value, basestring):
                if isinstance(value, unicode): value = value.encode("utf-8")
                return "".join(("\"", value, "\""))
            if isinstance(value, bool): return ("T" if (value) else "F")
            return value
        def parse(self, resource, columns):
            parsed = {"id": self._parseValue(resource[columns.idName])}
            # Missing/empty status cells default to AVAILABLE.
            if ((columns.statusName in columns.names) and (resource[columns.statusName])):
                parsed["status"] = self._parseValue(resource[columns.statusName])
            else: parsed["status"] = self.status.AVAILABLE
            if (columns.infoNames):
                parsed["info"] = {}
                for column in columns.infoNames:
                    parsed["info"][column] = self._parseValue(resource[column])
            return parsed
        def unparse(self, resource, columns):
            # Render a single resource as one CSV row (string) via an
            # in-memory buffer.
            buffer = cStringIO.StringIO()
            writer = csv.DictWriter(buffer, columns.names, quoting = csv.QUOTE_MINIMAL, quotechar = "'", lineterminator = "\n", extrasaction = "ignore")
            unparsed = {columns.idName: self._unparseValue(resource["id"])}
            if (resource["status"] != self.status.AVAILABLE):
                unparsed[columns.statusName] = self._unparseValue(resource["status"])
            if (resource["info"]):
                for key, value in resource["info"].iteritems():
                    if (value is not None) and (key in columns.infoNames): unparsed[key] = self._unparseValue(value)
            writer.writerow(unparsed)
            return buffer.getvalue()
        def load(self, file, columns):
            reader = csv.DictReader(file, columns.names, quoting = csv.QUOTE_MINIMAL, quotechar = "'", skipinitialspace = True)
            next(reader)  # skip the header row
            for resource in reader:
                yield self.parse(resource, columns)
        def dump(self, resources, file, columns):
            writer = csv.DictWriter(file, columns.names, quoting = csv.QUOTE_MINIMAL, quotechar = "'", lineterminator = "\n", extrasaction = "ignore")
            writer.writeheader()
            # In case of CSV, it is easier and faster to unparse the resource here instead of using
            # unparse method, so we can use writerow method to directly save the resource to file
            for resource in resources:
                row = {columns.idName: self._unparseValue(resource["id"])}
                # NOTE(review): compares against literal 0 rather than
                # self.status.AVAILABLE — same value by default, but would
                # diverge if the status codes were remapped; confirm intent.
                if (resource["status"] != 0): row[columns.statusName] = self._unparseValue(resource["status"])
                if (resource["info"]):
                    for key, value in resource["info"].iteritems():
                        if (value is not None) and (key in columns.infoNames): row[key] = self._unparseValue(value)
                writer.writerow(row)
    class JSONColumns(BaseFileColumns):
        """Hold column names of data in JSON files, allowing fast access to names of ID, status and info columns."""
        def _extractColNames(self, fileName):
            # Reads only the first 1KB and slices the text between the first
            # "[" and "]" — assumes the "columns" array appears within that
            # prefix and contains no nested brackets.
            with open(fileName, "r") as file: content = file.read(1024)
            columnsStart = content.index("[") + 1
            columnsEnd = content.index("]")
            columns = content[columnsStart:columnsEnd]
            return [name.strip("\" ") for name in columns.split(",")]
    class JSONHandler(BaseFileHandler):
        """Handle low level details about persistence in JSON files.

        .. note::
            This class and :class:`JSONColumns <FilePersistenceHandler.JSONColumns>` uses Python's built-in :mod:`python:json` module internally.
        """
        def parse(self, resource, columns):
            parsed = {"id": resource[columns.idName]}
            # A missing status key defaults to AVAILABLE.
            if ((columns.statusName in columns.names) and (columns.statusName in resource)):
                parsed["status"] = resource[columns.statusName]
            else: parsed["status"] = self.status.AVAILABLE
            if (columns.infoNames):
                parsed["info"] = {}
                for column in columns.infoNames:
                    if (column in resource): parsed["info"][column] = resource[column]
                    else: parsed["info"][column] = None
            return parsed
        def unparse(self, resource, columns):
            # Omits the status when AVAILABLE and omits None info values,
            # keeping the file compact.
            unparsed = {columns.idName: resource["id"]}
            if (resource["status"] != self.status.AVAILABLE): unparsed[columns.statusName] = resource["status"]
            if (resource["info"]):
                for key, value in resource["info"].iteritems():
                    if (value is not None) and (key in columns.infoNames): unparsed[key] = value
            return json.dumps(unparsed)
        def load(self, file, columns):
            input = json.load(file)
            for resource in input["resources"]:
                yield self.parse(resource, columns)
        def dump(self, resources, file, columns):
            # Streams the JSON document piecewise instead of building the
            # whole structure in memory before serializing.
            file.write("{\"columns\": %s, \"resources\": [" % json.dumps(columns.names))
            separator = ""
            for resource in resources:
                file.write("%s%s" % (separator, self.unparse(resource, columns)))
                separator = ", "
            file.write("]}")
    supportedFileTypes = {
        # Type : [FileColumns, FileHandler]
        "CSV" : ["CSVColumns", "CSVHandler"],
        "JSON" : ["JSONColumns", "JSONHandler"]
    }
    """Associate file types and its columns and handler classes. The type of the current file is provided by the user directly (through the ``filetype`` option in the XML configuration file) or indirectly (through the file extension extracted from file name). When checking if the type of the current file is on the list of supported file types, the comparison between the strings is case insensitive."""
    def __init__(self, configurationsDictionary):
        MemoryPersistenceHandler.__init__(self, configurationsDictionary)
        self.echo = common.EchoHandler(self.config["echo"])
        self.saveLock = threading.Lock()
        # Set when the background dump thread fails; public operations
        # then abort (see _checkDumpException).
        self.dumpExceptionEvent = threading.Event()
        self._setFileHandler()
        # Load the entire file into memory, indexing resources by status.
        with open(self.config["filename"], "r") as inputFile:
            resourcesList = self.fileHandler.load(inputFile, self.fileColumns)
            for resource in resourcesList:
                self.statusRecords[resource["status"]].append(len(self.resources))
                if (self.config["uniqueresourceid"]):
                    if (resource["id"] not in self.IDsHash): self.IDsHash[resource["id"]] = len(self.resources)
                    else: raise KeyError("Duplicated ID found in '%s': %s." % (self.config["filename"], resource["id"]))
                if ("info" not in resource): resource["info"] = None
                self.resources.append(resource)
        # Periodically dump the in-memory resources back to disk.
        self.timer = threading.Timer(self.config["savetimedelta"], self._dumpTimerThread)
        self.timer.daemon = True
        self.timer.start()
    def _extractConfig(self, configurationsDictionary):
        MemoryPersistenceHandler._extractConfig(self, configurationsDictionary)
        # File type comes from explicit config or from the file extension.
        if ("filetype" in self.config): self.config["filetype"] = self.config["filetype"].lower()
        else: self.config["filetype"] = os.path.splitext(self.config["filename"])[1][1:].lower()
        self.config["savetimedelta"] = int(self.config["savetimedelta"])
        if (self.config["savetimedelta"] < 1): raise ValueError("Parameter 'savetimedelta' must be greater than zero.")
    def _save(self, pk, id, status, info, changeInfo = True):
        # Guarded by saveLock so updates never interleave with _dump().
        with self.saveLock: MemoryPersistenceHandler._save(self, pk, id, status, info, changeInfo)
    def _setFileHandler(self):
        # Resolve the (columns, handler) class pair registered for the
        # configured file type; comparison is case insensitive.
        for type, handler in FilePersistenceHandler.supportedFileTypes.iteritems():
            if (self.config["filetype"] == type.lower()):
                FileColumnsClass = getattr(self, handler[0])
                FileHandlerClass = getattr(self, handler[1])
                self.fileColumns = FileColumnsClass(self.config["filename"], self.config["resourceidcolumn"], self.config["statuscolumn"])
                self.fileHandler = FileHandlerClass()
                return
        raise TypeError("Unknown file type '%s' for file '%s'." % (self.config["filetype"], self.config["filename"]))
    def _checkDumpException(function):
        # Decorator (defined at class scope, so it receives the undecorated
        # method): makes public operations fail fast once the background
        # dump thread has raised, instead of silently losing data.
        def decoratedFunction(self, *args):
            if (self.dumpExceptionEvent.is_set()):
                raise RuntimeError("Exception in dump thread. Execution of FilePersistenceHandler aborted.")
            return function(self, *args)
        return decoratedFunction
    def _dump(self):
        # Write to a temporary file first, then atomically replace the
        # original, so a crash mid-dump cannot corrupt the data file.
        self.echo.out("[File: %s] Saving list of resources to file..." % self.config["filename"])
        with tempfile.NamedTemporaryFile(mode = "w", suffix = ".temp", prefix = "dump_", dir = "", delete = False) as temp:
            with self.saveLock:
                self.fileHandler.dump(self.resources, temp, self.fileColumns)
        common.replace(temp.name, self.config["filename"])
        self.echo.out("[File: %s] Resources saved." % self.config["filename"])
    def _dumpTimerThread(self):
        # Timer callback: dump, then re-arm the timer. On failure, set the
        # exception event (aborting further operations) and stop re-arming.
        try:
            self._dump()
        except:
            self.dumpExceptionEvent.set()
            self.echo.out("[File: %s] Exception while saving resources." % self.config["filename"], "EXCEPTION")
        else:
            self.timer = threading.Timer(self.config["savetimedelta"], self._dumpTimerThread)
            self.timer.daemon = True
            self.timer.start()
    @_checkDumpException
    def select(self):
        return MemoryPersistenceHandler.select(self)
    @_checkDumpException
    def update(self, resourceKey, status, resourceInfo):
        MemoryPersistenceHandler.update(self, resourceKey, status, resourceInfo)
    @_checkDumpException
    def insert(self, resourcesList):
        # Re-raise duplicate-ID errors with the file name for context.
        for resourceID, resourceInfo in resourcesList:
            try: MemoryPersistenceHandler.insert(self, [(resourceID, resourceInfo)])
            except KeyError: raise KeyError("Cannot insert resource, ID %s already exists in '%s'." % (resourceID, self.config["filename"]))
    @_checkDumpException
    def count(self):
        return MemoryPersistenceHandler.count(self)
    @_checkDumpException
    def reset(self, status):
        return MemoryPersistenceHandler.reset(self, status)
    def shutdown(self):
        # Stop the periodic timer and do one final synchronous dump.
        self.timer.cancel()
        self._dump()
class RolloverFilePersistenceHandler(FilePersistenceHandler):
    """Load and dump resources from/to files respecting limits of file size and/or number of resources per file.

    This handler uses multiple instances of :class:`FilePersistenceHandler` to allow insertion of new resources
    respecting limits specified by the user. It is also capable of reading and updating resources from multiple files.

    The rollover handler leaves the low level details of persistence for the file handlers attached to each file,
    taking care of the coordination necessary to maintain consistency between them and also of the verification of
    limits established. When inserting new resources, every time the file size limit and/or number of resources per
    file limit is reached the rollover handler opens a new file and assigns a new instance of
    :class:`FilePersistenceHandler` to handle it. All resources, however, are maintained in memory. So, as in the
    case of :class:`FilePersistenceHandler`, this handler is not well suited for large datasets that cannot be
    completely fitted in memory.

    .. note::
        This handler was inspired by Python's :class:`python:logging.handlers.RotatingFileHandler` class.
    """
    def __init__(self, configurationsDictionary):
        # Keep a pristine copy of the configuration so each per-file handler can be built from it.
        self.originalConfig = deepcopy(configurationsDictionary)
        MemoryPersistenceHandler.__init__(self, configurationsDictionary)
        self._setFileHandler()
        self.fileHandlersList = []
        self.nextSuffixNumber = 1
        self.insertHandlerIndex = 0
        # -1 means "threshold disabled": it never satisfies the >= threshold tests in insert().
        self.insertSize = -1
        self.insertAmount = -1
        # Iterate over old rollover files to get file names and max suffix number already used
        fileNamesList = [self.config["filename"]]
        for name in glob.iglob(self.config["filename"] + ".*"):
            # Raw string avoids the invalid-escape DeprecationWarning of "\." in a normal string.
            if re.search(r"\.[0-9]+$", name):
                fileNamesList.append(name)
                suffixNumber = int(name.rsplit(".", 1)[1])
                if (suffixNumber >= self.nextSuffixNumber): self.nextSuffixNumber = suffixNumber + 1
        # Initialize file persistence handlers
        for fileName in fileNamesList: self._addHandler(fileName)
        # Get initial file size and amount for the current insert target
        if (self.config["sizethreshold"]): self.insertSize = os.path.getsize(self.config["filename"])
        if (self.config["amountthreshold"]): self.insertAmount = len(self.fileHandlersList[self.insertHandlerIndex].resources)
    def _extractConfig(self, configurationsDictionary):
        """Validate rollover-specific limits; at least one of the two thresholds must be non-zero."""
        FilePersistenceHandler._extractConfig(self, configurationsDictionary)
        if ("sizethreshold" not in self.config): self.config["sizethreshold"] = 0
        else: self.config["sizethreshold"] = int(self.config["sizethreshold"])
        if ("amountthreshold" not in self.config): self.config["amountthreshold"] = 0
        else: self.config["amountthreshold"] = int(self.config["amountthreshold"])
        if (self.config["sizethreshold"] < 0): raise ValueError("Parameter 'sizethreshold' must be zero or greater.")
        if (self.config["amountthreshold"] < 0): raise ValueError("Parameter 'amountthreshold' must be zero or greater.")
        if (self.config["sizethreshold"] == 0) and (self.config["amountthreshold"] == 0):
            raise ValueError("Parameters 'sizethreshold' and 'amountthreshold' cannot be zero at the same time.")
    def _addHandler(self, fileName):
        """Attach a new :class:`FilePersistenceHandler` for *fileName*, enforcing ID uniqueness across files."""
        config = deepcopy(self.originalConfig)
        config["filename"] = fileName
        config["filetype"] = self.config["filetype"]
        handler = FilePersistenceHandler(config)
        if (self.config["uniqueresourceid"]):
            duplicated = set(handler.IDsHash).intersection(self.IDsHash)
            # Map every ID of the new handler to its index in fileHandlersList (it is appended below).
            if (not duplicated): self.IDsHash.update(dict.fromkeys(handler.IDsHash, len(self.fileHandlersList)))
            else:
                details = ["%s ['%s']" % (resourceID, self.fileHandlersList[self.IDsHash[resourceID]].config["filename"]) for resourceID in duplicated]
                raise KeyError("Duplicated ID(s) found in '%s': %s" % (fileName, ", ".join(details)))
        self.fileHandlersList.append(handler)
    def select(self):
        """Return the first available resource, keyed by (handler index, handler-local key)."""
        for handlerKey, handler in enumerate(self.fileHandlersList):
            (resourceKey, resourceID, resourceInfo) = handler.select()
            if (resourceID): return ((handlerKey, resourceKey), resourceID, resourceInfo)
        return (None, None, None)
    def update(self, keyPair, status, resourceInfo):
        """Route the update to the handler identified by the (handler index, local key) pair from select()."""
        self.fileHandlersList[keyPair[0]].update(keyPair[1], status, resourceInfo)
    def insert(self, resourcesList):
        """Insert resources, rolling over to a new file when a size/amount threshold is exceeded."""
        for resourceID, resourceInfo in resourcesList:
            if (self.config["uniqueresourceid"]) and (resourceID in self.IDsHash):
                # Known ID: let the owning handler raise its own duplicate error.
                handler = self.fileHandlersList[self.IDsHash[resourceID]]
                handler.insert([(resourceID, resourceInfo)])
                continue
            with self.insertLock:
                handler = self.fileHandlersList[self.insertHandlerIndex]
                # Change insert handler if size or amount thresholds were exceeded. If there are no more
                # handlers in the list, open a new file and instantiate a new handler to take care of it.
                while ((self.insertSize >= self.config["sizethreshold"]) or
                       (self.insertAmount >= self.config["amountthreshold"])):
                    self.insertHandlerIndex += 1
                    if (self.insertHandlerIndex >= len(self.fileHandlersList)):
                        newFileName = "%s.%d" % (self.config["filename"], self.nextSuffixNumber)
                        # 'newFile' (not 'file') so the builtin is not shadowed.
                        with open(newFileName, "w") as newFile: self.fileHandler.dump([], newFile, self.fileColumns)
                        self._addHandler(newFileName)
                        self.nextSuffixNumber += 1
                    handler = self.fileHandlersList[self.insertHandlerIndex]
                    if (self.config["sizethreshold"]): self.insertSize = os.path.getsize(handler.config["filename"])
                    if (self.config["amountthreshold"]): self.insertAmount = len(handler.resources)
                handler.insert([(resourceID, resourceInfo)])
                if (self.config["uniqueresourceid"]): self.IDsHash[resourceID] = self.insertHandlerIndex
                if (self.config["sizethreshold"]):
                    # Estimate growth from the serialized form of the resource just inserted.
                    self.insertSize += len(self.fileHandler.unparse(handler.resources[-1], self.fileColumns))
                if (self.config["amountthreshold"]):
                    self.insertAmount += 1
    def count(self):
        """Element-wise sum of the per-file count() tuples (total plus one slot per status)."""
        counts = [0] * 6
        for handler in self.fileHandlersList:
            counts = [x + y for x, y in zip(counts, handler.count())]
        return counts
    def reset(self, status):
        """Reset resources with *status* in every attached file and return the total affected.

        NOTE(review): previously the per-handler return values were discarded and None was
        returned, unlike every other persistence handler's reset(); the aggregated count is
        now returned. A handler returning None is treated as 0 affected resources.
        """
        affected = 0
        for handler in self.fileHandlersList: affected += (handler.reset(status) or 0)
        return affected
    def shutdown(self):
        """Shut down every attached file handler (flushing each file to disk)."""
        for handler in self.fileHandlersList: handler.shutdown()
class MySQLPersistenceHandler(BasePersistenceHandler):
    """Store and retrieve resources to/from a MySQL database.
    The table must already exist in the database and must contain at least three columns: a primary key column, a resource ID column and a status column.
    .. note::
        This handler uses `MySQL Connector/Python <http://dev.mysql.com/doc/connector-python/en/index.html>`_ to interact with MySQL databases.
    """
    def __init__(self, configurationsDictionary):
        # Available resource keys are produced by a background "select cache" thread and consumed
        # through self.resourcesQueue; the events and condition below coordinate that thread with
        # client threads (exception signaling, "no resources" signaling, refill hand-off).
        BasePersistenceHandler.__init__(self, configurationsDictionary)
        self.echo = common.EchoHandler(self.config["echo"])
        # Per-thread storage: each client thread gets its own DB connection via setup().
        self.local = threading.local()
        self.selectCacheThreadExceptionEvent = threading.Event()
        self.selectNoResourcesEvent = threading.Event()
        self.selectWaitCondition = threading.Condition()
        # Get column names (LIMIT 0 returns metadata only, no rows)
        query = "SELECT * FROM " + self.config["table"] + " LIMIT 0"
        connection = mysql.connector.connect(**self.config["connargs"])
        cursor = connection.cursor()
        cursor.execute(query)
        cursor.fetchall()
        self.colNames = cursor.column_names
        cursor.close()
        connection.close()
        self.excludedColNames = (self.config["primarykeycolumn"], self.config["resourceidcolumn"], self.config["statuscolumn"])
        self.infoColNames = [name for name in self.colNames if (name not in self.excludedColNames)]
        # Start select cache thread
        self.resourcesQueue = Queue.Queue()
        t = threading.Thread(target = self._selectCacheThread)
        t.daemon = True
        t.start()
        # Block until the cache thread signals completion of its first fill attempt.
        with self.selectWaitCondition: self.selectWaitCondition.wait()
    def _extractConfig(self, configurationsDictionary):
        """Validate and normalize handler-specific configuration keys."""
        BasePersistenceHandler._extractConfig(self, configurationsDictionary)
        if ("selectcachesize" not in self.config): raise KeyError("Parameter 'selectcachesize' must be specified.")
        else: self.config["selectcachesize"] = int(self.config["selectcachesize"])
        if ("onduplicateupdate" not in self.config): self.config["onduplicateupdate"] = False
        else: self.config["onduplicateupdate"] = common.str2bool(self.config["onduplicateupdate"])
    def _selectCacheQuery(self):
        """Fetch up to 'selectcachesize' primary keys of AVAILABLE resources (its own short-lived connection)."""
        query = "SELECT " + self.config["primarykeycolumn"] + " FROM " + self.config["table"] + " WHERE " + self.config["statuscolumn"] + " = %s ORDER BY " + self.config["primarykeycolumn"]
        # selectcachesize <= 0 means "no limit"
        if (self.config["selectcachesize"] > 0): query += " LIMIT %d" % self.config["selectcachesize"]
        connection = mysql.connector.connect(**self.config["connargs"])
        connection.autocommit = True
        cursor = connection.cursor()
        cursor.execute(query, (self.status.AVAILABLE,))
        resourcesKeys = cursor.fetchall()
        cursor.close()
        connection.close()
        return resourcesKeys
    def _selectCacheThread(self):
        """Background loop that keeps self.resourcesQueue filled with available resource keys.

        Waits on resourcesQueue.join() after each refill so it only re-queries the database
        once all cached keys have been consumed (task_done() is called in select()).
        """
        try:
            previouslyEmpty = False
            while True:
                if not previouslyEmpty: self.echo.out("[Table: %s] Select cache empty. Querying database..." % self.config["table"])
                resourcesKeys = self._selectCacheQuery()
                if resourcesKeys:
                    if previouslyEmpty: self.echo.out("[Table: %s] New resources available now." % self.config["table"])
                    self.selectNoResourcesEvent.clear()
                    previouslyEmpty = False
                    self.echo.out("[Table: %s] Filling select cache with resources keys..." % self.config["table"])
                    for key in resourcesKeys: self.resourcesQueue.put(key[0])
                    self.echo.out("[Table: %s] Select cache filled." % self.config["table"])
                    # Wake whoever is waiting (constructor or a blocked select()/insert()).
                    with self.selectWaitCondition: self.selectWaitCondition.notify()
                    self.resourcesQueue.join()
                else:
                    if not previouslyEmpty: self.echo.out("[Table: %s] No available resources found." % self.config["table"])
                    self.selectNoResourcesEvent.set()
                    previouslyEmpty = True
                    # Sleep until insert()/reset() notifies that new resources may exist.
                    with self.selectWaitCondition:
                        self.selectWaitCondition.notify()
                        self.selectWaitCondition.wait()
        except:
            # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt; kept as-is because
            # this daemon thread must flag *any* failure through the event so select() can abort.
            self.selectCacheThreadExceptionEvent.set()
            self.echo.out("[Table: %s] Exception while trying to fill select cache." % self.config["table"], "EXCEPTION")
    def setup(self):
        """Open a dedicated autocommit connection for the calling thread."""
        self.local.connection = mysql.connector.connect(**self.config["connargs"])
        self.local.connection.autocommit = True
    def select(self):
        """Return (key, id, info) for the next available resource and mark it INPROGRESS.

        Returns (None, None, None) when the cache thread reports no available resources.
        Raises RuntimeError if the cache thread died.
        """
        # Try to get resource key from select cache
        while True:
            try:
                resourceKey = self.resourcesQueue.get_nowait()
            except Queue.Empty:
                if self.selectCacheThreadExceptionEvent.is_set():
                    raise RuntimeError("Exception in select cache thread. Execution of MySQLPersistenceHandler aborted.")
                elif self.selectNoResourcesEvent.is_set():
                    with self.selectWaitCondition: self.selectWaitCondition.notify()
                    return (None, None, None)
            else: break
        # Fetch resource information and mark it as being processed
        cursor = self.local.connection.cursor(dictionary = True)
        query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
        cursor.execute(query, (self.status.INPROGRESS, resourceKey))
        # Mark the cached key as consumed so the cache thread can refill once the queue drains.
        self.resourcesQueue.task_done()
        query = "SELECT * FROM " + self.config["table"] + " WHERE " + self.config["primarykeycolumn"] + " = %s"
        cursor.execute(query, (resourceKey,))
        resource = cursor.fetchone()
        cursor.close()
        return (resource[self.config["primarykeycolumn"]],
                resource[self.config["resourceidcolumn"]],
                {k: resource[k] for k in self.infoColNames})
    def update(self, resourceKey, status, resourceInfo):
        """Set the status (and optionally extra info columns) of one resource."""
        cursor = self.local.connection.cursor()
        if (not resourceInfo):
            query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
            cursor.execute(query, (status, resourceKey))
        else:
            # Never let callers overwrite the key/ID/status columns through resourceInfo.
            info = {k: resourceInfo[k] for k in resourceInfo if (k not in self.excludedColNames)}
            query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s, " + " = %s, ".join(info.keys()) + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
            cursor.execute(query, (status,) + tuple(info.values()) + (resourceKey,))
        cursor.close()
    def insert(self, resourcesList):
        """Insert all resources in one multi-row INSERT, then wake the select cache thread."""
        # The method cursor.executemany() is optimized for multiple inserts, batching all data into a single INSERT INTO
        # statement. This method would be the best to use here but unfortunately it does not parse the DEFAULT keyword
        # correctly. This way, the alternative is to pre-build the query and send it to cursor.execute() instead.
        if not resourcesList: return
        query = "INSERT INTO " + self.config["table"] + " (" + ", ".join(self.colNames) + ") VALUES "
        data = []
        values = []
        for resourceID, resourceInfo in resourcesList:
            newResource = {self.config["resourceidcolumn"]: resourceID}
            newResource.update(resourceInfo)
            resourceValues = []
            for column in self.colNames:
                if (column in newResource):
                    resourceValues.append("%s")
                    data.append(newResource[column])
                # Columns not provided fall back to the table's DEFAULT value.
                else: resourceValues.append("DEFAULT")
            values.append("(" + ", ".join(resourceValues) + ")")
        query += ", ".join(values)
        if (self.config["onduplicateupdate"]):
            query += " ON DUPLICATE KEY UPDATE " + ", ".join(["{0} = VALUES({0})".format(column) for column in self.infoColNames])
        cursor = self.local.connection.cursor()
        cursor.execute(query, data)
        cursor.close()
        # New rows may be AVAILABLE: clear the "empty" flag and wake the cache thread.
        self.selectNoResourcesEvent.clear()
        with self.selectWaitCondition: self.selectWaitCondition.notify()
    def count(self):
        """Return (total, succeeded, inprogress, available, failed, error) row counts."""
        query = "SELECT " + self.config["statuscolumn"] + ", count(*) FROM " + self.config["table"] + " GROUP BY " + self.config["statuscolumn"]
        cursor = self.local.connection.cursor()
        cursor.execute(query)
        result = cursor.fetchall()
        cursor.close()
        counts = [0, 0, 0, 0, 0, 0]
        for row in result:
            if (row[0] == self.status.SUCCEEDED): counts[1] = row[1]
            elif (row[0] == self.status.INPROGRESS): counts[2] = row[1]
            elif (row[0] == self.status.AVAILABLE): counts[3] = row[1]
            elif (row[0] == self.status.FAILED): counts[4] = row[1]
            elif (row[0] == self.status.ERROR): counts[5] = row[1]
            counts[0] += row[1]
        return tuple(counts)
    def reset(self, status):
        """Flip all rows with *status* back to AVAILABLE; returns the number of affected rows."""
        query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["statuscolumn"] + " = %s"
        cursor = self.local.connection.cursor()
        cursor.execute(query, (self.status.AVAILABLE, status))
        affectedRows = cursor.rowcount
        cursor.close()
        # Wake the cache thread: previously exhausted resources may be available again.
        with self.selectWaitCondition: self.selectWaitCondition.notify()
        return affectedRows
    def finish(self):
        """Close the calling thread's connection."""
        self.local.connection.close()
| 52.064782 | 480 | 0.61532 |
acf82281e1088e2ec9150beb67b54db9bc1abd1b | 2,816 | py | Python | src/jk_sysinfo/entity/DrivePartitionInfo.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null | src/jk_sysinfo/entity/DrivePartitionInfo.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null | src/jk_sysinfo/entity/DrivePartitionInfo.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null |
import os
import typing
import jk_typing
import jk_utils
import jk_json
import jk_prettyprintobj
class DrivePartitionInfo(jk_prettyprintobj.DumpMixin):
	"""
	Represents a single disk partition as reported by a ``lsblk`` JSON record of type ``"part"``.
	"""

	################################################################################################################################
	## Constructor
	################################################################################################################################

	#
	# Constructor method.
	#
	# @param	dict jdata_lsblk_disk		A single "lsblk" JSON record; must have "type" == "part".
	#
	@jk_typing.checkFunctionSignature()
	def __init__(self, jdata_lsblk_disk:dict):
		assert jdata_lsblk_disk["type"] == "part"

		self.devicePath = jdata_lsblk_disk["dev"]
		self.fsavail = jdata_lsblk_disk["fsavail"]
		self.fssize = jdata_lsblk_disk["fssize"]
		self.fsused = jdata_lsblk_disk["fsused"]
		self.fstype = jdata_lsblk_disk["fstype"]
		self.mountpoint = jdata_lsblk_disk["mountpoint"]
		self.partflags = jdata_lsblk_disk["partflags"]
		self.parttype = jdata_lsblk_disk["parttype"]
		self.partlabel = jdata_lsblk_disk["partlabel"]
		self.partuuid = jdata_lsblk_disk["partuuid"]
		self.ptuuid = jdata_lsblk_disk["ptuuid"]
		self.uuid = jdata_lsblk_disk["uuid"]
		self.size = jdata_lsblk_disk["size"]
	#

	################################################################################################################################
	## Public Properties
	################################################################################################################################

	################################################################################################################################
	## Helper Methods
	################################################################################################################################

	def _dumpVarNames(self) -> list:
		# Bug fix: the previous list also contained drive-level attributes (formFactor,
		# nominalMediaRotationRate, firmwareRevision, transportHR, isNCQSupported,
		# isTRIMSupported) apparently copy-pasted from a drive info class; they are never
		# assigned on partitions, so dumping would raise AttributeError. Only the
		# attributes assigned in __init__ are listed now.
		return [
			"devicePath",
			"fsavail",
			"fssize",
			"fsused",
			"fstype",
			"mountpoint",
			"partflags",
			"parttype",
			"partlabel",
			"partuuid",
			"ptuuid",
			"uuid",
			"size",
		]
	#

	################################################################################################################################
	## Public Methods
	################################################################################################################################

	#
	# Serialize this partition record to a plain JSON-compatible dictionary.
	# Deriving the keys from _dumpVarNames() keeps the JSON output and the debug
	# dump in sync (same attributes, same order).
	#
	def toJSON(self) -> dict:
		return { varName: getattr(self, varName) for varName in self._dumpVarNames() }
	#
| 26.317757 | 129 | 0.430753 |
acf82338a596adff5bdfc79f57447aa3e60ded01 | 22,575 | py | Python | XEOpsDatabase/GetWithNetmiko.py | cober2019/IOS-XE-Ops | fc4f1515369a3b722e0641495ee3292aa0d04447 | [
"MIT"
] | 16 | 2020-11-27T07:20:37.000Z | 2022-03-06T14:37:50.000Z | XEOpsDatabase/GetWithNetmiko.py | cober2019/IOS-XE-Ops | fc4f1515369a3b722e0641495ee3292aa0d04447 | [
"MIT"
] | 1 | 2020-12-06T12:36:37.000Z | 2020-12-08T00:16:25.000Z | XEOpsDatabase/GetWithNetmiko.py | cober2019/IOS-XE-Ops | fc4f1515369a3b722e0641495ee3292aa0d04447 | [
"MIT"
] | 4 | 2020-12-06T12:07:08.000Z | 2021-10-04T11:36:01.000Z | """Helper class which collect and call to database for writing"""
import connection as ConnectWith
import DbOperations as DbOps
from netmiko import ssh_exception
import ipaddress
def netconf_trunk_helper(interface, username, password, device):
"""Get route-map names"""
session = ConnectWith.creat_netmiko_connection(username, password, device, 22)
vlans = []
int_trunk_command = f'show run interface {interface} | i allowed vlan'
get_int_trunk = send_command(int_trunk_command, session)
for line in get_int_trunk.splitlines():
if not list(enumerate(line.split(), 0)):
continue
elif line.split()[0] == '^':
break
elif len(line.split()) == 5:
vlans.append(line.split()[4])
elif len(line.split()) == 6:
vlans.append(line.split()[5])
return vlans
def get_dmvpn_interface(session, interface, device):
    """Scrape a DMVPN tunnel interface's running config and store its attributes.

    Parses address, tunnel source/mode, NHRP network-id/holdtime, IPsec profile and
    NHRP phase-3 commands (shortcut/redirect) from 'show run interface <interface>'.
    NOTE(review): the DB row is written inside the 'protection' branch, i.e. only
    when a tunnel-protection line is present — presumably intentional because the
    profile is the last attribute in the config block; confirm against real output.
    """
    ip_add, tunnel_source, tunnel_mode, network_id, holdtime, profile, nhrp_shortcut, nhrp_red = None, None, None, None, \
        None, None, None, None
    DbOps.delete_rows('dmvpninterfaces_back_end', device)
    try:
        for line in send_command(f'show run interface {interface} | ex Current|Building|!', session).splitlines():
            if len(line.split()) == 0:
                continue
            elif '^' == line.split()[0]:
                # Caret marker: command rejected by the device; stop parsing.
                break
            elif 'network-id' in line:
                network_id = line.split()[3]
            elif 'interface' in line:
                # Skip the "interface TunnelX" header line (would otherwise match 'address' etc.).
                pass
            elif 'address' in line:
                ip_add = f'{line.split()[2]} {line.split()[3]}'
            elif 'source' in line:
                tunnel_source = line.split()[2]
            elif 'mode' in line:
                tunnel_mode = f'{line.split()[2]} {line.split()[3]}'
            elif 'protection' in line:
                profile = line.split()[4]
                DbOps.update_dmvpn_interfaces(device, interface, ip_add, tunnel_source, tunnel_mode, network_id,
                                              holdtime, profile, nhrp_shortcut, nhrp_red)
            elif 'holdtime' in line:
                holdtime = line.split()[3]
            # Check dmvpn phase commands
            if 'shortcut' in line:
                nhrp_shortcut = line.split()[2]
            if 'nhrp redirect' in line:
                nhrp_red = line.split()[2]
    except AttributeError:
        # send_command() returned None (connection failure) — nothing to parse.
        pass
def send_command(command, session):
    """Run *command* on an established Netmiko *session*.

    Retries up to three times on transport/session errors; returns the device
    output on success, or the string 'Error Connecting' after three failures.
    """
    for _attempt in range(3):
        try:
            return session.send_command(command)
        except (OSError, TypeError, AttributeError, ssh_exception.NetmikoTimeoutException, EOFError):
            continue
    return 'Error Connecting'
class PollWithNetmiko:
    """Polls an IOS-XE device over SSH (Netmiko) and writes the results to the back-end DB.

    Instantiating the class immediately runs one full polling pass (start_polling);
    the constructor does not return until that pass completes.
    """
    def __init__(self, device, username, password, ssh_port, model, netconf_port):
        self.device = device
        self.username = username
        self.password = password
        self.ssh_port = ssh_port
        self.netconf_port = netconf_port
        self.model = model
        # Netmiko session; created by start_polling().
        self.session = None
        # Calling polling method
        self.start_polling()
    def start_polling(self):
        """Run one polling pass: open an SSH session, collect device state, write it to the DB.

        (Despite its name this is a single pass, not an endless loop.)
        """
        self.session = ConnectWith.creat_netmiko_connection(self.username, self.password, self.device,
                                                            self.ssh_port)
        # Check for device type. Insures polling only happens with compatable technologies.
        # self.model[:3][-2:] takes chars 2-3 of the model prefix: 'SR' matches ASR/CSR/ISR
        # routers (DMVPN data), anything else is treated as a switch (VLAN/MAC/STP data).
        if self.model[:3][-2:] != 'SR':
            self._get_vlans()
            self._get_mac_arp_table()
            self._get_access_ports()
            self._get_span_root()
        elif self.model[:3][-2:] == 'SR':
            self._get_dmvpn()
            self._get_dmvpn_info()
        # Collected for routers and switches alike.
        self._get_arp()
        self._get_cdp_neighbors()
        self._get_ospf_status()
        self._get_bgp_status()
        self._get_vrfs()
        self._get_ospf_processes()
        self._get_route_maps()
        self._get_hsrp_status()
        self._get_ospf_routers()
        self._gather_facts()
        DbOps.copy_db_table(self.device)
def send_command(self, command, expect_string=None):
"""Send Netmiko commands"""
get_response = None
try:
get_response = self.session.send_command(command, expect_string=expect_string)
except (OSError, TypeError, AttributeError, ssh_exception.NetmikoTimeoutException, EOFError):
pass
return get_response
    def get_model(self):
        """Parse 'show inventory' and return the first three letters of the chassis model, or None."""
        model = None
        try:
            for i in self.send_command('show inventory').splitlines():
                if i.rfind('Chassis') != -1:
                    # Assumes the chassis line's second quoted field is like "Cisco ASR1001 Chassis":
                    # second word, first three chars -> 'ASR'. TODO confirm per platform.
                    model = i.split("\"")[3].split()[1][0:3]
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
        return model
    def _get_vrfs(self):
        """Collect VRF names from 'show vrf' and store them."""
        # Delete table data
        DbOps.delete_rows('vrfs_back_end', self.device)
        try:
            for i in self.send_command('show vrf').splitlines():
                try:
                    # Skip the header row (contains 'Name'); first token is the VRF name.
                    if i.rfind('Name') == -1:
                        DbOps.update_vrfs_table(self.device, i.split()[0])
                except IndexError:
                    # Blank/short line — ignore.
                    pass
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_bgp_status(self):
        """Collect BGP neighbor state from 'show ip bgp summary' and store it."""
        # NOTE(review): local_as is kept as a one-element LIST (the [-1:] slice below matches
        # this ['Null'] placeholder) — presumably DbOps expects that shape; TODO confirm.
        local_as = ['Null']
        # Delete table data
        DbOps.delete_rows('bgp_back_end', self.device)
        try:
            for i in self.send_command('show ip bgp summary').splitlines():
                if i.rfind('local AS number') != -1:
                    local_as = i.split()[-1:]
                try:
                    # Only neighbor rows start with a valid IP address; header lines raise ValueError.
                    ipaddress.ip_address(i.split()[0])
                    DbOps.update_bgp_table(self.device, i.split()[0], i.split()[2], i.split()[8], i.split()[9],
                                           local_as)
                except (ValueError, IndexError):
                    pass
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
def _get_ospf_status(self):
"""Gets OSPF neighbor statuses"""
# Delete table data
DbOps.delete_rows('ospf_back_end', self.device)
try:
if self.send_command('show ip ospf neighbor').splitlines():
for i in self.send_command('show ip ospf neighbor').splitlines():
try:
ipaddress.ip_address(i.split()[0])
DbOps.update_ospf_table(self.device, i.split()[0], i.split()[2].strip("/"), i.split()[5],
i.split()[6])
except (ValueError, IndexError):
pass
else:
if self.send_command('show ip ospf').splitlines():
DbOps.update_ospf_table(self.device, 'No Established Neighbors', 'None', 'None', 'None')
except AttributeError:
pass
    def _get_ospf_processes(self):
        """Collect OSPF process IDs from 'show ip ospf | i Process' and store them."""
        # Deletes table data
        DbOps.delete_rows('ospfprocess_back_end', self.device)
        try:
            # NOTE(review): the command is issued twice (guard + loop) — an extra SSH round-trip.
            if self.send_command('show ip ospf | i Process'):
                for process in self.send_command('show ip ospf | i Process').splitlines():
                    try:
                        # Line like: Routing Process "ospf 1" ... -> take '1' from inside the quotes.
                        DbOps.update_ospf_process_table(self.device, process.split('"')[1].split()[1])
                    except IndexError:
                        continue
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_arp(self):
        """Collect the ARP table from 'show ip arp' and store it."""
        # Delete table data
        DbOps.delete_rows('arp_back_end', self.device)
        try:
            for i in self.send_command('show ip arp').splitlines():
                try:
                    # Skip the header row; columns: protocol, address, age, mac, type, interface.
                    if i.split()[0] != 'Protocol':
                        DbOps.update_arp_table(self.device, i.split()[0], i.split()[1], i.split()[2], i.split()[3],
                                               i.split()[4],
                                               i.split()[5])
                except IndexError:
                    # Blank/short line — ignore.
                    pass
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_cdp_neighbors(self):
        """Collect CDP neighbors (name, local port, remote port) and store them.

        'show cdp neighbors' wraps long device names onto their own line, so a
        single-token line sets the current neighbor name and the following
        7/8/9-token line carries the port columns.
        """
        name = None
        # Delete table data
        DbOps.delete_rows('cdp_back_end', self.device)
        try:
            for neighbor in self.send_command('show cdp neighbors').splitlines():
                try:
                    if not neighbor:
                        continue
                    # NOTE(review): split() never yields '' tokens, so this branch is unreachable.
                    elif neighbor.split()[0] == "":
                        continue
                    elif neighbor.split()[0] == 'Capability':
                        continue
                    elif neighbor.split()[0] == 'Device':
                        continue
                    if len(neighbor.split()) == 1:
                        # Wrapped line holding only the neighbor's device name.
                        name = neighbor.split()[0]
                    elif len(neighbor.split()) == 7:
                        remote_port = neighbor.split()[5] + neighbor.split()[6]
                        local_port = neighbor.split()[0] + neighbor.split()[1]
                        DbOps.update_cdp_table(self.device, name, local_port, remote_port)
                        continue
                    elif len(neighbor.split()) == 8:
                        remote_port = neighbor.split()[6] + neighbor.split()[7]
                        local_port = neighbor.split()[0] + neighbor.split()[1]
                        DbOps.update_cdp_table(self.device, name, local_port, remote_port)
                        continue
                    elif len(neighbor.split()) == 9:
                        remote_port = neighbor.split()[7] + neighbor.split()[8]
                        local_port = neighbor.split()[0] + neighbor.split()[1]
                        DbOps.update_cdp_table(self.device, name, local_port, remote_port)
                        continue
                except IndexError:
                    continue
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_route_maps(self):
        """Collect route-map names from 'show route-map' and store them (deduplicating consecutive repeats)."""
        # Tracks the last stored name so multiple sequences of the same route-map
        # produce a single row (entries for one map are listed consecutively).
        map_name = None
        # Deletes table data
        DbOps.delete_rows('routemaps_back_end', self.device)
        try:
            route_map = self.send_command('show route-map | i route-map').splitlines()
            if route_map:
                for line in route_map:
                    if len(line.split()) == 0:
                        continue
                    elif line.split()[1] != map_name:
                        DbOps.update_route_maps(self.device, line.split()[1])
                        map_name = line.split()[1]
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_span_root(self):
        """Collect per-VLAN spanning-tree root data from 'show spanning-tree root' and store it.

        NOTE(review): unlike the other collectors there is no try/except here — a failed
        send_command() (None) would raise AttributeError to the caller; confirm intended.
        """
        # Delete table data
        DbOps.delete_rows('spanningtree_back_end', self.device)
        for vlan in self.send_command('show spanning-tree root | ex Vlan|-').splitlines():
            if len(vlan.split()) == 0:
                continue
            # 7 tokens: no root-port column (this switch is the root) -> store ''.
            elif len(vlan.split()) == 7:
                DbOps.update_spann_tree_table(self.device, vlan.split()[0].strip('VLAN'), vlan.split()[1],
                                              vlan.split()[2], vlan.split()[3], '')
            elif len(vlan.split()) == 8:
                DbOps.update_spann_tree_table(self.device, vlan.split()[0].strip('VLAN'), vlan.split()[1],
                                              vlan.split()[2], vlan.split()[3], vlan.split()[7])
    def _get_mac_arp_table(self):
        """Collect the MAC address table, join it with the ARP table on MAC, and store the result."""
        mac_table = []
        arp_table = []
        # Delete table data
        DbOps.delete_rows('arpmac_back_end', self.device)
        try:
            for mac in self.send_command('show mac address-table | ex Vlan|All|Total|%|-').splitlines():
                try:
                    mac_table.append({'vlan': mac.split()[0], 'address': mac.split()[1], 'type': mac.split()[2],
                                      'interface': mac.split()[3]})
                except IndexError:
                    continue
            # Gets and parse arp table response
            for arp in self.send_command('show ip arp | ex Protocol|Total|%').splitlines():
                try:
                    arp_table.append(
                        {'protocol': arp.split()[0], 'ip': arp.split()[1], 'age': arp.split()[2], 'mac': arp.split()[3],
                         'interface': arp.split()[5]})
                except IndexError:
                    continue
            # Check to see if mac has an arp entry. If so, add k/v to existing dictionary.
            # (for/else: the else branch runs only when no matching ARP entry broke the loop.)
            for mac in mac_table:
                for entry in arp_table:
                    if mac.get('address') == entry.get('mac'):
                        mac['ip'] = entry.get('ip')
                        mac['ip_int'] = entry.get('interface')
                        break
                else:
                    mac['ip'] = 'None'
                    mac['ip_int'] = 'None'
            if mac_table:
                for i in mac_table:
                    DbOps.update_mac_arp_table(self.device, i['vlan'], i['address'], i['type'], i['interface'],
                                               i['ip'], i['ip_int'])
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_access_ports(self):
        """Collect interface status rows from 'show interfaces status' and store them.

        The token count distinguishes rows with/without a description column:
        7 tokens = full row, 6/5 tokens = no description ('N/A' substituted).
        """
        # Deletes table data
        DbOps.delete_rows('accessinterfaces_back_end', self.device)
        try:
            for line in self.send_command('show interfaces status | ex Port').splitlines():
                if len(line.split()) == 0:
                    continue
                else:
                    if len(line.split()) == 7:
                        DbOps.update_access_interfaces_table(self.device, line.split()[0], line.split()[1],
                                                             line.split()[2],
                                                             line.split()[3],
                                                             line.split()[4], line.split()[5])
                    elif len(line.split()) == 6:
                        DbOps.update_access_interfaces_table(self.device, line.split()[0], 'N/A', line.split()[1],
                                                             line.split()[2],
                                                             line.split()[4], line.split()[5])
                    elif len(line.split()) == 5:
                        DbOps.update_access_interfaces_table(self.device, line.split()[0], 'N/A', line.split()[1],
                                                             line.split()[2],
                                                             line.split()[4], 'N/A')
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_vlans(self):
        """Collect VLANs from 'show vlan brief', match each to its STP bridge priority, and store them.

        NOTE(review): 'show spanning-tree bridge priority' is re-sent once per VLAN —
        an SSH round-trip per VLAN; consider caching the output.
        """
        # Deletes table data
        DbOps.delete_rows('vlans_back_end', self.device)
        try:
            for vlan in self.send_command('show vlan brief').splitlines():
                if len(vlan.split()) == 0:
                    continue
                elif vlan.split()[0] == '^':
                    # Caret marker: command rejected by the device; stop parsing.
                    break
                elif vlan.split()[0] == 'VLAN':
                    continue
                elif vlan.split()[0] == '----':
                    continue
                # Get vlan ports (a leading token containing '/' means a wrapped port-list line)
                if vlan.split()[0].rfind("/") != -1:
                    vlan_ports = ' '.join(vlan.split())
                else:
                    vlan_ports = ' '.join(vlan.split()[3:])
                # Compare vlan id (show vlan) to vlan priority. Use indexing since vlan prio is VLAN + 4 ints, 0000
                for prio in self.send_command('show spanning-tree bridge priority').splitlines():
                    try:
                        if len(prio.split()) == 0:
                            continue
                        # First token is like 'VLAN0001'; slice off the 'VLAN' prefix and leading zeros.
                        elif vlan.split()[0] == prio.split()[0][5:]:
                            DbOps.update_vlan_table(self.device, vlan.split()[0], prio.split()[1],
                                                    vlan.split()[1], vlan.split()[2], vlan_ports)
                            break
                        elif vlan.split()[0] == prio.split()[0][6:]:
                            DbOps.update_vlan_table(self.device, vlan.split()[0], prio.split()[1],
                                                    vlan.split()[1], vlan.split()[2], vlan_ports)
                            break
                        # NOTE(review): [7] is a single character, unlike the [5:]/[6:] slices above —
                        # looks like it was meant to be [7:]; confirm against real device output.
                        elif vlan.split()[0] == prio.split()[0][7]:
                            DbOps.update_vlan_table(self.device, vlan.split()[0], prio.split()[1],
                                                    vlan.split()[1], vlan.split()[2], vlan_ports)
                            break
                        else:
                            # No priority match on the first data line: store 'N/A' and stop looking.
                            DbOps.update_vlan_table(self.device, vlan.split()[0], 'N/A',
                                                    vlan.split()[1], vlan.split()[2], vlan_ports)
                            break
                    except IndexError:
                        pass
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_hsrp_status(self):
        """Collect HSRP group state from 'show standby brief' and store it."""
        # Delete table data
        DbOps.delete_rows('hsrp_back_end', self.device)
        try:
            for interface in self.send_command('show standby brief | ex Interface').splitlines():
                if len(interface.split()) == 0:
                    continue
                else:
                    try:
                        # Columns: interface, group, prio, preempt, state, active, standby, virtual IP.
                        DbOps.update_hsrp_table(self.device, interface.split()[0], interface.split()[1],
                                                interface.split()[2], interface.split()[3], interface.split()[4],
                                                interface.split()[5], interface.split()[6], interface.split()[7])
                    except IndexError:
                        pass
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_ospf_routers(self):
        """Collect OSPF border routers from 'show ip ospf border-routers' and store them."""
        # The header line ('OSPF ... Router with ID (x.x.x.x) (Process ID n)') sets these
        # for the data rows that follow it.
        process, router_id = None, None
        # Delete table data
        DbOps.delete_rows('ospfrouters_back_end', self.device)
        try:
            for line in self.send_command('show ip ospf border-routers | ex Codes|Internal|Base').splitlines():
                if len(line.split()) == 0:
                    continue
                elif line.split()[0] == 'OSPF':
                    router_id = line.split()[4].strip(')').strip('(')
                    process = line.split()[7].strip(')')
                # Only fully-populated 11-token data rows are stored.
                elif len(line.split()) == 11:
                    DbOps.update_ospf_router_table(self.device, process, router_id, line.split()[1], line.split()[0],
                                                   line.split()[2].strip(']').strip('['), line.split()[4].strip(','),
                                                   line.split()[5].strip(','),
                                                   line.split()[6].strip(','),
                                                   f'{line.split()[7]} {line.split()[8].strip(",")}', line.split()[10])
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_dmvpn(self):
        """Collect DMVPN peer rows from 'show dmvpn' and store them."""
        # NOTE(review): these locals are never used in this method; kept for byte-compat.
        interface, router_type = None, None
        # Delete table data
        DbOps.delete_rows('dmvpn_back_end', self.device)
        try:
            for line in self.send_command('show dmvpn | b Interface').splitlines():
                # Skip blanks, separator rows ('-') and the legend ('#').
                if len(line.split()) == 0 or '-' in line or '#' in line:
                    continue
                # 6-token peer rows: entry count, NBMA addr, tunnel addr, state, uptime.
                elif len(line.split()) == 6:
                    DbOps.update_dmvpn_table(self.device, line.split()[1], line.split()[2],
                                             line.split()[3], line.split()[4], line.split()[5])
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _get_dmvpn_info(self):
        """Collect per-tunnel DMVPN summary (type, peer count) and trigger per-interface config scraping."""
        interface = None
        # Delete table data
        DbOps.delete_rows('dmvpncount_back_end', self.device)
        try:
            for line in self.send_command('show dmvpn | i Interface|Type').splitlines():
                if len(line.split()) == 0:
                    continue
                # 5-token line names the tunnel interface; also scrape its running config.
                elif len(line.split()) == 5:
                    interface = line.split()[1].strip(',')
                    get_dmvpn_interface(self.session, interface, self.device)
                # 3-token line like 'Type:Hub, Total NBMA Peers (v4/v6): N'.
                elif len(line.split()) == 3:
                    router_type = line.split(':')[1].split(',')[0]
                    # NOTE(review): str.strip('Peers:') strips the CHARACTER SET {P,e,r,s,:},
                    # not the prefix string — works here only by accident; confirm output format.
                    peer_count = line.split()[2].strip('Peers:').strip(',')
                    DbOps.update_dmvpn_count(self.device, interface, router_type, peer_count)
        except AttributeError:
            # send_command() returned None (connection failure).
            pass
    def _gather_facts(self):
        """Collect serial, model, uptime and software version and store them with the device credentials.

        NOTE(review): no try/except here — a failed send_command() (None) raises
        AttributeError to the caller; confirm intended.
        """
        serial, model, uptime, software = None, None, None, None
        for i in self.send_command('show inventory').splitlines():
            if i.rfind('Chassis') != -1:
                # Second quoted field assumed like "Cisco ASR1001 Chassis" -> second word.
                model = i.split("\"")[3].split(' ')[1]
            elif i.rfind('NAME') != -1:
                model = i.split("\"")[1]
            if i.rfind('SN') != -1:
                serial = i.split('SN: ')[1]
                # Only the first inventory entry (the chassis) is wanted.
                break
        for i in self.send_command('show version').splitlines():
            if i.rfind('Uptime') != -1:
                # NOTE(review): splits on the substring 'is' anywhere in the line, not the word — fragile.
                uptime = i.split("is")[2]
                break
            elif i.rfind('RELEASE SOFTWARE') != -1:
                software = i
        DbOps.update_device_facts(self.device, serial, model, uptime, software, self.username, self.password,
                                  self.ssh_port, self.netconf_port)
| 40.169039 | 123 | 0.472824 |
acf8237757c295275538353fc764ebdf85381ca9 | 1,517 | py | Python | ppo2_curiosity.py | flyyufelix/sonic_contest | bba80d7049fc1586a42c05905bae75c271657761 | [
"MIT"
] | 32 | 2018-06-14T01:53:12.000Z | 2022-03-22T06:53:22.000Z | ppo2_curiosity.py | flyyufelix/sonic_contest | bba80d7049fc1586a42c05905bae75c271657761 | [
"MIT"
] | 1 | 2018-07-24T17:14:41.000Z | 2018-07-24T17:14:41.000Z | ppo2_curiosity.py | flyyufelix/sonic_contest | bba80d7049fc1586a42c05905bae75c271657761 | [
"MIT"
] | 10 | 2018-08-28T12:28:26.000Z | 2019-07-18T02:23:44.000Z | #!/usr/bin/env python
"""
Train an agent on Sonic using PPO2 with curiosity-driven exploration (https://arxiv.org/abs/1705.05363)
"""
import os
import tensorflow as tf
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
# Code for curiosity-driven exploration can be found in baselines/ppo2_curiosity
import baselines.ppo2_curiosity.ppo2 as ppo2
import baselines.ppo2_curiosity.policies as policies
import gym_remote.exceptions as gre
from sonic_util import make_env
def main():
    """Train PPO2 (with curiosity) until the environment raises an exception."""
    session_config = tf.ConfigProto()
    # Grow GPU memory on demand instead of reserving it all upfront.
    session_config.gpu_options.allow_growth = True  # pylint: disable=E1101
    with tf.Session(config=session_config):
        # Request far more timesteps than will ever run; the job terminates
        # via an exception raised by the remote environment.
        ppo2.learn(
            policy=policies.CnnPolicy,
            env=DummyVecEnv([make_env]),
            nsteps=4096,
            nminibatches=8,
            lam=0.95,
            gamma=0.99,
            noptepochs=3,
            log_interval=1,
            ent_coef=0.001,
            lr=lambda _: 2e-4,
            cliprange=lambda _: 0.1,
            total_timesteps=int(1e7),
            # Set load_path to None to train from scratch.
            load_path='./pretrain_model')
if __name__ == '__main__':
    try:
        main()
    except gre.GymRemoteError as exc:
        # The remote environment signals termination via an exception
        # (see main()); report it instead of letting it propagate.
        print('exception', exc)
| 32.276596 | 103 | 0.632169 |
acf826271fabd34e795397305592ee8080416985 | 915 | py | Python | src/stories/shortcuts.py | dargor/stories | 550a36506c5ec0a4603c0f14a3c5fe52132ef6bf | [
"BSD-2-Clause"
] | null | null | null | src/stories/shortcuts.py | dargor/stories | 550a36506c5ec0a4603c0f14a3c5fe52132ef6bf | [
"BSD-2-Clause"
] | null | null | null | src/stories/shortcuts.py | dargor/stories | 550a36506c5ec0a4603c0f14a3c5fe52132ef6bf | [
"BSD-2-Clause"
] | null | null | null | """
stories.shortcuts
-----------------
This module contains convenient functions to reduce boilerplate code.
:copyright: (c) 2018-2019 dry-python team.
:license: BSD, see LICENSE for more details.
"""
from ._mounted import ClassMountedStory
def contract_in(cls, *args):
    """Apply a contract to every story mounted on *cls*.

    Usable either directly (``contract_in(Cls, contract)``) or as a
    decorator factory (``@contract_in(Cls)``). Returns the contract so it
    can be used as a decorator.
    """
    def setter(contract):
        for name in dir(cls):
            candidate = getattr(cls, name)
            if type(candidate) is ClassMountedStory:
                candidate.contract(contract)
        return contract
    return setter(*args) if args else setter
def failures_in(cls, *args):
    """Apply a failures definition to every story mounted on *cls*.

    Usable either directly (``failures_in(Cls, failures)``) or as a
    decorator factory (``@failures_in(Cls)``). Returns the failures
    object so it can be used as a decorator.
    """
    def setter(failures):
        for name in dir(cls):
            candidate = getattr(cls, name)
            if type(candidate) is ClassMountedStory:
                candidate.failures(failures)
        return failures
    return setter(*args) if args else setter
| 22.875 | 69 | 0.618579 |
acf826475939032f899468b9c505a629204bc7bb | 424 | py | Python | tests/shared/test_exceptions.py | Creditas/flask-toolkit | 830354b4e2c0f9fdac1d000dcc760226b704628a | [
"MIT"
] | 3 | 2018-07-31T16:11:17.000Z | 2021-08-14T17:03:44.000Z | tests/shared/test_exceptions.py | Creditas/flask-toolkit | 830354b4e2c0f9fdac1d000dcc760226b704628a | [
"MIT"
] | 6 | 2018-07-11T14:34:09.000Z | 2019-11-29T13:53:13.000Z | tests/shared/test_exceptions.py | Creditas/flask-toolkit | 830354b4e2c0f9fdac1d000dcc760226b704628a | [
"MIT"
] | null | null | null | from flask_toolkit.shared.exceptions import (
ObjectDoesNotExistException, ForbiddenException, BadRequestException
)
def test_object_does_not_exist_exception():
    """ObjectDoesNotExistException is constructible and truthy."""
    assert ObjectDoesNotExistException()
def test_forbidden_exception():
    """ForbiddenException is constructible and truthy."""
    assert ForbiddenException()
def test_bad_request_exception():
    """BadRequestException is constructible and truthy."""
    assert BadRequestException()
| 19.272727 | 72 | 0.787736 |
acf82674b13e2a9ea515306e66fe20f058d45f8c | 6,422 | py | Python | examples/model_simulated/graphite_thermistor_simulated.py | swatishs/liota | f490354b5f85bddfaa4d0f78c47d461f66cf33c3 | [
"BSD-2-Clause"
] | 361 | 2016-04-29T14:07:18.000Z | 2021-01-23T10:15:39.000Z | examples/model_simulated/graphite_thermistor_simulated.py | swatishs/liota | f490354b5f85bddfaa4d0f78c47d461f66cf33c3 | [
"BSD-2-Clause"
] | 148 | 2016-05-26T19:27:04.000Z | 2021-02-22T05:39:40.000Z | examples/model_simulated/graphite_thermistor_simulated.py | swatishs/liota | f490354b5f85bddfaa4d0f78c47d461f66cf33c3 | [
"BSD-2-Clause"
] | 153 | 2016-04-28T22:46:23.000Z | 2020-02-13T19:01:07.000Z | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------#
# Copyright © 2015-2016 VMware, Inc. All Rights Reserved. #
# #
# Licensed under the BSD 2-Clause License (the “License”); you may not use #
# this file except in compliance with the License. #
# #
# The BSD 2-Clause License #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met:#
# #
# - Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"#
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #
# THE POSSIBILITY OF SUCH DAMAGE. #
# ----------------------------------------------------------------------------#
import math
import pint
from liota.dcc_comms.socket_comms import SocketDccComms
from liota.dccs.graphite import Graphite
from liota.entities.metrics.metric import Metric
from liota.entities.edge_systems.dell5k_edge_system import Dell5KEdgeSystem
from liota.entities.devices.thermistor_simulated import ThermistorSimulated
from liota.lib.utilities.utility import read_user_config
# getting values from conf file
config = read_user_config('../sampleProp.conf')
def static_vars(**kwargs):
    """Decorator factory that attaches the given attributes to a function.

    Each keyword argument becomes an attribute on the decorated function,
    emulating C-style static variables. The function object itself is
    returned unchanged.
    """
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
# Single shared pint unit registry; all quantities in this module are
# created from it so their units are mutually comparable.
ureg = pint.UnitRegistry()
#---------------------------------------------------------------------------
# The following functions operate on physical variables represented in
# pint objects, and returns a pint object, too.
# Decorators provided by the pint library are used to check the dimensions of
# arguments passed to the functions.
@ureg.check(ureg.volt, ureg.ohm, ureg.volt)
def get_rx(u, r0, ux):
    """Return the unknown resistance in a voltage-divider circuit.

    Solves the divider relation for the unknown resistor:
    rx = r0 * ux / (u - ux).

    Args:
        u: source voltage across the whole divider (volt).
        r0: known reference resistance (ohm).
        ux: voltage measured across the unknown resistor (volt).

    Returns:
        The unknown resistance as a pint quantity (ohm).
    """
    rx = r0 * ux / (u - ux)
    return rx
@ureg.check(1 / ureg.kelvin, 1 / ureg.kelvin, 1 / ureg.kelvin, ureg.ohm)
def get_temperature(c1, c2, c3, rx):
    """Return thermistor temperature from its resistance.

    Implements a Steinhart-Hart-style relation
    1/T = c1 + c2*ln(R) + c3*ln(R)**3; since c1..c3 carry 1/kelvin, the
    result is dimensionally kelvin without any explicit unit attached.

    Args:
        c1, c2, c3: Steinhart-Hart coefficients (1/kelvin).
        rx: thermistor resistance (ohm).

    Returns:
        Temperature as a pint quantity in kelvin.
    """
    temper = 1 / (
        c1 +
        c2 * math.log(rx / ureg.ohm) +
        c3 * math.log(rx / ureg.ohm) ** 3
    )
    #-----------------------------------------------------------------------
    # Here commented is a counter example, showing how a dimension mismatch
    # can be prevented using pint.
    # Since in the correct one above, the unit of temper is
    # already Kelvin, if we multiply it by ureg.kelvin, the unit of the
    # returned values will become ureg.kelvin ** 2, which will consequently
    # throw an exception in succeeding method calls.
    # temper = 1 / ( \
    #     c1 + \
    #     c2 * math.log(rx / ureg.ohm) + \
    #     c3 * math.log(rx / ureg.ohm) ** 3
    # ) * ureg.kelvin

    return temper
#---------------------------------------------------------------------------
# This is a sampling method, which queries the physical model, and calls the
# physical functions to calculate a desired variable.
# In this specific case, it gets the coefficients, voltages, and reference
# resistance from the thermistor simulator, and calls the methods defined
# above to get the temperature.
def get_thermistor_temperature():
    """Sample the simulated thermistor and return its temperature in Celsius.

    Queries the module-global `thermistor_model` (created under __main__)
    for its coefficients, divider voltages and reference resistance, then
    computes the temperature via get_temperature()/get_rx().

    Returns:
        float: temperature in degrees Celsius — a plain scalar, since the
        Metric sampling_function expects a number, not a pint quantity.
    """
    temper = get_temperature(
        thermistor_model.get_c1(),
        thermistor_model.get_c2(),
        thermistor_model.get_c3(),
        get_rx(
            thermistor_model.get_u(),
            thermistor_model.get_r0(),
            thermistor_model.get_ux()
        )
    ).to(ureg.degC)
    return temper.magnitude  # return a scalar for compatibility
#---------------------------------------------------------------------------
# In this example, we demonstrate how data from a simulated device generating
# random physical variables can be directed to graphite data center component
# using Liota.
if __name__ == '__main__':

    edge_system = Dell5KEdgeSystem(config['EdgeSystemName'])

    # Initialize the physical model (simulated thermistor device).
    thermistor_model = ThermistorSimulated(name=config['DeviceName'], ureg=ureg)

    # Graphite is the data center component (DCC); a plain socket is the
    # transport the agent uses to connect to the Graphite instance.
    graphite = Graphite(SocketDccComms(ip=config['GraphiteIP'],
                                       port=config['GraphitePort']))
    # Register the simulated device with the DCC.
    graphite_reg_dev = graphite.register(thermistor_model)

    # Sample the temperature every 5 seconds via get_thermistor_temperature.
    metric_name = "model.thermistor.temperature"
    thermistor_temper = Metric(
        name=metric_name,
        unit=ureg.degC,
        interval=5,
        sampling_function=get_thermistor_temperature
    )
    reg_thermistor_temper = graphite.register(thermistor_temper)
    graphite.create_relationship(graphite_reg_dev, reg_thermistor_temper)
    reg_thermistor_temper.start_collecting()
acf82911dcdd7b9539ce1a2f59f62d7f0e037075 | 67,745 | py | Python | tensorflow/python/compiler/tensorrt/trt_convert.py | zhupengyang/tensorflow | 584cd92f6a2ff3ba63e653e2e3d0c6f78e3d15eb | [
"Apache-2.0"
] | 2 | 2022-01-12T03:17:12.000Z | 2022-01-22T03:00:26.000Z | tensorflow/python/compiler/tensorrt/trt_convert.py | zhupengyang/tensorflow | 584cd92f6a2ff3ba63e653e2e3d0c6f78e3d15eb | [
"Apache-2.0"
] | 3 | 2018-11-01T12:35:49.000Z | 2018-11-24T11:58:17.000Z | tensorflow/python/compiler/tensorrt/trt_convert.py | zhupengyang/tensorflow | 584cd92f6a2ff3ba63e653e2e3d0c6f78e3d15eb | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the Python wrapper conversion to trt_graph."""
import collections
from functools import partial # pylint: disable=g-importing-member
import os
import platform
import tempfile
import six as _six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.compiler.tensorrt import utils as trt_utils
from tensorflow.python.eager import context
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# TF-TRT has no Windows support; fail fast at import time.
if platform.system() == "Windows":
  raise RuntimeError("Windows platform is not supported")

# Lazily load the op, since it's not available in cpu-only builds. Importing
# this at top will cause tests that imports TF-TRT fail when they're built
# and run without CUDA/GPU.
gen_trt_ops = LazyLoader(
    "gen_trt_ops", globals(),
    "tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops")

_pywrap_py_utils = LazyLoader(
    "_pywrap_py_utils", globals(),
    "tensorflow.compiler.tf2tensorrt._pywrap_py_utils")

# Register TRT ops in python, so that when users import this module they can
# execute a TRT-converted graph without calling any of the methods in this
# module.
#
# This will call register_op_list() in
# tensorflow/python/framework/op_def_registry.py, but it doesn't register
# the op or the op kernel in C++ runtime.
try:
  gen_trt_ops.trt_engine_op  # pylint: disable=pointless-statement
except AttributeError:
  # Op unavailable (e.g. CPU-only build); the conversion APIs below still load.
  pass
def _to_bytes(s):
"""Encode s if it is a sequence of chars."""
if isinstance(s, _six.text_type):
return s.encode("utf-8", errors="surrogateescape")
return s
def _to_string(s):
"""Decode s if it is a sequence of bytes."""
if isinstance(s, _six.binary_type):
return s.decode("utf-8")
return s
class TrtPrecisionMode(object):
  """Names of the precision modes TF-TRT accepts."""

  FP32 = "FP32"
  FP16 = "FP16"
  INT8 = "INT8"

  @staticmethod
  def supported_precision_modes():
    """Return every accepted precision-mode string, upper- then lower-case."""
    upper_case_modes = [
        TrtPrecisionMode.FP32, TrtPrecisionMode.FP16, TrtPrecisionMode.INT8
    ]
    return [*upper_case_modes, *(mode.lower() for mode in upper_case_modes)]
# Default max workspace for TRT engines: 1 GiB, large enough that the default
# configuration produces reasonable performance.
DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = 1 << 30

# Names of the dynamic-shape optimization profile strategies.
PROFILE_STRATEGY_RANGE = "Range"
PROFILE_STRATEGY_OPTIMAL = "Optimal"
PROFILE_STRATEGY_RANGE_OPTIMAL = "Range+Optimal"
PROFILE_STRATEGY_IMPLICIT_BATCH_MODE_COMPATIBLE = "ImplicitBatchModeCompatible"


def supported_profile_strategies():
  """Return the list of supported profile strategy names."""
  return [
      PROFILE_STRATEGY_RANGE,
      PROFILE_STRATEGY_OPTIMAL,
      PROFILE_STRATEGY_RANGE_OPTIMAL,
      PROFILE_STRATEGY_IMPLICIT_BATCH_MODE_COMPATIBLE,
  ]
@tf_export("experimental.tensorrt.ConversionParams", v1=[])
class TrtConversionParams(
    collections.namedtuple("TrtConversionParams", [
        "max_workspace_size_bytes", "precision_mode", "minimum_segment_size",
        "maximum_cached_engines", "use_calibration", "allow_build_at_runtime"
    ])):
  """Parameters that are used for TF-TRT conversion.

  Fields:
    max_workspace_size_bytes: maximum GPU scratch memory (bytes) a TRT
      engine may use at execution time; corresponds to the 'workspaceSize'
      parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
    precision_mode: one of the strings returned by
      TrtPrecisionMode.supported_precision_modes().
    minimum_segment_size: smallest number of nodes a subgraph must contain
      before it is replaced by a TRTEngineOp.
    maximum_cached_engines: upper bound on cached TRT engines for dynamic
      TRT ops. Engines built for a dynamic dimension are cached; if the
      cache is full and no cached engine supports the current input shapes,
      the TRTEngineOp falls back to the original TF subgraph.
    use_calibration: ignored unless precision_mode is INT8. If True, a
      calibration graph is created to determine the missing quantization
      ranges; it must be turned into an inference graph by running
      calibration with calibrate(). If False, quantization nodes are
      expected for every tensor (excluding those that get fused), and a
      missing range is an error. Accuracy may suffer if there is a
      mismatch between the tensors TRT quantizes and the ones trained
      with fake quantization.
    allow_build_at_runtime: whether a new TensorRT engine may be built at
      runtime when no prebuilt engine can handle the given inputs; if
      False, native TF is used instead.
  """

  def __new__(cls,
              max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
              precision_mode=TrtPrecisionMode.FP32,
              minimum_segment_size=3,
              maximum_cached_engines=1,
              use_calibration=True,
              allow_build_at_runtime=True):
    field_values = (max_workspace_size_bytes, precision_mode,
                    minimum_segment_size, maximum_cached_engines,
                    use_calibration, allow_build_at_runtime)
    return super(TrtConversionParams, cls).__new__(cls, *field_values)
# Module-level default parameter set (all-default TrtConversionParams).
DEFAULT_TRT_CONVERSION_PARAMS = TrtConversionParams()

# Op type name used for TRT-converted graph segments.
_TRT_ENGINE_OP_NAME = "TRTEngineOp"
def _check_conversion_params(conversion_params, is_v2=False):
  """Validate the provided TrtConversionParams.

  Args:
    conversion_params: a TrtConversionParams instance to validate.
    is_v2: whether the parameters target a TF 2.0 RewriterConfig.

  Raises:
    ValueError: if any of the parameters has an unsupported value.
  """
  valid_modes = TrtPrecisionMode.supported_precision_modes()
  if conversion_params.precision_mode not in valid_modes:
    raise ValueError(
        ("precision mode '{}' is not supported."
         "It should be one of {}").format(conversion_params.precision_mode,
                                          valid_modes))
  segment_size = conversion_params.minimum_segment_size
  # -1 is the sentinel that disables main graph conversion entirely.
  if segment_size != -1 and segment_size <= 0:
    raise ValueError("minimum segment size should be positive or -1 "
                     "(to disable main graph conversion).")
def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Compares the TensorRT library TensorFlow was linked against (build time)
  with the one actually loaded (runtime).

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  if not _pywrap_py_utils.is_tensorrt_enabled():
    logging.error(
        "Tensorflow needs to be built with TensorRT support enabled to allow "
        "TF-TRT to operate.")
    raise RuntimeError("Tensorflow has not been built with TensorRT support.")

  linked_version = _pywrap_py_utils.get_linked_tensorrt_version()
  loaded_version = _pywrap_py_utils.get_loaded_tensorrt_version()

  logging.info("Linked TensorRT version: %s", str(linked_version))
  logging.info("Loaded TensorRT version: %s", str(loaded_version))

  def raise_trt_version_deprecated(version_type, trt_version):
    # Helper: log and raise for TRT versions older than 7.0.0.
    assert version_type in [
        "linked", "loaded"
    ], ("Incorrect value received for version_type: %s. Accepted: ['linked', "
        "'loaded']") % version_type

    logging.error(
        "The {version_type} version of TensorRT: `{trt_version}` has now "
        "been removed. Please upgrade to TensorRT 7 or more recent.".format(
            version_type=version_type,
            trt_version=trt_utils.version_tuple_to_string(trt_version)))
    raise RuntimeError("Incompatible %s TensorRT versions" % version_type)

  # TRT < 7.0.0 is unsupported for both the linked and the loaded library.
  if not trt_utils.is_linked_tensorrt_version_greater_equal(7, 0, 0):
    raise_trt_version_deprecated("linked", linked_version)

  if not trt_utils.is_loaded_tensorrt_version_greater_equal(7, 0, 0):
    raise_trt_version_deprecated("loaded", loaded_version)

  # Major versions must match, and the loaded (runtime) library must be at
  # least as recent as the linked (build-time) one.
  if (loaded_version[0] != linked_version[0] or
      not trt_utils.is_loaded_tensorrt_version_greater_equal(*linked_version)):
    logging.error(
        "Loaded TensorRT %s but linked TensorFlow against TensorRT %s. A few "
        "requirements must be met:\n"
        "\t-It is required to use the same major version of TensorRT during "
        "compilation and runtime.\n"
        "\t-TensorRT does not support forward compatibility. The loaded "
        "version has to be equal or more recent than the linked version.",
        trt_utils.version_tuple_to_string(loaded_version),
        trt_utils.version_tuple_to_string(linked_version))
    raise RuntimeError("Incompatible TensorRT major version")
  elif loaded_version != linked_version:
    logging.info(
        "Loaded TensorRT %s and linked TensorFlow against TensorRT %s. This is "
        "supported because TensorRT minor/patch upgrades are backward "
        "compatible.", trt_utils.version_tuple_to_string(loaded_version),
        trt_utils.version_tuple_to_string(linked_version))
def _get_tensorrt_rewriter_config(conversion_params,
                                  is_dynamic_op=None,
                                  max_batch_size=None,
                                  is_v2=False,
                                  disable_non_trt_optimizers=False,
                                  use_implicit_batch=True,
                                  profile_strategy=PROFILE_STRATEGY_RANGE):
  """Returns a RewriterConfig proto for TRT transformation.

  Args:
    conversion_params: a TrtConversionParams instance.
    is_dynamic_op: whether to use dynamic engines.
    max_batch_size: maximum batch size for static engines.
    is_v2: whether we're getting a RewriterConfig for TF 2.0.
    disable_non_trt_optimizers: Turn off all default Grappler optimizers.
    use_implicit_batch: Whether to use implicit batch or explicit batch.
    profile_strategy: dynamic shape optimization profile strategy.

  Returns:
    A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler.

  Raises:
    TypeError: if any of the parameters are of unexpected type.
    ValueError: if any of the parameters are of unexpected value.
  """
  _check_conversion_params(conversion_params, is_v2=is_v2)
  # Argument-combination validation: TF2 always uses dynamic engines, and
  # max_batch_size only applies to TF1 static engines.
  if is_v2 and is_dynamic_op is not None and not is_dynamic_op:
    raise ValueError("is_dynamic_op is either None or True for TF2")
  if not is_v2 and is_dynamic_op is None:
    raise ValueError("is_dynamic_op can't be None for TF1")

  if (is_dynamic_op is None or is_dynamic_op) and max_batch_size is not None:
    raise ValueError("max_batch_size has to be None for TF2"
                     " or when is_dynamic_op == True in TF1")
  if is_dynamic_op is not None and not is_dynamic_op and not isinstance(
      max_batch_size, int):
    raise ValueError(
        "max_batch_size has to be an integer for is_dynamic_op==False in TF1")
  rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig()
  # Disable Grappler Remapper to avoid that fused OPs that may not be
  # beneficial to TF-TRT and are not supported by TF-TRT.
  rewriter_config_with_trt.remapping = False

  # Prevent folding of Const->QDQ chains (needed when TRT >= 8 handles
  # quantization ops natively).
  rewriter_config_with_trt.experimental_disable_folding_quantization_emulation = (
      trt_utils.is_linked_tensorrt_version_greater_equal(8, 0, 0) or
      trt_utils.is_loaded_tensorrt_version_greater_equal(8, 0, 0))

  if not disable_non_trt_optimizers:
    # Layout optimizer may add Const nodes followed by Reshape nodes, thus we
    # need to run constant folding again.
    rewriter_config_with_trt.optimizers.extend(
        ["constfold", "layout", "constfold"])

  rewriter_config_with_trt.meta_optimizer_iterations = (
      rewriter_config_pb2.RewriterConfig.ONE)
  optimizer = rewriter_config_with_trt.custom_optimizers.add()

  if not disable_non_trt_optimizers:
    # Add a constfold optimizer to cleanup the unused Const nodes.
    rewriter_config_with_trt.custom_optimizers.add().name = "constfold"

  optimizer.name = "TensorRTOptimizer"
  # Forward the conversion parameters into the custom optimizer's
  # parameter_map, which is how Grappler passes options to TF-TRT.
  optimizer.parameter_map[
      "minimum_segment_size"].i = conversion_params.minimum_segment_size
  optimizer.parameter_map["max_workspace_size_bytes"].i = (
      conversion_params.max_workspace_size_bytes)
  optimizer.parameter_map["precision_mode"].s = _to_bytes(
      conversion_params.precision_mode)
  optimizer.parameter_map[
      "maximum_cached_engines"].i = conversion_params.maximum_cached_engines
  optimizer.parameter_map[
      "use_calibration"].b = conversion_params.use_calibration
  optimizer.parameter_map["is_dynamic_op"].b = is_dynamic_op
  optimizer.parameter_map[
      "allow_build_at_runtime"].b = conversion_params.allow_build_at_runtime
  if max_batch_size is not None:
    optimizer.parameter_map["max_batch_size"].i = max_batch_size
  optimizer.parameter_map["use_implicit_batch"].b = use_implicit_batch
  # While we accept case insensitive strings from the users, we only pass the
  # strings in lower cases to TF-TRT converter.
  if not use_implicit_batch:
    optimizer.parameter_map["profile_strategy"].s = _to_bytes(
        profile_strategy.lower())

  # Disabling optimizers should happen after defining the TF-TRT grappler pass
  # otherwise the template can overwrite the disablement.
  if disable_non_trt_optimizers:
    trt_utils.disable_non_trt_optimizers_in_rewriter_config(
        rewriter_config_with_trt)

  return rewriter_config_with_trt
@deprecation.deprecated(
    None, "You shouldn't need a rewriter_config with the current TF-TRT APIs.")
def get_tensorrt_rewriter_config(conversion_params,
                                 is_dynamic_op=None,
                                 max_batch_size=None,
                                 is_v2=False,
                                 disable_non_trt_optimizers=False):
  """Deprecated public wrapper around _get_tensorrt_rewriter_config."""
  return _get_tensorrt_rewriter_config(conversion_params, is_dynamic_op,
                                       max_batch_size, is_v2,
                                       disable_non_trt_optimizers)
# Remove all scope prefixes in the node name. In TF 2.0, the same concrete
# function can be initialized multiple times with different prefixes, and
# this will result in the same TRTEngineOp being initialized multiple times
# with different cache and duplicate TRT engines.
# TODO(laigd): this may be caused by the fact that TRTEngineOp is not
# stateful, need to investigate.
# TODO(laigd): we rely on the fact that all functions are fully inlined
# before TF-TRT optimizer is called, as otherwise it may generate the same
# name when optimizing a different function graph. Fix this.
def _get_canonical_engine_name(name):
return name.split("/")[-1]
class TrtGraphConverter(object):
"""A converter for TF-TRT transformation for TF 1.x GraphDef/SavedModels.
To run the conversion without quantization calibration (e.g. for FP32/FP16
precision modes):
```python
converter = TrtGraphConverter(
input_saved_model_dir="my_dir",
precision_mode=TrtPrecisionMode.FP16)
converted_graph_def = converter.convert()
converter.save(output_saved_model_dir)
```
To run the conversion with quantization calibration:
```python
converter = TrtGraphConverter(
input_saved_model_dir="my_dir",
precision_mode=TrtPrecisionMode.INT8)
converter.convert()
# Run calibration 10 times.
converted_graph_def = converter.calibrate(
fetch_names=['output:0'],
num_runs=10,
feed_dict_fn=lambda: {'input:0': my_next_data()})
converter.save(output_saved_model_dir)
```
"""
def __init__(self,
             input_saved_model_dir=None,
             input_saved_model_tags=None,
             input_saved_model_signature_key=None,
             input_graph_def=None,
             nodes_denylist=None,
             max_batch_size=1,
             max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
             precision_mode=TrtPrecisionMode.FP32,
             minimum_segment_size=3,
             is_dynamic_op=False,
             maximum_cached_engines=1,
             use_calibration=True):
  """Initializes the converter.

  Args:
    input_saved_model_dir: the directory to load the SavedModel which contains
      the input graph to transforms. Used only when input_graph_def is None.
    input_saved_model_tags: list of tags to load the SavedModel.
    input_saved_model_signature_key: the key of the signature to optimize the
      graph for.
    input_graph_def: a GraphDef object containing a model to be transformed.
      If set to None, the graph will be read from the SavedModel loaded from
      input_saved_model_dir.
    nodes_denylist: list of node names to prevent the converter from touching.
    max_batch_size: max size for the input batch.
    max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
      engine can use at execution time. This corresponds to the
      'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
    precision_mode: one of TrtPrecisionMode.supported_precision_modes().
    minimum_segment_size: the minimum number of nodes required for a subgraph
      to be replaced by TRTEngineOp.
    is_dynamic_op: whether to generate dynamic TRT ops which will build the
      TRT network and engine at run time.
    maximum_cached_engines: max number of cached TRT engines in dynamic TRT
      ops. If the number of cached engines is already at max but none of them
      can serve the input, the TRTEngineOp will fall back to run the TF
      function based on which the TRTEngineOp is created.
    use_calibration: this argument is ignored if precision_mode is not INT8.
      If set to True, a calibration graph will be created to calibrate the
      missing ranges. The calibration graph must be converted to an inference
      graph by running calibration with calibrate(). If set to False,
      quantization nodes will be expected for every tensor in the graph
      (excluding those which will be fused). If a range is missing, an error
      will occur. Please note that accuracy may be negatively affected if
      there is a mismatch between which tensors TRT quantizes and which
      tensors were trained with fake quantization.

  Raises:
    ValueError: if the combination of the parameters is invalid.
    RuntimeError: if this class is used in TF 2.0.
  """
  # This class is the TF 1.x API; TF 2.x users must use the tf.experimental
  # converter instead.
  if context.executing_eagerly():
    raise RuntimeError(
        "Please use tf.experimental.tensorrt.Converter in TF 2.0.")

  # Exactly one graph source must be provided.
  if input_graph_def and input_saved_model_dir:
    raise ValueError(
        "Can only specify one of input_graph_def and input_saved_model_dir")
  if not input_graph_def and not input_saved_model_dir:
    raise ValueError("Must specify one of input_graph_def and "
                     "input_saved_model_dir")
  _check_trt_version_compatibility()

  self._input_graph_def = input_graph_def
  self._nodes_denylist = nodes_denylist

  self._input_saved_model_dir = input_saved_model_dir
  self._converted = False
  self._grappler_meta_graph_def = None

  self._input_saved_model_tags = (
      input_saved_model_tags or [tag_constants.SERVING])
  self._input_saved_model_signature_key = (
      input_saved_model_signature_key or
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

  # For calibration usage.
  self._calibration_graph = None
  self._calibration_data_collected = False
  # Calibration is only meaningful for INT8 (case-insensitive) with
  # use_calibration enabled.
  self._need_calibration = (
      ((precision_mode == TrtPrecisionMode.INT8) or
       (precision_mode == TrtPrecisionMode.INT8.lower())) and use_calibration)
  # INT8 calibration requires dynamic engines; force is_dynamic_op on.
  if self._need_calibration and not is_dynamic_op:
    logging.warn(
        "INT8 precision mode with calibration is supported with "
        "dynamic TRT ops only. Disregarding is_dynamic_op parameter.")
    is_dynamic_op = True

  self._is_dynamic_op = is_dynamic_op
  if is_dynamic_op:
    # Dynamic engines size themselves at runtime; a static max batch size
    # does not apply.
    self._max_batch_size = None
    if max_batch_size is not None:
      logging.warn("When is_dynamic_op==True max_batch_size should be None")
  else:
    if not isinstance(max_batch_size, int):
      raise ValueError("When is_dynamic_op==False max_batch_size should be "
                       "an integer")
    self._max_batch_size = max_batch_size

  self._conversion_params = TrtConversionParams(
      max_workspace_size_bytes=max_workspace_size_bytes,
      precision_mode=precision_mode,
      minimum_segment_size=minimum_segment_size,
      maximum_cached_engines=maximum_cached_engines,
      use_calibration=use_calibration,
      allow_build_at_runtime=True)
  _check_conversion_params(self._conversion_params)

  # Test hook: when True, all Grappler passes except TF-TRT are disabled.
  self._test_only_disable_non_trt_optimizers = False
def _run_conversion(self):
  """Run Grappler's OptimizeGraph() tool to convert the graph.

  Reads self._grappler_meta_graph_def, stores the optimized result in
  self._converted_graph_def, and marks self._converted True.
  """
  # Create custom ConfigProto for Grappler.
  grappler_session_config = config_pb2.ConfigProto()
  custom_rewriter_config = _get_tensorrt_rewriter_config(
      conversion_params=self._conversion_params,
      is_dynamic_op=self._is_dynamic_op,
      max_batch_size=self._max_batch_size,
      disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers,
      use_implicit_batch=True)
  grappler_session_config.graph_options.rewrite_options.CopyFrom(
      custom_rewriter_config)

  # Run Grappler.
  self._converted_graph_def = tf_optimizer.OptimizeGraph(
      grappler_session_config,
      self._grappler_meta_graph_def,
      graph_id=b"tf_graph")
  self._converted = True
def _add_nodes_denylist(self):
  """Record denylisted node names in the meta graph's 'train_op' collection."""
  if not self._nodes_denylist:
    return
  collection_def = self._grappler_meta_graph_def.collection_def["train_op"]
  denylist = collection_def.node_list.value
  for node in self._nodes_denylist:
    # Entries may be Tensor objects or plain names; store the name as bytes.
    name = node.name if isinstance(node, ops.Tensor) else node
    denylist.append(_to_bytes(name))
def _convert_graph_def(self):
  """Convert the input GraphDef.

  Imports the GraphDef into a fresh Graph, wraps it (with inferred shapes)
  in a MetaGraphDef for Grappler, applies the node denylist, then runs the
  TF-TRT conversion.
  """
  graph = ops.Graph()
  with graph.as_default():
    importer.import_graph_def(self._input_graph_def, name="")
  self._grappler_meta_graph_def = saver.export_meta_graph(
      graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
  self._add_nodes_denylist()

  self._run_conversion()
def _collections_to_keep(self, collection_keys):
  """Return the subset of collection keys that don't depend on variable ops."""
  # TODO(laigd): filtering by collection key may miss some user-defined
  # collections that depend on variable ops; using CollectionDef::NodeList
  # for the filtering would be more precise.
  excluded = set(ops.GraphKeys._VARIABLE_COLLECTIONS)
  excluded.update([
      ops.GraphKeys.TRAIN_OP, ops.GraphKeys.WHILE_CONTEXT,
      ops.GraphKeys.COND_CONTEXT
  ])
  return [key for key in collection_keys if key not in excluded]
def _convert_saved_model(self):
  """Convert the input SavedModel.

  Loads the SavedModel, freezes its variables into constants, rebuilds a
  MetaGraphDef for Grappler (keeping non-variable collections), and runs
  the TF-TRT conversion on it.
  """
  graph = ops.Graph()
  with session.Session(graph=graph) as sess:
    input_meta_graph_def = loader.load(sess, self._input_saved_model_tags,
                                       self._input_saved_model_dir)
    input_signature_def = input_meta_graph_def.signature_def[
        self._input_saved_model_signature_key]

    def _gather_names(tensor_info):
      """Get the node names from a TensorInfo."""
      # Tensor names look like "node:output_index"; keep only the node part.
      return {tensor_info[key].name.split(":")[0] for key in tensor_info}

    # Get input and outputs from all SignatureDef.
    output_node_names = _gather_names(input_signature_def.inputs).union(
        _gather_names(input_signature_def.outputs))

    # Preserve nodes in collection
    for collection_key in self._collections_to_keep(
        input_meta_graph_def.collection_def):
      for op in sess.graph.get_collection(collection_key):
        if isinstance(op, ops.Operation):
          output_node_names.add(op.name.split(":")[0])

    # Freeze the variables in the SavedModel graph and copy the frozen
    # graph over.
    frozen_graph_def = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(add_shapes=True),
        list(output_node_names))
    self._grappler_meta_graph_def = meta_graph_pb2.MetaGraphDef()
    self._grappler_meta_graph_def.graph_def.CopyFrom(frozen_graph_def)

    # Copy the collections that are not variables.
    for collection_key in self._collections_to_keep(
        input_meta_graph_def.collection_def):
      self._grappler_meta_graph_def.collection_def[collection_key].CopyFrom(
          input_meta_graph_def.collection_def[collection_key])

    self._add_nodes_denylist()

    # Copy other information.
    self._grappler_meta_graph_def.meta_info_def.CopyFrom(
        input_meta_graph_def.meta_info_def)
    self._grappler_meta_graph_def.signature_def[
        self._input_saved_model_signature_key].CopyFrom(input_signature_def)
    # TODO(laigd): maybe add back AssetFileDef.

  self._run_conversion()
def convert(self):
  """Run the TF-TRT conversion.

  Returns:
    The converted GraphDef for TF 1.x.
  """
  assert not self._converted
  # Two input modes: an in-memory GraphDef, or a SavedModel directory.
  convert_input = (
      self._convert_graph_def
      if self._input_graph_def else self._convert_saved_model)
  convert_input()
  return self._converted_graph_def
def calibrate(self,
              fetch_names,
              num_runs,
              feed_dict_fn=None,
              input_map_fn=None):
  """Run the calibration and return the calibrated GraphDef.

  Args:
    fetch_names: a list of output tensor name to fetch during calibration.
    num_runs: number of runs of the graph during calibration.
    feed_dict_fn: a function that returns a dictionary mapping input names (as
      strings) in the GraphDef to be calibrated to values (e.g. Python list,
      numpy arrays, etc). One and only one of `feed_dict_fn` and
      `input_map_fn` should be specified.
    input_map_fn: a function that returns a dictionary mapping input names (as
      strings) in the GraphDef to be calibrated to Tensor objects. The values
      of the named input tensors in the GraphDef to be calibrated will be
      re-mapped to the respective `Tensor` values during calibration. One and
      only one of `feed_dict_fn` and `input_map_fn` should be specified.

  Raises:
    ValueError: if the input combination is invalid.
    RuntimeError: if this method is called in eager mode.

  Returns:
    The GraphDef after the calibration.
  """
  assert self._converted
  assert self._need_calibration
  assert not self._calibration_data_collected

  # Exactly one of the two feeding mechanisms must be supplied.
  if (feed_dict_fn and input_map_fn) or (not feed_dict_fn and
                                         not input_map_fn):
    raise ValueError(
        "Should specify one and only one of feed_dict_fn and input_map_fn.")

  if input_map_fn:
    for k, v in input_map_fn().items():
      if not isinstance(k, str):
        raise ValueError("Keys of input_map_fn must be of type str")
      if not isinstance(v, ops.Tensor):
        raise ValueError("Values of input_map_fn must be of type tf.Tensor")

  # Import the converted graph into a fresh graph used only for calibration.
  self._calibration_graph = ops.Graph()
  with self._calibration_graph.as_default():
    fetches = importer.import_graph_def(
        self._converted_graph_def,
        input_map=input_map_fn() if input_map_fn else None,
        return_elements=fetch_names,
        name="")

  calibrate_rewriter_cfg = rewriter_config_pb2.RewriterConfig()
  if self._test_only_disable_non_trt_optimizers:
    trt_utils.disable_non_trt_optimizers_in_rewriter_config(
        calibrate_rewriter_cfg)

  # Set allow_soft_placement=True to run the graph for calibration so that
  # OPs supported by TensorRT but don't have a GPU implementation are allowed
  # to execute on CPU.
  calibrate_config = config_pb2.ConfigProto(
      allow_soft_placement=True,
      graph_options=config_pb2.GraphOptions(
          rewrite_options=calibrate_rewriter_cfg))

  with session.Session(
      graph=self._calibration_graph,
      config=calibrate_config) as calibration_sess:
    # Execute the graph num_runs times; each run feeds calibration data.
    for _ in range(num_runs):
      calibration_sess.run(
          fetches, feed_dict=feed_dict_fn() if feed_dict_fn else None)

    # Maps device name to the corresponding get_calibration_data.
    #
    # TODO(laigd): a better way would be to use calibration_sess to list
    # all the devices, add one get_calibration_data for each device, and
    # fetch each such op for every resource until its found. This can work
    # even when the device of the TRTEngineOp is empty or not fully specified.
    device_to_get_resource_op_map = {}

    with self._calibration_graph.as_default():
      resource_name_input = array_ops.placeholder(dtypes.string)

      for node in self._converted_graph_def.node:
        if node.op == _TRT_ENGINE_OP_NAME:
          # Adds the get_calibration_data op for the device if not done
          # before. We only add one such op for each device.
          # TODO(laigd): What if the device is empty?????
          if node.device not in device_to_get_resource_op_map:
            with self._calibration_graph.device(node.device):
              serialized_resources_output = (
                  gen_trt_ops.get_calibration_data_op(resource_name_input))
            device_to_get_resource_op_map[node.device] = (
                serialized_resources_output)

          # Get the calibration resource.
          calibration_result = calibration_sess.run(
              device_to_get_resource_op_map[node.device],
              feed_dict={
                  resource_name_input: _get_canonical_engine_name(node.name)
              })
          # Embed the collected calibration table into the engine node.
          node.attr["calibration_data"].s = calibration_result

  self._calibration_data_collected = True

  return self._converted_graph_def
def save(self, output_saved_model_dir):
  """Save the converted graph as a SavedModel.

  Args:
    output_saved_model_dir: construct a SavedModel using the converted
      GraphDef and save it to the specified directory. This option only works
      when the input graph is loaded from a SavedModel, i.e. when
      input_saved_model_dir is specified and input_graph_def is None in
      __init__().

  Raises:
    ValueError: if the input to the converter is a GraphDef instead of a
      SavedModel.
  """
  assert self._converted
  if self._need_calibration:
    assert self._calibration_data_collected
  if self._input_graph_def:
    raise ValueError(
        "Not able to save to a SavedModel since input is a GraphDef")

  def _restore_collections(dest_graph, src_meta_graph_def, collection_keys):
    """Restores collections that we need to keep."""
    scope = ""
    for key in collection_keys:
      collection_def = src_meta_graph_def.collection_def[key]
      kind = collection_def.WhichOneof("kind")
      if kind is None:
        logging.error(
            "Cannot identify data type for collection %s. Skipping.", key)
        continue
      from_proto = ops.get_from_proto_function(key)
      if from_proto and kind == "bytes_list":
        proto_type = ops.get_collection_proto_type(key)
        # It is assumed that there are no Variables Keys in collections
        for value in collection_def.bytes_list.value:
          proto = proto_type()
          proto.ParseFromString(value)
          try:
            new_value = from_proto(proto, import_scope=scope)
          except:
            # NOTE(review): bare except — any failure to rebuild the item in
            # the optimized graph is silently skipped (best-effort restore).
            continue
          dest_graph.add_to_collection(key, new_value)
      else:
        field = getattr(collection_def, kind)
        if kind == "node_list":
          for value in field.value:
            name = ops.prepend_name_scope(value, scope)
            # Since the graph has been optimized, the node may no longer
            # exists
            try:
              col_op = dest_graph.as_graph_element(name)
            except (TypeError, ValueError, KeyError):
              continue
            dest_graph.add_to_collection(key, col_op)
        elif kind == "int64_list":
          # NOTE(opensource): This force conversion is to work around the
          # fact that Python2 distinguishes between int and long, while
          # Python3 has only int.
          for value in field.value:
            dest_graph.add_to_collection(key, int(value))
        else:
          for value in field.value:
            dest_graph.add_to_collection(key,
                                         ops.prepend_name_scope(value, scope))

  # Write the transformed graphdef as SavedModel.
  saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir)
  with ops.Graph().as_default():
    importer.import_graph_def(self._converted_graph_def, name="")
    _restore_collections(
        ops.get_default_graph(), self._grappler_meta_graph_def,
        self._collections_to_keep(
            self._grappler_meta_graph_def.collection_def))
    # We don't use any specific converter here.
    with session.Session() as sess:
      saved_model_builder.add_meta_graph_and_variables(
          sess,
          self._input_saved_model_tags,
          signature_def_map=self._grappler_meta_graph_def.signature_def)
  # Ignore other meta graphs from the input SavedModel.
  saved_model_builder.save()
def _get_resource_handle(name, device):
  # Returns a handle to the TRT engine-cache resource `name`, created under
  # the given device scope so lookups resolve on the engine's device.
  with ops.device(device):
    return gen_trt_ops.create_trt_resource_handle(resource_name=name)
class _TRTEngineResource(tracking.TrackableResource):
  """Class to track the serialized engines resource.

  Makes a TF-TRT engine cache trackable by SavedModel: the on-disk
  serialized engine file is registered as an Asset, and the resource is
  re-created/initialized from that file when the SavedModel is loaded.
  """

  def __init__(self,
               resource_name,
               filename,
               maximum_cached_engines,
               device="GPU"):
    """Creates the trackable resource.

    Args:
      resource_name: name identifying the TRT engine-cache resource.
      filename: path of the serialized engine file used to initialize it.
      maximum_cached_engines: max number of engines kept in the cache.
      device: device to place the resource on (default "GPU").
    """
    super(_TRTEngineResource, self).__init__(device=device)
    self._resource_name = resource_name
    # Track the serialized engine file in the SavedModel.
    self._filename = self._track_trackable(
        tracking.Asset(filename), "_serialized_trt_resource_filename")
    self._maximum_cached_engines = maximum_cached_engines

  def _create_resource(self):
    # NOTE(review): `self._resource_device` is presumably populated by the
    # TrackableResource base class from the `device` ctor arg — confirm.
    return _get_resource_handle(self._resource_name, self._resource_device)

  def _initialize(self):
    # Populate the engine cache from the tracked serialized-engine file.
    gen_trt_ops.initialize_trt_resource(
        self.resource_handle,
        self._filename,
        max_cached_engines_count=self._maximum_cached_engines)

  def _destroy_resource(self):
    handle = _get_resource_handle(self._resource_name, self._resource_device)
    with ops.device(self._resource_device):
      # ignore_lookup_error: destruction may race with an already-freed
      # resource; deletion is best-effort.
      gen_resource_variable_ops.destroy_resource_op(
          handle, ignore_lookup_error=True)
def _print_row(fields, positions, print_fn):
"""Prints a row."""
line = ""
for i, field in enumerate(fields):
field = str(field)
end_line_pos = positions[i]
if i > 0:
line = line + " "
line = "{0:{min_length}}".format(line + field, min_length=end_line_pos)
if len(line) > end_line_pos:
line = line[:(end_line_pos - 4)] + " ..."
print_fn(line)
def _get_nodes_in_engine(graphdef, node_name):
ops_in_engine = collections.defaultdict(int)
for func in graphdef.library.function:
if f"{node_name}_native_segment" == func.signature.name:
node_count = len(func.node_def)
for node in func.node_def:
ops_in_engine[node.op] += 1
break
return node_count, ops_in_engine
def _extract_shapes_from_node(node, key):
out_shape = []
for shape in node.attr[key].list.shape:
out_shape.append([dim.size for dim in shape.dim])
return out_shape
def _get_engine_dtypes_from_node(node, key):
return [dtypes._TYPE_TO_STRING[dtype] for dtype in node.attr[key].list.type]
@tf_export("experimental.tensorrt.Converter", v1=[])
class TrtGraphConverterV2(object):
"""An offline converter for TF-TRT transformation for TF 2.0 SavedModels.
Currently this is not available on Windows platform.
There are several ways to run the conversion:
1. FP32/FP16 precision
```python
params = tf.experimental.tensorrt.ConversionParams(
precision_mode='FP16')
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="my_dir", conversion_params=params)
converter.convert()
converter.save(output_saved_model_dir)
```
In this case, no TRT engines will be built or saved in the converted
SavedModel. But if input data is available during conversion, we can still
build and save the TRT engines to reduce the cost during inference (see
option 2 below).
2. FP32/FP16 precision with pre-built engines
```python
params = tf.experimental.tensorrt.ConversionParams(
precision_mode='FP16',
# Set this to a large enough number so it can cache all the engines.
maximum_cached_engines=16)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="my_dir", conversion_params=params)
converter.convert()
# Define a generator function that yields input data, and use it to execute
# the graph to build TRT engines.
def my_input_fn():
for _ in range(num_runs):
inp1, inp2 = ...
yield inp1, inp2
converter.build(input_fn=my_input_fn) # Generate corresponding TRT engines
converter.save(output_saved_model_dir) # Generated engines will be saved.
```
In this way, one engine will be built/saved for each unique input shapes of
the TRTEngineOp. This is good for applications that cannot afford building
engines during inference but have access to input data that is similar to
the one used in production (for example, that has the same input shapes).
Also, the generated TRT engines is platform dependent, so we need to run
`build()` in an environment that is similar to production (e.g. with
same type of GPU).
3. INT8 precision and calibration with pre-built engines
```python
params = tf.experimental.tensorrt.ConversionParams(
precision_mode='INT8',
# Currently only one INT8 engine is supported in this mode.
maximum_cached_engines=1,
use_calibration=True)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="my_dir", conversion_params=params)
# Define a generator function that yields input data, and run INT8
# calibration with the data. All input data should have the same shape.
# At the end of convert(), the calibration stats (e.g. range information)
# will be saved and can be used to generate more TRT engines with different
# shapes. Also, one TRT engine will be generated (with the same shape as
# the calibration data) for save later.
def my_calibration_input_fn():
for _ in range(num_runs):
inp1, inp2 = ...
yield inp1, inp2
converter.convert(calibration_input_fn=my_calibration_input_fn)
# (Optional) Generate more TRT engines offline (same as the previous
# option), to avoid the cost of generating them during inference.
def my_input_fn():
for _ in range(num_runs):
inp1, inp2 = ...
yield inp1, inp2
converter.build(input_fn=my_input_fn)
# Save the TRT engine and the engines.
converter.save(output_saved_model_dir)
```
4. To use dynamic shape, we need to call the build method with an input
function to generate profiles. This step is similar to the INT8 calibration
step described above. The converter also needs to be created with
use_dynamic_shape=True and one of the following profile_strategies for
creating profiles based on the inputs produced by the input function:
* `Range`: create one profile that works for inputs with dimension values
in the range of [min_dims, max_dims] where min_dims and max_dims are
derived from the provided inputs.
* `Optimal`: create one profile for each input. The profile only works for
inputs with the same dimensions as the input it is created for. The GPU
engine will be run with optimal performance with such inputs.
* `Range+Optimal`: create the profiles for both `Range` and `Optimal`.
* `ImplicitBatchModeCompatible`: create the profiles that will produce the
same GPU engines as the implicit_batch_mode would produce.
"""
def _verify_profile_strategy(self, strategy):
  """Raise ValueError if `strategy` is not a supported profile strategy."""
  # Comparison is case-insensitive.
  known = {s.lower() for s in supported_profile_strategies()}
  if strategy.lower() not in known:
    raise ValueError(
        ("profile_strategy '{}' is not supported. It should be one of {}"
        ).format(strategy, supported_profile_strategies()))
@deprecation.deprecated_args(None,
                             "Use individual converter parameters instead",
                             "conversion_params")
def __init__(self,
             input_saved_model_dir=None,
             input_saved_model_tags=None,
             input_saved_model_signature_key=None,
             use_dynamic_shape=None,
             dynamic_shape_profile_strategy=None,
             max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
             precision_mode=TrtPrecisionMode.FP32,
             minimum_segment_size=3,
             maximum_cached_engines=1,
             use_calibration=True,
             allow_build_at_runtime=True,
             conversion_params=None):
  """Initialize the converter.

  Args:
    input_saved_model_dir: the directory to load the SavedModel which contains
      the input graph to transforms. Required.
    input_saved_model_tags: list of tags to load the SavedModel.
    input_saved_model_signature_key: the key of the signature to optimize the
      graph for.
    use_dynamic_shape: whether to enable dynamic shape support. None is
      equivalent to False in the current implementation.
    dynamic_shape_profile_strategy: one of the strings in
      supported_profile_strategies(). None is equivalent to Range in the
      current implementation.
    max_workspace_size_bytes: the maximum GPU temporary memory that the TRT
      engine can use at execution time. This corresponds to the
      'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
    precision_mode: one of the strings in
      TrtPrecisionMode.supported_precision_modes().
    minimum_segment_size: the minimum number of nodes required for a subgraph
      to be replaced by TRTEngineOp.
    maximum_cached_engines: max number of cached TRT engines for dynamic TRT
      ops. Created TRT engines for a dynamic dimension are cached. If the
      number of cached engines is already at max but none of them supports the
      input shapes, the TRTEngineOp will fall back to run the original TF
      subgraph that corresponds to the TRTEngineOp.
    use_calibration: this argument is ignored if precision_mode is not INT8.
      If set to True, a calibration graph will be created to calibrate the
      missing ranges. The calibration graph must be converted to an inference
      graph by running calibration with calibrate(). If set to False,
      quantization nodes will be expected for every tensor in the graph
      (excluding those which will be fused). If a range is missing, an error
      will occur. Please note that accuracy may be negatively affected if
      there is a mismatch between which tensors TRT quantizes and which
      tensors were trained with fake quantization.
    allow_build_at_runtime: whether to allow building TensorRT engines during
      runtime if no prebuilt TensorRT engine can be found that can handle the
      given inputs during runtime, then a new TensorRT engine is built at
      runtime if allow_build_at_runtime=True, and otherwise native TF is used.
    conversion_params: a TrtConversionParams instance (deprecated).

  Raises:
    ValueError: if the combination of the parameters is invalid.
  """
  # The V2 converter only works in eager mode.
  assert context.executing_eagerly()
  if conversion_params is None:
    # Assemble the individual (non-deprecated) parameters into the legacy
    # TrtConversionParams structure used internally.
    conversion_params = TrtConversionParams(
        max_workspace_size_bytes=max_workspace_size_bytes,
        precision_mode=precision_mode,
        minimum_segment_size=minimum_segment_size,
        maximum_cached_engines=maximum_cached_engines,
        use_calibration=use_calibration,
        allow_build_at_runtime=allow_build_at_runtime)

  _check_trt_version_compatibility()
  _check_conversion_params(conversion_params, is_v2=True)

  self._conversion_params = conversion_params
  self._input_saved_model_dir = input_saved_model_dir
  self._input_saved_model_tags = (
      input_saved_model_tags or [tag_constants.SERVING])
  self._input_saved_model_signature_key = (
      input_saved_model_signature_key or
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

  # Calibration is only needed for INT8 precision (accept either the
  # canonical or lower-cased precision-mode spelling).
  self._need_calibration = ((
      (conversion_params.precision_mode == TrtPrecisionMode.INT8) or
      (conversion_params.precision_mode == TrtPrecisionMode.INT8.lower())) and
                            conversion_params.use_calibration)

  self._converted = False
  self._build_called_once = False

  # None is treated the same as False for dynamic shape support.
  if use_dynamic_shape is None:
    self._use_dynamic_shape = False
  else:
    self._use_dynamic_shape = use_dynamic_shape

  self._profile_strategy = "Unknown"
  if self._use_dynamic_shape:
    # Default to the Range strategy when none is given; otherwise validate
    # the user-provided strategy name.
    if dynamic_shape_profile_strategy is None:
      self._profile_strategy = PROFILE_STRATEGY_RANGE
    else:
      self._verify_profile_strategy(dynamic_shape_profile_strategy)
      self._profile_strategy = dynamic_shape_profile_strategy

  # Fields to support TF-TRT testing and shouldn't be used for other purpose.
  self._test_only_disable_non_trt_optimizers = False
def _need_trt_profiles(self):
  # Optimization profiles are only required in dynamic-shape mode.
  return self._use_dynamic_shape
def _run_conversion(self, meta_graph_def):
  """Run Grappler's OptimizeGraph() tool to convert the graph.

  Args:
    meta_graph_def: the MetaGraphDef instance to run the optimizations on.

  Returns:
    The optimized GraphDef.
  """
  # Always set `allow_build_at_runtime` for offline TensorRT engine building.
  params = self._conversion_params._replace(allow_build_at_runtime=True)
  rewriter_cfg = _get_tensorrt_rewriter_config(
      conversion_params=params,
      is_dynamic_op=True,
      max_batch_size=None,
      disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers,
      use_implicit_batch=not self._use_dynamic_shape,
      profile_strategy=self._profile_strategy)
  session_cfg = config_pb2.ConfigProto()
  session_cfg.graph_options.rewrite_options.CopyFrom(rewriter_cfg)
  return tf_optimizer.OptimizeGraph(
      session_cfg, meta_graph_def, graph_id=b"tf_graph")
def _for_each_trt_node(self, graph_def, fn):
  """Helper method to manipulate all TRTEngineOps in a GraphDef."""
  # Visit top-level nodes first, then nodes inside library functions, in the
  # same order a pair of explicit loops would.
  candidates = list(graph_def.node)
  for func in graph_def.library.function:
    candidates.extend(func.node_def)
  for node in candidates:
    if node.op == _TRT_ENGINE_OP_NAME:
      fn(node)
def _rebuild_func(self, func):
  """Rebuild a ConcreteFunction from the converted GraphDef."""
  input_names = [tensor.name for tensor in func.inputs]
  output_names = [tensor.name for tensor in func.outputs]
  new_func = wrap_function.function_from_graph_def(
      self._converted_graph_def, input_names, output_names)
  # Restore the structured (possibly nested) output layout of the original.
  new_func.graph.structured_outputs = nest.pack_sequence_as(
      func.graph.structured_outputs, new_func.graph.structured_outputs)
  # Copy structured input signature from original function (used during
  # serialization)
  new_func.graph.structured_input_signature = (
      func.structured_input_signature)
  return new_func
# TODO(laigd): provide a utility function to optimize a ConcreteFunction and
# use it here (b/124792963).
def convert(self, calibration_input_fn=None):
  """Convert the input SavedModel in 2.0 format.

  Args:
    calibration_input_fn: a generator function that yields input data as a
      list or tuple or dict, which will be used to execute the converted
      signature for calibration. All the returned input data should have the
      same shape. Example: `def input_fn(): yield input1, input2, input3`

  Raises:
    ValueError: if the input combination is invalid.

  Returns:
    The TF-TRT converted Function.
  """
  assert not self._converted

  # calibration_input_fn must be given exactly when INT8 calibration is on.
  if (self._need_calibration and not calibration_input_fn):
    raise ValueError("Should specify calibration_input_fn because INT8 "
                     "calibration is needed")
  if (not self._need_calibration and calibration_input_fn):
    raise ValueError("Should not specify calibration_input_fn because INT8 "
                     "calibration is not needed")

  self._saved_model = load.load(self._input_saved_model_dir,
                                self._input_saved_model_tags)
  func = self._saved_model.signatures[self._input_saved_model_signature_key]
  # Freeze variables into constants before handing the graph to Grappler.
  frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
  grappler_meta_graph_def = saver.export_meta_graph(
      graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph)

  # Add a collection 'train_op' so that Grappler knows the outputs.
  fetch_collection = meta_graph_pb2.CollectionDef()
  for array in frozen_func.inputs + frozen_func.outputs:
    fetch_collection.node_list.value.append(array.name)
  grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
      fetch_collection)

  # Run TRT optimizer in Grappler to convert the graph.
  self._converted_graph_def = self._run_conversion(grappler_meta_graph_def)

  # If a function is converted, then the TF context contains the original
  # function while the converted_graph_def contains the converted function.
  # Remove the original function from the TF context in this case.
  for f in self._converted_graph_def.library.function:
    while context.context().has_function(f.signature.name):
      logging.info("Removing original function %s from the context",
                   f.signature.name)
      context.context().remove_function(f.signature.name)

  # This also adds the converted functions to the context.
  self._converted_func = wrap_function.function_from_graph_def(
      self._converted_graph_def,
      [tensor.name for tensor in frozen_func.inputs],
      [tensor.name for tensor in frozen_func.outputs])

  # Reconstruct the output signatures using the ones from original model.
  self._converted_func.graph.structured_outputs = nest.pack_sequence_as(
      func.graph.structured_outputs,
      self._converted_func.graph.structured_outputs)
  # Copy structured input signature from original function (used during
  # serialization)
  self._converted_func.graph.structured_input_signature = (
      func.structured_input_signature)

  if self._need_calibration:
    # Execute the converted function on the calibration data to collect
    # range information inside the TRTEngineOps.
    for inp in calibration_input_fn():
      if isinstance(inp, dict):
        self._converted_func(
            **{k: ops.convert_to_tensor(v) for k, v in inp.items()})
      else:
        self._converted_func(*map(ops.convert_to_tensor, inp))

    def _save_calibration_table(node):
      # Embed the collected calibration table into the engine node.
      calibration_table = gen_trt_ops.get_calibration_data_op(
          _get_canonical_engine_name(node.name))
      node.attr["calibration_data"].s = calibration_table.numpy()

    self._for_each_trt_node(self._converted_graph_def,
                            _save_calibration_table)

    # Rebuild the function since calibration has changed the graph.
    self._converted_func = self._rebuild_func(self._converted_func)

  self._converted = True
  return self._converted_func
def build(self, input_fn):
  """Run inference with converted graph in order to build TensorRT engines.

  Args:
    input_fn: a generator function that yields input data as a list or tuple
      or dict, which will be used to execute the converted signature to
      generate TRT engines. Example:
      `def input_fn():
           # Let's assume a network with 2 input tensors. We generate 3 sets
           # of dummy input data:
           input_shapes = [[(1, 16), (2, 16)],  # 1st input list
                           [(2, 32), (4, 32)],  # 2nd list of two tensors
                           [(4, 32), (8, 32)]]  # 3rd input list
           for shapes in input_shapes:
               # return a list of input tensors
               yield [np.zeros(x).astype(np.float32) for x in shapes]`

  Raises:
    NotImplementedError: build() is already called.
    RuntimeError: the input_fn is None.
  """
  if self._build_called_once:
    raise NotImplementedError("build() is already called. It is not "
                              "supported to call build() more than once.")
  if not input_fn:
    raise RuntimeError("input_fn is None. Method build() needs input_fn "
                       "to be specified in order to build TensorRT engines")

  def _set_profile_generation_mode(value, node):
    # Toggle the TRTEngineOp attribute that turns profile collection on/off.
    node.attr["_profile_generation_mode"].b = value

  if self._need_trt_profiles():
    # Enable profile generation.
    self._for_each_trt_node(self._converted_graph_def,
                            partial(_set_profile_generation_mode, True))
    # Profile generation is enabled using the _profile_generation_mode
    # attribute of the TRTEngineOps. We need to rebuild the function to
    # change this attribute.
    func = self._rebuild_func(self._converted_func)
  else:
    func = self._converted_func

  first_input = None
  # Run inference:
  #   Builds TRT engines if self._need_trt_profiles is False.
  #   Builds TRT optimization profiles if self._need_trt_profiles is True.
  for inp in input_fn():
    # Identity check (not truthiness) so a falsy-but-valid first input is
    # captured correctly instead of being overwritten on later iterations.
    if first_input is None:
      first_input = inp
    if isinstance(inp, dict):
      func(**{k: ops.convert_to_tensor(v) for k, v in inp.items()})
    else:
      func(*map(ops.convert_to_tensor, inp))

  if self._need_trt_profiles():
    # Disable profile generation.
    self._for_each_trt_node(self._converted_graph_def,
                            partial(_set_profile_generation_mode, False))
    # Use the first input in explicit batch mode to build TensorRT engines
    # after generating all the profiles. The first input is used but any of
    # the inputs can be used because the shape of this input does not
    # determine the engine and instead the shapes collected in profiles
    # determine the engine.
    if isinstance(first_input, dict):
      self._converted_func(
          **{k: ops.convert_to_tensor(v) for k, v in first_input.items()})
    else:
      self._converted_func(*map(ops.convert_to_tensor, first_input))

  self._build_called_once = True
def save(self, output_saved_model_dir, save_gpu_specific_engines=True):
  """Save the converted SavedModel.

  Args:
    output_saved_model_dir: directory to saved the converted SavedModel.
    save_gpu_specific_engines: whether to save TRT engines that have been
      built. When True, all engines are saved and when False, the engines
      are not saved and will be rebuilt at inference time. By using
      save_gpu_specific_engines=False after doing INT8 calibration, inference
      can be done on different GPUs than the GPU that the model was calibrated
      and saved on.
  """
  assert self._converted

  # Serialize the TRT engines in the cache if any, and create trackable
  # resource to track them.
  engine_asset_dir = tempfile.mkdtemp()
  resource_map = {}

  def _serialize_and_track_engine(node):
    """Serialize TRT engines in the cache and track them."""
    # Don't dump the same cache twice.
    canonical_engine_name = _get_canonical_engine_name(node.name)
    if canonical_engine_name in resource_map:
      return

    filename = os.path.join(engine_asset_dir,
                            "trt-serialized-engine." + canonical_engine_name)

    try:
      gen_trt_ops.serialize_trt_resource(
          resource_name=canonical_engine_name,
          filename=filename,
          delete_resource=True,
          save_gpu_specific_engines=save_gpu_specific_engines)
    except errors.NotFoundError:
      logging.info(
          "Could not find %s in TF-TRT cache. "
          "This can happen if build() is not called, "
          "which means TensorRT engines will be built "
          "and cached at runtime.", canonical_engine_name)
      return

    # TODO(laigd): add an option for the user to choose the device.
    resource_map[canonical_engine_name] = _TRTEngineResource(
        canonical_engine_name, filename,
        self._conversion_params.maximum_cached_engines)

  self._for_each_trt_node(self._converted_graph_def,
                          _serialize_and_track_engine)
  self._saved_model.trt_engine_resources = resource_map

  # Rewrite the signature map using the optimized ConcreteFunction.
  signatures = {
      key: value for key, value in self._saved_model.signatures.items()
  }

  # Set allow_build_at_runtime=False if asked by user.
  #
  # This attribute is set here because build() needs it to be True in order to
  # build engines.
  if not self._conversion_params.allow_build_at_runtime:

    def _reset_allow_build_at_runtime(node):
      node.attr["_allow_build_at_runtime"].b = False

    self._for_each_trt_node(self._converted_graph_def,
                            _reset_allow_build_at_runtime)
    # Rebuild the function since a node attribute changed above
    reset_converted_func = wrap_function.function_from_graph_def(
        self._converted_graph_def,
        [tensor.name for tensor in self._converted_func.inputs],
        [tensor.name for tensor in self._converted_func.outputs])
    reset_converted_func.graph.structured_outputs = nest.pack_sequence_as(
        self._converted_func.graph.structured_outputs,
        reset_converted_func.graph.structured_outputs)
    # BUG FIX: the attribute name was previously misspelled
    # ("strucutred_input_signature"), so the rebuilt function silently lost
    # its structured input signature during serialization.
    reset_converted_func.graph.structured_input_signature = (
        self._converted_func.structured_input_signature)
    self._converted_func = reset_converted_func

  signatures[self._input_saved_model_signature_key] = self._converted_func
  save.save(self._saved_model, output_saved_model_dir, signatures)
def summary(self, line_length=160, detailed=True, print_fn=None):
  """This method describes the results of the conversion by TF-TRT.

  It includes information such as the name of the engine, the number of nodes
  per engine, the input and output dtype, along with the input shape of each
  TRTEngineOp.

  Args:
    line_length: Default line length when printing on the console. Minimum 160
      characters long.
    detailed: Whether or not to show the nodes inside each TRTEngineOp.
    print_fn: Print function to use. Defaults to `print`. It will be called on
      each line of the summary. You can set it to a custom function in order
      to capture the string summary.

  Raises:
    RuntimeError: if the graph is not converted.
  """
  if not self._converted:
    raise RuntimeError(
        f"Impossible to call `{self.__class__.__name__}.summary()` before "
        f"calling {self.__class__.__name__}.convert()`.")

  if line_length < 160:
    raise ValueError(f"Invalid `line_length` value has been received: "
                     f"{line_length}. Minimum: 160.")

  if print_fn is None:
    print_fn = print

  # positions are percentage of `line_length`. positions[i]+1 is the starting
  # position for (i+1)th field. We also make sure that the last char printed
  # for each field is a space.
  positions = [.22, .30, .45, .60, .8, 1.]
  positions = [int(line_length * p) for p in positions]
  headers = [
      "TRTEngineOP Name", "# Nodes", "Input DType", "Output Dtype",
      "Input Shape", "Output Shape"
  ]
  _print_row(headers, positions, print_fn=print_fn)
  print_fn("=" * line_length)

  n_engines = 0
  n_ops_converted = 0
  n_ops_not_converted = 0

  graphdef = self._converted_func.graph.as_graph_def(add_shapes=True)

  for node in graphdef.node:
    # Non-engine ops count toward the "not converted" total.
    if node.op != "TRTEngineOp":
      n_ops_not_converted += 1
      continue
    else:
      n_engines += 1

      in_shapes = _extract_shapes_from_node(node, "input_shapes")
      out_shapes = _extract_shapes_from_node(node, "_output_shapes")
      in_dtypes = _get_engine_dtypes_from_node(node, "InT")
      out_dtypes = _get_engine_dtypes_from_node(node, "OutT")
      # Count the nodes folded into this engine's native segment.
      node_count, converted_ops_dict = _get_nodes_in_engine(
          graphdef, node.name)

      n_ops_converted += node_count

      # Separate consecutive engine entries visually.
      if n_engines != 1:
        print_fn(f"\n{'-'*40}\n")

      _print_row([
          node.name, node_count, in_dtypes, out_dtypes, in_shapes, out_shapes
      ],
                 positions,
                 print_fn=print_fn)

      if detailed:
        print_fn()
        for key, value in sorted(dict(converted_ops_dict).items()):
          print_fn(f"\t- {key}: {value}x")

  print_fn(f"\n{'='*line_length}")
  print_fn(f"[*] Total number of TensorRT engines: {n_engines}")
  total_ops = n_ops_not_converted + n_ops_converted
  # NOTE(review): divides by total_ops — assumes the graph has at least one
  # node; an empty graph would raise ZeroDivisionError.
  conversion_ratio = n_ops_converted / total_ops * 100
  print_fn(f"[*] % of OPs Converted: {conversion_ratio:.2f}% "
           f"[{n_ops_converted}/{total_ops}]\n")
# TODO(laigd): use TrtConversionParams here.
def create_inference_graph(
    input_graph_def,
    outputs,
    max_batch_size=1,
    max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
    precision_mode=TrtPrecisionMode.FP32,
    minimum_segment_size=3,
    is_dynamic_op=False,
    maximum_cached_engines=1,
    input_saved_model_dir=None,
    input_saved_model_tags=None,
    input_saved_model_signature_key=None,
    output_saved_model_dir=None):
  """Python wrapper for the TRT transformation.

  Rewrites the given graph (or the graph of a SavedModel) so that every
  TensorRT-compatible subgraph is replaced by a TRTEngineOp, delegating the
  actual conversion to `TrtGraphConverter`.

  Args:
    input_graph_def: GraphDef to transform. If None, the graph is read from
      the SavedModel in `input_saved_model_dir`.
    outputs: list of output tensors or node names. Only used when
      `input_graph_def` is not None.
    max_batch_size: max size for the input batch.
    max_workspace_size_bytes: maximum GPU temporary memory the TRT engine may
      use at execution time (nvinfer1::IBuilder::setMaxWorkspaceSize).
    precision_mode: one of TrtPrecisionMode.supported_precision_modes().
    minimum_segment_size: minimum number of nodes required for a subgraph to
      be replaced by a TRTEngineOp.
    is_dynamic_op: whether to build the TRT network and engine at run time
      instead of conversion time.
    maximum_cached_engines: max number of cached TRT engines in dynamic TRT
      ops; when the cache is full and no engine matches, the op falls back to
      the original TF function.
    input_saved_model_dir: directory of the SavedModel to load when
      `input_graph_def` is None.
    input_saved_model_tags: list of tags used to load the SavedModel.
    input_saved_model_signature_key: key of the signature to optimize for.
    output_saved_model_dir: if not None (and the input came from a
      SavedModel), a SavedModel with the converted graph is written here.

  Returns:
    The converted GraphDef, where all TRT-compatible subgraphs are replaced
    with TRTEngineOps. With `is_dynamic_op` the engines are built lazily at
    execution time and cached; otherwise each op carries a serialized engine
    and falls back to the TF function if execution fails.

  Raises:
    ValueError: if the combination of the parameters is invalid.
  """
  converter = TrtGraphConverter(
      input_saved_model_dir=input_saved_model_dir,
      input_saved_model_tags=input_saved_model_tags,
      input_saved_model_signature_key=input_saved_model_signature_key,
      input_graph_def=input_graph_def,
      nodes_denylist=outputs,
      max_batch_size=max_batch_size,
      max_workspace_size_bytes=max_workspace_size_bytes,
      precision_mode=precision_mode,
      minimum_segment_size=minimum_segment_size,
      is_dynamic_op=is_dynamic_op,
      maximum_cached_engines=maximum_cached_engines,
      use_calibration=False)
  converted_graph_def = converter.convert()
  # Only meaningful when the input came from a SavedModel; `save` validates.
  if output_saved_model_dir:
    converter.save(output_saved_model_dir)
  return converted_graph_def
| 42.876582 | 82 | 0.712643 |
acf82926b4eae9bfb1b69eb32664125f19b105f7 | 5,119 | py | Python | benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py | hekaplex/resnet_dl | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | [
"Apache-2.0"
] | null | null | null | benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py | hekaplex/resnet_dl | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | [
"Apache-2.0"
] | null | null | null | benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py | hekaplex/resnet_dl | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | [
"Apache-2.0"
] | null | null | null | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from common.base_model_init import BaseModelInitializer
from common.base_model_init import set_env_var
import argparse
import os
class ModelInitializer(BaseModelInitializer):
    """Model initializer for resnet50 int8 inference.

    Builds and launches the benchmark / accuracy / calibration command lines
    for the int8 ResNet50 model, wiring thread counts and KMP/OMP environment
    variables from the parsed arguments.
    """

    def __init__(self, args, custom_args=None, platform_util=None):
        # Bug fix: the original default `custom_args=[]` was a shared mutable
        # default argument; use None and substitute a fresh list instead.
        super(ModelInitializer, self).__init__(
            args, [] if custom_args is None else custom_args, platform_util)
        # Set the num_inter_threads and num_intra_threads
        self.set_num_inter_intra_threads()
        # Set env vars, if they haven't already been set
        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads, overwrite_existing=True)

    def parse_args(self):
        """Parse model-specific custom args into self.args and set KMP/OMP env vars."""

        def _str_to_bool(value):
            # Bug fix: argparse's `type=bool` is a known trap — bool("False")
            # is True, so "--calibrate=False" used to enable calibration.
            # Parse the usual textual spellings of a boolean instead.
            return str(value).strip().lower() in ("true", "1", "yes")

        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--warmup-steps", dest="warmup_steps",
            help="number of warmup steps",
            type=int, default=10)
        parser.add_argument(
            "--steps", dest="steps",
            help="number of steps",
            type=int, default=50)
        parser.add_argument(
            '--kmp-blocktime', dest='kmp_blocktime',
            help='number of kmp block time',
            type=int, default=1)
        parser.add_argument(
            "--calibration-only",
            help="Calibrate the accuracy.",
            dest="calibration_only", action="store_true")
        parser.add_argument(
            "--calibrate", dest="calibrate",
            help=" run accuracy with calibration data, "
                 "to generate min_max ranges, calibrate=[True/False]",
            type=_str_to_bool, default=False)
        self.args = parser.parse_args(self.custom_args,
                                      namespace=self.args)
        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
        # NOTE(review): OMP_NUM_THREADS was already set in __init__ with
        # overwrite_existing=True; this second call (no overwrite) is kept to
        # preserve the original behavior.
        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

    def run_benchmark_or_accuracy(self):
        """Build and launch the inference benchmark / accuracy command."""
        cmd = os.path.join(
            self.args.intelai_models, self.args.mode,
            "eval_image_classifier_inference.py")
        cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd
        cmd += " --input-graph=" + self.args.input_graph + \
            " --num-inter-threads=" + str(self.args.num_inter_threads) + \
            " --num-intra-threads=" + str(self.args.num_intra_threads) + \
            " --batch-size=" + str(self.args.batch_size) + \
            " --warmup-steps=" + str(self.args.warmup_steps) + \
            " --steps=" + str(self.args.steps)
        if self.args.calibrate:
            cmd += " --calibrate=" + str(self.args.calibrate)
        if self.args.data_num_inter_threads:
            cmd += " --data-num-inter-threads=" + str(self.args.data_num_inter_threads)
        if self.args.data_num_intra_threads:
            cmd += " --data-num-intra-threads=" + str(self.args.data_num_intra_threads)
        # if the data location directory is not empty, then include the arg
        if self.args.data_location and os.listdir(self.args.data_location):
            cmd += " --data-location=" + self.args.data_location
        if self.args.accuracy_only:
            cmd += " --accuracy-only"
        self.run_command(cmd)

    def run_calibration(self):
        """Build and launch the calibration-data generation command."""
        calibration_script = os.path.join(self.args.intelai_models,
                                          self.args.precision,
                                          "generate_calibration_data.py")
        script_args_list = [
            "input_graph", "data_location",
            "batch_size",
            "num_inter_threads", "num_intra_threads"]
        cmd_prefix = self.get_command_prefix(self.args.socket_id) + \
            self.python_exe + " " + calibration_script
        cmd = self.add_args_to_command(cmd_prefix, script_args_list)
        self.run_command(cmd)

    def run(self):
        """Entry point: parse custom args, then run calibration or benchmark."""
        # Parse custom arguments and append to self.args
        self.parse_args()
        if self.args.accuracy_only and self.args.calibration_only:
            self.run_calibration()
        else:
            self.run_benchmark_or_accuracy()
| 41.282258 | 107 | 0.622973 |
acf8298035d5020ee4f450ea6b5ccdf7bde1bf82 | 3,051 | py | Python | pymc3/variational/stein.py | cowirihy/pymc3 | f0b95773047af12f3c0ded04d707f02ddc4d4f6b | [
"Apache-2.0"
] | 1 | 2020-09-05T05:52:09.000Z | 2020-09-05T05:52:09.000Z | pymc3/variational/stein.py | cowirihy/pymc3 | f0b95773047af12f3c0ded04d707f02ddc4d4f6b | [
"Apache-2.0"
] | null | null | null | pymc3/variational/stein.py | cowirihy/pymc3 | f0b95773047af12f3c0ded04d707f02ddc4d4f6b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import theano
import theano.tensor as tt
from pymc3.variational.opvi import node_property
from pymc3.variational.test_functions import rbf
from pymc3.theanof import floatX, change_flags
from pymc3.memoize import WithMemoization, memoize
__all__ = [
'Stein'
]
class Stein(WithMemoization):
    """Symbolic Stein variational gradient (SVGD) for an approximation.

    The update direction combines a kernel-weighted density term (pulling
    particles toward high posterior density) with a repulsive term (keeping
    particles apart), with the density part annealed by ``temperature``.
    """

    def __init__(self, approx, kernel=rbf, use_histogram=True, temperature=1):
        self.approx = approx
        self.temperature = floatX(temperature)
        self._kernel_f = kernel
        self.use_histogram = use_histogram

    @property
    def input_joint_matrix(self):
        # Particle matrix the kernel is evaluated on.
        if self.use_histogram:
            return self.approx.joint_histogram
        return self.approx.symbolic_random

    @node_property
    def approx_symbolic_matrices(self):
        if not self.use_histogram:
            return self.approx.symbolic_randoms
        return self.approx.collect('histogram')

    @node_property
    def dlogp(self):
        # Gradient of the normalized log posterior w.r.t. every particle
        # matrix, flattened to 2-D and concatenated along the last axis.
        grads = tt.grad(
            self.logp_norm.sum(),
            self.approx_symbolic_matrices
        )
        flattened = [g.flatten(2) for g in grads]
        return tt.concatenate(flattened, -1)

    @node_property
    def grad(self):
        n_particles = floatX(self.input_joint_matrix.shape[0])
        svgd_direction = (self.density_part_grad / self.temperature
                          + self.repulsive_part_grad)
        return svgd_direction / n_particles

    @node_property
    def density_part_grad(self):
        # Kernel-smoothed density gradient: K(x, .) @ dlogp/dx.
        return tt.dot(self.Kxy, self.dlogp)

    @node_property
    def repulsive_part_grad(self):
        scaling = self.approx.symbolic_normalizing_constant
        return self.dxkxy / scaling

    @property
    def Kxy(self):
        kernel_matrix, _ = self._kernel()
        return kernel_matrix

    @property
    def dxkxy(self):
        _, kernel_gradient = self._kernel()
        return kernel_gradient

    @node_property
    def logp_norm(self):
        logp = self.approx.sized_symbolic_logp
        if self.use_histogram:
            # Evaluate the symbolic logp at the histogram particles.
            replacements = dict(zip(self.approx.symbolic_randoms,
                                    self.approx.collect('histogram')))
            logp = theano.clone(logp, replacements)
        return logp / self.approx.symbolic_normalizing_constant

    @memoize
    @change_flags(compute_test_value='off')
    def _kernel(self):
        return self._kernel_f(self.input_joint_matrix)
| 30.207921 | 89 | 0.667978 |
acf82aa234a3b8caac5531b38e6cea5ac345bbf6 | 8,950 | py | Python | src/host/host.py | aminiok1/lamp-alveo | 35ee3b2fe9b981756fae4b292c8625c1eb5bec01 | [
"MIT"
] | null | null | null | src/host/host.py | aminiok1/lamp-alveo | 35ee3b2fe9b981756fae4b292c8625c1eb5bec01 | [
"MIT"
] | null | null | null | src/host/host.py | aminiok1/lamp-alveo | 35ee3b2fe9b981756fae4b292c8625c1eb5bec01 | [
"MIT"
] | null | null | null |
from keras.datasets import mnist
from keras.utils import to_categorical
from ctypes import *
from typing import List
import cv2
import numpy as np
import xir
import vart
import os
import math
import threading
import time
import sys
import time
from TimeSeriesGeneratorAdapt import MPTimeseriesGenerator
import scipy.io as sio
# np.set_printoptions(threshold=sys.maxsize)
"""
obtain dpu subgrah
"""
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
    """Collect every child subgraph of `graph` whose `device` attribute is DPU."""
    assert graph is not None, "'graph' should not be None."
    root = graph.get_root_subgraph()
    assert (
        root is not None
    ), "Failed to get root subgraph of input Graph object."
    # A leaf root has no children to dispatch to the DPU.
    if root.is_leaf:
        return []
    children = root.toposort_child_subgraph()
    assert children is not None and len(children) > 0
    dpu_subgraphs = []
    for child in children:
        # Case-insensitive match on the "device" attribute.
        if child.has_attr("device") and child.get_attr("device").upper() == "DPU":
            dpu_subgraphs.append(child)
    return dpu_subgraphs
def CPUCalcSoftmax(data, size):
    """Softmax over each of the first `size` 10-way score vectors in `data`.

    For every processed entry the index of the highest-probability class is
    printed (preserving the original behavior) and also collected into the
    returned list, which makes the function usable programmatically.

    Bug fixes vs. the original: `datanp.reshape(10)` discarded its result
    (numpy reshape is not in-place), and the manual exp/sum loop duplicated
    what numpy does in one vectorized pass.

    Args:
        data: sequence of length-10 score vectors (anything np.asarray accepts).
        size: number of leading entries of `data` to process.

    Returns:
        list[int]: predicted class index for each processed entry.
    """
    predictions = []
    for j in range(size):
        scores = np.asarray(data[j], dtype=np.float64).reshape(10)
        # Subtract the max before exponentiating for numerical stability;
        # this does not change the resulting probabilities or the argmax.
        exp_scores = np.exp(scores - scores.max())
        probs = exp_scores / exp_scores.sum()
        pred = int(np.argmax(probs))
        print(pred)
        predictions.append(pred)
    return predictions
def runFC(runner, data):
    """Run the fully-connected tail of the model on a VART DPU runner.

    Feeds batches built from `data` through `runner` and applies a sigmoid
    to each raw output batch, accumulating results in `sig_out`.

    Args:
        runner: vart.Runner created for the FC subgraph.
        data: sequence of per-sample arrays; each entry must reshape to the
            runner's per-sample input shape.

    NOTE(review): the final `return` is commented out, so `sig_out` is
    currently discarded — confirm whether callers need the results.
    """
    # Query the runner's tensor shapes; the leading input dim is the batch size.
    inputTensors = runner.get_input_tensors()
    outputTensors = runner.get_output_tensors()
    input_ndim = tuple(inputTensors[0].dims)
    pre_output_size = int(outputTensors[0].get_data_size() / input_ndim[0])
    output_ndim = tuple(outputTensors[0].dims)
    runSize = input_ndim[0]  # samples per DPU invocation
    sig_out = []
    # Reusable C-contiguous float32 buffers handed to the DPU job.
    inputData = [np.empty(input_ndim, dtype=np.float32, order="C")]
    outputData = [np.empty(output_ndim, dtype=np.float32, order="C")]
    # Processes a fixed 128 samples (not len(data)); indices wrap via modulo.
    for i in range(0, 128, runSize):#((len(data)//runSize) + 1):
        for j in range(runSize):
            imageRun = inputData[0]
            # NOTE(review): every j in this batch copies the same sample
            # data[i % len(data)] — confirm whether data[(i + j) % len(data)]
            # was intended.
            imageRun[j, ...] = data[i % len(data)].reshape(input_ndim[1:])
        #print("imageRun shape = {}, input_ndim = {}, pre_output_size = {}, output_ndim = {}, runSize = {}".format(imageRun.shape, input_ndim, pre_output_size, output_ndim, runSize))
        # Submit the batch to the DPU and block until it completes.
        job_id = runner.execute_async(inputData, outputData)
        runner.wait(job_id)
        outDataNp = np.asarray(outputData)
        # Element-wise sigmoid on the raw DPU output.
        res = 1/(1 + np.exp(-outDataNp))
        sig_out.append(res)
    #return runSize, sig_out
def runLamp(runner1, runner2, runner3, data, result, idx):
    """Run one batch through the three chained LAMP DPU partitions and time it.

    The LAMP model is split into three compiled subgraphs; the output buffer
    of each runner is copied into the input buffer of the next. The summed
    execution time of the three DPU jobs is printed.

    Args:
        runner1, runner2, runner3: vart.Runner objects for the three model
            partitions, executed in order.
        data: sequence of per-sample input arrays for the first partition.
        result: shared results list (NOTE(review): never written here —
            confirm whether outputs were meant to be stored at `result[idx]`).
        idx: this worker's index into `result` (currently unused).
    """
    # Shapes for partition 1; the leading input dim is the batch size.
    inputTensors1 = runner1.get_input_tensors()
    outputTensors1 = runner1.get_output_tensors()
    input_ndim1 = tuple(inputTensors1[0].dims)
    pre_output_size1 = int(outputTensors1[0].get_data_size() / input_ndim1[0])
    output_ndim1 = tuple(outputTensors1[0].dims)
    runSize1 = input_ndim1[0]
    # Shapes for partition 2.
    inputTensors2 = runner2.get_input_tensors()
    outputTensors2 = runner2.get_output_tensors()
    input_ndim2 = tuple(inputTensors2[0].dims)
    pre_output_size2 = int(outputTensors2[0].get_data_size() / input_ndim2[0])
    output_ndim2 = tuple(outputTensors2[0].dims)
    runSize2 = input_ndim2[0]
    # Shapes for partition 3.
    inputTensors3 = runner3.get_input_tensors()
    outputTensors3 = runner3.get_output_tensors()
    input_ndim3 = tuple(inputTensors3[0].dims)
    pre_output_size3 = int(outputTensors3[0].get_data_size() / input_ndim3[0])
    output_ndim3 = tuple(outputTensors3[0].dims)
    runSize3 = input_ndim3[0]
    print("run sizes = {} {} {}".format(runSize1, runSize2, runSize3))
    #gap_output = []
    #print("runSize = {}".format(runSize))
    #print("valid_gen size = {}".format(len(valid_gen)))
    #for w in range(1):#(len(valid_gen)):
    #print("X_valid size ={}".format(len(X_valid)))
    # Reusable C-contiguous float32 buffers for each partition's DPU job.
    inputData1 = [np.empty(input_ndim1, dtype=np.float32, order="C")]
    outputData1 = [np.empty(output_ndim1, dtype=np.float32, order="C")]
    inputData2 = [np.empty(input_ndim2, dtype=np.float32, order="C")]
    outputData2 = [np.empty(output_ndim2, dtype=np.float32, order="C")]
    inputData3 = [np.empty(input_ndim3, dtype=np.float32, order="C")]
    outputData3 = [np.empty(output_ndim3, dtype=np.float32, order="C")]
    #for i in range(0, len(data), runSize):
    # Single timed iteration (benchmark mode); the commented loop above would
    # cover the whole dataset instead.
    for i in range(1):
        # Stage 1: fill the batch and run partition 1, timing the DPU job.
        for j in range(runSize1):
            imageRun1 = inputData1[0]
            imageRun1[j, ...] = data[i%len(data)].reshape(input_ndim1[1:])
        start = time.time()
        job_id = runner1.execute_async(inputData1, outputData1)
        runner1.wait(job_id)
        end = time.time()
        # Stage 2: feed partition 1's output buffer into partition 2.
        for j in range(runSize2):
            imageRun2 = inputData2[0]
            imageRun2[j, ...] = outputData1[0][i%len(data)].reshape(input_ndim2[1:])
        start2 = time.time()
        job_id = runner2.execute_async(inputData2, outputData2)
        runner2.wait(job_id)
        end2 = time.time()
        # Stage 3: feed partition 2's output buffer into partition 3.
        for j in range(runSize3):
            imageRun3 = inputData3[0]
            imageRun3[j, ...] = outputData2[0][i%len(data)].reshape(input_ndim3[1:])
        start3 = time.time()
        job_id = runner3.execute_async(inputData3, outputData3)
        runner3.wait(job_id)
        end3 = time.time()
        # Report the summed DPU execution time of the three stages.
        print("timeeee = {}".format((end - start) + (end2 - start2) + (end3 - start3)))
# Matrix Profile configs
# Window / sampling parameters for the LAMP matrix-profile model.
matrix_profile_window = 256
sample_rate = 20
lookbehind_seconds = 0
lookahead_seconds = 0
subsequence_stride = 256
lookbehind = sample_rate * lookbehind_seconds
num_outputs = 256
lookahead = sample_rate * lookahead_seconds
forward_sequences = lookahead + num_outputs
subsequences_per_input = lookbehind + num_outputs + lookahead
channel_stride = 8
n_input_series = 1
subsequences_per_input = subsequences_per_input // channel_stride
high_weight = 1
low_thresh = -1
high_thresh = 1
batch_size = 128
# Read the input data and generate the corresponding time series
all_data = sio.loadmat('insect_no_classification.mat')
mp_val = np.array(all_data['mp_test'])
ts_val = np.array(all_data['ts_test'])
valid_gen = MPTimeseriesGenerator(data=ts_val,targets= mp_val, num_input_timeseries=1, internal_stride=8, num_outputs=256,lookahead=forward_sequences, lookbehind=lookbehind, important_upper_threshold=high_thresh, important_lower_threshold=low_thresh, important_weight=high_weight, length=256, mp_window=256, stride=num_outputs, batch_size=batch_size)
# Extract x_valid, y_valid
# One batch of inputs per worker thread; thread count comes from argv[1].
threadnum = int(sys.argv[1])
X_valid = []
for i in range(threadnum):
    data = valid_gen[i]
    X, Y_valid = data
    X = np.float32(X)
    X_valid.append(X)
# The LAMP model is split into three compiled xmodel partitions that are
# executed back-to-back on the DPU (see runLamp).
g1 = xir.Graph.deserialize("lamp_pt1.xmodel")
g2 = xir.Graph.deserialize("lamp_pt2.xmodel")
g3 = xir.Graph.deserialize("lamp_pt3.xmodel")
subgraphs1 = get_child_subgraph_dpu(g1)
subgraphs2 = get_child_subgraph_dpu(g2)
subgraphs3 = get_child_subgraph_dpu(g3)
# Threading
all_dpu_runners1 = []
all_dpu_runners2 = []
all_dpu_runners3 = []
all_dpu_runners_fc = []
threadAll = []
threadAllFc = []
results = [None] * threadnum
# One runner per model partition per worker thread.
for i in range(threadnum):
    all_dpu_runners1.append(vart.Runner.create_runner(subgraphs1[0], "run"))
    all_dpu_runners2.append(vart.Runner.create_runner(subgraphs2[0], "run"))
    all_dpu_runners3.append(vart.Runner.create_runner(subgraphs3[0], "run"))
for i in range(threadnum):
    t1 = threading.Thread(target=runLamp, args=(all_dpu_runners1[i], all_dpu_runners2[i], all_dpu_runners3[i], X_valid[i], results, i))
    threadAll.append(t1)
# Wall-clock time for all worker threads to finish.
time_start = time.time()
for x in threadAll:
    x.start()
for x in threadAll:
    x.join()
time_end = time.time()
print("total time = {}".format(time_end - time_start))
exit(0)
# NOTE(review): everything below is dead code — unreachable after exit(0).
# It references an undefined `subgraphs`, so it would fail if re-enabled as-is.
for i in range(threadnum):
    all_dpu_runners_fc.append(vart.Runner.create_runner(subgraphs[1], "run"))
for i in range(threadnum):
    t1 = threading.Thread(target=runFC, args=(all_dpu_runners_fc[i], results[i]))
    threadAllFc.append(t1)
for x in threadAllFc:
    x.start()
for x in threadAllFc:
    x.join()
#runner = vart.Runner.create_runner(subgraphs[0], "run")
#runSize, outData = runLamp(runner, X_valid)
'''
outData.resize(len(outData) * runSize, 192)
runner2 = vart.Runner.create_runner(subgraphs[1], "run")
fcRunSize, res = runFC(runner2, outData)
#for i in range(len(valid_gen)):
#    data = valid_gen[i]
#    X_valid, Y_valid = data
#    y.append(Y_valid)
#y = np.asarray(y)
#y.resize(len(valid_gen) * 128, 256)
data_ctr = -1
yindx = 0
final_res = 0
for j in range(len(res)):
    r = np.asarray(res[j])
    r.resize(fcRunSize, 256)
    for i in range(fcRunSize):
        if ((j * fcRunSize + i) % 128 == 0):
            data_ctr += 1
            yindx = 0
            data = valid_gen[data_ctr]
            X_valid, Y_valid = data
        final = (np.mean(np.abs((r[i] - Y_valid[yindx])/r[i])) * 100)
        print(final)
        final_res += final
        yindx += 1
print("final = {}".format(final_res))
'''
| 28.233438 | 350 | 0.666927 |
acf82af8835bb30609bb5e5526678751652b528e | 20,486 | py | Python | model/ssd_aspp_a.py | dishen12/ssd_aspp | 1243743ed2ed57a0cb8ee00776e891424fe006b9 | [
"MIT"
] | null | null | null | model/ssd_aspp_a.py | dishen12/ssd_aspp | 1243743ed2ed57a0cb8ee00776e891424fe006b9 | [
"MIT"
] | null | null | null | model/ssd_aspp_a.py | dishen12/ssd_aspp | 1243743ed2ed57a0cb8ee00776e891424fe006b9 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
from data import voc, coco
import os
"""
extra layer 中存在两个问题:
1.每次conv后没有relu激活
2.batchNormalation没有启用
但是貌似跑benchmark的时候,都没有启用,之后刷指标可以考虑用下
考虑RFBnet中dilate conv前边加的conv的作用
"""
class BasicConv(nn.Module):
    """Conv2d followed by an optional BatchNorm2d and an optional ReLU."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Optional stages are stored as None when disabled so forward() can
        # skip them with a simple None check.
        if bn:
            self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
        else:
            self.bn = None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Aspp_a(nn.Module):
    """Parallel ASPP block with a residual shortcut.

    Each branch is a 1x1 conv (which carries the stride, keeping all branch
    outputs the same spatial size) followed by a 3x3 conv dilated by one of
    the entries in `rate`. Branch outputs are concatenated, projected back to
    `out_planes` by a 1x1 conv, scaled by `scale`, added to a 1x1 shortcut of
    the input, and passed through a ReLU. Supports 3 or 4 dilation rates.
    """

    def __init__(self, in_planes, out_planes, stride=1, scale=0.1, rate=[6, 3, 2, 1]):
        super(Aspp_a, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        self.rate = rate
        inter_planes = in_planes // 8

        def dilated_branch(r):
            # 1x1 reduction (carries the stride) + 3x3 dilated conv with
            # padding == dilation so the spatial size is preserved.
            return nn.Sequential(
                BasicConv(in_planes, 2 * inter_planes, kernel_size=1, stride=stride),
                BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
                          padding=r, dilation=r, relu=False))

        if len(rate) == 4:
            self.branch0 = dilated_branch(rate[0])
            self.branch1 = dilated_branch(rate[1])
            self.branch2 = dilated_branch(rate[2])
            self.branch3 = dilated_branch(rate[3])
            self.ConvLinear = BasicConv(8 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
            self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
            self.relu = nn.ReLU(inplace=False)
        elif len(rate) == 3:
            self.branch0 = dilated_branch(rate[0])
            self.branch1 = dilated_branch(rate[1])
            self.branch2 = dilated_branch(rate[2])
            self.ConvLinear = BasicConv(6 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
            self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
            self.relu = nn.ReLU(inplace=False)
        else:
            print("error! the rate is incorrect!")

    def forward(self, x):
        if len(self.rate) == 4:
            feats = [self.branch0(x), self.branch1(x), self.branch2(x), self.branch3(x)]
        elif len(self.rate) == 3:
            feats = [self.branch0(x), self.branch1(x), self.branch2(x)]
        else:
            print("error!")
            return
        fused = self.ConvLinear(torch.cat(feats, 1))
        residual = self.shortcut(x)
        return self.relu(fused * self.scale + residual)
class Aspp_b_2(nn.Module):
    """Cascaded + parallel ASPP block with a residual shortcut.

    Each branch i applies a 1x1 conv (carrying the stride) and a 3x3 conv
    dilated by rate[i]; two further 3x3 convs dilated by 2*rate[i] and
    3*rate[i] are chained on top, FPN-style. All intermediate outputs
    (3 per branch) are concatenated, projected back to `out_planes` by a 1x1
    conv, scaled by `scale`, added to a 1x1 shortcut of the input, and passed
    through a ReLU. Supports 3 or 4 dilation rates.

    Bug fix vs. the original: in the len(rate)==3 case, `branch2_1` and
    `branch2_2` used padding derived from rate[0] with dilation derived from
    rate[2]. Since a 3x3 conv only preserves spatial size when
    padding == dilation, that mismatch made torch.cat fail whenever
    rate[0] != rate[2]; padding now matches the dilation.
    """

    def __init__(self, in_planes, out_planes, stride=1, scale=0.1, rate=[6, 3, 2, 1]):
        super(Aspp_b_2, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        self.rate = rate
        inter_planes = in_planes // 8
        width = 2 * inter_planes  # channel width of every branch stage

        def entry(r):
            # 1x1 reduction (carries the stride) + first 3x3 dilated conv.
            return nn.Sequential(
                BasicConv(in_planes, width, kernel_size=1, stride=stride),
                BasicConv(width, width, kernel_size=3, stride=1,
                          padding=r, dilation=r, relu=False))

        def chained(r):
            # Follow-up 3x3 conv; padding == dilation preserves spatial size
            # so all stage outputs can be concatenated.
            return BasicConv(width, width, kernel_size=3, stride=1,
                             padding=r, dilation=r, relu=False)

        if len(rate) == 4:
            self.branch0 = entry(rate[0])
            self.branch0_1 = chained(2 * rate[0])
            self.branch0_2 = chained(3 * rate[0])
            self.branch1 = entry(rate[1])
            self.branch1_1 = chained(2 * rate[1])
            self.branch1_2 = chained(3 * rate[1])
            self.branch2 = entry(rate[2])
            self.branch2_1 = chained(2 * rate[2])
            self.branch2_2 = chained(3 * rate[2])
            self.branch3 = entry(rate[3])
            self.branch3_1 = chained(2 * rate[3])
            self.branch3_2 = chained(3 * rate[3])
            # 4 branches x 3 stages x width == 24 * inter_planes channels.
            self.ConvLinear = BasicConv(24 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
            self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
            self.relu = nn.ReLU(inplace=False)
        elif len(rate) == 3:
            self.branch0 = entry(rate[0])
            self.branch0_1 = chained(2 * rate[0])
            self.branch0_2 = chained(3 * rate[0])
            self.branch1 = entry(rate[1])
            self.branch1_1 = chained(2 * rate[1])
            self.branch1_2 = chained(3 * rate[1])
            self.branch2 = entry(rate[2])
            self.branch2_1 = chained(2 * rate[2])  # was padding=2*rate[0]: mismatch fixed
            self.branch2_2 = chained(3 * rate[2])  # was padding=3*rate[0]: mismatch fixed
            # 3 branches x 3 stages x width == 18 * inter_planes channels.
            self.ConvLinear = BasicConv(18 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
            self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
            self.relu = nn.ReLU(inplace=False)
        else:
            print("error! the rate is incorrect!")

    def forward(self, x):
        if len(self.rate) == 4:
            branches = [(self.branch0, self.branch0_1, self.branch0_2),
                        (self.branch1, self.branch1_1, self.branch1_2),
                        (self.branch2, self.branch2_1, self.branch2_2),
                        (self.branch3, self.branch3_1, self.branch3_2)]
        elif len(self.rate) == 3:
            branches = [(self.branch0, self.branch0_1, self.branch0_2),
                        (self.branch1, self.branch1_1, self.branch1_2),
                        (self.branch2, self.branch2_1, self.branch2_2)]
        else:
            print("error!")
            return
        # Concatenation order matches the original: per branch, the outputs
        # of the first, second and third stage in sequence.
        feats = []
        for first, second, third in branches:
            a = first(x)
            b = second(a)
            c = third(b)
            feats.extend([a, b, c])
        out = self.ConvLinear(torch.cat(feats, 1))
        short = self.shortcut(x)
        return self.relu(out * self.scale + short)
class SSD(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base VGG network followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
Args:
phase: (string) Can be "test" or "train"
size: input image size
base: VGG16 layers for input, size of either 300 or 500
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes,Rate=[6,3,2,1]):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = (coco, voc)[num_classes == 21]
self.priorbox = PriorBox(self.cfg)
self.priors = Variable(self.priorbox.forward(), volatile=True)
self.size = size
# SSD network
self.vgg = nn.ModuleList(base)
# Layer learns to scale the l2 normalized features from conv4_3
self.L2Norm = L2Norm(512, 20)
self.aspp_a_4 = Aspp_a(512,512,stride=1,scale=1,rate=Rate)
self.aspp_a_7 = Aspp_a(1024,1024,stride=1,scale=1,rate=Rate)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
def forward(self, x):
    """Applies network layers and ops on input image(s) x.

    Args:
        x: input image or batch of images. Shape: [batch,3,300,300].

    Return:
        Depending on phase:
        test:
            Variable(tensor) of output class label predictions,
            confidence score, and corresponding location predictions for
            each object detected. Shape: [batch,topk,7]
        train:
            tuple of outputs from:
                1: localization layers, Shape: [batch,num_priors*4]
                2: confidence layers, Shape: [batch*num_priors,num_classes]
                3: priorbox layers, Shape: [2,num_priors*4]
    """
    sources = list()
    loc = list()
    conf = list()

    # apply vgg up to conv4_3 relu
    for k in range(23):
        x = self.vgg[k](x)

    # BUG FIX: the original computed `s = self.L2Norm(x)` here and then
    # immediately overwrote `s` with the ASPP output, so the L2Norm result
    # was never used; the dead call has been removed (self.L2Norm is now
    # unused in forward — kept on the module for checkpoint compatibility).
    s = self.aspp_a_4(x)
    sources.append(s)

    # apply vgg up to fc7
    for k in range(23, len(self.vgg)):
        x = self.vgg[k](x)
    s = self.aspp_a_7(x)
    sources.append(s)

    # apply extra layers and cache every second layer's output as a source
    for k, v in enumerate(self.extras):
        x = F.relu(v(x), inplace=True)
        if k % 2 == 1:
            sources.append(x)

    # apply multibox head to source layers; move channels last for flattening
    for (x, l, c) in zip(sources, self.loc, self.conf):
        loc.append(l(x).permute(0, 2, 3, 1).contiguous())
        conf.append(c(x).permute(0, 2, 3, 1).contiguous())
    loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
    conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

    if self.phase == "test":
        output = self.detect(
            loc.view(loc.size(0), -1, 4),                   # loc preds
            self.softmax(conf.view(conf.size(0), -1,
                         self.num_classes)),                # conf preds
            self.priors.type(type(x.data))                  # default boxes
        )
    else:
        output = (
            loc.view(loc.size(0), -1, 4),
            conf.view(conf.size(0), -1, self.num_classes),
            self.priors
        )
    return output
def load_weights(self, base_file):
    """Load a saved state dict from *base_file* (.pth or .pkl) onto CPU.

    Prints progress; unsupported extensions are rejected with a message.
    """
    _, ext = os.path.splitext(base_file)
    # BUG FIX: the original test was `ext == '.pkl' or '.pth'`, which is
    # always truthy (the non-empty string '.pth' is True), so the "only
    # .pth/.pkl supported" branch was unreachable.
    if ext in ('.pkl', '.pth'):
        print('Loading weights into state dict...')
        # map_location keeps everything on CPU regardless of where it was saved
        self.load_state_dict(torch.load(base_file,
                             map_location=lambda storage, loc: storage))
        print('Finished!')
    else:
        print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
    """Build the VGG-16 backbone as a flat list of layers.

    cfg entries: an int adds a 3x3 conv (+ optional BN) + ReLU with that many
    output channels; 'M' adds a 2x2 max pool; 'C' adds a 2x2 max pool with
    ceil_mode (rounds the output size up). The fc6/fc7 replacement layers
    (pool5, dilated conv6, 1x1 conv7) are always appended at the end.
    """
    layers = []
    channels = i
    for entry in cfg:
        if entry == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif entry == 'C':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            conv = nn.Conv2d(channels, entry, kernel_size=3, padding=1)
            if batch_norm:
                layers.extend([conv, nn.BatchNorm2d(entry), nn.ReLU(inplace=True)])
            else:
                layers.extend([conv, nn.ReLU(inplace=True)])
            channels = entry
    # fc6/fc7 replacements: dilated conv6 enlarges the receptive field
    layers.extend([
        nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
        nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
        nn.ReLU(inplace=True),
        nn.Conv2d(1024, 1024, kernel_size=1),
        nn.ReLU(inplace=True),
    ])
    return layers
def add_extras(cfg, i, batch_norm=False):
    """Build the extra feature-scaling layers appended after the VGG backbone.

    cfg entries: an int adds a conv with that many output channels; an 'S'
    marker adds a stride-2 3x3 conv whose output channels come from the NEXT
    cfg entry (which is then skipped as a layer). Kernel size alternates
    1x1 / 3x3 between consecutive convs, as in the reference SSD code.
    `batch_norm` is accepted for signature parity but unused (as originally).
    """
    layers = []
    channels = i
    use_3x3 = False  # alternates the kernel size, same role as the original `flag`
    for idx, entry in enumerate(cfg):
        if channels != 'S':
            if entry == 'S':
                layers.append(nn.Conv2d(channels, cfg[idx + 1],
                                        kernel_size=(1, 3)[use_3x3],
                                        stride=2, padding=1))
            else:
                layers.append(nn.Conv2d(channels, entry,
                                        kernel_size=(1, 3)[use_3x3]))
            use_3x3 = not use_3x3
        channels = entry
    return layers
def add_extras_aspp(cfg, i, batch_norm=False, Rate=[6, 3, 2, 1]):
    """Variant of add_extras where every 'S' (stride-2) stage is replaced by
    an Aspp_a block using the given dilation rates.

    Non-'S' entries still become plain convs with alternating 1x1/3x3 kernels.
    `batch_norm` is accepted for signature parity but unused (as originally);
    the mutable default for Rate is kept for interface compatibility — it is
    never mutated.
    """
    layers = []
    channels = i
    use_3x3 = False  # alternates the conv kernel size, as in add_extras
    for idx, entry in enumerate(cfg):
        if channels != 'S':
            if entry == 'S':
                layers.append(Aspp_a(channels, cfg[idx + 1],
                                     stride=2, scale=1, rate=Rate))
            else:
                layers.append(nn.Conv2d(channels, entry,
                                        kernel_size=(1, 3)[use_3x3]))
            use_3x3 = not use_3x3
        channels = entry
    return layers
def multibox(vgg, extra_layers, cfg, num_classes):
    """Build the per-source localization and confidence head conv layers.

    The first two detection sources are conv4_3 (backbone index 21) and the
    second-to-last backbone layer; after that, every second extra layer is a
    source. cfg gives the number of default boxes per location for each
    source; each loc head predicts 4 offsets per box, each conf head predicts
    num_classes scores per box.
    """
    loc_layers = []
    conf_layers = []
    # heads fed directly from the backbone
    for box_idx, layer_idx in enumerate([21, -2]):
        source_channels = vgg[layer_idx].out_channels
        loc_layers.append(nn.Conv2d(source_channels, cfg[box_idx] * 4,
                                    kernel_size=3, padding=1))
        conf_layers.append(nn.Conv2d(source_channels, cfg[box_idx] * num_classes,
                                     kernel_size=3, padding=1))
    # heads fed from every second extra layer; cfg indexing continues at 2
    for box_idx, extra in enumerate(extra_layers[1::2], 2):
        loc_layers.append(nn.Conv2d(extra.out_channels, cfg[box_idx] * 4,
                                    kernel_size=3, padding=1))
        conf_layers.append(nn.Conv2d(extra.out_channels, cfg[box_idx] * num_classes,
                                     kernel_size=3, padding=1))
    return vgg, extra_layers, (loc_layers, conf_layers)
# Per-input-size configurations, keyed by the input resolution as a string.
# Only the 300x300 (SSD300) configuration is populated; '512' is a stub.
# base: VGG channel plan ('M' = max pool, 'C' = ceil-mode max pool).
base = {
    '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
            512, 512, 512],
    '512': [],
}
# extras: channel plan for the extra layers ('S' marks a stride-2 stage
# whose output channels come from the following entry).
extras = {
    '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
    '512': [],
}
mbox = {
    '300': [6, 6, 6, 6, 4, 4],  # number of boxes per feature map location
    '512': [],
}
def build_ssd(phase, size=300, num_classes=21, rate="6,3,2,1"):
    """Construct an SSD300 model.

    Args:
        phase: "test" or "train".
        size: input resolution; only 300 is supported.
        num_classes: number of detection classes.
        rate: comma-separated dilation rates for the ASPP blocks, e.g. "6,3,2,1".

    Returns:
        An SSD instance, or None (after printing an error) for an unsupported
        phase/size — same best-effort behaviour as before.
    """
    Rate = [int(i) for i in rate.strip().split(",")]
    if phase != "test" and phase != "train":
        print("ERROR: Phase: " + phase + " not recognized")
        return
    if size != 300:
        print("ERROR: You specified size " + repr(size) + ". However, " +
              "currently only SSD300 (size=300) is supported!")
        return
    # BUG FIX: Rate was previously passed as the third positional argument of
    # add_extras_aspp, where it landed in the `batch_norm` slot and the parsed
    # rates were silently ignored (the default [6,3,2,1] was used instead).
    # Pass it by keyword so the user-supplied rates actually take effect.
    base_, extras_, head_ = multibox(vgg(base[str(size)], 3),  # 3-channel input image
                                     add_extras_aspp(extras[str(size)], 1024,  # VGG ends at 1024 channels
                                                     Rate=Rate),
                                     mbox[str(size)], num_classes)
    return SSD(phase, size, base_, extras_, head_, num_classes, Rate)
| 46.348416 | 159 | 0.576979 |
acf82b9d09af5d655a1d59283b8c6fb38e79c243 | 375 | py | Python | decimal_to_bin.py | RahulShah9191/data_structure | a1ab1441025535066df69b33dd6a52bdd6535670 | [
"Unlicense"
] | null | null | null | decimal_to_bin.py | RahulShah9191/data_structure | a1ab1441025535066df69b33dd6a52bdd6535670 | [
"Unlicense"
] | null | null | null | decimal_to_bin.py | RahulShah9191/data_structure | a1ab1441025535066df69b33dd6a52bdd6535670 | [
"Unlicense"
] | null | null | null |
def decimal_to_binary(num):
    """Convert a non-negative integer to its binary string representation.

    Fixes the original edge case where 0 returned an empty string; also
    computes divmod once per iteration instead of twice.
    """
    if num == 0:
        return "0"
    digits = ""
    while num > 0:
        num, remainder = divmod(num, 2)
        digits = digits + str(remainder)  # least-significant bit first
    return digits[::-1]
def decimal_to_oct(num):
    """Convert a non-negative integer to its octal string representation.

    Fixes the original edge case where 0 returned an empty string; also
    computes divmod once per iteration instead of twice.
    """
    if num == 0:
        return "0"
    digits = ""
    while num > 0:
        num, remainder = divmod(num, 8)
        digits = digits + str(remainder)  # least-significant digit first
    return digits[::-1]
if __name__ == "__main__":
print(decimal_to_oct(100))
| 17.857143 | 42 | 0.52 |
acf82bfb68e0d9e5be2241a10fc5158d07ac20eb | 4,747 | py | Python | ranger-util/src/scripts/saveVersion.py | tooptoop4/ranger | 3c18a99c2ca5b0a5302d0b646438be2990ee6c34 | [
"Apache-2.0"
] | 4 | 2019-04-20T03:35:03.000Z | 2022-03-29T03:26:32.000Z | ranger-util/src/scripts/saveVersion.py | tooptoop4/ranger | 3c18a99c2ca5b0a5302d0b646438be2990ee6c34 | [
"Apache-2.0"
] | 5 | 2020-03-04T23:08:53.000Z | 2022-03-25T19:36:34.000Z | ranger-util/src/scripts/saveVersion.py | daidd2019/ranger-hdp | 65a72b0882c5cf44529d964095e4c6aaab29f34a | [
"Apache-2.0"
] | 10 | 2021-04-06T06:08:03.000Z | 2022-03-11T08:45:53.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used to generate the package-info.java class that
# records the version, revision, branch, user, timestamp, and url
import os
import re
import sys
import errno
import shlex
import hashlib
from os import listdir
import locale
import datetime
import getpass
import socket
import subprocess
from subprocess import Popen,PIPE
from time import gmtime, strftime
import platform
def isWindowsSystem():
    """Return True when the script is running on a Windows host."""
    return platform.system().find('Windows') != -1
def check_output(query):
    """Run *query* (an argv list) and return its stdout decoded as UTF-8."""
    return subprocess.check_output(query).decode("UTF-8")
def hashfile(afile, hasher, blocksize=65536):
    """Feed *afile* through *hasher* in blocksize-byte chunks.

    Returns the hex digest. Works for any file-like object whose read()
    returns a falsy value (empty bytes/str) at end of stream.
    """
    chunk = afile.read(blocksize)
    while chunk:
        hasher.update(chunk)
        chunk = afile.read(blocksize)
    return hasher.hexdigest()
def main():
    """Generate package-info.java recording version, revision, branch, user,
    timestamp, url and a source checksum for the Ranger build.

    Expects argv: [1] version, [2] short version, [3] source directory.
    """
    # locale placeholders — assigned but never used afterwards
    LANG=None
    LC_CTYPE=None
    LC_TIME=None
    version = sys.argv[1]
    shortversion = sys.argv[2]
    src_dir = os.path.join(sys.argv[3])
    revision = ""
    user = getpass.getuser()
    date = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")
    dir = os.getcwd()
    cwd = dir.strip('scripts')
    cwd = os.path.join(cwd, "src")
    if isWindowsSystem():
        # normalise path separators for the generated url
        cwd = cwd.replace("\\", "/")
    # query git for the current commit/branch; fall back to "Unknown"
    if revision == "" :
        query = (["git","rev-parse","HEAD"])
        output = check_output(query)
        if output != "" :
            revision = output
            hostname = socket.gethostname()
            arr = (["git","rev-parse", "--abbrev-ref", "HEAD"])
            branch = check_output(arr)
            branch = branch.strip("* ")
            url = "git://%s/%s" % (hostname,cwd)
        else:
            revision="Unknown"
            branch="Unknown"
            url="file://cwd"
    if branch == "":
        branch="Unknown"
    if url == "":
        url="file://cwd"
    # collect every .java file under the parent of src_dir and hash each one
    c = []
    fileList = []
    sortedList = []
    parent_dir = os.path.join(src_dir, os.pardir)
    for (dir, _, files) in os.walk(parent_dir):
        for f in files:
            path = os.path.join(dir, f)
            if path.endswith(".java"):
                if os.path.exists(path):
                    fileList.append(path)
            else:
                pass
    # sort ignoring the ".java" suffix so ordering is stable across runs
    sortedList = sorted(fileList, key = lambda x: x[:-4])
    for i, val in enumerate(sortedList):
        m = hashfile(open(val,'rb'), hashlib.md5())
        f = m +" "+ val + "\n"
        c.append(f);
    # checksum of all per-file "md5 path" lines, i.e. of the whole source tree
    srcChecksum = hashlib.md5(''.join(c).encode('UTF-8')).hexdigest()
    print('hash of the ' + str(len(sortedList)) + '\n\t file from: ' + parent_dir + '\n\t is ' + srcChecksum)
    dir = os.path.join(src_dir,"target","gen","org","apache","ranger","common")
    if not os.path.exists(dir):
        os.makedirs(dir)
    # In Windows, all the following string ends with \r, need to get rid of them
    branch = branch.strip('\n\r')
    user = user.strip('\n\r')
    date = date.strip('\n\r')
    url = url.strip('\n\r')
    revision = revision.strip('\n\r')
    srcChecksum = srcChecksum.strip('\n\r')
    # template for the generated Java file; placeholders filled via str.format
    content = """/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Generated by saveVersion.py
*/
@RangerVersionAnnotation(version="{VERSION}", shortVersion="{SHORTVERSION}",revision="{REV}",branch="{BRANCH}", user="{USER}",date="{DATE}", url="{URL}",srcChecksum="{SRCCHECKSUM}")
package org.apache.ranger.common;"""
    content = content.format(VERSION=version,SHORTVERSION=shortversion,USER=user,DATE=date,URL=url,REV=revision,BRANCH=branch,SRCCHECKSUM=srcChecksum)
    des = os.path.join(dir, "package-info.java")
    f = open(des , 'w')
    f.write(content)
    f.close()
main()
| 31.437086 | 183 | 0.695808 |
acf82c932224578f22f953475398d17a65c546c7 | 793 | py | Python | webapi/tct-websocket-w3c-tests/websocket-py/w3c/handlers/set-cookie_http_wsh.py | zhuyongyong/crosswalk-test-suite | 24f3f8cfa663a365b0a22685d5bd096a637f72db | [
"BSD-3-Clause"
] | 3 | 2015-08-12T03:39:31.000Z | 2019-09-18T04:37:54.000Z | webapi/tct-websocket-w3c-tests/websocket-py/w3c/handlers/set-cookie_http_wsh.py | zhuyongyong/crosswalk-test-suite | 24f3f8cfa663a365b0a22685d5bd096a637f72db | [
"BSD-3-Clause"
] | 23 | 2015-01-19T03:10:13.000Z | 2016-06-13T03:08:51.000Z | webapi/tct-websocket-w3c-tests/websocket-py/w3c/handlers/set-cookie_http_wsh.py | zhuyongyong/crosswalk-test-suite | 24f3f8cfa663a365b0a22685d5bd096a637f72db | [
"BSD-3-Clause"
] | 18 | 2015-02-28T21:29:55.000Z | 2022-01-20T10:06:28.000Z | #!/usr/bin/python
from mod_pywebsocket import common, msgutil, util
from mod_pywebsocket.handshake import hybi
import urlparse
def web_socket_do_extra_handshake(request):
    """Complete the WebSocket opening handshake by writing a raw HTTP 101
    response that also sets a cookie.

    The cookie is named "ws_test_<query>" (using the request URI's query
    string, empty if absent) so the client test can verify Set-Cookie headers
    delivered during the handshake. The Sec-WebSocket-Accept value is computed
    with mod_pywebsocket's hybi helper from the client's key header.
    NOTE(review): the trailing ':' in "101 Switching Protocols:" looks like a
    typo in the original status line — left unchanged here; confirm whether
    the test relies on it.
    """
    url_parts = urlparse.urlsplit(request.uri)
    request.connection.write(
        'HTTP/1.1 101 Switching Protocols:\x0D\x0AConnection: Upgrade\x0D\x0AUpgrade: WebSocket\x0D\x0ASet-Cookie: ws_test_' +
        (
            url_parts.query or '') +
        '=test; Path=/; HttpOnly\x0D\x0ASec-WebSocket-Origin: ' +
        request.ws_origin +
        '\x0D\x0ASec-WebSocket-Accept: ' +
        hybi.compute_accept(
            request.headers_in.get(
                common.SEC_WEBSOCKET_KEY_HEADER))[0] +
        '\x0D\x0A\x0D\x0A')
    return
def web_socket_transfer_data(request):
    """No-op data handler.

    The original body was `while True: return`, which exits on the first
    iteration — i.e. an immediate return with no data transfer.
    """
    return
| 29.37037 | 126 | 0.667087 |
acf82cfc297b429c387d74b045591e627f74f483 | 304 | py | Python | config-file-samples/twitter_config.py | mike-ess/rpi-cgate-monitor | eb32cb492730e0d3442fa5c8b9cf6cc213a865d0 | [
"Apache-2.0"
] | null | null | null | config-file-samples/twitter_config.py | mike-ess/rpi-cgate-monitor | eb32cb492730e0d3442fa5c8b9cf6cc213a865d0 | [
"Apache-2.0"
] | null | null | null | config-file-samples/twitter_config.py | mike-ess/rpi-cgate-monitor | eb32cb492730e0d3442fa5c8b9cf6cc213a865d0 | [
"Apache-2.0"
# Twitter API credentials (placeholder values — replace with real tokens).
twitter_auth = {
    "access_token": "fdg897df8g78d7g8d7g89df7g9dfgdfgdfgdfgfd87gdfbv",
    "access_token_secret": "fddfdf8v9fdvdfv89df7vfd6v7fdv6dfv",
    "consumer_key": "fdv786df7v86fd78v6fd7",
    "consumer_secret": "ghn989ghn8hg9nhgnghnhgnhg6n7n7n5ghh65nhg",
}

# Comma-separated list of Twitter handles that receive notifications.
twitter_recipients = "user1,user2,etc..."
| 27.636364 | 69 | 0.796053 |
acf82d568984a63d6332cd6b5fad046ab1a9b2aa | 1,615 | py | Python | main.py | amrane99/lung-segmentation | ab29db75ac78918da5cbf66b830acaf36cf7b44a | [
"MIT"
] | null | null | null | main.py | amrane99/lung-segmentation | ab29db75ac78918da5cbf66b830acaf36cf7b44a | [
"MIT"
] | null | null | null | main.py | amrane99/lung-segmentation | ab29db75ac78918da5cbf66b830acaf36cf7b44a | [
"MIT"
] | null | null | null | import os, torch, torchvision
from src.data import blend
from PIL import Image
from src.models import PretrainedUNet
def main():
    """Run lung segmentation on a single chest X-ray and save the outputs.

    Loads a pretrained 2-class UNet, segments one hard-coded input image, and
    writes the raw mask overlay (seg.jpg) and a blended visualisation
    (joined.jpg).
    NOTE(review): input/output paths and the CUDA device are hard-coded to a
    specific cluster environment.
    """
    origin_filename = '/gris/gris-f/homestud/aranem/lung-segmentation/1.3.51.0.7.597266080.44770.52800.43566.28465.1231.17369.jpg'
    result = '/gris/gris-f/homestud/aranem/lung-segmentation/seg.jpg'
    result2 = '/gris/gris-f/homestud/aranem/lung-segmentation/joined.jpg'
    device = 'cuda:0'
    # one input channel (grayscale), two output classes (background / lung)
    unet = PretrainedUNet(
        in_channels=1,
        out_channels=2,
        batch_norm=True,
        upscale_mode="bilinear"
    )
    model_name = "unet-6v.pt"
    # weights are loaded on CPU first, then the model is moved to the GPU
    unet.load_state_dict(torch.load(os.path.join('models', model_name), map_location=torch.device("cpu")))
    unet.to(device)
    unet.eval()
    print("Load image..")
    # convert to palette mode, resize to the network's 512x512 input,
    # and shift pixel values to roughly [-0.5, 0.5]
    origin = Image.open(origin_filename).convert("P")
    origin = torchvision.transforms.functional.resize(origin, (512, 512))
    origin = torchvision.transforms.functional.to_tensor(origin) - 0.5
    print("Predict..")
    with torch.no_grad():
        origin = torch.stack([origin])  # add batch dimension
        origin = origin.to(device)
        out = unet(origin)
        softmax = torch.nn.functional.log_softmax(out, dim=1)
        out = torch.argmax(softmax, dim=1)  # per-pixel class labels
        origin = origin[0].detach().cpu()
        out = out[0].detach().cpu()
    # build a 3-channel image with the predicted mask in the red channel
    img = torchvision.transforms.functional.to_pil_image(torch.cat([
        torch.stack([out.float()]),
        torch.zeros_like(origin),
        torch.zeros_like(origin)
    ]))
    img.save(result)
    # blended overlay of the input and the mask (project helper)
    img = blend(origin, out)
    img.save(result2)
if __name__ == "__main__":
main() | 31.666667 | 130 | 0.647678 |
acf82eff252aceabe5db0c17c7752ebbd50e930f | 251 | py | Python | cogandmem/__init__.py | TylerEnsor/cogandmem | 6884bea58eb89c0c82e17066826c223130bf70c1 | [
"MIT"
] | null | null | null | cogandmem/__init__.py | TylerEnsor/cogandmem | 6884bea58eb89c0c82e17066826c223130bf70c1 | [
"MIT"
] | null | null | null | cogandmem/__init__.py | TylerEnsor/cogandmem | 6884bea58eb89c0c82e17066826c223130bf70c1 | [
"MIT"
] | 1 | 2019-04-27T14:52:32.000Z | 2019-04-27T14:52:32.000Z | """
memex
Tyler M. Ensor
This is a Python package for running memory experiments.
"""
__author__ = "Tyler M. Ensor (tyler.ensor@mun.ca)"
__version__ = "0.1.0"
import experiment
import text
import generic
import writing
import tetromino
import score
| 15.6875 | 56 | 0.760956 |
acf832b9c4de9b44368d391a0a64273b6c9da052 | 1,043 | py | Python | Application/system/common_functions.py | Unicorn-Dev/Minimal.io | a5e94446c0149739922287f2113349e0189b57d8 | [
"MIT"
] | null | null | null | Application/system/common_functions.py | Unicorn-Dev/Minimal.io | a5e94446c0149739922287f2113349e0189b57d8 | [
"MIT"
] | null | null | null | Application/system/common_functions.py | Unicorn-Dev/Minimal.io | a5e94446c0149739922287f2113349e0189b57d8 | [
"MIT"
def static_vars(**kwargs):
    """Decorator that attaches each keyword argument to the decorated
    function as an attribute (C-style "static" variables)."""
    def wrapper(function):
        for name, value in kwargs.items():
            setattr(function, name, value)
        return function
    return wrapper
@static_vars(errors_cnt=dict())
def try_wrapper(message, errors_limit, funk, *args):
    """Run ``funk(*args)``, tolerating up to ``errors_limit`` failures.

    If the call raises, the traceback and *message* are printed and a
    per-function error counter (``try_wrapper.errors_cnt``) is incremented.
    Once the counter reaches ``errors_limit`` it is reset to zero and a plain
    ``Exception`` is raised. ``SystemExit`` is always re-raised immediately.

    (Docstring translated from the original Russian.)
    """
    try:
        funk(*args)
    except SystemExit:
        raise
    except BaseException:  # matches the original bare `except:` (incl. KeyboardInterrupt)
        from traceback import print_exc
        print_exc()
        print(message)
        # initialise the counter lazily; replaces the original
        # try/except-KeyError probe with an explicit setdefault
        try_wrapper.errors_cnt.setdefault(funk, 0)
        try_wrapper.errors_cnt[funk] += 1
        if try_wrapper.errors_cnt[funk] >= errors_limit:
            try_wrapper.errors_cnt[funk] = 0
            raise Exception
acf832e7dfc2b3cc85f29b990ae2b244af85e548 | 2,109 | py | Python | precalc_stats_official_tf.py | stevevan00/metrics | 167685b473ae1821ee42043c84b25d0036850e71 | [
"MIT"
] | null | null | null | precalc_stats_official_tf.py | stevevan00/metrics | 167685b473ae1821ee42043c84b25d0036850e71 | [
"MIT"
] | null | null | null | precalc_stats_official_tf.py | stevevan00/metrics | 167685b473ae1821ee42043c84b25d0036850e71 | [
"MIT"
] | null | null | null | """
@Brief:
calc stats for a foldername/
modified from official inception score implementation
[bioinf-jku/TTUR](https://github.com/bioinf-jku/TTUR)
@Author: lzhbrian (https://lzhbrian.me)
@Date: 2019.4.7
@Usage:
python precalc_stats_official_tf.py foldername/ output_path/
python precalc_stats_official_tf.py /data4/linziheng/datasets/imagenet/valid_64x64/ imagenet_valid_stats_test.npz
"""
import sys
import os
from glob import glob
import numpy as np
import fid_official_tf
# from scipy.misc import imread
import tensorflow as tf
import imageio
########
# PATHS
########
# data_path = 'data' # set path to training set images
# output_path = 'fid_stats.npz' # path for where to store the statistics
# CLI: python precalc_stats_official_tf.py <image folder> <output .npz path>
data_path = sys.argv[1]
output_path = sys.argv[2]

# if you have downloaded and extracted
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
# set this path to the directory where the extracted files are, otherwise
# just set it to None and the script will later download the files for you
cur_dirname = os.path.dirname(os.path.abspath(__file__))
MODEL_DIR = '%s/res/' % cur_dirname
inception_path = '%s/' % MODEL_DIR
print("check for inception model..")
inception_path = fid_official_tf.check_or_download_inception(inception_path)  # download inception if necessary
print("ok")

# loads all images into memory (this might require a lot of RAM!)
print("load images..")
image_list = []
# BUG FIX: the last pattern was '.bmp' (no '*'), which only matched a file
# literally named ".bmp"; '*.bmp' matches all BMP files as intended.
for ext in ('*.png', '*.jpg', '*.jpeg', '*.bmp'):
    image_list.extend(glob(os.path.join(data_path, ext)))
images = np.array([imageio.imread(str(fn)).astype(np.float32) for fn in image_list])
print("%d images found and loaded" % len(images))

print("create inception graph..")
fid_official_tf.create_inception_graph(inception_path)  # load the graph into the current TF graph
print("ok")

print("calculate FID stats..")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    mu, sigma = fid_official_tf.calculate_activation_statistics(images, sess, batch_size=100)
    np.savez_compressed(output_path, mu=mu, sigma=sigma)
print("finished")
| 32.446154 | 117 | 0.748222 |
acf8331e3372b4c113d15cc6ebed2d09c3bc1945 | 1,812 | py | Python | logger.py | utanashati/curiosity-recast | 00c6efedb110eed8f37a1d1a152371d7a8707a92 | [
"MIT"
] | null | null | null | logger.py | utanashati/curiosity-recast | 00c6efedb110eed8f37a1d1a152371d7a8707a92 | [
"MIT"
] | null | null | null | logger.py | utanashati/curiosity-recast | 00c6efedb110eed8f37a1d1a152371d7a8707a92 | [
"MIT"
] | 1 | 2021-12-14T12:20:39.000Z | 2021-12-14T12:20:39.000Z | import os
import logging
import logging.config
import time
import tensorboard_logger as tb
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
def setup_logs(args):
    """Create the per-run summary directory, install file/console logging,
    record all run arguments, and point tensorboard at the run directory.

    Side effect: sets ``args.sum_base_dir`` to the created directory.
    """
    run_dir = ('runs/{}/{}({})').format(
        args.env_name, time.strftime('%Y.%m.%d-%H.%M.%S'),
        args.short_description)
    args.sum_base_dir = run_dir

    if not os.path.exists(run_dir):
        os.makedirs(run_dir)

    configure(run_dir, 'rl.log')

    lines = [f'{k}: {v}\n' for k, v in vars(args).items()]
    logging.info("\nArguments:\n----------\n" + ''.join(lines))
    logging.info('Logging run logs to {}'.format(run_dir))

    tb.configure(run_dir)
def configure(dir_, file):
    """Install a logging dictConfig: a verbose formatter plus console and
    rotating-file handlers wired to the root logger at LOG_LEVEL.

    *file* is created inside *dir_* and rotates at ~10 MB with 3 backups.
    """
    verbose_format = ("[%(asctime)s] %(levelname)s "
                      "[%(threadName)s:%(lineno)s] %(message)s")
    formatters = {
        'verbose': {
            'format': verbose_format,
            'datefmt': "%Y-%m-%d %H:%M:%S",
        },
        'simple': {
            'format': '%(levelname)s %(message)s',
        },
    }
    handlers = {
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'file': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'verbose',
            'filename': os.path.join(dir_, file),
            'maxBytes': 10 * 10**6,  # rotate after ~10 MB
            'backupCount': 3,
        },
    }
    loggers = {
        '': {  # root logger
            'handlers': ['console', 'file'],
            'level': LOG_LEVEL,
        },
    }
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': formatters,
        'handlers': handlers,
        'loggers': loggers,
    })
| 27.876923 | 68 | 0.481788 |
acf83380b0a4b6af105667211079b2f5a547ed32 | 1,951 | py | Python | networking_generic_switch/tests/unit/test_config.py | sandyw777/networking-generic-switch | 82bdddc104e06f2a5dc2a14f220c6abadeb655bd | [
"Apache-2.0"
] | null | null | null | networking_generic_switch/tests/unit/test_config.py | sandyw777/networking-generic-switch | 82bdddc104e06f2a5dc2a14f220c6abadeb655bd | [
"Apache-2.0"
] | null | null | null | networking_generic_switch/tests/unit/test_config.py | sandyw777/networking-generic-switch | 82bdddc104e06f2a5dc2a14f220c6abadeb655bd | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_config import fixture as config_fixture
from networking_generic_switch import config
fake_config = """
[genericswitch:foo]
device_type = foo_device
spam = eggs
[genericswitch:bar]
device_type = bar_device
ham = vikings
"""
class TestConfig(fixtures.TestWithFixtures):
    """Unit tests for config.get_devices(), reading a faked config file
    through a mocked open() so no real file is touched."""

    def setUp(self):
        super(TestConfig, self).setUp()
        self.cfg = self.useFixture(config_fixture.Config())
        self._patch_open()
        # the path itself is a dummy; reads go through the mocked open()
        self.cfg.conf(args=["--config-file=/some/config/path"])

    def _patch_open(self):
        # mock_open serves fake_config as the file contents
        m = mock.mock_open(read_data=fake_config)
        # NOTE(pas-ha) mocks and iterators work differently in Py2 and Py3
        # http://bugs.python.org/issue21258
        m.return_value.__iter__ = lambda self: self
        m.return_value.__next__ = lambda self: next(iter(self.readline, ''))
        patcher = mock.patch('oslo_config.cfg.open', m)
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_get_devices(self):
        # both [genericswitch:*] sections should be parsed into device dicts
        device_list = config.get_devices()
        self.assertEqual(set(device_list), set(['foo', 'bar']))
        self.assertEqual({"device_type": "foo_device", "spam": "eggs"},
                         device_list['foo'])
        self.assertEqual({"device_type": "bar_device", "ham": "vikings"},
                         device_list['bar'])
| 34.22807 | 78 | 0.675551 |
acf833c12cf45ed6f96fe42fb4c6792ba77e3dce | 6,484 | py | Python | airflow/providers/google/cloud/example_dags/example_dlp.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | [
"Apache-2.0"
] | 1 | 2020-12-23T05:03:17.000Z | 2020-12-23T05:03:17.000Z | airflow/providers/google/cloud/example_dags/example_dlp.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | [
"Apache-2.0"
] | 20 | 2021-01-23T12:33:08.000Z | 2021-12-07T22:30:37.000Z | airflow/providers/google/cloud/example_dags/example_dlp.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that execute the following tasks using
Cloud DLP service in the Google Cloud:
1) Creating a content inspect template;
2) Using the created template to inspect content;
3) Deleting the template from Google Cloud .
"""
import os
from google.cloud.dlp_v2.types import ContentItem, InspectConfig, InspectTemplate
from airflow import models
from airflow.providers.google.cloud.operators.dlp import (
CloudDLPCreateInspectTemplateOperator,
CloudDLPCreateJobTriggerOperator,
CloudDLPCreateStoredInfoTypeOperator,
CloudDLPDeleteInspectTemplateOperator,
CloudDLPDeleteJobTriggerOperator,
CloudDLPDeleteStoredInfoTypeOperator,
CloudDLPInspectContentOperator,
CloudDLPUpdateJobTriggerOperator,
CloudDLPUpdateStoredInfoTypeOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT = os.environ.get("GCP_PROJECT_ID", "example-project")
TEMPLATE_ID = "dlp-inspect-838746"
ITEM = ContentItem(
table={
"headers": [{"name": "column1"}],
"rows": [{"values": [{"string_value": "My phone number is (206) 555-0123"}]}],
}
)
INSPECT_CONFIG = InspectConfig(info_types=[{"name": "PHONE_NUMBER"}, {"name": "US_TOLLFREE_PHONE_NUMBER"}])
INSPECT_TEMPLATE = InspectTemplate(inspect_config=INSPECT_CONFIG)
OUTPUT_BUCKET = os.environ.get("DLP_OUTPUT_BUCKET", "gs://test-dlp-airflow")
OUTPUT_FILENAME = "test.txt"
OBJECT_GCS_URI = os.path.join(OUTPUT_BUCKET, "tmp")
OBJECT_GCS_OUTPUT_URI = os.path.join(OUTPUT_BUCKET, "tmp", OUTPUT_FILENAME)
with models.DAG(
"example_gcp_dlp",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
tags=['example'],
) as dag1:
# [START howto_operator_dlp_create_inspect_template]
create_template = CloudDLPCreateInspectTemplateOperator(
project_id=GCP_PROJECT,
inspect_template=INSPECT_TEMPLATE,
template_id=TEMPLATE_ID,
task_id="create_template",
do_xcom_push=True,
)
# [END howto_operator_dlp_create_inspect_template]
# [START howto_operator_dlp_use_inspect_template]
inspect_content = CloudDLPInspectContentOperator(
task_id="inpsect_content",
project_id=GCP_PROJECT,
item=ITEM,
inspect_template_name="{{ task_instance.xcom_pull('create_template', key='return_value')['name'] }}",
)
# [END howto_operator_dlp_use_inspect_template]
# [START howto_operator_dlp_delete_inspect_template]
delete_template = CloudDLPDeleteInspectTemplateOperator(
task_id="delete_template", template_id=TEMPLATE_ID, project_id=GCP_PROJECT,
)
# [END howto_operator_dlp_delete_inspect_template]
create_template >> inspect_content >> delete_template
CUSTOM_INFO_TYPE_ID = "custom_info_type"
CUSTOM_INFO_TYPES = {
"large_custom_dictionary": {
"output_path": {"path": OBJECT_GCS_OUTPUT_URI},
"cloud_storage_file_set": {"url": OBJECT_GCS_URI + "/"},
}
}
UPDATE_CUSTOM_INFO_TYPE = {
"large_custom_dictionary": {
"output_path": {"path": OBJECT_GCS_OUTPUT_URI},
"cloud_storage_file_set": {"url": OBJECT_GCS_URI + "/"},
}
}
with models.DAG(
"example_gcp_dlp_info_types",
schedule_interval=None,
start_date=days_ago(1),
tags=["example", "dlp", "info-types"],
) as dag2:
# [START howto_operator_dlp_create_info_type]
create_info_type = CloudDLPCreateStoredInfoTypeOperator(
project_id=GCP_PROJECT,
config=CUSTOM_INFO_TYPES,
stored_info_type_id=CUSTOM_INFO_TYPE_ID,
task_id="create_info_type",
)
# [END howto_operator_dlp_create_info_type]
# [START howto_operator_dlp_update_info_type]
update_info_type = CloudDLPUpdateStoredInfoTypeOperator(
project_id=GCP_PROJECT,
stored_info_type_id=CUSTOM_INFO_TYPE_ID,
config=UPDATE_CUSTOM_INFO_TYPE,
task_id="update_info_type",
)
# [END howto_operator_dlp_update_info_type]
# [START howto_operator_dlp_delete_info_type]
delete_info_type = CloudDLPDeleteStoredInfoTypeOperator(
project_id=GCP_PROJECT, stored_info_type_id=CUSTOM_INFO_TYPE_ID, task_id="delete_info_type",
)
# [END howto_operator_dlp_delete_info_type]
create_info_type >> update_info_type >> delete_info_type
JOB_TRIGGER = {
"inspect_job": {
"storage_config": {
"datastore_options": {"partition_id": {"project_id": GCP_PROJECT}, "kind": {"name": "test"}}
}
},
"triggers": [{"schedule": {"recurrence_period_duration": {"seconds": 60 * 60 * 24}}}],
"status": "HEALTHY",
}
TRIGGER_ID = "example_trigger"
with models.DAG(
"example_gcp_dlp_job", schedule_interval=None, start_date=days_ago(1), tags=["example", "dlp_job"]
) as dag3: # [START howto_operator_dlp_create_job_trigger]
create_trigger = CloudDLPCreateJobTriggerOperator(
project_id=GCP_PROJECT, job_trigger=JOB_TRIGGER, trigger_id=TRIGGER_ID, task_id="create_trigger",
)
# [END howto_operator_dlp_create_job_trigger]
JOB_TRIGGER["triggers"] = [{"schedule": {"recurrence_period_duration": {"seconds": 2 * 60 * 60 * 24}}}]
# [START howto_operator_dlp_update_job_trigger]
update_trigger = CloudDLPUpdateJobTriggerOperator(
project_id=GCP_PROJECT,
job_trigger_id=TRIGGER_ID,
job_trigger=JOB_TRIGGER,
task_id="update_info_type",
)
# [END howto_operator_dlp_update_job_trigger]
# [START howto_operator_dlp_delete_job_trigger]
delete_trigger = CloudDLPDeleteJobTriggerOperator(
project_id=GCP_PROJECT, job_trigger_id=TRIGGER_ID, task_id="delete_info_type"
)
# [END howto_operator_dlp_delete_job_trigger]
create_trigger >> update_trigger >> delete_trigger
| 37.479769 | 109 | 0.738742 |
acf833fe9c5e6fd11c06f1d5f1f18e603a1c1aea | 2,044 | bzl | Python | tools/build_defs/oss/profilo_defs.bzl | amanjeetsingh150/profilo | a0d626d1c3889bdbaaa5c12ca2d2606f0fee45da | [
"Apache-2.0"
] | 1 | 2020-03-24T23:51:43.000Z | 2020-03-24T23:51:43.000Z | tools/build_defs/oss/profilo_defs.bzl | amanjeetsingh150/profilo | a0d626d1c3889bdbaaa5c12ca2d2606f0fee45da | [
"Apache-2.0"
] | 4 | 2021-03-11T04:11:14.000Z | 2022-02-27T09:35:19.000Z | tools/build_defs/oss/profilo_defs.bzl | amanjeetsingh150/profilo | a0d626d1c3889bdbaaa5c12ca2d2606f0fee45da | [
"Apache-2.0"
] | null | null | null | """Provides OSS compatibile macros."""
load("//tools/build_defs/android:fb_xplat_cxx_library.bzl", "fb_xplat_cxx_library")
def profilo_path(dep):
return "//" + dep
def profilo_import_path(dep):
return dep
def profilo_cxx_binary(**kwargs):
"""Delegates to the native cxx_test rule."""
native.cxx_binary(**kwargs)
def profilo_cxx_test(**kwargs):
"""Delegates to the native cxx_test rule."""
native.cxx_test(**kwargs)
def profilo_oss_android_library(**kwargs):
"""Delegates to the native android_library rule."""
native.android_library(**kwargs)
def profilo_oss_cxx_library(**kwargs):
"""Delegates to the native cxx_library rule."""
native.cxx_library(**kwargs)
def profilo_oss_java_library(**kwargs):
"""Delegates to the native java_library rule."""
native.java_library(**kwargs)
def profilo_oss_only_java_library(**kwargs):
profilo_oss_java_library(**kwargs)
def profilo_oss_maven_library(
name,
group,
artifact,
version,
sha1,
visibility,
packaging = "jar",
scope = "compiled",
deps = []):
"""
Creates remote_file and prebuilt_jar rules for a maven artifact.
"""
_ignore = scope
remote_file_name = "{}-remote".format(name)
remote_file(
name = remote_file_name,
out = "{}-{}.{}".format(name, version, packaging),
sha1 = sha1,
url = ":".join(["mvn", group, artifact, packaging, version]),
)
if packaging == "jar":
native.prebuilt_jar(
name = name,
binary_jar = ":{}".format(remote_file_name),
visibility = visibility,
deps = deps,
)
else:
native.android_prebuilt_aar(
name = name,
aar = ":{}".format(remote_file_name),
visibility = visibility,
deps = deps,
)
def profilo_oss_xplat_cxx_library(**kwargs):
    """Delegates to fb_xplat_cxx_library, forwarding all keyword arguments."""
    fb_xplat_cxx_library(**kwargs)
def profilo_maybe_hidden_visibility():
    """Compiler flags that hide symbols by default in OSS builds."""
    flags = ["-fvisibility=hidden"]
    return flags
| 26.894737 | 83 | 0.627202 |
acf8341f43c2bef8180f55d59239c97b02775520 | 4,495 | py | Python | src/data_cleaning.py | kant/open-solution-home-credit | 868da3788fc105d4acdb14da9dbc05937dff14d8 | [
"MIT"
] | 336 | 2018-06-28T21:42:12.000Z | 2019-10-08T08:03:13.000Z | src/data_cleaning.py | JerryCatLeung/open-solution-home-credit | 0105fcffadcfc7c743a71b1ff2ea99453b445ee8 | [
"MIT"
] | 105 | 2018-06-27T11:02:00.000Z | 2019-08-19T10:35:02.000Z | src/data_cleaning.py | JerryCatLeung/open-solution-home-credit | 0105fcffadcfc7c743a71b1ff2ea99453b445ee8 | [
"MIT"
] | 138 | 2018-07-03T08:16:03.000Z | 2019-09-09T20:13:54.000Z | import numpy as np
from steppy.base import BaseTransformer
from steppy.utils import get_logger
from . import pipeline_config as cfg
logger = get_logger()
class ApplicationCleaning(BaseTransformer):
    """Cleans the main application table: maps sentinel values to NaN and
    optionally fills remaining missing values.

    Args:
        fill_missing: when True, fill all remaining NaNs with ``fill_value``.
        fill_value: scalar used for the optional global fill.
    """

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, application):
        """Return ``{'application': cleaned_frame}``; mutates the input frame."""
        # Sentinel codes used by the dataset for "unknown" are mapped to NaN.
        # Assign back instead of inplace-replacing a column view so the change
        # is reliable under pandas Copy-on-Write.
        application['CODE_GENDER'] = application['CODE_GENDER'].replace('XNA', np.nan)
        application['DAYS_EMPLOYED'] = application['DAYS_EMPLOYED'].replace(365243, np.nan)
        application['DAYS_LAST_PHONE_CHANGE'] = application['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan)
        application['NAME_FAMILY_STATUS'] = application['NAME_FAMILY_STATUS'].replace('Unknown', np.nan)
        application['ORGANIZATION_TYPE'] = application['ORGANIZATION_TYPE'].replace('XNA', np.nan)
        # Bug fix: `application[cols].fillna(-1, inplace=True)` filled a COPY
        # of the selected columns and had no effect on the frame. Assign the
        # filled columns back instead.
        application[cfg.CATEGORICAL_COLUMNS] = application[cfg.CATEGORICAL_COLUMNS].fillna(-1)

        if self.fill_missing:
            application.fillna(self.fill_value, inplace=True)

        return {'application': application}
class BureauCleaning(BaseTransformer):
    """Cleans the bureau table: masks corrupt day offsets and fills the
    monetary/count columns.

    Args:
        fill_missing: when True, fill all NaNs with ``fill_value`` before the
            unconditional column fills below.
        fill_value: scalar used for filling.
    """

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        # Consistent with ApplicationCleaning: initialize the base transformer.
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, bureau):
        """Return ``{'bureau': cleaned_frame}``; mutates the input frame."""
        # Day offsets below -40000 are corrupt sentinels; mask them via .loc
        # (the previous chained `df[col][mask] = ...` is unreliable under
        # pandas Copy-on-Write).
        for column in ('DAYS_CREDIT_ENDDATE', 'DAYS_CREDIT_UPDATE', 'DAYS_ENDDATE_FACT'):
            bureau.loc[bureau[column] < -40000, column] = np.nan

        if self.fill_missing:
            bureau.fillna(self.fill_value, inplace=True)

        # These columns are filled unconditionally (original behavior kept).
        for column in ('AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT',
                       'AMT_CREDIT_SUM_OVERDUE', 'CNT_CREDIT_PROLONG'):
            bureau[column] = bureau[column].fillna(self.fill_value)

        return {'bureau': bureau}
class BureauBalanceCleaning(BaseTransformer):
    """Optionally fills missing values in the bureau_balance table.

    Args:
        fill_missing: when True, fill all NaNs with ``fill_value``.
        fill_value: scalar used for filling.
    """

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        # Consistency fix: sibling cleaners call the base initializer.
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, bureau_balance):
        """Return ``{'bureau_balance': frame}``; mutates the input frame."""
        if self.fill_missing:
            bureau_balance.fillna(self.fill_value, inplace=True)

        return {'bureau_balance': bureau_balance}
class CreditCardCleaning(BaseTransformer):
    """Cleans the credit_card table: masks negative drawing amounts and
    optionally fills missing values.

    Args:
        fill_missing: when True, fill all NaNs with ``fill_value``.
        fill_value: scalar used for filling.
    """

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, credit_card):
        """Return ``{'credit_card': cleaned_frame}``; mutates the input frame."""
        # Negative drawing amounts are invalid; mask them via .loc instead of
        # chained `df[col][mask] = ...`, which is unreliable under pandas
        # Copy-on-Write.
        for column in ('AMT_DRAWINGS_ATM_CURRENT', 'AMT_DRAWINGS_CURRENT'):
            credit_card.loc[credit_card[column] < 0, column] = np.nan

        if self.fill_missing:
            credit_card.fillna(self.fill_value, inplace=True)

        return {'credit_card': credit_card}
class InstallmentPaymentsCleaning(BaseTransformer):
    """Optionally fills missing values in the installments table.

    Args:
        fill_missing: when True, fill all NaNs with ``fill_value``.
        fill_value: scalar used for filling.
    """

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        # Consistency fix: sibling cleaners call the base initializer.
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, installments):
        """Return ``{'installments': frame}``; mutates the input frame."""
        if self.fill_missing:
            installments.fillna(self.fill_value, inplace=True)

        return {'installments': installments}
class PosCashCleaning(BaseTransformer):
    """Optionally fills missing values in the pos_cash table.

    Args:
        fill_missing: when True, fill all NaNs with ``fill_value``.
        fill_value: scalar used for filling.
    """

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        # Consistency fix: sibling cleaners call the base initializer.
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, pos_cash):
        """Return ``{'pos_cash': frame}``; mutates the input frame."""
        if self.fill_missing:
            pos_cash.fillna(self.fill_value, inplace=True)

        return {'pos_cash': pos_cash}
class PreviousApplicationCleaning(BaseTransformer):
    """Cleans the previous_application table: maps the 365243-day sentinel to
    NaN and optionally fills remaining missing values.

    Args:
        fill_missing: when True, fill all NaNs with ``fill_value``.
        fill_value: scalar used for filling.
    """

    # 365243 is the dataset's "no date" sentinel for day-offset columns.
    _DAY_SENTINEL = 365243

    def __init__(self, fill_missing=False, fill_value=0, **kwargs):
        super().__init__()
        self.fill_missing = fill_missing
        self.fill_value = fill_value

    def transform(self, previous_application):
        """Return ``{'previous_application': frame}``; mutates the input frame."""
        # Assign back instead of inplace-replacing a column view so the change
        # is reliable under pandas Copy-on-Write.
        for column in ('DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE',
                       'DAYS_LAST_DUE_1ST_VERSION', 'DAYS_LAST_DUE',
                       'DAYS_TERMINATION'):
            previous_application[column] = previous_application[column].replace(
                self._DAY_SENTINEL, np.nan)

        if self.fill_missing:
            previous_application.fillna(self.fill_value, inplace=True)

        return {'previous_application': previous_application}
| 37.773109 | 101 | 0.705451 |
acf8343c058c6d3063f53f8692da6199bd615ed8 | 632 | py | Python | cobra/tests/testnewobj.py | vEpiphyte/vivisect | 14947a53c6781175f0aa83d49cc16c524a2e23a3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cobra/tests/testnewobj.py | vEpiphyte/vivisect | 14947a53c6781175f0aa83d49cc16c524a2e23a3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cobra/tests/testnewobj.py | vEpiphyte/vivisect | 14947a53c6781175f0aa83d49cc16c524a2e23a3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import unittest
from io import StringIO
import cobra
class NewObjectReturn:
    """Shared object whose ``open()`` hands a fresh file-like object to the
    Cobra caller (the ``@cobra.newobj`` decorator shares the return value)."""

    @cobra.newobj
    def open(self):
        # Bug fix: 'asdf'.decode('utf-8') is a Python 2 idiom; on Python 3,
        # str has no .decode and this raised AttributeError. io.StringIO
        # accepts str directly.
        return StringIO('asdf')
class CobraNewObjTest(unittest.TestCase):
    """Integration test for sharing an object that returns new objects
    (file-like handles) per call over a Cobra msgpack daemon."""

    def test_cobra_newobj(self):
        # Share a NewObjectReturn instance on a local daemon (fixed port 60500).
        daemon = cobra.CobraDaemon(port=60500, msgpack=True)
        objname = daemon.shareObject(NewObjectReturn())
        daemon.fireThread()

        # Proxy the shared object and read from the file-like handle that
        # open() returns; the handle supports the context-manager protocol.
        t = cobra.CobraProxy('cobra://localhost:60500/%s?msgpack=1' % objname)
        with t.open() as fd:
            self.assertEqual(fd.read(), 'asdf')

        # Only the originally shared object should remain registered.
        self.assertEqual(len(daemon.shared.keys()), 1)
        daemon.stopServer()
| 21.793103 | 78 | 0.651899 |
acf83471209b8c5aaf7d33c9956a33f921db6e8c | 3,463 | py | Python | app/resources/companies.py | luisfcofv/Superhero | dbda0287f0ceb858d7beb0f1c95d6744569a1359 | [
"MIT"
] | null | null | null | app/resources/companies.py | luisfcofv/Superhero | dbda0287f0ceb858d7beb0f1c95d6744569a1359 | [
"MIT"
] | null | null | null | app/resources/companies.py | luisfcofv/Superhero | dbda0287f0ceb858d7beb0f1c95d6744569a1359 | [
"MIT"
] | null | null | null | from app.models import Company, PostalCode, CompanyPostalCode
from flask_restful import Resource, reqparse
class Companies(Resource):
    """List restaurants, optionally filtered by country and postal code."""

    def get(self):
        """
        List all restaurants
        ---
        tags:
          - Restaurants
        definitions:
          - schema:
              id: Restaurant
              properties:
                id:
                  type: integer
                  description: the restaurant's id
                email:
                  type: string
                  description: the restaurant's email
                name:
                  type: string
                  description: the restaurant's name
                logo_url:
                  type: string
                  description: the restaurant's logo url
                address:
                  type: string
                  description: the restaurant's address
                phone_number:
                  type: string
                  description: the restaurant's phone number
                country_code:
                  type: string
                  description: the restaurant's country code
        responses:
          200:
            description: Lists all restaurants
            schema:
              title: Restaurants
              type: array
              items:
                $ref: '#/definitions/Restaurant'
        """
        # Optional query-string filters.
        parser = reqparse.RequestParser()
        parser.add_argument('country')
        parser.add_argument('postal_code')
        args = parser.parse_args()

        country_code = args.get('country')
        postal_code = args.get('postal_code')

        # Base query, narrowed by country when provided.
        company_query = Company.query
        if country_code is not None:
            company_query = company_query.filter(Company.country_code == country_code)

        if country_code is not None and postal_code is not None:
            # NOTE(review): `postal_code` is rebound here from the raw query
            # value to a PostalCode model row.
            postal_code = PostalCode.query.filter(PostalCode.country_code == country_code,
                                                  PostalCode.postal_code == postal_code).first()
            if postal_code is None:
                return 'Country code or postal code not found', 400

            # Companies serving this postal code, resolved via the join table.
            company_postal_codes = CompanyPostalCode.query.filter(CompanyPostalCode.postal_code_id == postal_code.id).all()
            response = []
            for company_postal_code in company_postal_codes:
                # Re-fetches each company by id (one query per association row).
                company = Company.query.get(company_postal_code.company.id)
                response.append(company.dictionary())
            return response
        else:
            companies = company_query.all()
            companies_array = []
            for company in companies:
                companies_array.append(company.dictionary())
            return companies_array
class SingleCompany(Resource):
    """Fetch a single restaurant by its id."""

    def get(self, company_id):
        """
        Restaurant with company_id
        ---
        tags:
          - Restaurants
        parameters:
          - in: path
            name: company_id
            description: id of restaurant
            type: integer
            required: Yes
        responses:
          200:
            description: Restaurant with company_id
            schema:
              $ref: '#/definitions/Restaurant'
          404:
            description: Restaurant not found
        """
        company = Company.query.get(company_id)
        if company is None:
            # Bug fix: the documented contract above is 404 for a missing
            # restaurant; the code previously returned 400.
            return 'Company not found', 404

        return company.dictionary()
| 31.198198 | 123 | 0.533353 |
acf834c6de34e4325e503df7d8d65941dbf27099 | 1,171 | py | Python | pyinfra/operations/xbps.py | harold-b/pyinfra | 50e2efd77c09b460e5b965b80f6c0f847d53fb7e | [
"MIT"
] | null | null | null | pyinfra/operations/xbps.py | harold-b/pyinfra | 50e2efd77c09b460e5b965b80f6c0f847d53fb7e | [
"MIT"
] | null | null | null | pyinfra/operations/xbps.py | harold-b/pyinfra | 50e2efd77c09b460e5b965b80f6c0f847d53fb7e | [
"MIT"
] | null | null | null | '''
Manage XBPS packages and repositories. Note that XBPS package names are case-sensitive.
'''
from pyinfra.api import operation
from .util.packaging import ensure_packages
@operation
def upgrade(state, host):
    '''
    Upgrades all XBPS packages.
    '''

    yield 'xbps-install -y -u'

# Keep a reference so ``packages`` below can still call this operation even
# though its own ``upgrade`` keyword argument shadows the name.
_upgrade = upgrade  # noqa: E305
@operation
def update(state, host):
    '''
    Update XBPS repositories.
    '''

    yield 'xbps-install -S'

# Keep a reference so ``packages`` below can still call this operation even
# though its own ``update`` keyword argument shadows the name.
_update = update  # noqa: E305
@operation
def packages(
    state, host,
    packages=None, present=True,
    update=False, upgrade=False,
):
    '''
    Install/remove/update XBPS packages.

    + packages: list of packages to ensure
    + present: whether the packages should be installed
    + update: run ``xbps-install -S`` (refresh repositories) first
    + upgrade: run ``xbps-install -y -u`` (upgrade everything) first

    Example:

    .. code:: python

        xbps.packages(
            {'Install Vim and Vim Pager'},
            ['vimpager', 'vim'],
        )
    '''

    # ``update``/``upgrade`` shadow the operations above, hence the aliases.
    if update:
        yield _update(state, host)

    if upgrade:
        yield _upgrade(state, host)

    yield ensure_packages(
        packages, host.fact.xbps_packages, present,
        install_command='xbps-install -y -u',
        uninstall_command='xbps-remove -y',
    )
| 17.742424 | 87 | 0.623399 |
acf834e44431e22e9b50012f642401f192e8f190 | 7,862 | py | Python | modeling/baseline.py | YINYIPENG-EN/ReID | 88706d8ae753f8d10aa2fea1415f67d71568012d | [
"MIT"
] | null | null | null | modeling/baseline.py | YINYIPENG-EN/ReID | 88706d8ae753f8d10aa2fea1415f67d71568012d | [
"MIT"
] | null | null | null | modeling/baseline.py | YINYIPENG-EN/ReID | 88706d8ae753f8d10aa2fea1415f67d71568012d | [
"MIT"
] | null | null | null | # encoding: utf-8
import torch
from torch import nn
from .backbones.resnet import ResNet, BasicBlock, Bottleneck
from .backbones.senet import SENet, SEResNetBottleneck, SEBottleneck, SEResNeXtBottleneck
from .backbones.resnet_ibn_a import resnet50_ibn_a
def weights_init_kaiming(m):
    """Kaiming-initialize Linear/Conv weights and reset BatchNorm affine params.

    Intended for use with ``module.apply(weights_init_kaiming)``. Modules whose
    class name matches none of the branches are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # Bug fix: guard against Linear layers created with bias=False
        # (the Conv branch below already had this guard).
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
    """Initialize a classification head: small-std normal weights, zero bias.

    Intended for use with ``module.apply(weights_init_classifier)``.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # Bug fix: ``if m.bias:`` truth-tests a multi-element Parameter, which
        # raises RuntimeError; test for presence explicitly instead.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
class Baseline(nn.Module):
    """ReID baseline: a CNN backbone, global average pooling, and an optional
    BNNeck before the classification head.

    Args:
        num_classes: number of identity classes for the classifier head.
        last_stride: stride of the backbone's last block.
        model_path: path to ImageNet-pretrained weights for the backbone.
        neck: 'no' for a plain classifier, 'bnneck' for BatchNorm neck.
        neck_feat: 'after' to return post-BN features at eval time,
            anything else returns the pre-BN global feature.
        model_name: which backbone to build (resnet18..152, se_* variants,
            senet154, resnet50_ibn_a).
        pretrain_choice: 'imagenet' loads weights from ``model_path``.
    """

    # Feature dimension after the backbone; resnet18/34 override this to 512.
    in_planes = 2048

    def __init__(self, num_classes, last_stride, model_path, neck, neck_feat, model_name, pretrain_choice):
        super(Baseline, self).__init__()
        if model_name == 'resnet18':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock,
                               layers=[2, 2, 2, 2])
        elif model_name == 'resnet34':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock,
                               layers=[3, 4, 6, 3])
        elif model_name == 'resnet50':
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck,
                               layers=[3, 4, 6, 3])
        elif model_name == 'resnet101':
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck,
                               layers=[3, 4, 23, 3])
        elif model_name == 'resnet152':
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck,
                               layers=[3, 8, 36, 3])
        elif model_name == 'se_resnet50':
            self.base = SENet(block=SEResNetBottleneck,
                              layers=[3, 4, 6, 3],
                              groups=1,
                              reduction=16,
                              dropout_p=None,
                              inplanes=64,
                              input_3x3=False,
                              downsample_kernel_size=1,
                              downsample_padding=0,
                              last_stride=last_stride)
        elif model_name == 'se_resnet101':
            self.base = SENet(block=SEResNetBottleneck,
                              layers=[3, 4, 23, 3],
                              groups=1,
                              reduction=16,
                              dropout_p=None,
                              inplanes=64,
                              input_3x3=False,
                              downsample_kernel_size=1,
                              downsample_padding=0,
                              last_stride=last_stride)
        elif model_name == 'se_resnet152':
            self.base = SENet(block=SEResNetBottleneck,
                              layers=[3, 8, 36, 3],
                              groups=1,
                              reduction=16,
                              dropout_p=None,
                              inplanes=64,
                              input_3x3=False,
                              downsample_kernel_size=1,
                              downsample_padding=0,
                              last_stride=last_stride)
        elif model_name == 'se_resnext50':
            self.base = SENet(block=SEResNeXtBottleneck,
                              layers=[3, 4, 6, 3],
                              groups=32,
                              reduction=16,
                              dropout_p=None,
                              inplanes=64,
                              input_3x3=False,
                              downsample_kernel_size=1,
                              downsample_padding=0,
                              last_stride=last_stride)
        elif model_name == 'se_resnext101':
            self.base = SENet(block=SEResNeXtBottleneck,
                              layers=[3, 4, 23, 3],
                              groups=32,
                              reduction=16,
                              dropout_p=None,
                              inplanes=64,
                              input_3x3=False,
                              downsample_kernel_size=1,
                              downsample_padding=0,
                              last_stride=last_stride)
        elif model_name == 'senet154':
            self.base = SENet(block=SEBottleneck,
                              layers=[3, 8, 36, 3],
                              groups=64,
                              reduction=16,
                              dropout_p=0.2,
                              last_stride=last_stride)
        elif model_name == 'resnet50_ibn_a':
            self.base = resnet50_ibn_a(last_stride)

        if pretrain_choice == 'imagenet':
            model = self.base
            model_dict = model.state_dict()
            pretrained_dict = torch.load(model_path)
            # Bug fix: the previous filter used the chained comparison
            # ``k in model_dict.keys() == pretrained_dict.keys()``, which
            # evaluates as ``(k in model_dict) and (model_dict.keys() ==
            # pretrained_dict.keys())`` and silently kept NO weights whenever
            # the key sets differ. Keep every checkpoint entry whose key
            # exists in the model.
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict}
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
            print('Loading pretrained ImageNet model......')

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.num_classes = num_classes
        self.neck = neck
        self.neck_feat = neck_feat

        if self.neck == 'no':
            self.classifier = nn.Linear(self.in_planes, self.num_classes)
        elif self.neck == 'bnneck':
            self.bottleneck = nn.BatchNorm1d(self.in_planes)
            self.bottleneck.bias.requires_grad_(False)  # no shift (BNNeck)
            self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)

            self.bottleneck.apply(weights_init_kaiming)
            self.classifier.apply(weights_init_classifier)

    def forward(self, x):
        """Return (cls_score, global_feat) in training mode, features in eval."""
        global_feat = self.gap(self.base(x))  # (b, in_planes, 1, 1)
        global_feat = global_feat.view(global_feat.shape[0], -1)  # (b, in_planes)

        if self.neck == 'no':
            feat = global_feat
        elif self.neck == 'bnneck':
            feat = self.bottleneck(global_feat)  # normalize for angular softmax

        if self.training:
            cls_score = self.classifier(feat)
            return cls_score, global_feat  # global feature for triplet loss
        else:
            if self.neck_feat == 'after':
                # Post-BN feature.
                return feat
            else:
                # Pre-BN global feature.
                return global_feat

    def load_param(self, trained_path):
        """Load a trained checkpoint, skipping the classifier head so the
        backbone can be reused with a different number of classes."""
        param_dict = torch.load(trained_path)
        for i in param_dict:
            if 'classifier' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
| 41.819149 | 108 | 0.480921 |
acf8353bbff1d1d356e62a57b538853a34668e04 | 4,508 | py | Python | app.py | vibhaskamal/MassEmailer | 45108813b86831460aeea4d1897417d8b87b19d9 | [
"MIT"
] | 1 | 2019-08-15T03:56:36.000Z | 2019-08-15T03:56:36.000Z | app.py | vibhaskamal/MassEmailer | 45108813b86831460aeea4d1897417d8b87b19d9 | [
"MIT"
] | null | null | null | app.py | vibhaskamal/MassEmailer | 45108813b86831460aeea4d1897417d8b87b19d9 | [
"MIT"
] | null | null | null | import xlrd
import smtplib
import ssl
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from string import Template
"""
This function reads the data from a sheet in an excel file
@parameter filename: Name of the excel file (with the extension)
@parameter sheet: Name of the sheet within the excel file, from where the data has to be read
@return: Data within the specified sheet in the form of arrays
"""
def read_data(file_name, sheet):
    """Read every cell of *sheet* in the Excel workbook *file_name*.

    Returns a list of rows, each row being the list of that row's cell values.
    """
    workbook = xlrd.open_workbook(file_name)
    worksheet = workbook.sheet_by_name(sheet)
    return [
        [worksheet.cell_value(row_idx, col_idx) for col_idx in range(worksheet.ncols)]
        for row_idx in range(worksheet.nrows)
    ]
"""
This functions sets up the SMTP server, starts a TLS session and enables the user to log into their account
@parameter sender_email: Sender's email
@parameter password: Password for sender's email
@parameter host
@parameter port
@return : SMTP server connection instance
"""
def setupServerConnection(sender_email, password, host="smtp.gmail.com", port=587):
    """Connect to the SMTP host, upgrade to TLS, log in, and return the
    connection instance (caller is responsible for quitting it)."""
    # Setting up the SMTP server details
    server_connection = smtplib.SMTP(host, port)
    # Start the TLS session before sending credentials
    server_connection.starttls()
    server_connection.login(sender_email, password)
    return server_connection
"""
This function terminates the SMTP server
@parameter server_connection: An SMTP server connection instance
"""
def terminateServerSession(server_connection):
    """Terminate the SMTP session (sends QUIT and closes the socket)."""
    server_connection.quit()
"""
This function sends the emails from the sender's account to the receiver accounts
@parameter server_connection: An SMTP server connection instance
@parameter sender_email: Sender's email
@parameter password
@parameter password: Password for sender's email
"""
def sendMail(server_connection, sender_email, password, receiver_email, subject, message):
    """Compose a plain-text email and send it over *server_connection*.

    ``password`` is accepted for call-site compatibility but unused here;
    authentication happens when the connection is created.
    """
    mail = MIMEMultipart()
    mail['From'] = sender_email
    mail['To'] = receiver_email
    mail['Subject'] = subject
    mail.attach(MIMEText(message, 'plain'))
    server_connection.send_message(mail)
"""
This function reads the data in the filename given as a parameter
@parameter filname: Name of the file from which the data is to be read
@return : Data in the file
"""
def readFile(filename):
    """Return the entire contents of *filename* as a string.

    Bug fix: the previous version opened the file without ever closing it,
    leaking the descriptor; ``with`` closes it deterministically.
    """
    with open(filename, "r") as file:
        return file.read()
"""
This function creates the message to be sent
@parameter text: The text which has to be sent as the body of the email
@parameter person_name: Name of the person (this parameter is hardcoded and will have to be modified based on the structure of Body.txt)
@parameter money_value: Amount of money (this parameter is hardcoded and will have to be modified based on the structure of Body.txt)
@return : Body of the email
"""
def createMessage(text, person_name, money_value):
    """Fill the {NAME} and {AMOUNT} placeholders of *text* and return it."""
    return text.format(NAME=person_name, AMOUNT=money_value)
def main():
    """Read recipients from Details.xlsx and send each a templated email."""
    # Excel file and sheets from where the user data is to be extracted
    file_name= 'Details.xlsx'
    sheet_name = "Sheet1"
    file_data = read_data(file_name, sheet_name)

    # NOTE(review): credentials are blank here — fill in before running.
    sender_email = ""
    sender_password = ""
    connection = setupServerConnection(sender_email, sender_password)

    # Looping through each row in the excel sheet and sending emails created using the data in the excel sheet and Body.txt
    # Row 0 is the header, hence the range starts at 1; columns 1/3/4 hold
    # name / email / amount respectively.
    for i in range(1, len(file_data)):
        name = file_data[i][1]
        receiver_email = file_data[i][3]
        amount = file_data[i][4]
        # Body.txt is re-read on every iteration; could be hoisted.
        text_file = readFile("Body.txt")
        msg_body = createMessage(text_file, name, amount)
        sendMail(connection, sender_email, sender_password, receiver_email, "Amount due", msg_body)

    terminateServerSession(connection)

# Runs at import time by design (script entry point).
main()
print("END OF PROGRAM")
| 29.272727 | 173 | 0.715395 |
acf8357049ac351fa39ac43e78ff9d75580c2745 | 744 | py | Python | runs/bro/10KB/src8-tgt1/ssl-par-min-iter00100.cfg.py | Largio/broeval | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | [
"MIT"
] | null | null | null | runs/bro/10KB/src8-tgt1/ssl-par-min-iter00100.cfg.py | Largio/broeval | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | [
"MIT"
] | null | null | null | runs/bro/10KB/src8-tgt1/ssl-par-min-iter00100.cfg.py | Largio/broeval | 89e831d07f066100afdd1a5b220f9f08f1c10b3d | [
"MIT"
] | null | null | null |
# Benchmark run configuration consumed by the broeval harness.

# Write results to this file
OUTFILE = 'runs/bro/10KB/src8-tgt1/ssl-par-min-iter00100.result.csv'

# Source computers for the request
SOURCE = ['10.0.0.11', '10.0.0.12', '10.0.0.13', '10.0.0.14', '10.0.0.31', '10.0.0.32', '10.0.0.33', '10.0.0.34']

# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']

# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'min'

# Connection mode (par = parallel, seq = sequential)
MODE = 'par'

# Number of evaluation repititions to run
EPOCHS = 100

# Number of iterations to be run in each evaluation repitition
ITER = 100

# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 4

# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl'
acf83622aa75ca8043ac8446185e49c9f7ea8532 | 6,421 | py | Python | YOLOv5/models/export.py | fan1071221/Traditional-Chinese-Scene-Character-Recognition-Contest-Advanced-Competition | fa622855bce9e8d6f4eae747cd9b3f8faa87228a | [
"MIT"
] | null | null | null | YOLOv5/models/export.py | fan1071221/Traditional-Chinese-Scene-Character-Recognition-Contest-Advanced-Competition | fa622855bce9e8d6f4eae747cd9b3f8faa87228a | [
"MIT"
] | null | null | null | YOLOv5/models/export.py | fan1071221/Traditional-Chinese-Scene-Character-Recognition-Contest-Advanced-Competition | fa622855bce9e8d6f4eae747cd9b3f8faa87228a | [
"MIT"
] | null | null | null | """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
Usage:
$ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
"""
import argparse
import sys
import time
from pathlib import Path
sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile
import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging
from utils.torch_utils import select_device
if __name__ == '__main__':
    # Parse export options (model path, target shape, precision, ONNX flags).
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
    parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
    parser.add_argument('--train', action='store_true', help='model.train() mode')
    parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')  # ONNX-only
    parser.add_argument('--simplify', action='store_true', help='simplify ONNX model')  # ONNX-only
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    device = select_device(opt.device)
    model = attempt_load(opt.weights, map_location=device)  # load FP32 model
    labels = model.names

    # Checks
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples
    assert not (opt.device.lower() == "cpu" and opt.half), '--half only compatible with GPU export, i.e. use --device 0'

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device)  # image size(1,3,320,192) iDetection

    # Update model
    if opt.half:
        img, model = img.half(), model.half()  # to FP16
    if opt.train:
        model.train()  # training mode (no grid construction in Detect layer)
    # Rewrite modules for export compatibility (activations + Detect flags).
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, models.common.Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        elif isinstance(m, models.yolo.Detect):
            m.inplace = opt.inplace
            m.onnx_dynamic = opt.dynamic
            # m.forward = m.forward_export  # assign forward (optional)

    for _ in range(2):
        y = model(img)  # dry runs
    print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)")

    # TorchScript export -----------------------------------------------------------------------------------------------
    prefix = colorstr('TorchScript:')
    try:
        print(f'\n{prefix} starting export with torch {torch.__version__}...')
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts = torch.jit.trace(model, img, strict=False)
        optimize_for_mobile(ts).save(f)  # https://pytorch.org/tutorials/recipes/script_optimized.html
        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'{prefix} export failure: {e}')

    # ONNX export ------------------------------------------------------------------------------------------------------
    prefix = colorstr('ONNX:')
    try:
        import onnx

        print(f'{prefix} starting export with onnx {onnx.__version__}...')
        f = opt.weights.replace('.pt', '.onnx')  # filename
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # size(1,3,640,640)
                                        'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)

        # Checks
        model_onnx = onnx.load(f)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model
        # print(onnx.helper.printable_graph(model_onnx.graph))  # print

        # Simplify
        if opt.simplify:
            try:
                check_requirements(['onnx-simplifier'])
                import onnxsim

                print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
                model_onnx, check = onnxsim.simplify(model_onnx,
                                                     dynamic_input_shape=opt.dynamic,
                                                     input_shapes={'images': list(img.shape)} if opt.dynamic else None)
                assert check, 'assert check failed'
                onnx.save(model_onnx, f)
            except Exception as e:
                print(f'{prefix} simplifier failure: {e}')
        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'{prefix} export failure: {e}')

    # CoreML export ----------------------------------------------------------------------------------------------------
    prefix = colorstr('CoreML:')
    try:
        import coremltools as ct

        print(f'{prefix} starting export with coremltools {ct.__version__}...')
        # convert model from torchscript and apply pixel scaling as per detect.py
        # NOTE(review): `ts` is assigned in the TorchScript section above; if
        # that export failed, this raises NameError and lands in the except
        # below as a "failure" message.
        model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'{prefix} export failure: {e}')

    # Finish
    print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
acf837d26e0abf0866f7b1a2ed9434827076f56c | 4,885 | py | Python | videointelligence/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | 1 | 2019-06-14T10:11:59.000Z | 2019-06-14T10:11:59.000Z | videointelligence/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | null | null | null | videointelligence/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | 1 | 2020-04-14T10:47:41.000Z | 2020-04-14T10:47:41.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.videointelligence_v1p1beta1.proto import video_intelligence_pb2_grpc
class VideoIntelligenceServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.videointelligence.v1p1beta1 VideoIntelligenceService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self,
channel=None,
credentials=None,
address="videointelligence.googleapis.com:443",
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"video_intelligence_service_stub": video_intelligence_pb2_grpc.VideoIntelligenceServiceStub(
channel
)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(
cls, address="videointelligence.googleapis.com:443", credentials=None
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def annotate_video(self):
"""Return the gRPC stub for :meth:`VideoIntelligenceServiceClient.annotate_video`.
Performs asynchronous video annotation. Progress and results can be
retrieved through the ``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress).
``Operation.response`` contains ``AnnotateVideoResponse`` (results).
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["video_intelligence_service_stub"].AnnotateVideo
| 37.868217 | 104 | 0.66346 |
acf838fde8ee25e7bbed7842a13f419d2cbcede4 | 12,301 | py | Python | tensorflow_tts/models/fastspeech2.py | geneing/TensorFlowTTS | 0035ba00fec1b2b1184c8df32646d6a88b01ee5b | [
"Apache-2.0"
] | null | null | null | tensorflow_tts/models/fastspeech2.py | geneing/TensorFlowTTS | 0035ba00fec1b2b1184c8df32646d6a88b01ee5b | [
"Apache-2.0"
] | null | null | null | tensorflow_tts/models/fastspeech2.py | geneing/TensorFlowTTS | 0035ba00fec1b2b1184c8df32646d6a88b01ee5b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech2 Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Model modules for FastSpeech2."""
import tensorflow as tf
from tensorflow_tts.models.fastspeech import TFFastSpeech, get_initializer
class TFFastSpeechVariantPredictor(tf.keras.layers.Layer):
    """FastSpeech variant predictor module.

    A stack of [Conv1D -> ReLU -> LayerNorm -> Dropout] blocks followed by a
    Dense(1) projection that emits one scalar per input position. The same
    module is instantiated for the duration, f0 and energy predictors below.
    """
    def __init__(self, config, **kwargs):
        """Init variables.

        Args:
            config: Model configuration object providing the
                `variant_predictor_*` hyperparameters, `layer_norm_eps`,
                `n_speakers` and `encoder_self_attention_params`.
            **kwargs: Forwarded to `tf.keras.layers.Layer`.
        """
        super().__init__(**kwargs)
        self.conv_layers = []
        for i in range(config.variant_prediction_num_conv_layers):
            # One block: Conv1D -> ReLU -> LayerNorm -> Dropout. Layer names
            # ("conv_._{i}", "LayerNorm_._{i}") are part of checkpoint keys.
            self.conv_layers.append(
                tf.keras.layers.Conv1D(
                    config.variant_predictor_filter,
                    config.variant_predictor_kernel_size,
                    padding="same",
                    name="conv_._{}".format(i),
                )
            )
            self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu))
            self.conv_layers.append(
                tf.keras.layers.LayerNormalization(
                    epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
                )
            )
            self.conv_layers.append(
                tf.keras.layers.Dropout(config.variant_predictor_dropout_rate)
            )
        self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
        # Projects each position's features down to a single scalar prediction.
        self.output_layer = tf.keras.layers.Dense(1)
        if config.n_speakers > 1:
            # Multi-speaker setup: learned speaker embeddings are projected
            # (softplus-gated in call()) and added to the encoder states.
            self.decoder_speaker_embeddings = tf.keras.layers.Embedding(
                config.n_speakers,
                config.encoder_self_attention_params.hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.encoder_self_attention_params.hidden_size,
                name="speaker_fc",
            )
        self.config = config
    def call(self, inputs, training=False):
        """Call logic.

        Args:
            inputs: Tuple of (encoder_hidden_states, speaker_ids,
                attention_mask). Presumably [batch, length, hidden], [batch]
                and [batch, length] respectively — TODO confirm with callers.
            training: Whether the layer runs in training mode (dropout).

        Returns:
            Tensor of shape [batch_size, length]: one prediction per position,
            zeroed at masked (padding) positions.
        """
        encoder_hidden_states, speaker_ids, attention_mask = inputs
        attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype)
        if self.config.n_speakers > 1:
            speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the length axis
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            encoder_hidden_states += extended_speaker_features
        # mask encoder hidden states so padded positions contribute nothing
        masked_encoder_hidden_states = encoder_hidden_states * attention_mask
        # pass through the conv stack, then project to one scalar per position
        outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
        outputs = self.output_layer(outputs)
        # re-apply the mask after projection, then drop the trailing unit axis
        masked_outputs = outputs * attention_mask
        outputs = tf.squeeze(masked_outputs, -1)
        return outputs
class TFFastSpeech2(TFFastSpeech):
    """TF FastSpeech2 module: FastSpeech extended with duration, f0 and
    energy variant predictors."""
    def __init__(self, config, **kwargs):
        """Init layers for fastspeech.

        Args:
            config: Model configuration providing the variant predictor
                hyperparameters and `encoder_self_attention_params`.
            **kwargs: Forwarded to the `TFFastSpeech` base class.
        """
        super().__init__(config, **kwargs)
        self.f0_predictor = TFFastSpeechVariantPredictor(config, name="f0_predictor")
        self.energy_predictor = TFFastSpeechVariantPredictor(
            config, name="energy_predictor",
        )
        self.duration_predictor = TFFastSpeechVariantPredictor(
            config, name="duration_predictor"
        )
        # define f0_embeddings and energy_embeddings: Conv1D layers that lift
        # the scalar f0/energy tracks back into the encoder hidden dimension.
        self.f0_embeddings = tf.keras.layers.Conv1D(
            filters=config.encoder_self_attention_params.hidden_size,
            kernel_size=9,
            padding="same",
            name="f0_embeddings",
        )
        self.f0_dropout = tf.keras.layers.Dropout(0.5)
        self.energy_embeddings = tf.keras.layers.Conv1D(
            filters=config.encoder_self_attention_params.hidden_size,
            kernel_size=9,
            padding="same",
            name="energy_embeddings",
        )
        self.energy_dropout = tf.keras.layers.Dropout(0.5)
    def _build(self):
        """Dummy forward pass to force weight creation for all layers."""
        # fake inputs
        input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
        speaker_ids = tf.convert_to_tensor([0], tf.int32)
        duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
        f0_gts = tf.convert_to_tensor(
            [[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]], tf.float32
        )
        energy_gts = tf.convert_to_tensor(
            [[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]], tf.float32
        )
        self(
            input_ids=input_ids,
            speaker_ids=speaker_ids,
            duration_gts=duration_gts,
            f0_gts=f0_gts,
            energy_gts=energy_gts,
        )
    def call(
        self,
        input_ids,
        speaker_ids,
        duration_gts,
        f0_gts,
        energy_gts,
        training=False,
        **kwargs,
    ):
        """Call logic (teacher-forced path using ground-truth variants).

        Args:
            input_ids: int32 token ids, shape [batch, length]; id 0 is padding.
            speaker_ids: int32 speaker ids, shape [batch].
            duration_gts: int32 ground-truth durations, shape [batch, length].
            f0_gts: float32 ground-truth f0 values.
            energy_gts: float32 ground-truth energy values.
            training: Whether to run the network in training mode.

        Returns:
            Tuple of (mels_before, mels_after, duration_outputs, f0_outputs,
            energy_outputs).
        """
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=training
        )
        last_encoder_hidden_states = encoder_output[0]
        # The predictors use last_encoder_hidden_states only; more encoder
        # hidden-state layers could be combined here instead.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask]
        ) # [batch_size, length]
        f0_outputs = self.f0_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=training
        )
        energy_outputs = self.energy_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=training
        )
        f0_embedding = self.f0_embeddings(
            tf.expand_dims(f0_gts, 2)
        ) # [batch_size, mel_length, feature]
        energy_embedding = self.energy_embeddings(
            tf.expand_dims(energy_gts, 2)
        ) # [batch_size, mel_length, feature]
        # apply dropout both training/inference (intentional: training=True)
        f0_embedding = self.f0_dropout(f0_embedding, training=True)
        energy_embedding = self.energy_dropout(energy_embedding, training=True)
        # sum variant features into the encoder states before length regulation
        last_encoder_hidden_states += f0_embedding + energy_embedding
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_gts], training=training
        )
        # create decoder positional embedding (1-based; 0 marks padding)
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=training,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mels_before = self.mel_dense(last_decoder_hidden_states)
        mels_after = (
            self.postnet([mels_before, encoder_masks], training=training) + mels_before
        )
        outputs = (
            mels_before,
            mels_after,
            duration_outputs,
            f0_outputs,
            energy_outputs,
        )
        return outputs
    def _inference(
        self, input_ids, speaker_ids, speed_ratios, f0_ratios, energy_ratios, **kwargs,
    ):
        """Inference path: variants are predicted, then scaled by the ratios.

        Args:
            input_ids: int32 token ids, shape [batch, length]; id 0 is padding.
            speaker_ids: int32 speaker ids, shape [batch].
            speed_ratios: float32 per-sample duration multipliers, shape [batch].
            f0_ratios: float32 per-sample f0 multipliers, shape [batch].
            energy_ratios: float32 per-sample energy multipliers, shape [batch].

        Returns:
            Tuple of (mel_before, mel_after, duration_outputs, f0_outputs,
            energy_outputs).
        """
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]
        # expand ratios to [B, 1] so they broadcast over the length axis
        speed_ratios = tf.expand_dims(speed_ratios, 1) # [B, 1]
        f0_ratios = tf.expand_dims(f0_ratios, 1) # [B, 1]
        energy_ratios = tf.expand_dims(energy_ratios, 1) # [B, 1]
        # The predictors use last_encoder_hidden_states only; more encoder
        # hidden-state layers could be combined here instead.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask]
        ) # [batch_size, length]
        # durations are predicted in log domain: invert, clamp >= 0, scale, round
        duration_outputs = tf.nn.relu(tf.math.exp(duration_outputs) - 1.0)
        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )
        f0_outputs = self.f0_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=False
        )
        f0_outputs *= f0_ratios
        energy_outputs = self.energy_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=False
        )
        energy_outputs *= energy_ratios
        # dropout deliberately active at inference (training=True), mirroring call()
        f0_embedding = self.f0_dropout(
            self.f0_embeddings(tf.expand_dims(f0_outputs, 2)), training=True
        )
        energy_embedding = self.energy_dropout(
            self.energy_embeddings(tf.expand_dims(energy_outputs, 2)), training=True
        )
        # sum variant features into the encoder states before length regulation
        last_encoder_hidden_states += f0_embedding + energy_embedding
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )
        # create decoder positional embedding (1-based; 0 marks padding)
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs)
        return outputs
    def setup_inference_fn(self):
        """Wrap `_inference` into two tf.functions: `inference` (dynamic batch)
        and `inference_tflite` (fixed batch size 1, for TFLite conversion)."""
        self.inference = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="speed_ratios"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="f0_ratios"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="energy_ratios"),
            ],
        )
        self.inference_tflite = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[1, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="speed_ratios"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="f0_ratios"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="energy_ratios"),
            ],
        )
| 39.809061 | 100 | 0.635152 |
acf83a988040d4929a0ca4fbb80144a1b0324174 | 18,748 | py | Python | great_expectations/datasource/sqlalchemy_datasource.py | hammadzz/great_expectations | 020c605000d9472e88a9da7b6baa2fae28fd02c7 | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/sqlalchemy_datasource.py | hammadzz/great_expectations | 020c605000d9472e88a9da7b6baa2fae28fd02c7 | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/sqlalchemy_datasource.py | hammadzz/great_expectations | 020c605000d9472e88a9da7b6baa2fae28fd02c7 | [
"Apache-2.0"
] | null | null | null | import datetime
import logging
from pathlib import Path
from string import Template
from urllib.parse import urlparse
from great_expectations.core.batch import Batch
from great_expectations.core.util import nested_update
from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference
from great_expectations.datasource import Datasource
from great_expectations.datasource.types import BatchMarkers
from great_expectations.exceptions import (
DatasourceInitializationError,
DatasourceKeyPairAuthBadPassphraseError,
)
from great_expectations.types import ClassConfig
from great_expectations.types.configurations import classConfigSchema
logger = logging.getLogger(__name__)
# SQLAlchemy is an optional dependency: if it cannot be imported we fall back
# to module-level ``None`` sentinels so this module still imports cleanly and
# SqlAlchemyDatasource can raise a clear error at construction time.
try:
    import sqlalchemy
    from sqlalchemy import create_engine
except ImportError:
    sqlalchemy = None
    create_engine = None
    logger.debug("Unable to import sqlalchemy.")
# PEP 8: comparisons to the None singleton must use `is` / `is not`, never
# equality operators (previously `if sqlalchemy != None:`).
if sqlalchemy is not None:
    # The tuple of exceptions that signal a datasource initialization failure
    # depends on whether google.auth is importable in this environment.
    try:
        import google.auth
        datasource_initialization_exceptions = (
            sqlalchemy.exc.OperationalError,
            sqlalchemy.exc.DatabaseError,
            sqlalchemy.exc.ArgumentError,
            google.auth.exceptions.GoogleAuthError,
        )
    except ImportError:
        datasource_initialization_exceptions = (
            sqlalchemy.exc.OperationalError,
            sqlalchemy.exc.DatabaseError,
            sqlalchemy.exc.ArgumentError,
        )
class SqlAlchemyDatasource(Datasource):
    """
    A SqlAlchemyDatasource will provide data_assets converting batch_kwargs using the following rules:
      - if the batch_kwargs include a table key, the datasource will provide a dataset object connected to that table
      - if the batch_kwargs include a query key, the datasource will create a temporary table usingthat query. The query can be parameterized according to the standard python Template engine, which uses $parameter, with additional kwargs passed to the get_batch method.
    --ge-feature-maturity-info--
        id: datasource_postgresql
        title: Datasource - PostgreSQL
        icon:
        short_description: Postgres
        description: Support for using the open source PostgresQL database as an external datasource and execution engine.
        how_to_guide_url:
        maturity: Production
        maturity_details:
            api_stability: High
            implementation_completeness: Complete
            unit_test_coverage: Complete
            integration_infrastructure_test_coverage: Complete
            documentation_completeness: Medium (does not have a specific how-to, but easy to use overall)
            bug_risk: Low
            expectation_completeness: Moderate
        id: datasource_bigquery
        title: Datasource - BigQuery
        icon:
        short_description: BigQuery
        description: Use Google BigQuery as an execution engine and external datasource to validate data.
        how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_bigquery_datasource.html
        maturity: Beta
        maturity_details:
            api_stability: Unstable (table generator inability to work with triple-dotted, temp table usability, init flow calls setup "other")
            implementation_completeness: Moderate
            unit_test_coverage: Partial (no test coverage for temp table creation)
            integration_infrastructure_test_coverage: Minimal
            documentation_completeness: Partial (how-to does not cover all cases)
            bug_risk: High (we *know* of several bugs, including inability to list tables, SQLAlchemy URL incomplete)
            expectation_completeness: Moderate
        id: datasource_redshift
        title: Datasource - Amazon Redshift
        icon:
        short_description: Redshift
        description: Use Amazon Redshift as an execution engine and external datasource to validate data.
        how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_redshift_datasource.html
        maturity: Beta
        maturity_details:
            api_stability: Moderate (potential metadata/introspection method special handling for performance)
            implementation_completeness: Complete
            unit_test_coverage: Minimal
            integration_infrastructure_test_coverage: Minimal (none automated)
            documentation_completeness: Moderate
            bug_risk: Moderate
            expectation_completeness: Moderate
        id: datasource_snowflake
        title: Datasource - Snowflake
        icon:
        short_description: Snowflake
        description: Use Snowflake Computing as an execution engine and external datasource to validate data.
        how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_snowflake_datasource.html
        maturity: Production
        maturity_details:
            api_stability: High
            implementation_completeness: Complete
            unit_test_coverage: Complete
            integration_infrastructure_test_coverage: Minimal (manual only)
            documentation_completeness: Complete
            bug_risk: Low
            expectation_completeness: Complete
        id: datasource_mssql
        title: Datasource - Microsoft SQL Server
        icon:
        short_description: Microsoft SQL Server
        description: Use Microsoft SQL Server as an execution engine and external datasource to validate data.
        how_to_guide_url:
        maturity: Experimental
        maturity_details:
            api_stability: High
            implementation_completeness: Moderate
            unit_test_coverage: Minimal (none)
            integration_infrastructure_test_coverage: Minimal (none)
            documentation_completeness: Minimal
            bug_risk: High
            expectation_completeness: Low (some required queries do not generate properly, such as related to nullity)
        id: datasource_mysql
        title: Datasource - MySQL
        icon:
        short_description: MySQL
        description: Use MySQL as an execution engine and external datasource to validate data.
        how_to_guide_url:
        maturity: Experimental
        maturity_details:
            api_stability: Low (no consideration for temp tables)
            implementation_completeness: Low (no consideration for temp tables)
            unit_test_coverage: Minimal (none)
            integration_infrastructure_test_coverage: Minimal (none)
            documentation_completeness: Minimal (none)
            bug_risk: Unknown
            expectation_completeness: Unknown
        id: datasource_mariadb
        title: Datasource - MariaDB
        icon:
        short_description: MariaDB
        description: Use MariaDB as an execution engine and external datasource to validate data.
        how_to_guide_url:
        maturity: Experimental
        maturity_details:
            api_stability: Low (no consideration for temp tables)
            implementation_completeness: Low (no consideration for temp tables)
            unit_test_coverage: Minimal (none)
            integration_infrastructure_test_coverage: Minimal (none)
            documentation_completeness: Minimal (none)
            bug_risk: Unknown
            expectation_completeness: Unknown
    """
    recognized_batch_parameters = {"query_parameters", "limit", "dataset_options"}
    @classmethod
    def build_configuration(
        cls, data_asset_type=None, batch_kwargs_generators=None, **kwargs
    ):
        """
        Build a full configuration object for a datasource, potentially including generators with defaults.
        Args:
            data_asset_type: A ClassConfig dictionary
            batch_kwargs_generators: Generator configuration dictionary
            **kwargs: Additional kwargs to be part of the datasource constructor's initialization
        Returns:
            A complete datasource configuration.
        """
        if data_asset_type is None:
            # Default to a SqlAlchemyDataset when no explicit type was given.
            data_asset_type = {
                "class_name": "SqlAlchemyDataset",
                "module_name": "great_expectations.dataset",
            }
        else:
            # Normalize the provided ClassConfig dict through the schema.
            data_asset_type = classConfigSchema.dump(ClassConfig(**data_asset_type))
        configuration = kwargs
        configuration["data_asset_type"] = data_asset_type
        if batch_kwargs_generators is not None:
            configuration["batch_kwargs_generators"] = batch_kwargs_generators
        return configuration
    def __init__(
        self,
        name="default",
        data_context=None,
        data_asset_type=None,
        credentials=None,
        batch_kwargs_generators=None,
        **kwargs
    ):
        """Initialize the datasource and eagerly create/verify the engine.

        Args:
            name: Datasource name registered with the data context.
            data_context: Optional DataContext this datasource belongs to.
            data_asset_type: Optional ClassConfig dict for produced datasets.
            credentials: Optional credentials dict (url / drivername / key-pair
                auth fields); stored into the datasource config when given.
            batch_kwargs_generators: Optional generator configuration.
            **kwargs: May carry `engine`, `connection_string`, or extra
                create_engine arguments.

        Raises:
            DatasourceInitializationError: If sqlalchemy is unavailable or the
                connection cannot be established.
        """
        if not sqlalchemy:
            raise DatasourceInitializationError(
                name, "ModuleNotFoundError: No module named 'sqlalchemy'"
            )
        configuration_with_defaults = SqlAlchemyDatasource.build_configuration(
            data_asset_type, batch_kwargs_generators, **kwargs
        )
        data_asset_type = configuration_with_defaults.pop("data_asset_type")
        batch_kwargs_generators = configuration_with_defaults.pop(
            "batch_kwargs_generators", None
        )
        super().__init__(
            name,
            data_context=data_context,
            data_asset_type=data_asset_type,
            batch_kwargs_generators=batch_kwargs_generators,
            **configuration_with_defaults
        )
        if credentials is not None:
            self._datasource_config.update({"credentials": credentials})
        else:
            credentials = {}
        try:
            # if an engine was provided, use that
            if "engine" in kwargs:
                self.engine = kwargs.pop("engine")
            # if a connection string or url was provided, use that
            elif "connection_string" in kwargs:
                connection_string = kwargs.pop("connection_string")
                self.engine = create_engine(connection_string, **kwargs)
                self.engine.connect()
            elif "url" in credentials:
                url = credentials.pop("url")
                self.drivername = urlparse(url).scheme
                self.engine = create_engine(url, **kwargs)
                self.engine.connect()
            # Otherwise, connect using remaining kwargs
            else:
                (
                    options,
                    create_engine_kwargs,
                    drivername,
                ) = self._get_sqlalchemy_connection_options(**kwargs)
                self.drivername = drivername
                self.engine = create_engine(options, **create_engine_kwargs)
                self.engine.connect()
            # since we switched to lazy loading of Datasources when we initialise a DataContext,
            # the dialect of SQLAlchemy Datasources cannot be obtained reliably when we send
            # "data_context.__init__" events.
            # This event fills in the SQLAlchemy dialect.
            if data_context is not None and getattr(
                data_context, "_usage_statistics_handler", None
            ):
                handler = data_context._usage_statistics_handler
                handler.send_usage_message(
                    event="datasource.sqlalchemy.connect",
                    event_payload={
                        "anonymized_name": handler._datasource_anonymizer.anonymize(
                            self.name
                        ),
                        "sqlalchemy_dialect": self.engine.name,
                    },
                    success=True,
                )
        except datasource_initialization_exceptions as sqlalchemy_error:
            raise DatasourceInitializationError(self._name, str(sqlalchemy_error))
        self._build_generators()
    def _get_sqlalchemy_connection_options(self, **kwargs):
        """Derive (options, create_engine_kwargs, drivername) from stored credentials.

        Returns:
            Tuple of (URL-or-connection-string, dict of extra create_engine
            kwargs, drivername-or-None).
        """
        drivername = None
        if "credentials" in self._datasource_config:
            credentials = self._datasource_config["credentials"]
        else:
            credentials = {}
        create_engine_kwargs = {}
        # NOTE(review): pop() mutates the stored credentials dict in place —
        # presumably acceptable since options are consumed once at init.
        connect_args = credentials.pop("connect_args", None)
        if connect_args:
            create_engine_kwargs["connect_args"] = connect_args
        # if a connection string or url was provided in the profile, use that
        if "connection_string" in credentials:
            options = credentials["connection_string"]
        elif "url" in credentials:
            options = credentials["url"]
        else:
            # Update credentials with anything passed during connection time
            drivername = credentials.pop("drivername")
            schema_name = credentials.pop("schema_name", None)
            if schema_name is not None:
                logger.warning(
                    "schema_name specified creating a URL with schema is not supported. Set a default "
                    "schema on the user connecting to your database."
                )
            if "private_key_path" in credentials:
                # Key-pair authentication path (e.g. private key file on disk).
                options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
                    drivername, credentials
                )
            else:
                options = sqlalchemy.engine.url.URL(drivername, **credentials)
        return options, create_engine_kwargs, drivername
    def _get_sqlalchemy_key_pair_auth_url(self, drivername, credentials):
        """Build a SQLAlchemy URL plus connect_args for key-pair authentication.

        Args:
            drivername: SQLAlchemy driver name (may be None; falls back to a
                `drivername` entry inside `credentials`).
            credentials: Credentials dict containing `private_key_path` and
                `private_key_passphrase`; consumed destructively via pop().

        Returns:
            Tuple of (sqlalchemy URL, create_engine kwargs carrying the
            DER-encoded private key in connect_args).

        Raises:
            DatasourceKeyPairAuthBadPassphraseError: If the key cannot be
                decrypted with the supplied passphrase.
        """
        from cryptography.hazmat.primitives import serialization
        from cryptography.hazmat.backends import default_backend
        private_key_path = credentials.pop("private_key_path")
        private_key_passphrase = credentials.pop("private_key_passphrase")
        with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
            try:
                p_key = serialization.load_pem_private_key(
                    key.read(),
                    password=private_key_passphrase.encode()
                    if private_key_passphrase
                    else None,
                    backend=default_backend(),
                )
            except ValueError as e:
                # Distinguish a wrong passphrase from other key-loading errors.
                if "incorrect password" in str(e).lower():
                    raise DatasourceKeyPairAuthBadPassphraseError(
                        datasource_name="SqlAlchemyDatasource",
                        message="Decryption of key failed, was the passphrase incorrect?",
                    ) from e
                else:
                    raise e
        # Re-serialize the decrypted key to unencrypted DER for the driver.
        pkb = p_key.private_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        )
        credentials_driver_name = credentials.pop("drivername", None)
        create_engine_kwargs = {"connect_args": {"private_key": pkb}}
        return (
            sqlalchemy.engine.url.URL(
                drivername or credentials_driver_name, **credentials
            ),
            create_engine_kwargs,
        )
    def get_batch(self, batch_kwargs, batch_parameters=None):
        """Build a Batch from table- or query-style batch_kwargs.

        Args:
            batch_kwargs: Must contain exactly one of `table` or `query`; may
                also carry `schema`, `limit`, `offset`, `query_parameters`,
                and dialect-specific temp-table names.
            batch_parameters: Optional parameters recorded on the Batch.

        Returns:
            A `Batch` whose data is a `SqlAlchemyBatchReference`.

        Raises:
            ValueError: If neither `table` nor `query` is provided.
        """
        # We need to build a batch_id to be used in the dataframe
        batch_markers = BatchMarkers(
            {
                "ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
                    "%Y%m%dT%H%M%S.%fZ"
                )
            }
        )
        if "bigquery_temp_table" in batch_kwargs:
            query_support_table_name = batch_kwargs.get("bigquery_temp_table")
        elif "snowflake_transient_table" in batch_kwargs:
            # Snowflake uses a transient table, so we expect a table_name to be provided
            query_support_table_name = batch_kwargs.get("snowflake_transient_table")
        else:
            query_support_table_name = None
        if "query" in batch_kwargs:
            if "limit" in batch_kwargs or "offset" in batch_kwargs:
                logger.warning(
                    "Limit and offset parameters are ignored when using query-based batch_kwargs; consider "
                    "adding limit and offset directly to the generated query."
                )
            if "query_parameters" in batch_kwargs:
                # Substitute $parameter placeholders via string.Template.
                query = Template(batch_kwargs["query"]).safe_substitute(
                    batch_kwargs["query_parameters"]
                )
            else:
                query = batch_kwargs["query"]
            batch_reference = SqlAlchemyBatchReference(
                engine=self.engine,
                query=query,
                table_name=query_support_table_name,
                schema=batch_kwargs.get("schema"),
            )
        elif "table" in batch_kwargs:
            table = batch_kwargs["table"]
            limit = batch_kwargs.get("limit")
            offset = batch_kwargs.get("offset")
            if limit is not None or offset is not None:
                logger.info(
                    "Generating query from table batch_kwargs based on limit and offset"
                )
                # In BigQuery the table name is already qualified with its schema name
                if self.engine.dialect.name.lower() == "bigquery":
                    schema = None
                else:
                    schema = batch_kwargs.get("schema")
                raw_query = (
                    sqlalchemy.select([sqlalchemy.text("*")])
                    .select_from(
                        sqlalchemy.schema.Table(
                            table, sqlalchemy.MetaData(), schema=schema
                        )
                    )
                    .offset(offset)
                    .limit(limit)
                )
                # Render the SELECT with literal values bound into the SQL text.
                query = str(
                    raw_query.compile(
                        self.engine, compile_kwargs={"literal_binds": True}
                    )
                )
                batch_reference = SqlAlchemyBatchReference(
                    engine=self.engine,
                    query=query,
                    table_name=query_support_table_name,
                    schema=batch_kwargs.get("schema"),
                )
            else:
                batch_reference = SqlAlchemyBatchReference(
                    engine=self.engine,
                    table_name=table,
                    schema=batch_kwargs.get("schema"),
                )
        else:
            raise ValueError(
                "Invalid batch_kwargs: exactly one of 'table' or 'query' must be specified"
            )
        return Batch(
            datasource_name=self.name,
            batch_kwargs=batch_kwargs,
            data=batch_reference,
            batch_parameters=batch_parameters,
            batch_markers=batch_markers,
            data_context=self._data_context,
        )
    def process_batch_parameters(
        self, query_parameters=None, limit=None, dataset_options=None
    ):
        """Merge `query_parameters` into the base class's processed batch kwargs."""
        batch_kwargs = super().process_batch_parameters(
            limit=limit, dataset_options=dataset_options,
        )
        nested_update(batch_kwargs, {"query_parameters": query_parameters})
        return batch_kwargs
| 40.756522 | 267 | 0.641775 |
acf83c52ffd096300f8c86255acd99f890bad42d | 1,105 | py | Python | bluebottle/funding_vitepay/migrations/0006_auto_20190918_1632.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/funding_vitepay/migrations/0006_auto_20190918_1632.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/funding_vitepay/migrations/0006_auto_20190918_1632.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-09-18 14:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.15 makemigrations; keep the operations
    # byte-identical so the migration autodetector stays consistent.
    dependencies = [
        ('funding', '0031_plainpayoutaccount'),
        ('funding_vitepay', '0005_vitepaypayoutaccount'),
    ]
    operations = [
        # Introduce VitepayBankAccount as a multi-table-inheritance child of
        # funding.BankAccount, then drop the old VitepayPayoutAccount model.
        migrations.CreateModel(
            name='VitepayBankAccount',
            fields=[
                ('bankaccount_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='funding.BankAccount')),
                ('account_name', models.CharField(max_length=40)),
            ],
            options={
                'abstract': False,
            },
            bases=('funding.bankaccount',),
        ),
        migrations.RemoveField(
            model_name='vitepaypayoutaccount',
            name='payoutaccount_ptr',
        ),
        migrations.DeleteModel(
            name='VitepayPayoutAccount',
        ),
    ]
| 30.694444 | 201 | 0.60543 |
acf83c6d1f3f9306eae5d366417e62d1322d1ff4 | 173 | py | Python | .entorno/bin/django-admin.py | sperea/api-rest-boilerplate | 16c0a96c049b0b85823347e3527f0b950911c235 | [
"Apache-2.0"
] | null | null | null | .entorno/bin/django-admin.py | sperea/api-rest-boilerplate | 16c0a96c049b0b85823347e3527f0b950911c235 | [
"Apache-2.0"
] | null | null | null | .entorno/bin/django-admin.py | sperea/api-rest-boilerplate | 16c0a96c049b0b85823347e3527f0b950911c235 | [
"Apache-2.0"
] | null | null | null | #!/home/sergio/Proyectos/api-rest-boilerplate/.entorno/bin/python3
from django.core import management
if __name__ == "__main__":
    # Dispatch to Django's command-line management utility (django-admin).
    management.execute_from_command_line()
| 28.833333 | 66 | 0.791908 |
acf83d17fd220d5b4662d9ead2a14c6e5b2d749d | 8,122 | py | Python | tfx/orchestration/experimental/core/task_scheduler.py | ajmarcus/tfx | 28ac2be5ace31ca733f6292495f8be83484a1730 | [
"Apache-2.0"
] | 1 | 2019-10-02T18:03:55.000Z | 2019-10-02T18:03:55.000Z | tfx/orchestration/experimental/core/task_scheduler.py | ajmarcus/tfx | 28ac2be5ace31ca733f6292495f8be83484a1730 | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/experimental/core/task_scheduler.py | ajmarcus/tfx | 28ac2be5ace31ca733f6292495f8be83484a1730 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task scheduler interface and registry."""
import abc
import typing
from typing import Dict, List, Optional, Type, TypeVar, Union
import attr
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import task as task_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
@attr.s(auto_attribs=True, frozen=True)
class ExecutorNodeOutput:
"""Output of a node containing an executor.
Attributes:
executor_output: Output of node execution (if any).
"""
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
@attr.s(auto_attribs=True, frozen=True)
class ImporterNodeOutput:
"""Importer system node output.
Attributes:
output_artifacts: Output artifacts resulting from importer node execution.
"""
output_artifacts: Dict[str, List[types.Artifact]]
@attr.s(auto_attribs=True, frozen=True)
class ResolverNodeOutput:
"""Resolver system node output.
Attributes:
resolved_input_artifacts: Artifacts resolved by resolver system node.
"""
resolved_input_artifacts: Dict[str, List[types.Artifact]]
@attr.s(auto_attribs=True, frozen=True)
class TaskSchedulerResult:
"""Response from the task scheduler.
Attributes:
status: Scheduler status that reflects scheduler level issues, such as task
cancellation, failure to start the executor, etc.
output: Output of task scheduler execution.
"""
status: status_lib.Status
output: Union[ExecutorNodeOutput, ImporterNodeOutput,
ResolverNodeOutput] = ExecutorNodeOutput()
class TaskScheduler(abc.ABC):
"""Interface for task schedulers."""
def __init__(self, mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline, task: task_lib.Task):
"""Constructor.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: The pipeline IR proto.
task: Task to be executed.
"""
self.mlmd_handle = mlmd_handle
self.pipeline = pipeline
self.task = task
@abc.abstractmethod
def schedule(self) -> TaskSchedulerResult:
"""Schedules task execution and returns the results of execution.
This method blocks until task execution completes (successfully or not) or
until explicitly cancelled by a call to `cancel`. When cancelled, `schedule`
is expected to stop any ongoing work, clean up and return as soon as
possible. Note that `cancel` will be invoked from a different thread than
`schedule` and hence the concrete implementations must be thread safe. It's
technically possible for `cancel` to be invoked before `schedule`; scheduler
implementations should handle this case by returning from `schedule`
immediately.
"""
@abc.abstractmethod
def cancel(self) -> None:
"""Cancels task scheduler.
This method will be invoked from a different thread than the thread that's
blocked on call to `schedule`. `cancel` must return immediately when called.
Upon cancellation, `schedule` method is expected to stop any ongoing work,
clean up and return as soon as possible. It's technically possible for
`cancel` to be invoked before `schedule`; scheduler implementations should
handle this case by returning from `schedule` immediately.
"""
T = TypeVar('T', bound='TaskSchedulerRegistry')
class TaskSchedulerRegistry:
  """Process-wide mapping from type urls to `TaskScheduler` classes.

  The registry is keyed either by a node type name or by an executor spec
  type url; `create_task_scheduler` tries both, in that order.
  """
  # Shared, class-level registry; mutated only via the classmethods below.
  _task_scheduler_registry = {}
  @classmethod
  def register(cls: Type[T], url: str,
               scheduler_class: Type[TaskScheduler]) -> None:
    """Associates `scheduler_class` with `url`.

    Args:
      url: Node type url or executor spec url the scheduler should match.
      scheduler_class: The class that will be instantiated for a matching task.

    Raises:
      ValueError: If `url` is already in the registry.
    """
    registry = cls._task_scheduler_registry
    if url in registry:
      raise ValueError(f'A task scheduler already exists for the url: {url}')
    registry[url] = scheduler_class
  @classmethod
  def clear(cls: Type[T]) -> None:
    """Removes every registered scheduler."""
    cls._task_scheduler_registry.clear()
  @classmethod
  def create_task_scheduler(cls: Type[T], mlmd_handle: metadata.Metadata,
                            pipeline: pipeline_pb2.Pipeline,
                            task: task_lib.Task) -> TaskScheduler:
    """Builds a task scheduler for the given task.

    Matching proceeds in two steps: first by the node type name of the node
    associated with the task, then by the node's executor spec url (which
    assumes the pipeline's deployment_config is an
    `IntermediateDeploymentConfig`). If neither matches, ValueError is raised.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline: The pipeline IR.
      task: The task that needs to be scheduled.

    Returns:
      An instance of `TaskScheduler` for the given task.

    Raises:
      NotImplementedError: Raised if not an `ExecNodeTask`.
      ValueError: If a scheduler could not be found in the registry for the
        given task.
    """
    if not task_lib.is_exec_node_task(task):
      raise NotImplementedError(
          'Can create a task scheduler only for an `ExecNodeTask`.')
    exec_task = typing.cast(task_lib.ExecNodeTask, task)
    try:
      chosen_class = cls._scheduler_class_for_node_type(exec_task)
    except ValueError as node_type_error:
      try:
        chosen_class = cls._scheduler_class_for_executor_spec(
            pipeline, exec_task)
      except ValueError as spec_error:
        raise ValueError(
            f'No task scheduler found: {node_type_error}, {spec_error}'
        ) from None
    return chosen_class(
        mlmd_handle=mlmd_handle, pipeline=pipeline, task=exec_task)
  @classmethod
  def _scheduler_class_for_node_type(
      cls: Type[T], task: task_lib.ExecNodeTask) -> Type[TaskScheduler]:
    """Returns scheduler class for node type or raises error if none registered."""
    type_name = task.get_pipeline_node().node_info.type.name
    registered = cls._task_scheduler_registry.get(type_name)
    if registered is None:
      raise ValueError(
          f'No task scheduler registered for node type: {type_name}')
    return registered
  @classmethod
  def _scheduler_class_for_executor_spec(
      cls: Type[T], pipeline: pipeline_pb2.Pipeline,
      task: task_lib.ExecNodeTask) -> Type[TaskScheduler]:
    """Returns scheduler class for executor spec url if feasible, raises error otherwise."""
    if not pipeline.deployment_config.Is(
        pipeline_pb2.IntermediateDeploymentConfig.DESCRIPTOR):
      raise ValueError('No deployment config found in pipeline IR')
    deployment_config = pipeline_pb2.IntermediateDeploymentConfig()
    pipeline.deployment_config.Unpack(deployment_config)
    node_id = task.node_uid.node_id
    if node_id not in deployment_config.executor_specs:
      raise ValueError(f'Executor spec not found for node id: {node_id}')
    spec_url = deployment_config.executor_specs[node_id].type_url
    registered = cls._task_scheduler_registry.get(spec_url)
    if registered is None:
      raise ValueError(
          f'No task scheduler registered for executor spec type url: '
          f'{spec_url}')
    return registered
| 36.751131 | 92 | 0.727284 |
acf83d2f5ab685874da6466c30addf7ef2982600 | 28,522 | py | Python | grr/server/front_end.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | [
"Apache-2.0"
] | null | null | null | grr/server/front_end.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | [
"Apache-2.0"
] | 1 | 2018-05-08T21:15:51.000Z | 2018-05-08T21:15:51.000Z | grr/server/front_end.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""The GRR frontend server."""
import logging
import operator
import time
from grr import config
from grr.lib import communicator
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import uploads
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.server import access_control
from grr.server import aff4
from grr.server import client_index
from grr.server import data_migration
from grr.server import data_store
from grr.server import events
from grr.server import file_store
from grr.server import flow
from grr.server import queue_manager
from grr.server import rekall_profile_server
from grr.server import threadpool
from grr.server.aff4_objects import aff4_grr
class ServerCommunicator(communicator.Communicator):
  """A communicator which stores certificates using AFF4.

  This is the legacy (AFF4 datastore) variant; see
  RelationalServerCommunicator for the relational-db variant.
  NOTE: this module uses Python 2 builtins (`long`, `iteritems`).
  """
  def __init__(self, certificate, private_key, token=None):
    # Caches VFSGRRClient AFF4 objects keyed by client common name.
    self.client_cache = utils.FastStore(1000)
    self.token = token
    super(ServerCommunicator, self).__init__(
        certificate=certificate, private_key=private_key)
    # Caches client public keys to avoid re-reading certs from the datastore.
    self.pub_key_cache = utils.FastStore(max_size=50000)
    # Our common name as an RDFURN.
    self.common_name = rdfvalue.RDFURN(self.certificate.GetCN())
  def _GetRemotePublicKey(self, common_name):
    """Returns the public key of the client identified by common_name.

    Raises:
      communicator.UnknownClientCert: If no cert is stored for the client or
        the stored cert's CN does not match `common_name`.
    """
    try:
      # See if we have this client already cached.
      remote_key = self.pub_key_cache.Get(str(common_name))
      stats.STATS.IncrementCounter("grr_pub_key_cache", fields=["hits"])
      return remote_key
    except KeyError:
      stats.STATS.IncrementCounter("grr_pub_key_cache", fields=["misses"])
    # Fetch the client's cert and extract the key.
    client = aff4.FACTORY.Create(
        common_name,
        aff4.AFF4Object.classes["VFSGRRClient"],
        mode="rw",
        token=self.token)
    cert = client.Get(client.Schema.CERT)
    if not cert:
      # A client object exists but has no certificate yet (not enrolled).
      stats.STATS.IncrementCounter("grr_unique_clients")
      raise communicator.UnknownClientCert("Cert not found")
    if rdfvalue.RDFURN(cert.GetCN()) != rdfvalue.RDFURN(common_name):
      logging.error("Stored cert mismatch for %s", common_name)
      raise communicator.UnknownClientCert("Stored cert mismatch")
    self.client_cache.Put(common_name, client)
    stats.STATS.SetGaugeValue("grr_frontendserver_client_cache_size",
                              len(self.client_cache))
    pub_key = cert.GetPublicKey()
    # NOTE(review): Get above uses str(common_name) while Put uses the raw
    # common_name object — verify FastStore keys these identically.
    self.pub_key_cache.Put(common_name, pub_key)
    return pub_key
  def VerifyMessageSignature(self, response_comms, packed_message_list, cipher,
                             cipher_verified, api_version, remote_public_key):
    """Verifies the message list signature.

    In the server we check that the timestamp is later than the ping timestamp
    stored with the client. This ensures that client responses can not be
    replayed.

    Args:
      response_comms: The raw response_comms rdfvalue.
      packed_message_list: The PackedMessageList rdfvalue from the server.
      cipher: The cipher object that should be used to verify the message.
      cipher_verified: If True, the cipher's signature is not verified again.
      api_version: The api version we should use.
      remote_public_key: The public key of the source.

    Returns:
      An rdf_flows.GrrMessage.AuthorizationState.
    """
    if (not cipher_verified and
        not cipher.VerifyCipherSignature(remote_public_key)):
      stats.STATS.IncrementCounter("grr_unauthenticated_messages")
      return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
    try:
      client_id = cipher.cipher_metadata.source
      try:
        client = self.client_cache.Get(client_id)
      except KeyError:
        client = aff4.FACTORY.Create(
            client_id,
            aff4.AFF4Object.classes["VFSGRRClient"],
            mode="rw",
            token=self.token)
        self.client_cache.Put(client_id, client)
        stats.STATS.SetGaugeValue("grr_frontendserver_client_cache_size",
                                  len(self.client_cache))
      ip = response_comms.orig_request.source_ip
      client.Set(client.Schema.CLIENT_IP(ip))
      # The very first packet we see from the client we do not have its clock
      remote_time = client.Get(client.Schema.CLOCK) or rdfvalue.RDFDatetime(0)
      client_time = packed_message_list.timestamp or rdfvalue.RDFDatetime(0)
      # This used to be a strict check here so absolutely no out of
      # order messages would be accepted ever. Turns out that some
      # proxies can send your request with some delay even if the
      # client has already timed out (and sent another request in
      # the meantime, making the first one out of order). In that
      # case we would just kill the whole flow as a
      # precaution. Given the behavior of those proxies, this seems
      # now excessive and we have changed the replay protection to
      # only trigger on messages that are more than one hour old.
      if client_time < long(remote_time - rdfvalue.Duration("1h")):
        logging.warning("Message desynchronized for %s: %s >= %s", client_id,
                        long(remote_time), int(client_time))
        # This is likely an old message
        return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED
      stats.STATS.IncrementCounter("grr_authenticated_messages")
      # Update the client and server timestamps only if the client
      # time moves forward.
      if client_time > long(remote_time):
        client.Set(client.Schema.CLOCK, rdfvalue.RDFDatetime(client_time))
        client.Set(client.Schema.PING, rdfvalue.RDFDatetime.Now())
        clock = client_time
        ping = rdfvalue.RDFDatetime.Now()
        for label in client.Get(client.Schema.LABELS, []):
          stats.STATS.IncrementCounter(
              "client_pings_by_label", fields=[label.name])
      else:
        # Out of order but within the 1h window: accept, but don't move the
        # stored clock backwards.
        clock = None
        ping = None
        logging.warning("Out of order message for %s: %s >= %s", client_id,
                        long(remote_time), int(client_time))
      client.Flush()
      if data_store.RelationalDBWriteEnabled():
        # Dual-write bookkeeping into the relational db during migration.
        source_ip = response_comms.orig_request.source_ip
        if source_ip:
          last_ip = rdf_client.NetworkAddress(
              human_readable_address=response_comms.orig_request.source_ip)
        else:
          last_ip = None
        if ping or clock or last_ip:
          data_store.REL_DB.WriteClientMetadata(
              client_id.Basename(),
              last_ip=last_ip,
              last_clock=clock,
              last_ping=ping,
              fleetspeak_enabled=False)
    except communicator.UnknownClientCert:
      # Authentication was already established by the cipher signature check
      # above; this exception only aborts the timestamp/IP bookkeeping.
      pass
    return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
class RelationalServerCommunicator(communicator.Communicator):
  """A communicator which stores certificates using the relational db.

  Mirrors ServerCommunicator but reads/writes client metadata through
  data_store.REL_DB instead of AFF4.
  """
  def __init__(self, certificate, private_key):
    super(RelationalServerCommunicator, self).__init__(
        certificate=certificate, private_key=private_key)
    # Caches client public keys, keyed by client id (basename).
    self.pub_key_cache = utils.FastStore(max_size=50000)
    self.common_name = self.certificate.GetCN()
  def _GetRemotePublicKey(self, common_name):
    """Returns the public key of the client identified by common_name.

    Raises:
      communicator.UnknownClientCert: If no metadata/cert is stored for the
        client or the stored cert's CN does not match `common_name`.
    """
    remote_client_id = common_name.Basename()
    try:
      # See if we have this client already cached.
      remote_key = self.pub_key_cache.Get(remote_client_id)
      stats.STATS.IncrementCounter("grr_pub_key_cache", fields=["hits"])
      return remote_key
    except KeyError:
      stats.STATS.IncrementCounter("grr_pub_key_cache", fields=["misses"])
    md = data_store.REL_DB.ReadClientMetadata(remote_client_id)
    if not md:
      stats.STATS.IncrementCounter("grr_unique_clients")
      raise communicator.UnknownClientCert("Cert not found")
    cert = md.certificate
    if rdfvalue.RDFURN(cert.GetCN()) != rdfvalue.RDFURN(common_name):
      logging.error("Stored cert mismatch for %s", common_name)
      raise communicator.UnknownClientCert("Stored cert mismatch")
    pub_key = cert.GetPublicKey()
    # BUGFIX: cache under the same key used for lookup (remote_client_id);
    # previously the key was stored under `common_name` (an RDFURN), so the
    # Get above could never hit and every message re-read the cert from the db.
    self.pub_key_cache.Put(remote_client_id, pub_key)
    return pub_key
  def VerifyMessageSignature(self, response_comms, packed_message_list, cipher,
                             cipher_verified, api_version, remote_public_key):
    """Verifies the message list signature.

    In the server we check that the timestamp is later than the ping timestamp
    stored with the client. This ensures that client responses can not be
    replayed.

    Args:
      response_comms: The raw response_comms rdfvalue.
      packed_message_list: The PackedMessageList rdfvalue from the server.
      cipher: The cipher object that should be used to verify the message.
      cipher_verified: If True, the cipher's signature is not verified again.
      api_version: The api version we should use.
      remote_public_key: The public key of the source.

    Returns:
      An rdf_flows.GrrMessage.AuthorizationState.
    """
    if (not cipher_verified and
        not cipher.VerifyCipherSignature(remote_public_key)):
      stats.STATS.IncrementCounter("grr_unauthenticated_messages")
      return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
    try:
      client_id = cipher.cipher_metadata.source.Basename()
      metadata = data_store.REL_DB.ReadClientMetadata(client_id)
      client_time = packed_message_list.timestamp or rdfvalue.RDFDatetime(0)
      # Replay protection only triggers on messages that are more than one
      # hour older than the latest client clock we have on record; strict
      # ordering proved too aggressive with delaying proxies (see the
      # rationale comment in ServerCommunicator.VerifyMessageSignature).
      # The stored clock is only consulted when it exists; previously this
      # logic could reference an unbound `stored_client_time` and counted
      # "grr_authenticated_messages" twice on the normal success path.
      if metadata and metadata.clock:
        stored_client_time = metadata.clock
        if client_time < stored_client_time - rdfvalue.Duration("1h"):
          logging.warning("Message desynchronized for %s: %s >= %s", client_id,
                          long(stored_client_time), long(client_time))
          # This is likely an old message
          return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED
        # Update the client and server timestamps only if the client
        # time moves forward.
        if client_time <= stored_client_time:
          logging.warning("Out of order message for %s: %s >= %s", client_id,
                          long(stored_client_time), long(client_time))
          stats.STATS.IncrementCounter("grr_authenticated_messages")
          return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
      # Exactly one increment per authenticated message.
      stats.STATS.IncrementCounter("grr_authenticated_messages")
      for label in data_store.REL_DB.ReadClientLabels(client_id):
        stats.STATS.IncrementCounter(
            "client_pings_by_label", fields=[label.name])
      source_ip = response_comms.orig_request.source_ip
      if source_ip:
        last_ip = rdf_client.NetworkAddress(
            human_readable_address=response_comms.orig_request.source_ip)
      else:
        last_ip = None
      data_store.REL_DB.WriteClientMetadata(
          client_id,
          last_ip=last_ip,
          last_clock=client_time,
          last_ping=rdfvalue.RDFDatetime.Now(),
          fleetspeak_enabled=False)
    except communicator.UnknownClientCert:
      # Authentication was already established by the cipher signature check
      # above; this exception only aborts the metadata bookkeeping.
      pass
    return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
class FrontEndServer(object):
  """This is the front end server.

  This class interfaces clients into the GRR backend system. We process message
  bundles to and from the client, without caring how message bundles are
  transmitted to the client.

  - receives an encrypted message parcel from clients.
  - Decrypts messages from this.
  - schedules the messages to their relevant queues.
  - Collects the messages from the client queue
  - Bundles and encrypts the messages for the client.
  """
  def __init__(self,
               certificate,
               private_key,
               max_queue_size=50,
               message_expiry_time=120,
               max_retransmission_time=10,
               threadpool_prefix="grr_threadpool"):
    """Constructor.

    Args:
      certificate: The server certificate.
      private_key: The server private key matching `certificate`.
      max_queue_size: Max number of messages sent to a client per poll.
      message_expiry_time: Lease time (seconds) for messages handed out.
      max_retransmission_time: Stored but not read in this class — presumably
        consumed elsewhere; verify before removing.
      threadpool_prefix: Name prefix for the worker thread pool.
    """
    # Identify ourselves as the server.
    self.token = access_control.ACLToken(
        username="GRRFrontEnd", reason="Implied.")
    self.token.supervisor = True
    # Pick the communicator implementation matching the configured datastore.
    if data_store.RelationalDBReadEnabled():
      self._communicator = RelationalServerCommunicator(
          certificate=certificate, private_key=private_key)
    else:
      self._communicator = ServerCommunicator(
          certificate=certificate, private_key=private_key, token=self.token)
    self.receive_thread_pool = {}
    self.message_expiry_time = message_expiry_time
    self.max_retransmission_time = max_retransmission_time
    self.max_queue_size = max_queue_size
    self.thread_pool = threadpool.ThreadPool.Factory(
        threadpool_prefix,
        min_threads=2,
        max_threads=config.CONFIG["Threadpool.size"])
    self.thread_pool.Start()
    # Well known flows are run on the front end.
    self.well_known_flows = (
        flow.WellKnownFlow.GetAllWellKnownFlows(token=self.token))
    # Python 2: .keys() returns a list, so deleting while iterating is safe.
    well_known_flow_names = self.well_known_flows.keys()
    for well_known_flow in well_known_flow_names:
      if well_known_flow not in config.CONFIG["Frontend.well_known_flows"]:
        del self.well_known_flows[well_known_flow]
    self.well_known_flows_blacklist = set(
        config.CONFIG["Frontend.DEBUG_well_known_flows_blacklist"])
  @stats.Counted("grr_frontendserver_handle_num")
  @stats.Timed("grr_frontendserver_handle_time")
  def HandleMessageBundles(self, request_comms, response_comms):
    """Processes a queue of messages as passed from the client.

    We basically dispatch all the GrrMessages in the queue to the task scheduler
    for backend processing. We then retrieve from the TS the messages destined
    for this client.

    Args:
      request_comms: A ClientCommunication rdfvalue with messages sent by the
        client. source should be set to the client CN.
      response_comms: A ClientCommunication rdfvalue of jobs destined to this
        client.

    Returns:
      tuple of (source, message_count) where message_count is the number of
      messages received from the client with common name source.
    """
    messages, source, timestamp = self._communicator.DecodeMessages(
        request_comms)
    now = time.time()
    if messages:
      # Receive messages in line.
      self.ReceiveMessages(source, messages)
    # We send the client a maximum of self.max_queue_size messages
    required_count = max(0, self.max_queue_size - request_comms.queue_size)
    tasks = []
    message_list = rdf_flows.MessageList()
    # Only give the client messages if we are able to receive them in a
    # reasonable time.
    if time.time() - now < 10:
      tasks = self.DrainTaskSchedulerQueueForClient(source, required_count)
      message_list.job = tasks
    # Encode the message_list in the response_comms using the same API version
    # the client used.
    try:
      self._communicator.EncodeMessages(
          message_list,
          response_comms,
          destination=str(source),
          timestamp=timestamp,
          api_version=request_comms.api_version)
    except communicator.UnknownClientCert:
      # We can not encode messages to the client yet because we do not have the
      # client certificate - return them to the queue so we can try again later.
      with data_store.DB.GetMutationPool() as pool:
        queue_manager.QueueManager(token=self.token).Schedule(tasks, pool)
      raise
    return source, len(messages)
  def DrainTaskSchedulerQueueForClient(self, client, max_count=None):
    """Drains the client's Task Scheduler queue.

    1) Get all messages in the client queue.
    2) Sort these into a set of session_ids.
    3) Use data_store.DB.ResolvePrefix() to query all requests.
    4) Delete all responses for retransmitted messages (if needed).

    Args:
      client: The ClientURN object specifying this client.
      max_count: The maximum number of messages we will issue for the
        client.
        If not given, uses self.max_queue_size .

    Returns:
      The tasks respresenting the messages returned. If we can not send them,
      we can reschedule them for later.
    """
    if max_count is None:
      max_count = self.max_queue_size
    if max_count <= 0:
      return []
    client = rdf_client.ClientURN(client)
    start_time = time.time()
    # Drain the queue for this client
    new_tasks = queue_manager.QueueManager(token=self.token).QueryAndOwn(
        queue=client.Queue(),
        limit=max_count,
        lease_seconds=self.message_expiry_time)
    initial_ttl = rdf_flows.GrrMessage().task_ttl
    check_before_sending = []
    result = []
    for task in new_tasks:
      # A decremented TTL means the message was leased (sent) before.
      if task.task_ttl < initial_ttl - 1:
        # This message has been leased before.
        check_before_sending.append(task)
      else:
        result.append(task)
    if check_before_sending:
      with queue_manager.QueueManager(token=self.token) as manager:
        status_found = manager.MultiCheckStatus(check_before_sending)
        # All messages that don't have a status yet should be sent again.
        for task in check_before_sending:
          if task not in status_found:
            result.append(task)
          else:
            # Already processed by the client; drop it from the queue.
            manager.DeQueueClientRequest(client, task.task_id)
    stats.STATS.IncrementCounter("grr_messages_sent", len(result))
    if result:
      logging.debug("Drained %d messages for %s in %s seconds.", len(result),
                    client,
                    time.time() - start_time)
    return result
  def EnrolFleetspeakClient(self, client_id):
    """Enrols a Fleetspeak-enabled client for use with GRR."""
    client_urn = rdf_client.ClientURN(client_id)
    # If already enrolled, return.
    if aff4.FACTORY.ExistsWithType(
        client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
      return
    logging.info("Enrolling a new Fleetspeak client: %r", client_id)
    if data_store.RelationalDBWriteEnabled():
      # NOTE(review): calls client_id.Basename() — assumes client_id is a URN,
      # not a plain string; confirm against callers.
      data_store.REL_DB.WriteClientMetadata(
          client_id.Basename(), fleetspeak_enabled=True)
    # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
    # catch exceptions from it and forward them to Fleetspeak by failing its
    # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
    # instance of the GRR frontend.
    with aff4.FACTORY.Create(
        client_urn,
        aff4_type=aff4_grr.VFSGRRClient,
        mode="rw",
        token=self.token) as client:
      client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))
      index = client_index.CreateClientIndex(token=self.token)
      index.AddClient(client)
      if data_store.RelationalDBWriteEnabled():
        index = client_index.ClientIndex()
        index.AddClient(client_urn.Basename(),
                        data_migration.ConvertVFSGRRClient(client))
    enrollment_session_id = rdfvalue.SessionID(
        queue=queues.ENROLLMENT, flow_name="Enrol")
    publish_msg = rdf_flows.GrrMessage(
        payload=client_urn,
        session_id=enrollment_session_id,
        # Fleetspeak ensures authentication.
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
        source=enrollment_session_id,
        priority=rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY)
    # Publish the client enrollment message.
    events.Events.PublishEvent(
        "ClientEnrollment", publish_msg, token=self.token)
  def RecordFleetspeakClientPing(self, client_id):
    """Records the last client contact in the datastore."""
    with aff4.FACTORY.Create(
        client_id,
        aff4_type=aff4_grr.VFSGRRClient,
        mode="w",
        token=self.token,
        force_new_version=False) as client:
      client.Set(client.Schema.PING, rdfvalue.RDFDatetime.Now())
  def ReceiveMessages(self, client_id, messages):
    """Receives and processes the messages from the source.

    For each message we update the request object, and place the
    response in that request's queue. If the request is complete, we
    send a message to the worker.

    Args:
      client_id: The client which sent the messages.
      messages: A list of GrrMessage RDFValues.
    """
    now = time.time()
    with queue_manager.QueueManager(token=self.token) as manager:
      for session_id, msgs in utils.GroupBy(
          messages, operator.attrgetter("session_id")).iteritems():
        # Remove and handle messages to WellKnownFlows
        unprocessed_msgs = self.HandleWellKnownFlows(msgs)
        if not unprocessed_msgs:
          continue
        for msg in unprocessed_msgs:
          manager.QueueResponse(msg)
        for msg in unprocessed_msgs:
          # Messages for well known flows should notify even though they don't
          # have a status.
          if msg.request_id == 0:
            manager.QueueNotification(
                session_id=msg.session_id, priority=msg.priority)
            # Those messages are all the same, one notification is enough.
            break
          elif msg.type == rdf_flows.GrrMessage.Type.STATUS:
            # If we receive a status message from the client it means the client
            # has finished processing this request. We therefore can de-queue it
            # from the client queue. msg.task_id will raise if the task id is
            # not set (message originated at the client, there was no request on
            # the server), so we have to check .HasTaskID() first.
            if msg.HasTaskID():
              manager.DeQueueClientRequest(client_id, msg.task_id)
            manager.QueueNotification(
                session_id=msg.session_id,
                priority=msg.priority,
                last_status=msg.request_id)
            stat = rdf_flows.GrrStatus(msg.payload)
            if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
              # A client crashed while performing an action, fire an event.
              crash_details = rdf_client.ClientCrash(
                  client_id=client_id,
                  session_id=session_id,
                  backtrace=stat.backtrace,
                  crash_message=stat.error_message,
                  nanny_status=stat.nanny_status,
                  timestamp=rdfvalue.RDFDatetime.Now())
              # Reuses the `msg` name for the outgoing event message; the
              # loop variable is not read again after this point.
              msg = rdf_flows.GrrMessage(
                  source=client_id,
                  payload=crash_details,
                  auth_state=(
                      rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED))
              events.Events.PublishEvent("ClientCrash", msg, token=self.token)
    logging.debug("Received %s messages from %s in %s sec", len(messages),
                  client_id,
                  time.time() - now)
  def HandleWellKnownFlows(self, messages):
    """Hands off messages to well known flows.

    Messages addressed to well-known flows handled on this frontend are
    processed in-process; all other messages are returned for queueing.

    Args:
      messages: A list of GrrMessage RDFValues.

    Returns:
      The subset of `messages` that still needs backend processing.
    """
    msgs_by_wkf = {}
    result = []
    for msg in messages:
      # Regular message - queue it.
      if msg.response_id != 0:
        result.append(msg)
        continue
      # Well known flows:
      flow_name = msg.session_id.FlowName()
      if flow_name in self.well_known_flows_blacklist:
        # Blacklisted flows are dropped entirely (debug feature).
        continue
      if flow_name in self.well_known_flows:
        # This message should be processed directly on the front end.
        msgs_by_wkf.setdefault(flow_name, []).append(msg)
        # TODO(user): Deprecate in favor of 'well_known_flow_requests'
        # metric.
        stats.STATS.IncrementCounter("grr_well_known_flow_requests")
        stats.STATS.IncrementCounter(
            "well_known_flow_requests", fields=[str(msg.session_id)])
      else:
        # Message should be queued to be processed in the backend.
        # Well known flows have a response_id==0, but if we queue up the state
        # as that it will overwrite some other message that is queued. So we
        # change it to a random number here.
        msg.response_id = utils.PRNG.GetULong()
        # Queue the message in the data store.
        result.append(msg)
    for flow_name, msg_list in msgs_by_wkf.iteritems():
      wkf = self.well_known_flows[flow_name]
      wkf.ProcessMessages(msg_list)
    return result
  def _GetClientPublicKey(self, client_id):
    """Reads the client's certificate from AFF4 and returns its public key."""
    client_obj = aff4.FACTORY.Open(client_id, token=aff4.FACTORY.root_token)
    return client_obj.Get(client_obj.Schema.CERT).GetPublicKey()
  def HandleUpload(self, encoding_header, encoded_upload_token, data_generator):
    """Handles the upload of a file.

    Validates the upload token (HMAC + policy expiry), then decrypts and
    gunzips the chunked payload into the configured upload file store.

    Args:
      encoding_header: Transfer-Encoding header value; must be "chunked".
      encoded_upload_token: Base64-serialized UploadToken.
      data_generator: Iterable yielding raw uploaded chunks.

    Returns:
      The finalized file store file (result of Finalize()).

    Raises:
      IOError: On any validation failure.
    """
    if encoding_header != "chunked":
      raise IOError("Only chunked uploads are allowed.")
    # Extract request parameters.
    if not encoded_upload_token:
      raise IOError("Upload token not provided")
    # Python 2 only: str.decode("base64").
    upload_token = rdf_client.UploadToken.FromSerializedString(
        encoded_upload_token.decode("base64"))
    if not upload_token.hmac:
      raise IOError("HMAC not provided")
    if not upload_token.encrypted_policy:
      raise IOError("Policy not provided")
    if not upload_token.iv:
      raise IOError("IV not provided")
    upload_token.VerifyHMAC()
    policy = rdf_client.UploadPolicy.FromEncryptedPolicy(
        upload_token.encrypted_policy, upload_token.iv)
    if rdfvalue.RDFDatetime.Now() > policy.expires:
      raise IOError("Client upload policy is too old.")
    upload_store = file_store.UploadFileStore.GetPlugin(
        config.CONFIG["Frontend.upload_store"])()
    filestore_fd = upload_store.CreateFileStoreFile()
    out_fd = uploads.GunzipWrapper(filestore_fd)
    with uploads.DecryptStream(config.CONFIG["PrivateKeys.server_key"],
                               self._GetClientPublicKey(policy.client_id),
                               out_fd) as decrypt_fd:
      for data in data_generator:
        decrypt_fd.write(data)
    return filestore_fd.Finalize()
  def _GetRekallProfileServer(self):
    """Lazily instantiates and caches the configured Rekall profile server."""
    try:
      return self._rekall_profile_server
    except AttributeError:
      server_type = config.CONFIG["Rekall.profile_server"]
      self._rekall_profile_server = rekall_profile_server.ProfileServer.classes[
          server_type]()
      return self._rekall_profile_server
  def GetRekallProfile(self, name, version="v1.0"):
    """Returns the requested Rekall profile, or None if unavailable."""
    server = self._GetRekallProfileServer()
    logging.debug("Serving Rekall profile %s/%s", version, name)
    try:
      return server.GetProfileByName(name, version)
    # TODO(amoser): We raise too many different exceptions in profile server.
    except Exception as e:  # pylint: disable=broad-except
      logging.debug("Unable to serve profile %s/%s: %s", version, name, e)
      return None
class FrontendInit(registry.InitHook):
  """Registers all frontend-related metrics at startup."""
  def RunOnce(self):
    """Declares counters/gauges/events used by the frontend code above."""
    # Frontend metrics. These metrics should be used by the code that
    # feeds requests into the frontend.
    stats.STATS.RegisterCounterMetric(
        "client_pings_by_label", fields=[("label", str)])
    stats.STATS.RegisterGaugeMetric(
        "frontend_active_count", int, fields=[("source", str)])
    stats.STATS.RegisterGaugeMetric("frontend_max_active_count", int)
    stats.STATS.RegisterCounterMetric(
        "frontend_http_requests", fields=[("action", str), ("protocol", str)])
    stats.STATS.RegisterCounterMetric(
        "frontend_in_bytes", fields=[("source", str)])
    stats.STATS.RegisterCounterMetric(
        "frontend_out_bytes", fields=[("source", str)])
    stats.STATS.RegisterCounterMetric(
        "frontend_request_count", fields=[("source", str)])
    # Client requests sent to an inactive datacenter. This indicates a
    # misconfiguration.
    stats.STATS.RegisterCounterMetric(
        "frontend_inactive_request_count", fields=[("source", str)])
    stats.STATS.RegisterEventMetric(
        "frontend_request_latency", fields=[("source", str)])
    stats.STATS.RegisterEventMetric("grr_frontendserver_handle_time")
    stats.STATS.RegisterCounterMetric("grr_frontendserver_handle_num")
    stats.STATS.RegisterGaugeMetric("grr_frontendserver_client_cache_size", int)
    stats.STATS.RegisterCounterMetric("grr_messages_sent")
    stats.STATS.RegisterCounterMetric(
        "grr_pub_key_cache", fields=[("type", str)])
| 38.858311 | 80 | 0.6935 |
acf83e77b34623b45e7d7e43c9df6056f84389d6 | 999 | py | Python | main/migrations/0003_auto_20210330_0903.py | opustm/backend | 5bb33027137ccedfaf3aaef9735bc416a489584d | [
"PostgreSQL"
] | null | null | null | main/migrations/0003_auto_20210330_0903.py | opustm/backend | 5bb33027137ccedfaf3aaef9735bc416a489584d | [
"PostgreSQL"
] | 4 | 2021-04-20T03:32:02.000Z | 2021-12-01T18:36:11.000Z | main/migrations/0003_auto_20210330_0903.py | opustm/backend | 5bb33027137ccedfaf3aaef9735bc416a489584d | [
"PostgreSQL"
] | null | null | null | # Generated by Django 3.1.2 on 2021-03-30 14:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: makes Request.team and Request.user nullable FKs."""
    dependencies = [
        ("main", "0002_auto_20210330_0855"),
    ]
    operations = [
        migrations.AlterField(
            model_name="request",
            name="team",
            # blank/null allow a Request without an associated team.
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="teamRequest",
                to="main.team",
            ),
        ),
        migrations.AlterField(
            model_name="request",
            name="user",
            # blank/null allow a Request without an associated user.
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="userRequest",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
acf83e934ef42dfa3eafb020232ccb85429693e7 | 4,086 | py | Python | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_frc_business_card_from_url_async.py | semick-dev/azure-sdk-for-python | dbad6c19b2812e5490137aac886edd8a80e407f7 | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_frc_business_card_from_url_async.py | acrofrank/azure-sdk-for-python | 699acfe143cc0ca570de2d040c8ffcf7cb2a3c55 | [
"MIT"
] | null | null | null | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_frc_business_card_from_url_async.py | acrofrank/azure-sdk-for-python | 699acfe143cc0ca570de2d040c8ffcf7cb2a3c55 | [
"MIT"
] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from devtools_testutils.aio import recorded_by_proxy_async
from devtools_testutils import set_bodiless_matcher
from azure.ai.formrecognizer import FormRecognizerApiVersion
from azure.ai.formrecognizer.aio import FormRecognizerClient
from preparers import FormRecognizerPreparer
from asynctestcase import AsyncFormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestBusinessCardFromUrlAsync(AsyncFormRecognizerTest):
    """Recorded async tests for recognizing business cards from a URL."""

    def teardown(self):
        # Pause between tests to avoid service throttling during live runs.
        self.sleep(4)

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer()
    @recorded_by_proxy_async
    async def test_business_card_jpg_include_field_elements(self, client):
        # Verify every expected business-card field (and its field elements)
        # is extracted from the sample JPG.
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg, include_field_elements=True)
            result = await poller.result()
        assert len(result) == 1
        business_card = result[0]

        self.assertFormPagesHasValues(business_card.pages)

        # Every field value should carry populated field elements since
        # include_field_elements=True was requested.
        for name, field in business_card.fields.items():
            for f in field.value:
                self.assertFieldElementsHasValues(f.value_data.field_elements, business_card.page_range.first_page_number)

        # check dict values
        assert len(business_card.fields.get("ContactNames").value) == 1
        assert business_card.fields.get("ContactNames").value[0].value_data.page_number == 1
        assert business_card.fields.get("ContactNames").value[0].value['FirstName'].value == 'Avery'
        assert business_card.fields.get("ContactNames").value[0].value['LastName'].value == 'Smith'

        assert len(business_card.fields.get("JobTitles").value) == 1
        assert business_card.fields.get("JobTitles").value[0].value == "Senior Researcher"

        assert len(business_card.fields.get("Departments").value) == 1
        assert business_card.fields.get("Departments").value[0].value == "Cloud & Al Department"

        assert len(business_card.fields.get("Emails").value) == 1
        assert business_card.fields.get("Emails").value[0].value == "avery.smith@contoso.com"

        assert len(business_card.fields.get("Websites").value) == 1
        assert business_card.fields.get("Websites").value[0].value == "https://www.contoso.com/"

        # The phone number values are not getting normalized to a phone number type. Just assert on text.
        assert len(business_card.fields.get("MobilePhones").value) == 1
        assert business_card.fields.get("MobilePhones").value[0].value_data.text == "+44 (0) 7911 123456"

        assert len(business_card.fields.get("WorkPhones").value) == 1
        assert business_card.fields.get("WorkPhones").value[0].value_data.text == "+44 (0) 20 9876 5432"

        assert len(business_card.fields.get("Faxes").value) == 1
        assert business_card.fields.get("Faxes").value[0].value_data.text == "+44 (0) 20 6789 2345"

        assert len(business_card.fields.get("Addresses").value) == 1
        assert business_card.fields.get("Addresses").value[0].value == "2 Kingdom Street Paddington, London, W2 6BD"

        assert len(business_card.fields.get("CompanyNames").value) == 1
        assert business_card.fields.get("CompanyNames").value[0].value == "Contoso"

    @FormRecognizerPreparer()
    @FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
    async def test_business_card_v2(self, **kwargs):
        # Business-card recognition is not available in API version 2.0;
        # the client should reject the call before hitting the service.
        client = kwargs.pop("client")
        with pytest.raises(ValueError) as e:
            async with client:
                await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
        assert "Method 'begin_recognize_business_cards_from_url' is only available for API version V2_1 and up" in str(e.value)
| 48.070588 | 130 | 0.711698 |
acf83eca7f228bbef8d343c5709a1e5952ef1429 | 13,855 | py | Python | luigi_classes/transfer_soakdb.py | xchem/pipeline | 220ebc3b6526c65b41a56b5026c7803d96488cd7 | [
"Apache-2.0"
] | null | null | null | luigi_classes/transfer_soakdb.py | xchem/pipeline | 220ebc3b6526c65b41a56b5026c7803d96488cd7 | [
"Apache-2.0"
] | 139 | 2018-01-29T11:29:18.000Z | 2022-01-11T12:04:32.000Z | luigi_classes/transfer_soakdb.py | xchem/pipeline | 220ebc3b6526c65b41a56b5026c7803d96488cd7 | [
"Apache-2.0"
] | 6 | 2018-08-21T17:09:19.000Z | 2020-04-14T13:49:32.000Z | from sqlite3 import OperationalError
from setup_django import setup_django
setup_django()
import datetime
import luigi
from functions.luigi_transfer_soakdb_functions import *
from xchem_db.models import *
from .config_classes import SoakDBConfig, DirectoriesConfig
class FindSoakDBFiles(luigi.Task):
    """Locate all soakdb files beneath a directory tree.

    This task has no upstream requirements.  The list of discovered
    paths is written to a dated text file in the configured log
    directory.

    Args:
        date: date stamp used to name the output log file.
        filepath: root directory to search for soakdb files.
    """
    # date parameter - needs to be changed
    date = luigi.DateParameter(default=datetime.datetime.now())
    # filepath parameter can be changed elsewhere
    filepath = luigi.Parameter(default=SoakDBConfig().default_path)

    def output(self):
        """Target: ``<log_dir>/soakDBfiles/soakDB_<YYYYMMDD>.txt``.

        Returns:
            luigi.LocalTarget
        """
        log_name = self.date.strftime('soakDBfiles/soakDB_%Y%m%d.txt')
        return luigi.LocalTarget(os.path.join(DirectoriesConfig().log_directory, log_name))

    def run(self):
        """Search ``filepath`` and record the results in the output target."""
        found = find_soak_db_files(filepath=self.filepath)
        with self.output().open('w') as handle:
            handle.write(str(found))
class CheckFiles(luigi.Task):
    """ Check if a given soakdb file has been uploaded to XCDB.

    If the file is in XCDB this task requires :class:`FindSoakDBFiles` to be completed
    If the file is NOT in XCDB then :class:`FindSoakDBFiles` and :class:`TransferAllFedIDsAndDatafiles` are required.

    Args:
        date: A date that will be used to create the output file
        soak_db_filepath: The filepath pointing to a given soakdb file.
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    date = luigi.Parameter(default=datetime.datetime.now())
    soak_db_filepath = luigi.Parameter(default=SoakDBConfig().default_path)

    def requires(self):
        """ CheckFiles requires :class:`FindSoakDBFiles` and :class:`TransferAllFedIDsAndDatafiles`

        CheckFiles expects a soak_db_filepath as a parameter given under default_path

        Returns:
            [(output of TransferAllFedIDs or FindSoakDBFiles), output of FindSoakDBFiles]
        """
        print('Finding soakdb files via CheckFiles')
        soakdb = list(SoakdbFiles.objects.all())

        if not soakdb:
            return [TransferAllFedIDsAndDatafiles(soak_db_filepath=self.soak_db_filepath),
                    FindSoakDBFiles(filepath=self.soak_db_filepath)]
        else:
            # NOTE: FindSoakDBFiles is deliberately listed twice so that
            # self.input() is a two-element list in both branches; run()
            # always reads self.input()[1].path.
            return [FindSoakDBFiles(filepath=self.soak_db_filepath), FindSoakDBFiles(filepath=self.soak_db_filepath)]

    def output(self):
        """ Returns the target output for :class:`CheckFiles` task.

        Naming convention for the output file is 'checked_files/files_%Y%m%d%H.checked'

        Returns:
            luigi.localTarget
        """
        return luigi.LocalTarget(os.path.join(
            DirectoriesConfig().log_directory,
            self.date.strftime('checked_files/files_%Y%m%d%H.checked')))

    def run(self):
        """ Performs `check_files` function and writes '' to the expected log file"""
        check_files(soak_db_filepath=self.input()[1].path)

        # write output to signify job done
        with self.output().open('w') as f:
            f.write('')
class TransferAllFedIDsAndDatafiles(luigi.Task):
    """Push all FedIDs and datafile records from soakdb files into XCDB.

    Requires :class:`FindSoakDBFiles` to have produced the day's list of
    soakdb files.

    Args:
        date: date stamp used to name the transfer log.
        soak_db_filepath: path passed through to :class:`FindSoakDBFiles`.
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    # date parameter for daily run - needs to be changed
    date = luigi.Parameter(default=datetime.datetime.now())
    soak_db_filepath = luigi.Parameter(default=SoakDBConfig().default_path)

    def requires(self):
        """Needs the day's soakdb file list from :class:`FindSoakDBFiles`."""
        return FindSoakDBFiles(filepath=self.soak_db_filepath)

    def output(self):
        """Target: ``<log_dir>/transfer_logs/fedids_<YYYYMMDDHH>.txt``."""
        log_name = self.date.strftime('transfer_logs/fedids_%Y%m%d%H.txt')
        return luigi.LocalTarget(os.path.join(DirectoriesConfig().log_directory, log_name))

    def run(self):
        """Transfer the data to the central postgres db, then mark done."""
        transfer_all_fed_ids_and_datafiles(soak_db_filelist=self.input().path)
        with self.output().open('w') as handle:
            handle.write('TransferFeDIDs DONE')
class TransferChangedDataFile(luigi.Task):
    """Re-transfer a soakdb file that was modified after its last upload.

    Requires :class:`CheckFiles` to have classified the file first.

    Args:
        data_file: path of the modified soakdb file.
        hit_directory: directory holding the hit data.
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    data_file = luigi.Parameter()
    soak_db_filepath = luigi.Parameter(default=SoakDBConfig().default_path)
    hit_directory = luigi.Parameter(default=DirectoriesConfig().hit_directory)

    def requires(self):
        """Needs :class:`CheckFiles` run against ``data_file``."""
        return CheckFiles(soak_db_filepath=self.data_file)

    def output(self):
        """Target named ``<data_file>_<mod_date>.transferred``."""
        mod_stamp = misc_functions.get_mod_date(self.data_file)
        return luigi.LocalTarget('{}_{}.transferred'.format(self.data_file, mod_stamp))

    def run(self):
        """Transfer the changed file, then mark done."""
        transfer_changed_datafile(data_file=self.data_file, hit_directory=self.hit_directory)
        with self.output().open('w') as handle:
            handle.write('')
class TransferNewDataFile(luigi.Task):
    """Transfer a newly discovered soakdb file into XCDB.

    Requires :class:`CheckFiles` to have classified the file first.

    Args:
        data_file: path of the new soakdb file.
        soak_db_filepath: path passed through to :class:`CheckFiles`.
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    data_file = luigi.Parameter()
    soak_db_filepath = luigi.Parameter(default=SoakDBConfig().default_path)

    def requires(self):
        """Needs :class:`CheckFiles` for the configured soakdb path."""
        return CheckFiles(soak_db_filepath=self.soak_db_filepath)

    def output(self):
        """Target named ``<data_file>_<mod_date>.transferred``."""
        mod_stamp = misc_functions.get_mod_date(self.data_file)
        return luigi.LocalTarget('{}_{}.transferred'.format(self.data_file, mod_stamp))

    def run(self):
        """Transfer the file, then mark done."""
        transfer_file(self.data_file)
        with self.output().open('w') as handle:
            handle.write('')
class StartTransfers(luigi.Task):
    """Initiate the transfer sequence of files into XCDB

    Requires :class:`CheckFiles` or both :class:`TransferNewDataFile` and :class:`TransferChangedDataFile` to be completed

    Args:
        date:
        soak_db_filepath:
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    date = luigi.Parameter(default=datetime.datetime.now().strftime("%Y%m%d%H"))
    soak_db_filepath = luigi.Parameter(default=SoakDBConfig().default_path)

    def get_file_list(self, status_code):
        """Get a list of files to attempt to transfer.

        Args:
            status_code: upload status stored on :class:`SoakdbFiles`;
                as used by :meth:`requires`, 0 selects new files and
                1 selects changed files.

        Returns:
            list of filenames that are to be transferred to XCDB
        """
        status_query = SoakdbFiles.objects.filter(status=status_code)
        datafiles = [o.filename for o in status_query]

        return datafiles

    def requires(self):
        """Requirements for :class:`StartTransfers` to be run

        Requires :class:`CheckFiles` to be completed or both :class:`TransferNewDataFile` and :class:`TransferChangedDataFile`

        Returns:
            Output of CheckFiles
            or
            Output of one of TransferNewDataFile or TransferChangedDataFile depending on state of soakdb file.
        """
        # If CheckFiles has not produced its marker yet, run it first;
        # otherwise fan out one transfer task per new/changed file.
        if not os.path.isfile(CheckFiles(soak_db_filepath=self.soak_db_filepath).output().path):
            return CheckFiles(soak_db_filepath=self.soak_db_filepath)
        else:
            new_list = self.get_file_list(0)
            changed_list = self.get_file_list(1)
            return [TransferNewDataFile(data_file=datafile, soak_db_filepath=self.soak_db_filepath)
                    for datafile in new_list], \
                   [TransferChangedDataFile(data_file=datafile, soak_db_filepath=self.soak_db_filepath)
                    for datafile in changed_list]

    def output(self):
        """Returns the target output for :class:`StartTransfers`

        Creates a log file: 'transfer_logs/transfers_[date].done' in Luigi config dir.

        Returns:
            luigi.localTarget
        """
        return luigi.LocalTarget(os.path.join(DirectoriesConfig().log_directory,
                                              str('transfer_logs/transfers_' + str(self.date) + '.done')))

    def run(self):
        """Write to the output file, otherwise schedules required tasks."""
        with self.output().open('w') as f:
            f.write('')
class CheckFileUpload(luigi.Task):
    """Verify that one soakdb file uploaded correctly for one model.

    Has no upstream requirements.

    Args:
        filename: soakdb file to verify.
        model: django model class to verify against.
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    filename = luigi.Parameter()
    model = luigi.Parameter()

    def requires(self):
        """No prerequisites."""
        pass

    def output(self):
        """Target named ``<filename>.<mod_date>.checked``."""
        mod_stamp = misc_functions.get_mod_date(self.filename)
        return luigi.LocalTarget('{}.{}.checked'.format(self.filename, mod_stamp))

    def run(self):
        """Run the upload check, then mark done."""
        check_file_upload(filename=self.filename, model=self.model)
        with self.output().open('w') as handle:
            handle.write('')
class CheckUploadedFiles(luigi.Task):
    """Check whether or not all specified soakdb files have uploaded correctly.

    Requires :class:`StartTransfers` (if its marker file is missing) or one
    :class:`CheckFileUpload` per (file, model) pair otherwise.

    Args:
        date: date stamp used to name the output log.
        soak_db_filepath: path passed through to :class:`StartTransfers`.
    """
    # limit concurrent django DB access
    resources = {'django': 1}
    date = luigi.DateParameter(default=datetime.datetime.now())
    soak_db_filepath = luigi.Parameter(default=SoakDBConfig().default_path)

    def requires(self):
        """Requirements for :class:`CheckUploadedFiles` to be run.

        Requires :class:`StartTransfers` to be completed if its marker file is
        missing, otherwise one :class:`CheckFileUpload` per combination of
        uploaded soakdb file and model (Lab, Dimple, DataProcessing, Refinement).

        Returns:
            StartTransfers (if marker missing) or a list of CheckFileUpload tasks.
        """
        if not os.path.isfile(StartTransfers(date=self.date, soak_db_filepath=self.soak_db_filepath).output().path):
            return StartTransfers(date=self.date, soak_db_filepath=self.soak_db_filepath)

        soakdb_files = [obj.filename for obj in SoakdbFiles.objects.all()]
        models = [Lab, Dimple, DataProcessing, Refinement]
        zipped = []
        for filename in soakdb_files:
            # BUGFIX: the sqlite check does not depend on the model, so do it
            # once per file instead of once per (file, model) pair.  The old
            # per-model loop also deleted the SoakdbFiles row on the first
            # OperationalError for a missing file and then crashed with
            # DoesNotExist when the next model iteration tried to fetch the
            # already-deleted row.
            try:
                maint_exists = db_functions.check_table_sqlite(filename, 'mainTable')
            except OperationalError:
                if not os.path.isfile(filename):
                    # The file disappeared from disk; drop its DB record and
                    # skip it entirely.
                    f = SoakdbFiles.objects.get(filename=filename)
                    f.delete()
                    continue
                raise Exception(str(traceback.format_exc() + '; db_file=' + filename))

            if maint_exists == 1:
                zipped.extend((filename, model) for model in models)

        return [CheckFileUpload(filename=filename, model=model) for (filename, model) in zipped]

    def output(self):
        """Returns the target output for :class:`CheckUploadedFiles`.

        Creates a log file: 'soakDBfiles/soakDB_checked_%Y%m%d.txt' in Luigi config dir.

        Returns:
            luigi.localTarget
        """
        return luigi.LocalTarget(os.path.join(DirectoriesConfig().log_directory,
                                              self.date.strftime('soakDBfiles/soakDB_checked_%Y%m%d.txt')))

    def run(self):
        """Write to the output file, otherwise schedules required tasks."""
        with self.output().open('w') as f:
            f.write('')
| 34.987374 | 127 | 0.64742 |
acf83f4d8ba792e4c1d45edd68b5220495b12c63 | 145,523 | py | Python | src/sage/algebras/commutative_dga.py | dimpase/sagetrac-mirror | 473cd41f19ec23df7e207391cfb0cf41c7c4ef46 | [
"BSL-1.0"
] | null | null | null | src/sage/algebras/commutative_dga.py | dimpase/sagetrac-mirror | 473cd41f19ec23df7e207391cfb0cf41c7c4ef46 | [
"BSL-1.0"
] | null | null | null | src/sage/algebras/commutative_dga.py | dimpase/sagetrac-mirror | 473cd41f19ec23df7e207391cfb0cf41c7c4ef46 | [
"BSL-1.0"
] | null | null | null | r"""
Commutative Differential Graded Algebras
An algebra is said to be *graded commutative* if it is endowed with a
grading and its multiplication satisfies the Koszul sign convention:
`yx = (-1)^{ij} xy` if `x` and `y` are homogeneous of degrees `i` and
`j`, respectively. Thus the multiplication is anticommutative for odd
degree elements, commutative otherwise. *Commutative differential
graded algebras* are graded commutative algebras endowed with a graded
differential of degree 1. These algebras can be graded over the
integers or they can be multi-graded (i.e., graded over a finite rank
free abelian group `\ZZ^n`); if multi-graded, the total degree is used
in the Koszul sign convention, and the differential must have total
degree 1.
EXAMPLES:
All of these algebras may be constructed with the function
:func:`GradedCommutativeAlgebra`. For most users, that will be the
main function of interest. See its documentation for many more
examples.
We start by constructing some graded commutative algebras. Generators
have degree 1 by default::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ)
sage: x.degree()
1
sage: x^2
0
sage: y*x
-x*y
sage: B.<a,b> = GradedCommutativeAlgebra(QQ, degrees = (2,3))
sage: a.degree()
2
sage: b.degree()
3
Once we have defined a graded commutative algebra, it is easy to
define a differential on it using the :meth:`GCAlgebra.cdg_algebra` method::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
sage: B = A.cdg_algebra({x: x*y, y: -x*y})
sage: B
Commutative Differential Graded Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) over Rational Field with differential:
x --> x*y
y --> -x*y
z --> 0
sage: B.cohomology(3)
Free module generated by {[x*z + y*z]} over Rational Field
sage: B.cohomology(4)
Free module generated by {[z^2]} over Rational Field
We can also compute algebra generators for the cohomology in a range
of degrees, and in this case we compute up to degree 10::
sage: B.cohomology_generators(10)
{1: [x + y], 2: [z]}
AUTHORS:
- Miguel Marco, John Palmieri (2014-07): initial version
"""
# ****************************************************************************
# Copyright (C) 2014 Miguel Marco <mmarco@unizar.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.structure.unique_representation import UniqueRepresentation, CachedRepresentation
from sage.structure.sage_object import SageObject
from sage.misc.cachefunc import cached_method
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
from sage.misc.functional import is_odd, is_even
from sage.misc.misc_c import prod
from sage.categories.chain_complexes import ChainComplexes
from sage.categories.algebras import Algebras
from sage.categories.morphism import Morphism
from sage.categories.modules import Modules
from sage.categories.homset import Hom
from sage.algebras.free_algebra import FreeAlgebra
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.integer_vector_weighted import WeightedIntegerVectors
from sage.groups.additive_abelian.additive_abelian_group import AdditiveAbelianGroup
from sage.matrix.constructor import matrix
from sage.modules.free_module import VectorSpace
from sage.modules.free_module_element import vector
from sage.rings.all import ZZ
from sage.rings.homset import RingHomset_generic
from sage.rings.morphism import RingHomomorphism_im_gens
from sage.rings.polynomial.term_order import TermOrder
from sage.rings.quotient_ring import QuotientRing_nc
from sage.rings.quotient_ring_element import QuotientRingElement
from sage.misc.cachefunc import cached_function
from sage.misc.superseded import deprecated_function_alias
def sorting_keys(element):
    r"""
    Return a sort key for a basis element of a cohomology group.

    Elements are keyed by their coordinates in the corresponding
    ``cohomology_raw`` quotient vector space, which guarantees that
    elements of a cohomology group are represented in a consistent way.

    INPUT:

    - ``element`` - A CohomologyClass

    OUTPUT:

    Its coordinates in the corresponding cohomology_raw quotient vector space

    EXAMPLES::

        sage: from sage.algebras.commutative_dga import sorting_keys
        sage: A.<e1,e2,e3,e4,e5> = GradedCommutativeAlgebra(QQ)
        sage: B = A.cdg_algebra({e5:e1*e2+e3*e4})
        sage: B.inject_variables()
        Defining e1, e2, e3, e4, e5
        sage: C = B.cohomology(3)
        sage: [sorting_keys(el) for el in C.basis().keys()]
        [[1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0],
         [0, 0, 1, 0, 0],
         [0, 0, 0, 1, 0],
         [0, 0, 0, 0, 1]]
    """
    cocycle = element._x
    parent = cocycle.parent()
    quotient = parent.cohomology_raw(cocycle.degree())
    ambient = quotient.V()
    return list(quotient(ambient(cocycle.basis_coefficients())))
class Differential(UniqueRepresentation, Morphism,
                   metaclass=InheritComparisonClasscallMetaclass):
    r"""
    Differential of a commutative graded algebra.

    INPUT:

    - ``A`` -- algebra where the differential is defined

    - ``im_gens`` -- tuple containing the image of each generator

    EXAMPLES::

        sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2, 3))
        sage: B = A.cdg_algebra({x: x*y, y: -x*y , z: t})
        sage: B
        Commutative Differential Graded Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 1, 2, 3) over Rational Field with differential:
            x --> x*y
            y --> -x*y
            z --> t
            t --> 0
        sage: B.differential()(x)
        x*y
    """
    @staticmethod
    def __classcall__(cls, A, im_gens):
        r"""
        Normalize input to ensure a unique representation.

        TESTS::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2, 3))
            sage: d1 = A.cdg_algebra({x: x*y, y: -x*y, z: t}).differential()
            sage: d2 = A.cdg_algebra({x: x*y, z: t, y: -x*y, t: 0}).differential()
            sage: d1 is d2
            True
        """
        # Accept the images either as a sequence (one per generator) or as a
        # generator-to-image dictionary; normalize to a dictionary first.
        if isinstance(im_gens, (list, tuple)):
            im_gens = {A.gen(i): x for i, x in enumerate(im_gens)}

        R = A.cover_ring()
        I = A.defining_ideal()
        # The "free" case is when the only relations are the squares of the
        # odd-degree generators (no squares in characteristic 2).
        if A.base_ring().characteristic() != 2:
            squares = R.ideal([R.gen(i)**2 for i, d in enumerate(A._degrees)
                               if is_odd(d)], side='twosided')
        else:
            squares = R.ideal(0, side='twosided')

        if I != squares:
            A_free = GCAlgebra(A.base(), names=A._names, degrees=A._degrees)
            free_diff = {A_free(a): A_free(im_gens[a]) for a in im_gens}
            B = A_free.cdg_algebra(free_diff)
            IB = B.ideal([B(g) for g in I.gens()])
            BQ = GCAlgebra.quotient(B, IB)
            # We check that the differential respects the
            # relations in the quotient method, but we also have
            # to check this here, in case a GCAlgebra with
            # relations is defined first, and then a differential
            # imposed on it.
            for g in IB.gens():
                if not BQ(g.differential()).is_zero():
                    raise ValueError("The differential does not preserve the ideal")

        im_gens = {A(a): A(im_gens[a]) for a in im_gens}

        # Every nonzero image must be homogeneous of (total) degree one more
        # than the generator it comes from.
        for i in im_gens:
            x = im_gens[i]
            if (not x.is_zero()
                    and (not x.is_homogeneous()
                         or total_degree(x.degree())
                         != total_degree(i.degree()) + 1)):
                raise ValueError("The given dictionary does not determine a degree 1 map")

        # Canonical form: a tuple of images, one per generator, with zero for
        # generators not mentioned.  This makes UniqueRepresentation work.
        im_gens = tuple(im_gens.get(x, A.zero()) for x in A.gens())
        return super(Differential, cls).__classcall__(cls, A, im_gens)

    def __init__(self, A, im_gens):
        r"""
        Initialize ``self``.

        INPUT:

        - ``A`` -- algebra where the differential is defined

        - ``im_gens`` -- tuple containing the image of each generator

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ)
            sage: B = A.cdg_algebra({x: x*y, y: x*y, z: z*t, t: t*z})
            sage: [B.cohomology(i).dimension() for i in range(6)]
            [1, 2, 1, 0, 0, 0]
            sage: d = B.differential()

        We skip the category test because homsets/morphisms aren't
        proper parents/elements yet::

            sage: TestSuite(d).run(skip="_test_category")

        An error is raised if the differential `d` does not have
        degree 1 or if `d \circ d` is not zero::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3))
            sage: A.cdg_algebra({a:b, b:c})
            Traceback (most recent call last):
            ...
            ValueError: The given dictionary does not determine a valid differential
        """
        self._dic_ = {A.gen(i): x for i, x in enumerate(im_gens)}
        Morphism.__init__(self, Hom(A, A, category=Modules(A.base_ring())))

        # Verify d o d = 0 on generators (enough by the Leibniz rule).
        for i in A.gens():
            if not self(self(i)).is_zero():
                raise ValueError("The given dictionary does not determine a valid differential")

    def _call_(self, x):
        r"""
        Apply the differential to ``x``.

        INPUT:

        - ``x`` -- an element of the domain of this differential

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ)
            sage: B = A.cdg_algebra({x: x*y, y: x*y, z: z*t, t: t*z})
            sage: D = B.differential()
            sage: D(x*t+1/2*t*x*y) # indirect doctest
            -1/2*x*y*z*t + x*y*t + x*z*t

        Test positive characteristic::

            sage: A.<x,y> = GradedCommutativeAlgebra(GF(17), degrees=(2, 3))
            sage: B = A.cdg_algebra(differential={x:y})
            sage: B.differential()(x^17)
            0
        """
        if x.is_zero():
            return self.codomain().zero()
        res = self.codomain().zero()
        dic = x.dict()
        # Apply the graded Leibniz rule monomial by monomial: for each
        # exponent vector, differentiate one generator at a time while
        # accumulating the Koszul sign of the generators already passed.
        for key in dic:
            keyl = list(key)
            coef = dic[key]
            idx = 0
            while keyl:
                exp = keyl.pop(0)
                if exp > 0:
                    # d(g^exp) * (remaining tail of the monomial)
                    v1 = (exp * self._dic_[x.parent().gen(idx)]
                          * x.parent().gen(idx)**(exp - 1))
                    v2 = prod(x.parent().gen(i + idx + 1)**keyl[i] for i in
                              range(len(keyl)))
                    res += coef * v1 * v2
                # Fold g^exp (with its Koszul sign) into the running
                # coefficient before moving to the next generator.
                coef *= ((-1) ** total_degree(x.parent()._degrees[idx])
                         * x.parent().gen(idx)**exp)
                idx += 1
        return res

    def _repr_defn(self):
        r"""
        Return a string showing where ``self`` sends each generator.

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ)
            sage: B = A.cdg_algebra({x: x*y, y: x*y, z: z*t, t: t*z})
            sage: D = B.differential()
            sage: print(D._repr_defn())
            x --> x*y
            y --> x*y
            z --> z*t
            t --> -z*t
        """
        return '\n'.join("{} --> {}".format(i, self(i))
                         for i in self.domain().gens())

    def _repr_(self):
        r"""
        Return a string representation of ``self``.

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ)
            sage: D = A.differential({x: x*y, y: x*y, z: z*t, t: t*z})
            sage: D
            Differential of Graded Commutative Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 1, 1, 1) over Rational Field
              Defn: x --> x*y
                    y --> x*y
                    z --> z*t
                    t --> -z*t
        """
        if self.domain() is None:
            return "Defunct morphism"
        s = "Differential of {}".format(self.domain()._base_repr())
        s += "\n  Defn: " + '\n        '.join(self._repr_defn().split('\n'))
        return s

    @cached_method
    def differential_matrix(self, n):
        r"""
        The matrix that gives the differential in degree ``n``.

        INPUT:

        - ``n`` -- degree

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(GF(5), degrees=(2, 2, 3, 4))
            sage: d = A.differential({t: x*z, x: z, y: z})
            sage: d.differential_matrix(4)
            [2 0]
            [1 1]
            [0 2]
            [1 0]
            sage: A.inject_variables()
            Defining x, y, z, t
            sage: d(t)
            x*z
            sage: d(y^2)
            2*y*z
            sage: d(x*y)
            x*z + y*z
            sage: d(x^2)
            2*x*z
        """
        A = self.domain()
        dom = A.basis(n)
        cod = A.basis(n + 1)
        # Identify each codomain basis element by its (unique) exponent key.
        cokeys = [next(iter(a.lift().dict().keys())) for a in cod]
        m = matrix(A.base_ring(), len(dom), len(cod))
        for i in range(len(dom)):
            im = self(dom[i])
            dic = im.lift().dict()
            for j in dic.keys():
                k = cokeys.index(j)
                m[i, k] = dic[j]
        m.set_immutable()
        return m

    def coboundaries(self, n):
        r"""
        The ``n``-th coboundary group of the algebra.

        This is a vector space over the base field `F`, and it is
        returned as a subspace of the vector space `F^d`, where the
        ``n``-th homogeneous component has dimension `d`.

        INPUT:

        - ``n`` -- degree

        EXAMPLES::

            sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2))
            sage: d = A.differential({z: x*z})
            sage: d.coboundaries(2)
            Vector space of degree 2 and dimension 0 over Rational Field
            Basis matrix:
            []
            sage: d.coboundaries(3)
            Vector space of degree 2 and dimension 1 over Rational Field
            Basis matrix:
            [1 0]
            sage: d.coboundaries(1)
            Vector space of degree 2 and dimension 0 over Rational Field
            Basis matrix:
            []
        """
        A = self.domain()
        F = A.base_ring()
        if n == 0:
            return VectorSpace(F, 0)
        if n == 1:
            V0 = VectorSpace(F, len(A.basis(1)))
            return V0.subspace([])
        # Coboundaries in degree n are the image of d in degree n-1.
        M = self.differential_matrix(n - 1)
        V0 = VectorSpace(F, M.nrows())
        V1 = VectorSpace(F, M.ncols())
        mor = V0.Hom(V1)(M)
        return mor.image()

    def cocycles(self, n):
        r"""
        The ``n``-th cocycle group of the algebra.

        This is a vector space over the base field `F`, and it is
        returned as a subspace of the vector space `F^d`, where the
        ``n``-th homogeneous component has dimension `d`.

        INPUT:

        - ``n`` -- degree

        EXAMPLES::

            sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2))
            sage: d = A.differential({z: x*z})
            sage: d.cocycles(2)
            Vector space of degree 2 and dimension 1 over Rational Field
            Basis matrix:
            [1 0]
        """
        A = self.domain()
        F = A.base_ring()
        if n == 0:
            return VectorSpace(F, 1)
        # Cocycles in degree n are the kernel of d in degree n.
        M = self.differential_matrix(n)
        V0 = VectorSpace(F, M.nrows())
        V1 = VectorSpace(F, M.ncols())
        mor = V0.Hom(V1)(M)
        return mor.kernel()

    def cohomology_raw(self, n):
        r"""
        The ``n``-th cohomology group of ``self``.

        This is a vector space over the base ring, and it is returned
        as the quotient cocycles/coboundaries.

        INPUT:

        - ``n`` -- degree

        .. SEEALSO::

            :meth:`cohomology`

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(2, 2, 3, 4))
            sage: d = A.differential({t: x*z, x: z, y: z})
            sage: d.cohomology_raw(4)
            Vector space quotient V/W of dimension 2 over Rational Field where
            V: Vector space of degree 4 and dimension 2 over Rational Field
            Basis matrix:
            [   1    0    0   -2]
            [   0    1 -1/2   -1]
            W: Vector space of degree 4 and dimension 0 over Rational Field
            Basis matrix:
            []

        Compare to :meth:`cohomology`::

            sage: d.cohomology(4)
            Free module generated by {[x^2 - 2*t], [x*y - 1/2*y^2 - t]} over Rational Field
        """
        return self.cocycles(n).quotient(self.coboundaries(n))

    def cohomology(self, n):
        r"""
        The ``n``-th cohomology group of ``self``.

        This is a vector space over the base ring, defined as the
        quotient cocycles/coboundaries. The elements of the quotient
        are lifted to the vector space of cocycles, and this is
        described in terms of those lifts.

        INPUT:

        - ``n`` -- degree

        .. SEEALSO::

            :meth:`cohomology_raw`

        EXAMPLES::

            sage: A.<a,b,c,d,e> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 1, 1, 1))
            sage: d = A.differential({d: a*b, e: b*c})
            sage: d.cohomology(2)
            Free module generated by {[a*c], [a*d], [b*d], [c*d - a*e], [b*e], [c*e]} over Rational Field

        Compare to :meth:`cohomology_raw`::

            sage: d.cohomology_raw(2)
            Vector space quotient V/W of dimension 6 over Rational Field where
            V: Vector space of degree 10 and dimension 8 over Rational Field
            Basis matrix:
            [ 1  0  0  0  0  0  0  0  0  0]
            [ 0  1  0  0  0  0  0  0  0  0]
            [ 0  0  1  0  0  0  0  0  0  0]
            [ 0  0  0  1  0  0  0  0  0  0]
            [ 0  0  0  0  1  0  0  0  0  0]
            [ 0  0  0  0  0  1 -1  0  0  0]
            [ 0  0  0  0  0  0  0  1  0  0]
            [ 0  0  0  0  0  0  0  0  1  0]
            W: Vector space of degree 10 and dimension 2 over Rational Field
            Basis matrix:
            [1 0 0 0 0 0 0 0 0 0]
            [0 0 1 0 0 0 0 0 0 0]
        """
        H = self.cohomology_raw(n)
        H_basis_raw = [H.lift(H.basis()[i]) for i in range(H.dimension())]
        A = self.domain()
        B = A.basis(n)
        # Re-express each lifted basis vector as an algebra element.
        H_basis = [sum(c * b for (c, b) in zip(coeffs, B)) for coeffs in
                   H_basis_raw]
        # Put brackets around classes.
        H_basis_brackets = [CohomologyClass(b) for b in H_basis]
        return CombinatorialFreeModule(A.base_ring(),
                                       H_basis_brackets,
                                       sorting_key=sorting_keys,
                                       monomial_reverse=True)

    homology = cohomology

    def _is_nonzero(self):
        """
        Return ``True`` iff this morphism is nonzero.

        This is used by the :meth:`Morphism.__nonzero__` method, which
        in turn is used by the :func:`TestSuite` test
        ``_test_nonzero_equal``.

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2, 3))
            sage: B = A.cdg_algebra({x: x*y, y: -x*y , z: t})
            sage: B.differential()._is_nonzero()
            True
            sage: bool(B.differential())
            True
            sage: C = A.cdg_algebra({x: 0, y: 0, z: 0})
            sage: C.differential()._is_nonzero()
            False
            sage: bool(C.differential())
            False
        """
        return any(x for x in self._dic_.values())
class Differential_multigraded(Differential):
"""
Differential of a commutative multi-graded algebra.
"""
    def __init__(self, A, im_gens):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1, 0), (0, 1), (0, 2)))
            sage: d = A.differential({a: c})

        We skip the category test because homsets/morphisms aren't
        proper parents/elements yet::

            sage: TestSuite(d).run(skip="_test_category")
        """
        Differential.__init__(self, A, im_gens)

        # Check that the differential has a well-defined degree.
        # diff_deg = [self(x).degree() - x.degree() for x in A.gens()]
        # Only generators with nonzero image contribute a degree shift;
        # all such shifts must agree for the multidegree to be well defined.
        diff_deg = []
        for x in A.gens():
            y = self(x)
            if y != 0:
                diff_deg.append(y.degree() - x.degree())
        if len(set(diff_deg)) > 1:
            raise ValueError("The differential does not have a well-defined degree")
        self._degree_of_differential = diff_deg[0]
    @cached_method
    def differential_matrix_multigraded(self, n, total=False):
        """
        The matrix that gives the differential in degree ``n``.

        .. TODO::

            Rename this to ``differential_matrix`` once inheritance,
            overriding, and cached methods work together better. See
            :trac:`17201`.

        INPUT:

        - ``n`` -- degree

        - ``total`` -- (default: ``False``) if ``True``,
          return the matrix corresponding to total degree ``n``

        If ``n`` is an integer rather than a multi-index, then the
        total degree is used in that case as well.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1, 0), (0, 1), (0, 2)))
            sage: d = A.differential({a: c})
            sage: d.differential_matrix_multigraded((1, 0))
            [1]
            sage: d.differential_matrix_multigraded(1, total=True)
            [0 1]
            [0 0]
            sage: d.differential_matrix_multigraded((1, 0), total=True)
            [0 1]
            [0 0]
            sage: d.differential_matrix_multigraded(1)
            [0 1]
            [0 0]
        """
        # Integer (or explicitly total) degree: delegate to the
        # singly-graded matrix via the total degree.
        if total or n in ZZ:
            return Differential.differential_matrix(self, total_degree(n))

        A = self.domain()
        G = AdditiveAbelianGroup([0] * A._grading_rank)
        n = G(vector(n))
        dom = A.basis(n)
        cod = A.basis(n + self._degree_of_differential)
        # Identify each codomain basis element by its (unique) exponent key.
        cokeys = [next(iter(a.lift().dict().keys())) for a in cod]
        m = matrix(self.base_ring(), len(dom), len(cod))
        for i in range(len(dom)):
            im = self(dom[i])
            dic = im.lift().dict()
            for j in dic.keys():
                k = cokeys.index(j)
                m[i, k] = dic[j]
        m.set_immutable()
        return m
def coboundaries(self, n, total=False):
"""
The ``n``-th coboundary group of the algebra.
This is a vector space over the base field `F`, and it is
returned as a subspace of the vector space `F^d`, where the
``n``-th homogeneous component has dimension `d`.
INPUT:
- ``n`` -- degree
- ``total`` (default ``False``) -- if ``True``, return the
coboundaries in total degree ``n``
If ``n`` is an integer rather than a multi-index, then the
total degree is used in that case as well.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1, 0), (0, 1), (0, 2)))
sage: d = A.differential({a: c})
sage: d.coboundaries((0, 2))
Vector space of degree 1 and dimension 1 over Rational Field
Basis matrix:
[1]
sage: d.coboundaries(2)
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[0 1]
"""
if total or n in ZZ:
return Differential.coboundaries(self, total_degree(n))
A = self.domain()
G = AdditiveAbelianGroup([0] * A._grading_rank)
n = G(vector(n))
F = A.base_ring()
if total_degree(n) == 0:
return VectorSpace(F, 0)
if total_degree(n) == 1:
return VectorSpace(F, 0)
M = self.differential_matrix_multigraded(n - self._degree_of_differential)
V0 = VectorSpace(F, M.nrows())
V1 = VectorSpace(F, M.ncols())
mor = V0.Hom(V1)(M)
return mor.image()
def cocycles(self, n, total=False):
r"""
The ``n``-th cocycle group of the algebra.
This is a vector space over the base field `F`, and it is
returned as a subspace of the vector space `F^d`, where the
``n``-th homogeneous component has dimension `d`.
INPUT:
- ``n`` -- degree
- ``total`` -- (default: ``False``) if ``True``, return the
cocycles in total degree ``n``
If ``n`` is an integer rather than a multi-index, then the
total degree is used in that case as well.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1, 0), (0, 1), (0, 2)))
sage: d = A.differential({a: c})
sage: d.cocycles((0, 1))
Vector space of degree 1 and dimension 1 over Rational Field
Basis matrix:
[1]
sage: d.cocycles((0, 1), total=True)
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[0 1]
"""
if total or n in ZZ:
return Differential.cocycles(self, total_degree(n))
A = self.domain()
G = AdditiveAbelianGroup([0] * A._grading_rank)
n = G(vector(n))
F = A.base_ring()
if total_degree(n) == 0:
return VectorSpace(F, 1)
M = self.differential_matrix_multigraded(n)
V0 = VectorSpace(F, M.nrows())
V1 = VectorSpace(F, M.ncols())
mor = V0.Hom(V1)(M)
return mor.kernel()
def cohomology_raw(self, n, total=False):
r"""
The ``n``-th cohomology group of the algebra.
This is a vector space over the base ring, and it is returned
as the quotient cocycles/coboundaries.
INPUT:
- ``n`` -- degree
- ``total`` -- (default: ``False``) if ``True``, return the
cohomology in total degree ``n``
If ``n`` is an integer rather than a multi-index, then the
total degree is used in that case as well.
.. SEEALSO::
:meth:`cohomology`
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1, 0), (0, 1), (0, 2)))
sage: d = A.differential({a: c})
sage: d.cohomology_raw((0, 2))
Vector space quotient V/W of dimension 0 over Rational Field where
V: Vector space of degree 1 and dimension 1 over Rational Field
Basis matrix:
[1]
W: Vector space of degree 1 and dimension 1 over Rational Field
Basis matrix:
[1]
sage: d.cohomology_raw(1)
Vector space quotient V/W of dimension 1 over Rational Field where
V: Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[0 1]
W: Vector space of degree 2 and dimension 0 over Rational Field
Basis matrix:
[]
"""
return self.cocycles(n, total).quotient(self.coboundaries(n, total))
def cohomology(self, n, total=False):
r"""
The ``n``-th cohomology group of the algebra.
This is a vector space over the base ring, defined as the
quotient cocycles/coboundaries. The elements of the quotient
are lifted to the vector space of cocycles, and this is
described in terms of those lifts.
INPUT:
- ``n`` -- degree
- ``total`` -- (default: ``False``) if ``True``, return the
cohomology in total degree ``n``
If ``n`` is an integer rather than a multi-index, then the
total degree is used in that case as well.
.. SEEALSO::
:meth:`cohomology_raw`
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1, 0), (0, 1), (0, 2)))
sage: d = A.differential({a: c})
sage: d.cohomology((0, 2))
Free module generated by {} over Rational Field
sage: d.cohomology(1)
Free module generated by {[b]} over Rational Field
"""
H = self.cohomology_raw(n, total)
H_basis_raw = [H.lift(H.basis()[i]) for i in range(H.dimension())]
A = self.domain()
B = A.basis(n, total)
H_basis = [sum(c * b for (c, b) in zip(coeffs, B))
for coeffs in H_basis_raw]
# Put brackets around classes.
H_basis_brackets = [CohomologyClass(b) for b in H_basis]
return CombinatorialFreeModule(A.base_ring(),
H_basis_brackets,
sorting_key=sorting_keys,
monomial_reverse=True)
homology = cohomology
###########################################################
# Commutative graded algebras
class GCAlgebra(UniqueRepresentation, QuotientRing_nc):
    r"""
    A graded commutative algebra.

    INPUT:

    - ``base`` -- the base field

    - ``names`` -- (optional) names of the generators: a list of
      strings or a single string with the names separated by
      commas. If not specified, the generators are named "x0", "x1",
      ...

    - ``degrees`` -- (optional) a tuple or list specifying the degrees
      of the generators; if omitted, each generator is given degree
      1, and if both ``names`` and ``degrees`` are omitted, an error is
      raised.

    - ``R`` (optional, default None) -- the ring over which the
      algebra is defined: if this is specified, the algebra is defined
      to be ``R/I``.

    - ``I`` (optional, default None) -- an ideal in ``R``. It is
      should include, among other relations, the squares of the
      generators of odd degree

    As described in the module-level documentation, these are graded
    algebras for which oddly graded elements anticommute and evenly
    graded elements commute.

    The arguments ``R`` and ``I`` are primarily for use by the
    :meth:`quotient` method.

    These algebras should be graded over the integers; multi-graded
    algebras should be constructed using
    :class:`GCAlgebra_multigraded` instead.

    EXAMPLES::

        sage: A.<a,b> = GradedCommutativeAlgebra(QQ, degrees = (2, 3))
        sage: a.degree()
        2
        sage: B = A.quotient(A.ideal(a**2*b))
        sage: B
        Graded Commutative Algebra with generators ('a', 'b') in degrees (2, 3) with relations [a^2*b] over Rational Field
        sage: A.basis(7)
        [a^2*b]
        sage: B.basis(7)
        []

    Note that the function :func:`GradedCommutativeAlgebra` can also be used to
    construct these algebras.
    """
    # TODO: This should be a __classcall_private__?
    @staticmethod
    def __classcall__(cls, base, names=None, degrees=None, R=None, I=None, category=None):
        r"""
        Normalize the input for the :meth:`__init__` method and the
        unique representation.

        INPUT:

        - ``base`` -- the base ring of the algebra

        - ``names`` -- the names of the variables; by default, set to ``x1``,
          ``x2``, etc.

        - ``degrees`` -- the degrees of the generators; by default, set to 1

        - ``R`` -- an underlying `g`-algebra; only meant to be used by the
          quotient method

        - ``I`` -- a two-sided ideal in ``R``, with the desired relations;
          Only meant to be used by the quotient method

        TESTS::

            sage: A1 = GradedCommutativeAlgebra(GF(2), 'x,y', (3, 6))
            sage: A2 = GradedCommutativeAlgebra(GF(2), ['x', 'y'], [3, 6])
            sage: A1 is A2
            True

        Testing the single generator case (:trac:`25276`)::

            sage: A3.<z> = GradedCommutativeAlgebra(QQ)
            sage: z**2 == 0
            True
            sage: A4.<z> = GradedCommutativeAlgebra(QQ, degrees=[4])
            sage: z**2 == 0
            False
            sage: A5.<z> = GradedCommutativeAlgebra(GF(2))
            sage: z**2 == 0
            False
        """
        # Normalize ``names`` to a tuple of strings and deduce the number of
        # generators ``n`` from whichever of names/degrees was given.
        if names is None:
            if degrees is None:
                raise ValueError("You must specify names or degrees")
            else:
                n = len(degrees)
            names = tuple('x{}'.format(i) for i in range(n))
        elif isinstance(names, str):
            names = tuple(names.split(','))
            n = len(names)
        else:
            n = len(names)
            names = tuple(names)

        if degrees is None:
            degrees = tuple([1 for i in range(n)])
        else:
            # Deal with multigrading: convert lists and tuples to elements
            # of an additive abelian group.
            if degrees:
                multigrade = False
                try:
                    rank = len(list(degrees[0]))
                    G = AdditiveAbelianGroup([0] * rank)
                    degrees = [G(vector(d)) for d in degrees]
                    multigrade = True
                except TypeError:
                    # The entries of degrees are not iterables, so
                    # treat as singly-graded.
                    pass
                if multigrade:
                    if sorted(map(sum, degrees)) != list(map(sum, degrees)):
                        raise ValueError("the generators should be ordered in increased total degree")
                else:
                    if sorted(degrees) != list(degrees):
                        raise ValueError("the generators should be ordered in increasing degree")
            degrees = tuple(degrees)

        if not R or not I:
            # Build the underlying (noncommutative) cover ring from scratch.
            if n > 1:
                F = FreeAlgebra(base, n, names)
            else:  # n = 1
                F = PolynomialRing(base, n, names)
            gens = F.gens()
            rels = {}
            tot_degs = [total_degree(d) for d in degrees]
            for i in range(len(gens) - 1):
                for j in range(i + 1, len(gens)):
                    # Graded commutativity: y*x = (-1)^{|x|*|y|} * x*y.
                    rels[gens[j] * gens[i]] = ((-1)**(tot_degs[i] * tot_degs[j])
                                               * gens[i] * gens[j])
            if n > 1:
                R = F.g_algebra(rels, order=TermOrder('wdegrevlex', tot_degs))
            else:  # n = 1
                R = F.quotient(rels)
            if base.characteristic() == 2:
                # In characteristic 2 there is no sign, hence no forced squares.
                I = R.ideal(0, side='twosided')
            else:
                # Odd-degree generators must square to zero.
                I = R.ideal([R.gen(i)**2
                             for i in range(n) if is_odd(tot_degs[i])],
                            side='twosided')

        return super(GCAlgebra, cls).__classcall__(cls, base=base, names=names,
                                                   degrees=degrees, R=R, I=I,
                                                   category=category)

    def __init__(self, base, R=None, I=None, names=None, degrees=None, category=None):
        """
        Initialize ``self``.

        INPUT:

        - ``base`` -- the base field

        - ``R`` -- (optional) the ring over which the algebra is defined

        - ``I`` -- (optional) an ideal over the corresponding `g`-algebra;
          it is meant to include, among other relations, the squares of the
          generators of odd degree

        - ``names`` -- (optional) the names of the generators; if omitted,
          this uses the names ``x0``, ``x1``, ...

        - ``degrees`` -- (optional) the degrees of the generators; if
          omitted, they are given degree 1

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ)
            sage: TestSuite(A).run()
            sage: A = GradedCommutativeAlgebra(QQ, ('x','y','z'), [2,3,4])
            sage: TestSuite(A).run()
            sage: A = GradedCommutativeAlgebra(QQ, ('x','y','z','t'), [1,2,3,4])
            sage: TestSuite(A).run()
        """
        self._degrees = tuple(degrees)
        category = Algebras(R.base_ring()).Graded().or_subcategory(category)
        QuotientRing_nc.__init__(self, R, I, names, category=category)

    def _repr_(self):
        """
        Print representation.

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=[1, 2, 3, 4])
            sage: A
            Graded Commutative Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 3, 4) over Rational Field
            sage: A.quotient(A.ideal(3*x*t - 2*y*z))
            Graded Commutative Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 3, 4) with relations [-2*y*z + 3*x*t] over Rational Field
        """
        s = "Graded Commutative Algebra with generators {} in degrees {}".format(self._names, self._degrees)
        # Find any nontrivial relations.
        I = self.defining_ideal()
        R = self.cover_ring()
        degrees = self._degrees
        if self.base().characteristic() != 2:
            # The automatic squares of odd-degree generators are not
            # interesting relations, so they are filtered out below.
            squares = [R.gen(i)**2
                       for i in range(len(degrees)) if is_odd(degrees[i])]
        else:
            squares = [R.zero()]
        relns = [g for g in I.gens() if g not in squares]
        if relns:
            s = s + " with relations {}".format(relns)
        return s + " over {}".format(self.base_ring())

    _base_repr = _repr_

    @cached_method
    def _basis_for_free_alg(self, n):
        r"""
        Basis of the associated free commutative DGA in degree ``n``.

        That is, ignore the relations when computing the basis:
        compute the basis of the free commutative DGA with generators
        in degrees given by ``self._degrees``.

        INPUT:

        - ``n`` -- integer

        OUTPUT:

        Tuple of basis elements in degree ``n``, as tuples of exponents.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3))
            sage: A._basis_for_free_alg(3)
            [(0, 0, 1), (1, 1, 0)]
            sage: B = A.quotient(A.ideal(a*b, b**2+a*c))
            sage: B._basis_for_free_alg(3)
            [(0, 0, 1), (1, 1, 0)]
            sage: GradedCommutativeAlgebra(QQ, degrees=(1,1))._basis_for_free_alg(3)
            []
            sage: GradedCommutativeAlgebra(GF(2), degrees=(1,1))._basis_for_free_alg(3)
            [(0, 3), (1, 2), (2, 1), (3, 0)]
            sage: A = GradedCommutativeAlgebra(GF(2), degrees=(4,8,12))
            sage: A._basis_for_free_alg(399)
            []
        """
        if n == 0:
            # Degree 0 is spanned by the unit: the all-zero exponent tuple.
            return ((0,) * len(self._degrees),)
        if self.base_ring().characteristic() == 2:
            # In characteristic 2 no generator squares to zero, so the free
            # algebra is a plain weighted polynomial ring.
            return [tuple(_) for _ in WeightedIntegerVectors(n, self._degrees)]

        # Split the generator degrees into even (polynomial) and odd
        # (exterior) parts.
        even_degrees = []
        odd_degrees = []
        for a in self._degrees:
            if is_even(a):
                even_degrees.append(a)
            else:
                odd_degrees.append(a)

        if not even_degrees:  # No even generators.
            return [tuple(_)
                    for _ in exterior_algebra_basis(n, tuple(odd_degrees))]
        if not odd_degrees:  # No odd generators.
            return [tuple(_)
                    for _ in WeightedIntegerVectors(n, tuple(even_degrees))]

        # General case: both even and odd generators.
        result = []
        for dim in range(n + 1):
            # First find the even part of the basis.
            if dim == 0:
                even_result = [[0] * len(even_degrees)]
            else:
                even_result = WeightedIntegerVectors(dim, tuple(even_degrees))
            # Now find the odd part of the basis.
            for even_mono in even_result:
                deg = n - dim
                odd_result = exterior_algebra_basis(deg, tuple(odd_degrees))
                for odd_mono in odd_result:
                    temp_even = list(even_mono)
                    temp_odd = list(odd_mono)
                    mono = []
                    # Re-interleave the even and odd exponents back into the
                    # original generator order.
                    for a in self._degrees:
                        if is_even(a):
                            mono.append(temp_even.pop(0))
                        else:
                            mono.append(temp_odd.pop(0))
                    result.append(tuple(mono))
        return result

    def basis(self, n):
        """
        Return a basis of the ``n``-th homogeneous component of ``self``.

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 2, 3))
            sage: A.basis(2)
            [y, z]
            sage: A.basis(3)
            [x*y, x*z, t]
            sage: A.basis(4)
            [y^2, y*z, z^2, x*t]
            sage: A.basis(5)
            [x*y^2, x*y*z, x*z^2, y*t, z*t]
            sage: A.basis(6)
            [y^3, y^2*z, y*z^2, z^3, x*y*t, x*z*t]
        """
        free_basis = self._basis_for_free_alg(n)
        # Sort by the reversed exponent tuples so the output ordering agrees
        # with the term order of the underlying ring.
        fb_reversed_entries = [list(reversed(e)) for e in free_basis]
        fb_reversed_entries.sort()
        free_basis = [tuple(reversed(e)) for e in fb_reversed_entries]
        basis = []
        for v in free_basis:
            el = prod([self.gen(i)**v[i] for i in range(len(v))])
            di = el.dict()
            if len(di) == 1:
                # Keep the monomial only if it survives unchanged in the
                # quotient, i.e. it was not rewritten by the relations.
                k, = di.keys()
                if tuple(k) == v:
                    basis.append(el)
        return basis

    def quotient(self, I, check=True):
        """
        Create the quotient of this algebra by a two-sided ideal ``I``.

        INPUT:

        - ``I`` -- a two-sided homogeneous ideal of this algebra

        - ``check`` -- (default: ``True``) if ``True``, check whether
          ``I`` is generated by homogeneous elements

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(GF(5), degrees=(2, 2, 3, 4))
            sage: I = A.ideal([x*t+z^2, x*y - t])
            sage: B = A.quotient(I)
            sage: B
            Graded Commutative Algebra with generators ('x', 'y', 'z', 't') in degrees (2, 2, 3, 4) with relations [x*t, x*y - t] over Finite Field of size 5
            sage: B(x*t)
            0
            sage: B(x*y)
            t
            sage: A.basis(7)
            [x^2*z, x*y*z, y^2*z, z*t]
            sage: B.basis(7)
            [x^2*z, y^2*z, z*t]
        """
        if check and any(not i.is_homogeneous() for i in I.gens()):
            raise ValueError("The ideal must be homogeneous")
        NCR = self.cover_ring()
        # Combine the existing defining relations with the new generators,
        # lifted to the cover ring, dropping any zeros.
        gens1 = list(self.defining_ideal().gens())
        gens2 = [i.lift() for i in I.gens()]
        gens = [g for g in gens1 + gens2 if g != NCR.zero()]
        J = NCR.ideal(gens, side='twosided')
        return GCAlgebra(self.base_ring(), self._names, self._degrees, NCR, J)

    def _coerce_map_from_(self, other):
        r"""
        Return ``True`` if there is a coercion map from ``R`` to ``self``.

        EXAMPLES::

            sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
            sage: B = A.cdg_algebra({y:x*y, x: x*y})
            sage: A._coerce_map_from_(B)
            True
            sage: B._coerce_map_from_(A)
            True
            sage: B._coerce_map_from_(QQ)
            True
            sage: B._coerce_map_from_(GF(3))
            False
        """
        if isinstance(other, GCAlgebra):
            # Two GCAlgebras coerce only if they agree on generators,
            # degrees, and defining relations.
            if self._names != other._names or self._degrees != other._degrees:
                return False
            if set(self.defining_ideal().gens()) != set(other
                                                        .defining_ideal()
                                                        .gens()):
                return False
            return self.cover_ring().has_coerce_map_from(other.cover_ring())
        return super(GCAlgebra, self)._coerce_map_from_(other)

    def _element_constructor_(self, x, coerce=True):
        r"""
        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(2, 2, 3, 4))
            sage: A({(1,3,0,1): 2, (2,2,1,2): 3})
            3*x^2*y^2*z*t^2 + 2*x*y^3*t
            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(GF(5))
            sage: A({(1,3,0,1): 2, (2,2,1,2): 3})
            0

        TESTS::

            sage: B = A.cdg_algebra({})
            sage: B(x, coerce=False)
            x
        """
        if isinstance(x, QuotientRingElement):
            if x.parent() is self:
                return x
            x = x.lift()
        if isinstance(x, dict):
            # Interpret ``x`` as an exponent-tuple -> coefficient dictionary.
            res = self.zero()
            for i in x.keys():
                mon = prod(self.gen(j)**i[j] for j in range(len(i)))
                res += x[i] * mon
            return res
        if coerce:
            R = self.cover_ring()
            x = R(x)

        from sage.interfaces.singular import is_SingularElement
        if is_SingularElement(x):
            # self._singular_().set_ring()
            x = self.element_class(self, x.sage_poly(self.cover_ring()))
            return x

        return self.element_class(self, x)

    def _Hom_(self, B, category):
        """
        Return the homset from ``self`` to ``B`` in the category ``category``.

        INPUT:

        - ``B`` -- a graded commutative algebra

        - ``category`` -- a subcategory of graded algebras or ``None``

        EXAMPLES::

            sage: A.<x,y> = GradedCommutativeAlgebra(QQ)
            sage: B.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3))
            sage: C.<d> = GradedCommutativeAlgebra(GF(17))
            sage: Hom(A,A)
            Set of Homomorphisms from Graded Commutative Algebra with generators ('x', 'y') in degrees (1, 1) over Rational Field to Graded Commutative Algebra with generators ('x', 'y') in degrees (1, 1) over Rational Field
            sage: Hom(A,B)
            Set of Homomorphisms from Graded Commutative Algebra with generators ('x', 'y') in degrees (1, 1) over Rational Field to Graded Commutative Algebra with generators ('a', 'b', 'c') in degrees (1, 2, 3) over Rational Field
            sage: Hom(A,C)
            Traceback (most recent call last):
            ...
            NotImplementedError: homomorphisms of graded commutative algebras have only been implemented when the base rings are the same
        """
        R = self.base_ring()
        # The base rings need to be checked before the categories, or
        # else the function sage.categories.homset.Hom catches the
        # TypeError and uses the wrong category (the meet of the
        # categories for self and B, which might be the category of
        # rings).
        if R != B.base_ring():
            raise NotImplementedError('homomorphisms of graded commutative '
                                      'algebras have only been implemented '
                                      'when the base rings are the same')
        cat = Algebras(R).Graded()
        if category is not None and not category.is_subcategory(cat):
            raise TypeError("{} is not a subcategory of graded algebras"
                            .format(category))
        return GCAlgebraHomset(self, B, category=category)

    def differential(self, diff):
        """
        Construct a differential on ``self``.

        INPUT:

        - ``diff`` -- a dictionary defining a differential

        The keys of the dictionary are generators of the algebra, and
        the associated values are their targets under the
        differential. Any generators which are not specified are
        assumed to have zero differential.

        EXAMPLES::

            sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2))
            sage: A.differential({y:x*y, x: x*y})
            Differential of Graded Commutative Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) over Rational Field
              Defn: x --> x*y
                    y --> x*y
                    z --> 0
            sage: B.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 2))
            sage: d = B.differential({b:a*c, c:a*c})
            sage: d(b*c)
            a*b*c + a*c^2
        """
        return Differential(self, diff)

    def cdg_algebra(self, differential):
        r"""
        Construct a differential graded commutative algebra from ``self``
        by specifying a differential.

        INPUT:

        - ``differential`` -- a dictionary defining a differential or
          a map defining a valid differential

        The keys of the dictionary are generators of the algebra, and
        the associated values are their targets under the
        differential. Any generators which are not specified are
        assumed to have zero differential. Alternatively, the
        differential can be defined using the :meth:`differential`
        method; see below for an example.

        .. SEEALSO::

            :meth:`differential`

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 1))
            sage: B = A.cdg_algebra({a: b*c, b: a*c})
            sage: B
            Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees (1, 1, 1) over Rational Field with differential:
               a --> b*c
               b --> a*c
               c --> 0

        Note that ``differential`` can also be a map::

            sage: d = A.differential({a: b*c, b: a*c})
            sage: d
            Differential of Graded Commutative Algebra with generators ('a', 'b', 'c') in degrees (1, 1, 1) over Rational Field
              Defn: a --> b*c
                    b --> a*c
                    c --> 0
            sage: A.cdg_algebra(d) is B
            True
        """
        return DifferentialGCAlgebra(self, differential)

    # TODO: Do we want a fully spelled out alias?
    # commutative_differential_graded_algebra = cdg_algebra

    class Element(QuotientRingElement):
        r"""
        An element of a graded commutative algebra.
        """
        def __init__(self, A, rep):
            r"""
            Initialize ``self``.

            INPUT:

            - ``parent`` -- the graded commutative algebra in which
              this element lies, viewed as a quotient `R / I`

            - ``rep`` -- a representative of the element in `R`; this is used
              as the internal representation of the element

            EXAMPLES::

                sage: B.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(2, 2))
                sage: a = B({(1,1): -3, (2,5): 1/2})
                sage: a
                1/2*x^2*y^5 - 3*x*y
                sage: TestSuite(a).run()
                sage: b = x^2*y^3+2
                sage: b
                x^2*y^3 + 2
            """
            QuotientRingElement.__init__(self, A, rep)

        def degree(self, total=False):
            r"""
            The degree of this element.

            If the element is not homogeneous, this returns the
            maximum of the degrees of its monomials.

            INPUT:

            - ``total`` -- ignored, present for compatibility with the
              multi-graded case

            EXAMPLES::

                sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 3, 3))
                sage: el = z*t+2*x*y-y^2*z
                sage: el.degree()
                7
                sage: el.monomials()
                [y^2*z, z*t, x*y]
                sage: [i.degree() for i in el.monomials()]
                [7, 6, 3]
                sage: A(0).degree()
                Traceback (most recent call last):
                ...
                ValueError: The zero element does not have a well-defined degree
            """
            if self.is_zero():
                raise ValueError("The zero element does not have a well-defined degree")
            exps = self.lift().dict().keys()
            degrees = self.parent()._degrees
            n = self.parent().ngens()
            # Degree of a monomial = exponent-weighted sum of the generator
            # degrees; take the maximum over all monomials.
            l = [sum(e[i] * degrees[i] for i in range(n)) for e in exps]
            return max(l)

        def is_homogeneous(self, total=False):
            r"""
            Return ``True`` if ``self`` is homogeneous and ``False`` otherwise.

            INPUT:

            - ``total`` -- boolean (default ``False``); only used in the
              multi-graded case, in which case if ``True``, check to see
              if ``self`` is homogeneous with respect to total degree

            EXAMPLES::

                sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 3, 3))
                sage: el = z*t + 2*x*y - y^2*z
                sage: el.degree()
                7
                sage: el.monomials()
                [y^2*z, z*t, x*y]
                sage: [i.degree() for i in el.monomials()]
                [7, 6, 3]
                sage: el.is_homogeneous()
                False
                sage: em = y^3 - 5*z*t + 3/2*x*y*t
                sage: em.is_homogeneous()
                True
                sage: em.monomials()
                [y^3, x*y*t, z*t]
                sage: [i.degree() for i in em.monomials()]
                [6, 6, 6]

            The element 0 is homogeneous, even though it doesn't have
            a well-defined degree::

                sage: A(0).is_homogeneous()
                True

            A multi-graded example::

                sage: B.<c,d> = GradedCommutativeAlgebra(QQ, degrees=((2, 0), (0, 4)))
                sage: (c^2 - 1/2 * d).is_homogeneous()
                False
                sage: (c^2 - 1/2 * d).is_homogeneous(total=True)
                True
            """
            degree = None
            for m in self.monomials():
                if degree is None:
                    degree = m.degree(total)
                else:
                    if degree != m.degree(total):
                        return False
            # Zero has no monomials, so it falls through and is declared
            # (vacuously) homogeneous.
            return True

        def homogeneous_parts(self):
            r"""
            Return the homogeneous parts of the element. The result is given as
            a dictionary indexed by degree.

            EXAMPLES::

                sage: A.<e1,e2,e3,e4,e5> = GradedCommutativeAlgebra(QQ)
                sage: a = e1*e3*e5-3*e2*e3*e5 + e1*e2 -2*e3 + e5
                sage: a.homogeneous_parts()
                {1: -2*e3 + e5, 2: e1*e2, 3: e1*e3*e5 - 3*e2*e3*e5}
            """
            dic = self.dict()
            # Rebuild each monomial as an element and bucket it by degree.
            terms = [self.parent()({t: dic[t]}) for t in dic.keys()]
            res = {}
            for term in terms:
                deg = term.degree()
                if deg in res:
                    res[deg] += term
                else:
                    res[deg] = term
            return {i: res[i] for i in sorted(res.keys())}

        homogenous_parts = deprecated_function_alias(30585, homogeneous_parts)

        def dict(self):
            r"""
            A dictionary that determines the element.

            The keys of this dictionary are the tuples of exponents of each
            monomial, and the values are the corresponding coefficients.

            EXAMPLES::

                sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 2, 3))
                sage: dic = (x*y - 5*y*z + 7*x*y^2*z^3*t).dict()
                sage: sorted(dic.items())
                [((0, 1, 1, 0), -5), ((1, 1, 0, 0), 1), ((1, 2, 3, 1), 7)]
            """
            return self.lift().dict()

        def basis_coefficients(self, total=False):
            """
            Return the coefficients of this homogeneous element with
            respect to the basis in its degree.

            For example, if this is the sum of the 0th and 2nd basis
            elements, return the list ``[1, 0, 1]``.

            Raise an error if the element is not homogeneous.

            INPUT:

            - ``total`` -- boolean (default ``False``); this
              is only used in the multi-graded case, in which case if
              ``True``, it returns the coefficients with respect to
              the basis for the total degree of this element

            OUTPUT:

            A list of elements of the base field.

            EXAMPLES::

                sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 2, 3))
                sage: A.basis(3)
                [x*y, x*z, t]
                sage: (t + 3*x*y).basis_coefficients()
                [3, 0, 1]
                sage: (t + x).basis_coefficients()
                Traceback (most recent call last):
                ...
                ValueError: This element is not homogeneous
                sage: B.<c,d> = GradedCommutativeAlgebra(QQ, degrees=((2,0), (0,4)))
                sage: B.basis(4)
                [c^2, d]
                sage: (c^2 - 1/2 * d).basis_coefficients(total=True)
                [1, -1/2]
                sage: (c^2 - 1/2 * d).basis_coefficients()
                Traceback (most recent call last):
                ...
                ValueError: This element is not homogeneous
            """
            if not self.is_homogeneous(total):
                raise ValueError('This element is not homogeneous')
            basis = self.parent().basis(self.degree(total))
            lift = self.lift()
            # Read off the coefficient of each basis monomial in the lift.
            return [lift.monomial_coefficient(x.lift()) for x in basis]
class GCAlgebra_multigraded(GCAlgebra):
"""
A multi-graded commutative algebra.
INPUT:
- ``base`` -- the base field
- ``degrees`` -- a tuple or list specifying the degrees of the
generators
- ``names`` -- (optional) names of the generators: a list of
strings or a single string with the names separated by
commas; if not specified, the generators are named ``x0``,
``x1``, ...
- ``R`` -- (optional) the ring over which the algebra is defined
- ``I`` -- (optional) an ideal in ``R``; it should include, among
other relations, the squares of the generators of odd degree
When defining such an algebra, each entry of ``degrees`` should be
a list, tuple, or element of an additive (free) abelian
group. Regardless of how the user specifies the degrees, Sage
converts them to group elements.
The arguments ``R`` and ``I`` are primarily for use by the
:meth:`GCAlgebra.quotient` method.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0,1), (1,1)))
sage: A
Graded Commutative Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (1, 1)) over Rational Field
sage: a**2
0
sage: c.degree(total=True)
2
sage: c**2
c^2
sage: c.degree()
(1, 1)
Although the degree of ``c`` was defined using a Python tuple, it
is returned as an element of an additive abelian group, and so it
can be manipulated via arithmetic operations::
sage: type(c.degree())
<class 'sage.groups.additive_abelian.additive_abelian_group.AdditiveAbelianGroup_fixed_gens_with_category.element_class'>
sage: 2 * c.degree()
(2, 2)
sage: (a*b).degree() == a.degree() + b.degree()
True
The :meth:`basis` method and the :meth:`Element.degree` method both accept
the boolean keyword ``total``. If ``True``, use the total degree::
sage: A.basis(2, total=True)
[a*b, c]
sage: c.degree(total=True)
2
"""
def __init__(self, base, degrees, names=None, R=None, I=None, category=None):
"""
Initialize ``self``.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0,1), (1,1)))
sage: TestSuite(A).run()
sage: B.<w> = GradedCommutativeAlgebra(GF(2), degrees=((3,2),))
sage: TestSuite(B).run(skip=['_test_construction'])
sage: C = GradedCommutativeAlgebra(GF(7), degrees=((3,2),))
sage: TestSuite(C).run()
"""
total_degs = [total_degree(d) for d in degrees]
GCAlgebra.__init__(self, base, R=R, I=I, names=names,
degrees=total_degs, category=category)
self._degrees_multi = degrees
self._grading_rank = len(list(degrees[0]))
def _repr_(self):
"""
Print representation.
EXAMPLES::
sage: GradedCommutativeAlgebra(QQ, degrees=((1,0,0), (0,0,1), (1,1,1)))
Graded Commutative Algebra with generators ('x0', 'x1', 'x2') in degrees ((1, 0, 0), (0, 0, 1), (1, 1, 1)) over Rational Field
"""
s = GCAlgebra._repr_(self)
old = '{}'.format(self._degrees)
new = '{}'.format(self._degrees_multi)
return s.replace(old, new)
_base_repr = _repr_
def quotient(self, I, check=True):
"""
Create the quotient of this algebra by a two-sided ideal ``I``.
INPUT:
- ``I`` -- a two-sided homogeneous ideal of this algebra
- ``check`` -- (default: ``True``) if ``True``, check whether
``I`` is generated by homogeneous elements
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(GF(5), degrees=(2, 2, 3, 4))
sage: I = A.ideal([x*t+z^2, x*y - t])
sage: B = A.quotient(I)
sage: B
Graded Commutative Algebra with generators ('x', 'y', 'z', 't') in degrees (2, 2, 3, 4) with relations [x*t, x*y - t] over Finite Field of size 5
sage: B(x*t)
0
sage: B(x*y)
t
sage: A.basis(7)
[x^2*z, x*y*z, y^2*z, z*t]
sage: B.basis(7)
[x^2*z, y^2*z, z*t]
"""
if check and any(not i.is_homogeneous() for i in I.gens()):
raise ValueError("The ideal must be homogeneous")
NCR = self.cover_ring()
gens1 = list(self.defining_ideal().gens())
gens2 = [i.lift() for i in I.gens()]
gens = [g for g in gens1 + gens2 if g != NCR.zero()]
J = NCR.ideal(gens, side='twosided')
return GCAlgebra_multigraded(self.base_ring(), self._names,
self._degrees_multi, NCR, J)
def _coerce_map_from_(self, other):
r"""
Return ``True`` if there is a coercion map from ``R`` to ``self``.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
sage: B = A.cdg_algebra({a: c})
sage: B._coerce_map_from_(A)
True
sage: B._coerce_map_from_(QQ)
True
sage: B._coerce_map_from_(GF(3))
False
"""
if isinstance(other, GCAlgebra_multigraded):
if self._degrees_multi != other._degrees_multi:
return False
elif isinstance(other, GCAlgebra): # Not multigraded
return False
return super(GCAlgebra_multigraded, self)._coerce_map_from_(other)
def basis(self, n, total=False):
"""
Basis in degree ``n``.
- ``n`` -- degree or integer
- ``total`` (optional, default False) -- if True, return the
basis in total degree ``n``.
If ``n`` is an integer rather than a multi-index, then the
total degree is used in that case as well.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (0,1), (1,1)))
sage: A.basis((1,1))
[a*b, c]
sage: A.basis(2, total=True)
[a^2, a*b, b^2, c]
Since 2 is a not a multi-index, we don't need to specify ``total=True``::
sage: A.basis(2)
[a^2, a*b, b^2, c]
If ``total==True``, then ``n`` can still be a tuple, list,
etc., and its total degree is used instead::
sage: A.basis((1,1), total=True)
[a^2, a*b, b^2, c]
"""
tot_basis = GCAlgebra.basis(self, total_degree(n))
if total or n in ZZ:
return tot_basis
G = AdditiveAbelianGroup([0] * self._grading_rank)
n = G(vector(n))
return [b for b in tot_basis if b.degree() == n]
def differential(self, diff):
"""
Construct a differential on ``self``.
INPUT:
- ``diff`` -- a dictionary defining a differential
The keys of the dictionary are generators of the algebra, and
the associated values are their targets under the
differential. Any generators which are not specified are
assumed to have zero differential.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
sage: A.differential({a: c})
Differential of Graded Commutative Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field
Defn: a --> c
b --> 0
c --> 0
"""
return Differential_multigraded(self, diff)
def cdg_algebra(self, differential):
r"""
Construct a differential graded commutative algebra from ``self``
by specifying a differential.
INPUT:
- ``differential`` -- a dictionary defining a differential or
a map defining a valid differential
The keys of the dictionary are generators of the algebra, and
the associated values are their targets under the
differential. Any generators which are not specified are
assumed to have zero differential. Alternatively, the
differential can be defined using the :meth:`differential`
method; see below for an example.
.. SEEALSO::
:meth:`differential`
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
sage: A.cdg_algebra({a: c})
Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field with differential:
a --> c
b --> 0
c --> 0
sage: d = A.differential({a: c})
sage: A.cdg_algebra(d)
Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field with differential:
a --> c
b --> 0
c --> 0
"""
return DifferentialGCAlgebra_multigraded(self, differential)
class Element(GCAlgebra.Element):
def degree(self, total=False):
"""
Return the degree of this element.
INPUT:
- ``total`` -- if ``True``, return the total degree, an
integer; otherwise, return the degree as an element of
an additive free abelian group
If not requesting the total degree, raise an error if the
element is not homogeneous.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (0,1), (1,1)))
sage: (a**2*b).degree()
(2, 1)
sage: (a**2*b).degree(total=True)
3
sage: (a**2*b + c).degree()
Traceback (most recent call last):
...
ValueError: This element is not homogeneous
sage: (a**2*b + c).degree(total=True)
3
sage: A(0).degree()
Traceback (most recent call last):
...
ValueError: The zero element does not have a well-defined degree
"""
if total:
return GCAlgebra.Element.degree(self)
if self.is_zero():
raise ValueError("The zero element does not have a well-defined degree")
degrees = self.parent()._degrees_multi
n = self.parent().ngens()
exps = self.lift().dict().keys()
l = [sum(exp[i] * degrees[i] for i in range(n)) for exp in exps]
if len(set(l)) == 1:
return l[0]
else:
raise ValueError('This element is not homogeneous')
###########################################################
# Differential algebras
class DifferentialGCAlgebra(GCAlgebra):
"""
A commutative differential graded algebra.
INPUT:
- ``A`` -- a graded commutative algebra; that is, an instance
of :class:`GCAlgebra`
- ``differential`` -- a differential
As described in the module-level documentation, these are graded
algebras for which oddly graded elements anticommute and evenly
graded elements commute, and on which there is a graded
differential of degree 1.
These algebras should be graded over the integers; multi-graded
algebras should be constructed using
:class:`DifferentialGCAlgebra_multigraded` instead.
Note that a natural way to construct these is to use the
:func:`GradedCommutativeAlgebra` function and the
:meth:`GCAlgebra.cdg_algebra` method.
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(2, 2, 3, 3))
sage: A.cdg_algebra({z: x*y})
Commutative Differential Graded Algebra with generators ('x', 'y', 'z', 't') in degrees (2, 2, 3, 3) over Rational Field with differential:
x --> 0
y --> 0
z --> x*y
t --> 0
Alternatively, starting with :func:`GradedCommutativeAlgebra`::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(2, 2, 3, 3))
sage: A.cdg_algebra(differential={z: x*y})
Commutative Differential Graded Algebra with generators ('x', 'y', 'z', 't') in degrees (2, 2, 3, 3) over Rational Field with differential:
x --> 0
y --> 0
z --> x*y
t --> 0
See the function :func:`GradedCommutativeAlgebra` for more examples.
"""
    @staticmethod
    def __classcall__(cls, A, differential):
        """
        Normalize input to ensure a unique representation.

        Equal ``(A, differential)`` inputs — whether the differential is
        given as a dictionary or as a :class:`Differential` on another
        parent — are normalized to the same arguments, so the cached
        ``__classcall__`` machinery returns the identical instance.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1))
            sage: D1 = A.cdg_algebra({a: b*c, b: a*c})
            sage: D2 = A.cdg_algebra(D1.differential())
            sage: D1 is D2
            True
            sage: from sage.algebras.commutative_dga import DifferentialGCAlgebra
            sage: D1 is DifferentialGCAlgebra(A, {a: b*c, b: a*c, c: 0})
            True
        """
        # Coerce ``differential`` into a Differential attached to ``A``.
        if not isinstance(differential, Differential):
            differential = A.differential(differential)
        elif differential.parent() != A:
            # Rebuild from the raw generator->image dictionary on ``A``.
            differential = Differential(A, differential._dic_)
        # NOTE(review): ``super(GCAlgebra, cls)`` deliberately skips
        # GCAlgebra's own __classcall__ in the MRO — presumably to avoid
        # re-normalizing the already-constructed ``A``; confirm before
        # changing.
        return super(GCAlgebra, cls).__classcall__(cls, A, differential)
    def __init__(self, A, differential):
        """
        Initialize ``self``.

        INPUT:

        - ``A`` -- a graded commutative algebra

        - ``differential`` -- a differential

        EXAMPLES::

            sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(2, 2, 3, 3))
            sage: D = A.cdg_algebra({z: x*y})
            sage: TestSuite(D).run()

        The degree of the differential must be 1::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1))
            sage: A.cdg_algebra({a: a*b*c})
            Traceback (most recent call last):
            ...
            ValueError: The given dictionary does not determine a degree 1 map

        The differential composed with itself must be zero::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3))
            sage: A.cdg_algebra({a:b, b:c})
            Traceback (most recent call last):
            ...
            ValueError: The given dictionary does not determine a valid differential
        """
        # ``self`` is simultaneously a graded algebra and a chain complex
        # over the base ring.
        cat = Algebras(A.base()).Graded() & ChainComplexes(A.base())
        # Rebuild the underlying graded algebra data from ``A``.
        GCAlgebra.__init__(self, A.base(), names=A._names,
                           degrees=A._degrees, R=A.cover_ring(),
                           I=A.defining_ideal(), category=cat)
        # Re-create the differential with ``self`` (not ``A``) as parent.
        self._differential = Differential(self, differential._dic_)
        # Caches filled lazily by minimal_model() / numerical_invariants().
        self._minimalmodels = {}
        self._numerical_invariants = {}
def graded_commutative_algebra(self):
"""
Return the base graded commutative algebra of ``self``.
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(2, 2, 3, 3))
sage: D = A.cdg_algebra({z: x*y})
sage: D.graded_commutative_algebra() == A
True
"""
return GCAlgebra(self.base(), names=self._names, degrees=self._degrees,
R=self.cover_ring(), I=self.defining_ideal())
def _base_repr(self):
"""
Return the base string representation of ``self``.
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=[1, 2, 3, 4])
sage: A.cdg_algebra({x:y, z:t})._base_repr()
"Commutative Differential Graded Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 3, 4) over Rational Field"
"""
return GCAlgebra._repr_(self).replace('Graded Commutative', 'Commutative Differential Graded')
def _repr_(self):
"""
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=[1, 2, 3, 4])
sage: A.cdg_algebra({x:y, z:t})
Commutative Differential Graded Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 3, 4) over Rational Field with differential:
x --> y
y --> 0
z --> t
t --> 0
"""
d = self._differential._repr_defn().replace('\n', '\n ')
return self._base_repr() + " with differential:{}".format('\n ' + d)
def quotient(self, I, check=True):
"""
Create the quotient of this algebra by a two-sided ideal ``I``.
INPUT:
- ``I`` -- a two-sided homogeneous ideal of this algebra
- ``check`` -- (default: ``True``) if ``True``, check whether
``I`` is generated by homogeneous elements
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
sage: B = A.cdg_algebra({y:x*y, z:x*z})
sage: B.inject_variables()
Defining x, y, z
sage: I = B.ideal([y*z])
sage: C = B.quotient(I)
sage: (y*z).differential()
2*x*y*z
sage: C((y*z).differential())
0
sage: C(y*z)
0
It is checked that the differential maps the ideal into itself, to make
sure that the quotient inherits a differential structure::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2))
sage: B = A.cdg_algebra({x:y})
sage: B.quotient(B.ideal(y*x))
Traceback (most recent call last):
...
ValueError: The differential does not preserve the ideal
sage: B.quotient(B.ideal(x))
Traceback (most recent call last):
...
ValueError: The differential does not preserve the ideal
"""
J = self.ideal(I)
AQ = GCAlgebra.quotient(self, J, check)
for g in I.gens():
if not AQ(g.differential()).is_zero():
raise ValueError("The differential does not preserve the ideal")
dic = {AQ(a): AQ(a.differential()) for a in self.gens()}
return AQ.cdg_algebra(dic)
    def differential(self, x=None):
        r"""
        The differential of ``self``.

        This returns a map, and so it may be evaluated on elements of
        this algebra.

        EXAMPLES::

            sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
            sage: B = A.cdg_algebra({y:x*y, x: y*x})
            sage: d = B.differential(); d
            Differential of Commutative Differential Graded Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) over Rational Field
              Defn: x --> -x*y
                    y --> x*y
                    z --> 0
            sage: d(y)
            x*y
        """
        # NOTE(review): the argument ``x`` is ignored here — presumably it is
        # accepted only for compatibility with the chain-complex interface;
        # confirm before relying on ``differential(x)`` applying the map.
        return self._differential
def coboundaries(self, n):
"""
The ``n``-th coboundary group of the algebra.
This is a vector space over the base field `F`, and it is
returned as a subspace of the vector space `F^d`, where the
``n``-th homogeneous component has dimension `d`.
INPUT:
- ``n`` -- degree
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
sage: B = A.cdg_algebra(differential={z: x*z})
sage: B.coboundaries(2)
Vector space of degree 2 and dimension 0 over Rational Field
Basis matrix:
[]
sage: B.coboundaries(3)
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 0]
sage: B.basis(3)
[x*z, y*z]
"""
return self._differential.coboundaries(n)
def cocycles(self, n):
"""
The ``n``-th cocycle group of the algebra.
This is a vector space over the base field `F`, and it is
returned as a subspace of the vector space `F^d`, where the
``n``-th homogeneous component has dimension `d`.
INPUT:
- ``n`` -- degree
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
sage: B = A.cdg_algebra(differential={z: x*z})
sage: B.cocycles(2)
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 0]
sage: B.basis(2)
[x*y, z]
"""
return self._differential.cocycles(n)
def cohomology_raw(self, n):
"""
The ``n``-th cohomology group of ``self``.
This is a vector space over the base ring, and it is returned
as the quotient cocycles/coboundaries.
INPUT:
- ``n`` -- degree
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees = (2,2,3,4))
sage: B = A.cdg_algebra({t: x*z, x: z, y: z})
sage: B.cohomology_raw(4)
Vector space quotient V/W of dimension 2 over Rational Field where
V: Vector space of degree 4 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 0 -2]
[ 0 1 -1/2 -1]
W: Vector space of degree 4 and dimension 0 over Rational Field
Basis matrix:
[]
Compare to :meth:`cohomology`::
sage: B.cohomology(4)
Free module generated by {[x^2 - 2*t], [x*y - 1/2*y^2 - t]} over Rational Field
"""
return self._differential.cohomology_raw(n)
def cohomology(self, n):
"""
The ``n``-th cohomology group of ``self``.
This is a vector space over the base ring, defined as the
quotient cocycles/coboundaries. The elements of the quotient
are lifted to the vector space of cocycles, and this is
described in terms of those lifts.
INPUT:
- ``n`` -- degree
EXAMPLES::
sage: A.<a,b,c,d,e> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1,1,1))
sage: B = A.cdg_algebra({d: a*b, e: b*c})
sage: B.cohomology(2)
Free module generated by {[a*c], [a*d], [b*d], [c*d - a*e], [b*e], [c*e]} over Rational Field
Compare to :meth:`cohomology_raw`::
sage: B.cohomology_raw(2)
Vector space quotient V/W of dimension 6 over Rational Field where
V: Vector space of degree 10 and dimension 8 over Rational Field
Basis matrix:
[ 1 0 0 0 0 0 0 0 0 0]
[ 0 1 0 0 0 0 0 0 0 0]
[ 0 0 1 0 0 0 0 0 0 0]
[ 0 0 0 1 0 0 0 0 0 0]
[ 0 0 0 0 1 0 0 0 0 0]
[ 0 0 0 0 0 1 -1 0 0 0]
[ 0 0 0 0 0 0 0 1 0 0]
[ 0 0 0 0 0 0 0 0 1 0]
W: Vector space of degree 10 and dimension 2 over Rational Field
Basis matrix:
[1 0 0 0 0 0 0 0 0 0]
[0 0 1 0 0 0 0 0 0 0]
TESTS:
Check that the issue discovered in :trac:`28155` is solved::
sage: A.<e1,e2,e3,e4,e5> = GradedCommutativeAlgebra(QQ)
sage: B = A.cdg_algebra({e5:e1*e2+e3*e4})
sage: B.cohomology(3) is B.cohomology(3)
True
"""
return self._differential.cohomology(n)
homology = cohomology
    def cohomology_generators(self, max_degree):
        """
        Return lifts of algebra generators for cohomology in degrees at
        most ``max_degree``.

        INPUT:

        - ``max_degree`` -- integer

        OUTPUT:

        A dictionary keyed by degree, where the corresponding
        value is a list of cohomology generators in that degree.
        Actually, the elements are lifts of cohomology generators,
        which means that they lie in this differential graded
        algebra. It also means that they are only well-defined up to
        cohomology, not on the nose.

        ALGORITHM:

        Reduce a basis of the `n`'th cohomology modulo all the degree $n$
        products of the lower degree cohomologies.

        EXAMPLES::

            sage: A.<a,x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2))
            sage: B = A.cdg_algebra(differential={y: a*x})
            sage: B.cohomology_generators(3)
            {1: [a], 2: [x], 3: [a*y]}

        The previous example has infinitely generated cohomology:
        $a y^n$ is a cohomology generator for each $n$::

            sage: B.cohomology_generators(10)
            {1: [a], 2: [x], 3: [a*y], 5: [a*y^2], 7: [a*y^3], 9: [a*y^4]}

        In contrast, the corresponding algebra in characteristic $p$
        has finitely generated cohomology::

            sage: A3.<a,x,y> = GradedCommutativeAlgebra(GF(3), degrees=(1,2,2))
            sage: B3 = A3.cdg_algebra(differential={y: a*x})
            sage: B3.cohomology_generators(20)
            {1: [a], 2: [x], 3: [a*y], 5: [a*y^2], 6: [y^3]}

        This method works with both singly graded and multi-graded algebras::

            sage: Cs.<a,b,c,d> = GradedCommutativeAlgebra(GF(2), degrees=(1,2,2,3))
            sage: Ds = Cs.cdg_algebra({a:c, b:d})
            sage: Ds.cohomology_generators(10)
            {2: [a^2], 4: [b^2]}
            sage: Cm.<a,b,c,d> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (1,1), (0,2), (0,3)))
            sage: Dm = Cm.cdg_algebra({a:c, b:d})
            sage: Dm.cohomology_generators(10)
            {2: [a^2], 4: [b^2]}

        TESTS:

        Test that coboundaries do not appear as cohomology generators::

            sage: X.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: acyclic = X.cdg_algebra({x: y})
            sage: acyclic.cohomology_generators(3)
            {}

        Test that redundant generators are eliminated::

            sage: A.<e1,e2,e3,e4> = GradedCommutativeAlgebra(QQ)
            sage: d = A.differential({e1:e4*e3,e2:e4*e3})
            sage: B = A.cdg_algebra(d)
            sage: B.cohomology_generators(3)
            {1: [e1 - e2, e3, e4], 2: [e1*e3, e1*e4]}
        """
        if not (max_degree in ZZ and max_degree > 0):
            raise ValueError('the given maximal degree must be a '
                             'positive integer')

        def vector_to_element(v, deg):
            """
            If an element of this algebra in degree ``deg`` is represented
            by a raw vector ``v``, convert it back to an element of the
            algebra again.
            """
            return sum(c * b for (c, b) in zip(v, self.basis(deg)))

        # Base case of the recursion: degree-1 cohomology classes are
        # always generators.
        if max_degree == 1:
            cohom1 = self.cohomology(1).basis().keys()
            if not cohom1:
                return {}
            return {1: [g.representative() for g in cohom1]}
        # Representatives of cohomology classes in every lower degree.
        smaller_degree = {i: [g.representative() for g in
                              self.cohomology(i).basis().keys()] for i in
                          range(1, max_degree)}
        # All degree-``max_degree`` products of lower-degree classes;
        # these are decomposable and hence not new generators.
        already_generated = []
        for i in range(1, max_degree):
            already_generated += [a * b for a in smaller_degree[i] for b in
                                  smaller_degree[max_degree - i]]
        CR = self.cohomology_raw(max_degree)
        V = CR.V()
        # Subspace of cohomology spanned by the decomposable classes.
        S = CR.submodule([CR(V(g.basis_coefficients(total=True))) for g in
                          already_generated if not g.is_zero()])
        Q = CR.quotient(S)
        # Recurse for the lower degrees, then add the indecomposables
        # found in this degree.
        res = self.cohomology_generators(max_degree - 1)
        if Q.basis():
            res[max_degree] = [vector_to_element(CR.lift(Q.lift(g)),
                                                 max_degree)
                               for g in Q.basis()]
        return res
    def minimal_model(self, i=3, max_iterations=3):
        r"""
        Try to compute a map from a ``i``-minimal gcda that is a
        ``i``-quasi-isomorphism to self.

        INPUT:

        - ``i`` -- integer (default: `3`); degree to which the result is
          required to induce an isomorphism in cohomology, and the domain is
          required to be minimal.

        - ``max_iterations`` -- integer (default: `3`); the number of
          iterations of the method at each degree. If the algorithm does not
          finish in this many iterations at each degree, an error is raised.

        OUTPUT:

        A morphism from a minimal Sullivan (up to degree ``i``) CDGA's to self,
        that induces an isomorphism in cohomology up to degree ``i``, and a
        monomorphism in degree ``i+1``.

        EXAMPLES::

            sage: S.<x, y, z> = GradedCommutativeAlgebra(QQ, degrees = (1, 1, 2))
            sage: d = S.differential({x:x*y, y:x*y})
            sage: R = S.cdg_algebra(d)
            sage: p = R.minimal_model()
            sage: T = p.domain()
            sage: p
            Commutative Differential Graded Algebra morphism:
              From: Commutative Differential Graded Algebra with generators ('x1_0', 'x2_0') in degrees (1, 2) over Rational Field with differential:
               x1_0 --> 0
               x2_0 --> 0
              To:   Commutative Differential Graded Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) over Rational Field with differential:
               x --> x*y
               y --> x*y
               z --> 0
              Defn: (x1_0, x2_0) --> (x - y, z)
            sage: R.cohomology(1)
            Free module generated by {[x - y]} over Rational Field
            sage: T.cohomology(1)
            Free module generated by {[x1_0]} over Rational Field
            sage: [p(g.representative()) for g in T.cohomology(1).basis().keys()]
            [x - y]
            sage: R.cohomology(2)
            Free module generated by {[z]} over Rational Field
            sage: T.cohomology(2)
            Free module generated by {[x2_0]} over Rational Field
            sage: [p(g.representative()) for g in T.cohomology(2).basis().keys()]
            [z]

            sage: A.<e1, e2, e3, e4, e5, e6, e7> = GradedCommutativeAlgebra(QQ)
            sage: d = A.differential({e1:e1*e7, e2:e2*e7, e3:-e3*e7, e4:-e4*e7})
            sage: B = A.cdg_algebra(d)
            sage: phi = B.minimal_model(i=3)
            sage: M = phi.domain()
            sage: M
            Commutative Differential Graded Algebra with generators ('x1_0', 'x1_1', 'x1_2', 'x2_0', 'x2_1', 'x2_2', 'x2_3', 'y3_0', 'y3_1', 'y3_2', 'y3_3', 'y3_4', 'y3_5', 'y3_6', 'y3_7', 'y3_8') in degrees (1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3) over Rational Field with differential:
               x1_0 --> 0
               x1_1 --> 0
               x1_2 --> 0
               x2_0 --> 0
               x2_1 --> 0
               x2_2 --> 0
               x2_3 --> 0
               y3_0 --> x2_0^2
               y3_1 --> x2_0*x2_1
               y3_2 --> x2_1^2
               y3_3 --> x2_0*x2_2
               y3_4 --> x2_1*x2_2 + x2_0*x2_3
               y3_5 --> x2_2^2
               y3_6 --> x2_1*x2_3
               y3_7 --> x2_2*x2_3
               y3_8 --> x2_3^2
            sage: phi
            Commutative Differential Graded Algebra morphism:
              From: Commutative Differential Graded Algebra with generators ('x1_0', 'x1_1', 'x1_2', 'x2_0', 'x2_1', 'x2_2', 'x2_3', 'y3_0', 'y3_1', 'y3_2', 'y3_3', 'y3_4', 'y3_5', 'y3_6', 'y3_7', 'y3_8') in degrees (1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3) over Rational Field with differential:
               x1_0 --> 0
               x1_1 --> 0
               x1_2 --> 0
               x2_0 --> 0
               x2_1 --> 0
               x2_2 --> 0
               x2_3 --> 0
               y3_0 --> x2_0^2
               y3_1 --> x2_0*x2_1
               y3_2 --> x2_1^2
               y3_3 --> x2_0*x2_2
               y3_4 --> x2_1*x2_2 + x2_0*x2_3
               y3_5 --> x2_2^2
               y3_6 --> x2_1*x2_3
               y3_7 --> x2_2*x2_3
               y3_8 --> x2_3^2
              To:   Commutative Differential Graded Algebra with generators ('e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7') in degrees (1, 1, 1, 1, 1, 1, 1) over Rational Field with differential:
               e1 --> e1*e7
               e2 --> e2*e7
               e3 --> -e3*e7
               e4 --> -e4*e7
               e5 --> 0
               e6 --> 0
               e7 --> 0
              Defn: (x1_0, x1_1, x1_2, x2_0, x2_1, x2_2, x2_3, y3_0, y3_1, y3_2, y3_3, y3_4, y3_5, y3_6, y3_7, y3_8) --> (e5, e6, e7, e1*e3, e2*e3, e1*e4, e2*e4, 0, 0, 0, 0, 0, 0, 0, 0, 0)
            sage: [B.cohomology(i).dimension() for i in [1..3]]
            [3, 7, 13]
            sage: [M.cohomology(i).dimension() for i in [1..3]]
            [3, 7, 13]

        ALGORITHM:

        We follow the algorithm described in [Man2019]_. It consists in
        constructing the minimal Sullivan algebra ``S`` by iteratively adding
        generators to it. Start with one closed generator of degree 1 for each
        element in the basis of the first cohomology of the algebra. Then
        proceed degree by degree. At each degree `d`, we keep adding generators
        of degree `d-1` whose differential kills the elements in the kernel of
        the map `H^d(S)\to H^d(self)`. Once this map is made injective, we add
        the needed closed generators in degree `d` to make it surjective.

        .. WARNING::

            The method is not granted to finish (it can't, since the minimal
            model could be infinitely generated in some degrees).

            The parameter ``max_iterations`` controls how many iterations of
            the method are attempted at each degree. In case they are not
            enough, an exception is raised. If you think that the result will
            be finitely generated, you can try to run it again with a higher
            value for ``max_iterations``.

        .. SEEALSO::

            :wikipedia:`Rational_homotopy_theory#Sullivan_algebras`

        TESTS::

            sage: A.<x, y, z, t> = GradedCommutativeAlgebra(QQ,degrees = (1, 2, 3, 3))
            sage: d = A.differential({x:y})
            sage: B = A.cdg_algebra(d)
            sage: B.minimal_model(i=3)
            Commutative Differential Graded Algebra morphism:
              From: Commutative Differential Graded Algebra with generators ('x3_0', 'x3_1') in degrees (3, 3) over Rational Field with differential:
               x3_0 --> 0
               x3_1 --> 0
              To:   Commutative Differential Graded Algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 3, 3) over Rational Field with differential:
               x --> y
               y --> 0
               z --> 0
               t --> 0
              Defn: (x3_0, x3_1) --> (z, t)

        REFERENCES:

        - [Fel2001]_

        - [Man2019]_
        """
        max_degree = int(i)
        if max_degree < 1:
            raise ValueError("the degree must be a positive integer")
        if max_iterations not in ZZ or max_iterations < 1:
            raise ValueError("max_iterations must be a positive integer")
        # Return the cached morphism if this degree was already computed.
        if max_degree in self._minimalmodels:
            return self._minimalmodels[max_degree]
        from copy import copy

        def extend(phi, ndegrees, ndifs, nimags, nnames):
            """
            Extend phi to a new algebra with new generators, labeled by nnames.

            The new generators have degrees ``ndegrees``, differentials
            ``ndifs`` (elements of the old domain) and map to ``nimags``
            in the codomain.
            """
            B = phi.domain()
            names = [str(g) for g in B.gens()]
            degrees = [g.degree() for g in B.gens()]
            # Rebuild the free algebra with the old and new generators.
            A = GradedCommutativeAlgebra(B.base_ring(), names=names + nnames,
                                         degrees=degrees + ndegrees)
            # Inclusion of the old generators into the enlarged algebra.
            h = B.hom(A.gens()[:B.ngens()], check=False)
            d = B.differential()
            diff = {h(g): h(d(g)) for g in B.gens()}
            cndifs = copy(ndifs)
            for g in A.gens()[B.ngens():]:
                diff[g] = h(cndifs.pop(0))
            NB = A.cdg_algebra(diff)
            # Extend phi by sending the new generators to ``nimags``.
            Nphi = NB.hom([phi(g) for g in B.gens()] + nimags, check=False)
            return Nphi

        def extendx(phi, degree):
            # Add closed generators in ``degree`` whose images span the
            # cokernel of H^degree(domain) -> H^degree(self), making the
            # induced map surjective in this degree.
            B = phi.domain()
            imagesbcohom = [phi(g.representative())
                            for g in B.cohomology(degree).basis().keys()]
            CS = self.cohomology_raw(degree)
            VS = CS.V()
            CB = B.cohomology_raw(degree)
            imagesphico = []
            for g in imagesbcohom:
                if g.is_zero():
                    imagesphico.append(CS.zero())
                else:
                    imagesphico.append(CS(VS(g.basis_coefficients())))
            phico = CB.hom(imagesphico, codomain=CS)
            QI = CS.quotient(phico.image())
            # Record the number of generators added at this degree.
            self._numerical_invariants[degree] = [QI.dimension()]
            if QI.dimension() > 0:
                nnames = ['x{}_{}'.format(degree, j)
                          for j in range(QI.dimension())]
                nbasis = []
                bbasis = self.basis(degree)
                for v in QI.basis():
                    vl = CS.lift(QI.lift(v))
                    g = sum(bbasis[j] * vl[j] for j in range(len(bbasis)))
                    nbasis.append(g)
                nimags = nbasis
                ndegrees = [degree for _ in nbasis]
                # The new generators are closed (zero differential).
                return extend(phi, ndegrees, [B.zero() for _ in nimags],
                              nimags, nnames)
            return phi

        def extendy(phi, degree):
            # Add generators of degree ``degree - 1`` whose differentials
            # kill the kernel of H^degree(domain) -> H^degree(self), making
            # the induced map injective in this degree.
            nnamesy = 0
            for iteration in range(max_iterations):
                B = phi.domain()
                imagesbcohom = [phi(g.representative()) for g in
                                B.cohomology(degree).basis().keys()]
                CS = self.cohomology_raw(degree)
                VS = CS.V()
                CB = B.cohomology_raw(degree)
                imagesphico = []
                for g in imagesbcohom:
                    if g.is_zero():
                        imagesphico.append(CS.zero())
                    else:
                        imagesphico.append(CS(VS(g.basis_coefficients())))
                phico = CB.hom(imagesphico, codomain=CS)
                K = phico.kernel()
                self._numerical_invariants[degree - 1].append(K.dimension())
                if K.dimension() == 0:
                    # The map is injective in this degree; done.
                    return phi
                if iteration == max_iterations - 1:
                    raise ValueError("could not cover all relations in max iterations in degree {}".format(degree))
                # Differentials of the new generators: kernel representatives.
                ndifs = [CB.lift(g) for g in K.basis()]
                basisdegree = B.basis(degree)
                ndifs = [sum(basisdegree[j] * g[j] for j in
                             range(len(basisdegree))) for g in ndifs]
                # Choose images so that phi commutes with the differentials.
                MS = self.differential().differential_matrix(degree - 1)
                nimags = []
                for g in ndifs:
                    if phi(g).is_zero():
                        nimags.append(vector(MS.nrows() * [0]))
                    else:
                        nimags.append(MS.solve_left(vector(phi(g).basis_coefficients())))
                nimags = [sum(self.basis(degree - 1)[j] * g[j]
                              for j in range(len(self.basis(degree - 1)))
                              ) for g in nimags]
                ndegrees = [degree - 1 for g in nimags]
                nnames = ['y{}_{}'.format(degree - 1, j + nnamesy)
                          for j in range(len(nimags))]
                nnamesy += len(nimags)
                phi = extend(phi, ndegrees, ndifs, nimags, nnames)

        if not self._minimalmodels:
            # Nothing cached: start at the first degree with nontrivial
            # cohomology, with one closed generator per cohomology class.
            degnzero = 1
            while self.cohomology(degnzero).dimension() == 0:
                self._numerical_invariants[degnzero] = [0]
                degnzero += 1
                if degnzero > max_degree:
                    raise ValueError("cohomology is trivial up to max_degree")
            gens = [g.representative()
                    for g in self.cohomology(degnzero).basis().keys()]
            self._numerical_invariants[degnzero] = [len(gens)]
            names = ['x{}_{}'.format(degnzero, j) for j in range(len(gens))]
            A = GradedCommutativeAlgebra(self.base_ring(),
                                         names,
                                         degrees=[degnzero for _ in names])
            B = A.cdg_algebra(A.differential({}))
            # Solve case that fails with one generator return B,gens
            phi = B.hom(gens)
            phi = extendy(phi, degnzero + 1)
            self._minimalmodels[degnzero] = phi
        else:
            # Resume from the highest cached partial minimal model.
            degnzero = max(self._minimalmodels)
            phi = self._minimalmodels[degnzero]
        # Alternate: surjectivity in ``degree``, injectivity in ``degree+1``.
        for degree in range(degnzero + 1, max_degree + 1):
            phi = extendx(phi, degree)
            phi = extendy(phi, degree + 1)
            self._minimalmodels[degree] = phi
        return phi
    def cohomology_algebra(self, max_degree=3):
        """
        Compute a CDGA with trivial differential, that is isomorphic to the
        cohomology of self up to ``max_degree``.

        INPUT:

        - ``max_degree`` -- integer (default: `3`); degree to which the result
          is required to be isomorphic to self's cohomology.

        EXAMPLES::

            sage: A.<e1, e2, e3, e4, e5, e6, e7> = GradedCommutativeAlgebra(QQ)
            sage: d = A.differential({e1:-e1*e6, e2:-e2*e6, e3:-e3*e6, e4:-e5*e6, e5:e4*e6})
            sage: B = A.cdg_algebra(d)
            sage: M = B.cohomology_algebra()
            sage: M
            Commutative Differential Graded Algebra with generators ('x0', 'x1', 'x2') in degrees (1, 1, 2) over Rational Field with differential:
               x0 --> 0
               x1 --> 0
               x2 --> 0
            sage: M.cohomology(1)
            Free module generated by {[x0], [x1]} over Rational Field
            sage: B.cohomology(1)
            Free module generated by {[e6], [e7]} over Rational Field
            sage: M.cohomology(2)
            Free module generated by {[x0*x1], [x2]} over Rational Field
            sage: B.cohomology(2)
            Free module generated by {[e4*e5], [e6*e7]} over Rational Field
            sage: M.cohomology(3)
            Free module generated by {[x0*x2], [x1*x2]} over Rational Field
            sage: B.cohomology(3)
            Free module generated by {[e4*e5*e6], [e4*e5*e7]} over Rational Field
        """
        # Lifted generators of the cohomology ring in each degree.
        cohomgens = self.cohomology_generators(max_degree)
        if not cohomgens:
            raise ValueError("Cohomology ring has no generators")
        # Flatten to parallel lists of generators and their degrees.
        chgens = []
        degrees = []
        for d in cohomgens:
            for g in cohomgens[d]:
                degrees.append(d)
                chgens.append(g)
        # Free graded commutative algebra on one symbol per generator.
        A = GradedCommutativeAlgebra(self.base_ring(),
                                     ['x{}'.format(i) for i in range(len(chgens))],
                                     degrees)
        # Find, degree by degree, the relations among the generators:
        # the kernel of the evaluation map A_d -> H^d(self).
        rels = []
        for d in range(1, max_degree + 1):
            B1 = A.basis(d)
            V2 = self.cohomology_raw(d)
            images = []
            for g in B1:
                # Evaluate the monomial on the lifted generators.
                ig = g._im_gens_(self, chgens)
                if ig.is_zero():
                    images.append(V2.zero())
                else:
                    images.append(V2(V2.V()(ig.basis_coefficients())))
            V1 = self.base_ring()**len(B1)
            h = V1.hom(images, codomain=V2)
            K = h.kernel()
            for g in K.basis():
                newrel = sum(g[i] * B1[i] for i in range(len(B1)))
                rels.append(newrel)
        # Quotient by the relations and endow with the zero differential.
        return A.quotient(A.ideal(rels)).cdg_algebra({})
def numerical_invariants(self, max_degree=3, max_iterations=3):
r"""
Return the numerical invariants of the algebra, up to degree ``d``. The
numerical invariants reflect the number of generators added at each step
of the construction of the minimal model.
The numerical invariants are the dimensions of the subsequent Hirsch
extensions used at each degree to compute the minimal model.
INPUT:
- ``max_degree`` -- integer (default: `3`); the degree up to which the
numerical invariants are computed
- ``max_iterations`` -- integer (default: `3`); the maximum number of iterations
used to compute the minimal model, if it is not already cached
EXAMPLES::
sage: A.<e1, e2, e3> = GradedCommutativeAlgebra(QQ)
sage: B = A.cdg_algebra({e3 : e1*e2})
sage: B.minimal_model(4)
Commutative Differential Graded Algebra morphism:
From: Commutative Differential Graded Algebra with generators ('x1_0', 'x1_1', 'y1_0') in degrees (1, 1, 1) over Rational Field with differential:
x1_0 --> 0
x1_1 --> 0
y1_0 --> x1_0*x1_1
To: Commutative Differential Graded Algebra with generators ('e1', 'e2', 'e3') in degrees (1, 1, 1) over Rational Field with differential:
e1 --> 0
e2 --> 0
e3 --> e1*e2
Defn: (x1_0, x1_1, y1_0) --> (e1, e2, e3)
sage: B.numerical_invariants(2)
{1: [2, 1, 0], 2: [0, 0]}
ALGORITHM:
The numerical invariants are stored as the minimal model is constructed.
.. WARNING::
The method is not granted to finish (it can't, since the minimal
model could be infinitely generated in some degrees).
The parameter ``max_iterations`` controls how many iterations of
the method are attempted at each degree. In case they are not
enough, an exception is raised. If you think that the result will
be finitely generated, you can try to run it again with a higher
value for ``max_iterations``.
REFERENCES:
For a precise definition and properties, see [Man2019]_ .
"""
self.minimal_model(max_degree, max_iterations)
return {i: self._numerical_invariants[i]
for i in range(1, max_degree + 1)}
    def is_formal(self, i, max_iterations=3):
        r"""
        Check if the algebra is ``i``-formal. That is, if it is
        ``i``-quasi-isomorphic to its cohomology algebra.

        INPUT:

        - ``i`` -- integer; the degree up to which the formality is checked

        - ``max_iterations`` -- integer (default: `3`); the maximum number of
          iterations used in the computation of the minimal model

        .. WARNING::

            The method is not granted to finish (it can't, since the minimal
            model could be infinitely generated in some degrees).

            The parameter ``max_iterations`` controls how many iterations of
            the method are attempted at each degree. In case they are not
            enough, an exception is raised. If you think that the result will
            be finitely generated, you can try to run it again with a higher
            value for ``max_iterations``.

            Moreover, the method uses criteria that are often enough to conclude
            that the algebra is either formal or non-formal. However, it could
            happen that the used criteria can not determine the formality. In
            that case, an error is raised.

        EXAMPLES::

            sage: A.<e1, e2, e3, e4, e5> = GradedCommutativeAlgebra(QQ)
            sage: B = A.cdg_algebra({e5 : e1*e2 + e3*e4})
            sage: B.is_formal(1)
            True
            sage: B.is_formal(2)
            False

        ALGORITHM:

        Apply the criteria in [Man2019]_ . Both the `i`-minimal model of the
        algebra and its cohomology algebra are computed. If the numerical
        invariants are different, the algebra is not `i`-formal.
        If the numerical invariants match, the `\psi` condition is checked.
        """
        # Minimal model of ``self`` and of its cohomology algebra.
        phi = self.minimal_model(i, max_iterations)
        M = phi.domain()
        H = M.cohomology_algebra(i + 1)
        try:
            H.minimal_model(i, max_iterations)
        except ValueError:  # If we could compute the minimal model in max_iterations
            return False    # but not for the cohomology, the invariants are distinct
        N1 = self.numerical_invariants(i, max_iterations)
        N2 = H.numerical_invariants(i, max_iterations)
        if any(N1[n] != N2[n] for n in range(1, i + 1)):
            return False  # numerical invariants don't match
        # The psi condition: substitute 0 for the non-closed generators of M
        # and check that each generator's differential becomes a coboundary.
        subsdict = {y.lift(): 0 for y in M.gens() if not y.differential().is_zero()}
        tocheck = [M(g.differential().lift().subs(subsdict)) for g in M.gens()]
        if all(c.is_coboundary() for c in tocheck):
            return True  # the morphism xi->[xi], yi->0 is i-quasi-iso
        raise NotImplementedError("the implemented criteria cannot determine formality")
    class Element(GCAlgebra.Element):
        # Element of a commutative differential graded algebra: inherits
        # the ring arithmetic from ``GCAlgebra.Element`` and adds the
        # differential/cohomology queries below.
        def differential(self):
            """
            The differential on this element.

            EXAMPLES::

                sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees = (2, 2, 3, 4))
                sage: B = A.cdg_algebra({t: x*z, x: z, y: z})
                sage: B.inject_variables()
                Defining x, y, z, t
                sage: x.differential()
                z
                sage: (-1/2 * x^2 + t).differential()
                0
            """
            # Delegate to the parent algebra's differential map.
            return self.parent().differential()(self)

        def is_coboundary(self):
            """
            Return ``True`` if ``self`` is a coboundary and ``False``
            otherwise.

            This raises an error if the element is not homogeneous.

            EXAMPLES::

                sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2))
                sage: B = A.cdg_algebra(differential={b: a*c})
                sage: x,y,z = B.gens()
                sage: x.is_coboundary()
                False
                sage: (x*z).is_coboundary()
                True
                sage: (x*z+x*y).is_coboundary()
                False
                sage: (x*z+y**2).is_coboundary()
                Traceback (most recent call last):
                ...
                ValueError: This element is not homogeneous
            """
            if not self.is_homogeneous():
                raise ValueError('This element is not homogeneous')
            # To avoid taking the degree of 0, we special-case it.
            if self.is_zero():
                return True
            # Membership in the coboundary subspace of F^d (d = dimension of
            # the homogeneous component) decides the question.
            v = vector(self.basis_coefficients())
            return v in self.parent().coboundaries(self.degree())

        def is_cohomologous_to(self, other):
            """
            Return ``True`` if ``self`` is cohomologous to ``other``
            and ``False`` otherwise.

            INPUT:

            - ``other`` -- another element of this algebra

            EXAMPLES::

                sage: A.<a,b,c,d> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1,1))
                sage: B = A.cdg_algebra(differential={a:b*c-c*d})
                sage: w, x, y, z = B.gens()
                sage: (x*y).is_cohomologous_to(y*z)
                True
                sage: (x*y).is_cohomologous_to(x*z)
                False
                sage: (x*y).is_cohomologous_to(x*y)
                True

            Two elements whose difference is not homogeneous are
            cohomologous if and only if they are both coboundaries::

                sage: w.is_cohomologous_to(y*z)
                False
                sage: (x*y-y*z).is_cohomologous_to(x*y*z)
                True
                sage: (x*y*z).is_cohomologous_to(0) # make sure 0 works
                True
            """
            # Zero is handled first: comparing against 0 reduces to a plain
            # coboundary test and sidesteps the isinstance check below.
            if other.is_zero():
                return self.is_coboundary()
            if (not isinstance(other, DifferentialGCAlgebra.Element)
                    or self.parent() is not other.parent()):
                raise ValueError('The element {} does not lie in this DGA'
                                 .format(other))
            if (self - other).is_homogeneous():
                return (self - other).is_coboundary()
            else:
                return (self.is_coboundary() and other.is_coboundary())

        def cohomology_class(self):
            r"""
            Return the cohomology class of an homogeneous cycle, as an element
            of the corresponding cohomology group.

            EXAMPLES::

                sage: A.<e1,e2,e3,e4,e5> = GradedCommutativeAlgebra(QQ)
                sage: B = A.cdg_algebra({e5:e1*e2+e3*e4})
                sage: B.inject_variables()
                Defining e1, e2, e3, e4, e5
                sage: a = e1*e3*e5-3*e2*e3*e5
                sage: a.cohomology_class()
                B[[e1*e3*e5]] - 3*B[[e2*e3*e5]]

            TESTS::

                sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1, 2, 3))
                sage: B = A.cdg_algebra({a:b})
                sage: B.inject_variables()
                Defining a, b, c
                sage: b.cohomology_class()
                0
                sage: b.cohomology_class().parent()
                Free module generated by {} over Rational Field

            Check that the issue detected in :trac:`28155` is solved::

                sage: A.<e1,e2,e3,e4,e5> = GradedCommutativeAlgebra(QQ)
                sage: B = A.cdg_algebra({e5:e1*e2+e3*e4})
                sage: B.inject_variables()
                Defining e1, e2, e3, e4, e5
                sage: a = e1*e3*e5-3*e2*e3*e5
                sage: ca = a.cohomology_class()
                sage: C = B.cohomology(3)
                sage: ca in C
                True
            """
            if not self.is_homogeneous():
                raise ValueError("The element is not homogeneous")
            if not self.differential().is_zero():
                raise ValueError("The element is not closed")
            d = self.degree()
            # ``cohomology`` is the combinatorial free module on the classes,
            # ``cohomology_raw`` the quotient vector space; we map our
            # coefficient vector through the raw quotient and rebuild the
            # element in the free module.
            C = self.parent().cohomology(d)
            CR = self.parent().cohomology_raw(d)
            V = CR.V()
            cohomcoefs = CR(V(self.basis_coefficients()))
            return C.sum(a * b for (a, b) in zip(cohomcoefs, C.basis().values()))

        def _cohomology_class_dict(self):
            r"""
            Return the dictionary that represents the cohomology class of
            the cycle expressed in terms of the cohomology generators.

            This can be used to map the cycle to the cohomology algebra.

            EXAMPLES::

                sage: A.<e1,e2,e3,e4,e5> = GradedCommutativeAlgebra(QQ)
                sage: B = A.cdg_algebra({e5:e1*e2+e3*e4})
                sage: a = B(e1*e3*e5-3*e2*e3*e5)
                sage: a._cohomology_class_dict()
                {(0, 0, 0, 0, 0, 0, 1, 0, 0): -3, (0, 0, 0, 0, 0, 1, 0, 0, 0): 1}
                sage: H = B.cohomology_algebra(3)
                sage: H(a._cohomology_class_dict())
                x5 - 3*x6
                sage: B.cohomology_generators(3)
                {1: [e1, e2, e3, e4],
                 3: [e1*e2*e5 - e3*e4*e5, e1*e3*e5, e2*e3*e5, e1*e4*e5, e2*e4*e5]}
                sage: [H(g._cohomology_class_dict()) for g in flatten(B.cohomology_generators(3).values())]
                [x0, x1, x2, x3, x4, x5, x6, x7, x8]
            """
            from sage.misc.flatten import flatten
            if not self.differential().is_zero():
                raise ValueError("The element is not closed")
            # Non-homogeneous cycles are handled degree by degree and the
            # partial dictionaries are merged.
            if not self.is_homogeneous():
                res = {}
                for d in self.homogeneous_parts().values():
                    res.update(d._cohomology_class_dict())
                return res
            d = self.degree()
            gens = flatten(self.parent().cohomology_generators(d).values())
            # Each exponent vector in ``ebasis`` describes a product of
            # cohomology generators of total degree ``d``.
            ebasis = exterior_algebra_basis(d, tuple(g.degree() for g in gens))
            gensd = [prod([gens[i]**b[i]
                           for i in range(len(b))]) for b in ebasis]
            # Solve for the coordinates of our class in that monomial basis.
            m = matrix([g.cohomology_class()._vector_() for g in gensd])
            coeffs = m.solve_left(self.cohomology_class()._vector_())
            return {tuple(ebasis[i]): coeffs[i]
                    for i in range(len(ebasis)) if coeffs[i]}
class DifferentialGCAlgebra_multigraded(DifferentialGCAlgebra,
                                        GCAlgebra_multigraded):
    """
    A commutative differential multi-graded algebra.

    INPUT:

    - ``A`` -- a commutative multi-graded algebra

    - ``differential`` -- a differential

    EXAMPLES::

        sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
        sage: B = A.cdg_algebra(differential={a: c})
        sage: B.basis((1,0))
        [a]
        sage: B.basis(1, total=True)
        [a, b]
        sage: B.cohomology((1, 0))
        Free module generated by {} over Rational Field
        sage: B.cohomology(1, total=True)
        Free module generated by {[b]} over Rational Field
    """
    def __init__(self, A, differential):
        """
        Initialize ``self``.

        INPUT:

        - ``A`` -- a multi-graded commutative algebra

        - ``differential`` -- a differential

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: B = A.cdg_algebra(differential={a: c})

        Trying to define a differential which is not multi-graded::

            sage: A.<t,x,y,z> = GradedCommutativeAlgebra(QQ, degrees=((1,0),(1,0),(2,0),(0,2)))
            sage: B = A.cdg_algebra(differential={x:y}) # good
            sage: B = A.cdg_algebra(differential={t:z}) # good
            sage: B = A.cdg_algebra(differential={x:y, t:z}) # bad
            Traceback (most recent call last):
            ...
            ValueError: The differential does not have a well-defined degree
        """
        # The category must combine graded algebras with chain complexes;
        # it is fixed before the (multi-)graded algebra machinery is set up.
        cat = Algebras(A.base()).Graded() & ChainComplexes(A.base())
        GCAlgebra_multigraded.__init__(self, A.base(), names=A._names,
                                       degrees=A._degrees_multi,
                                       R=A.cover_ring(), I=A.defining_ideal(),
                                       category=cat)
        # Re-wrap the differential so degree checks use the multi-grading;
        # this may raise if the differential is not multi-graded.
        self._differential = Differential_multigraded(self, differential._dic_)

    def _base_repr(self):
        """
        Return the base string representation of ``self``.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: A.cdg_algebra(differential={a: c})._base_repr()
            "Commutative Differential Graded Algebra with generators ('a', 'b', 'c') in degrees ((1, 0), (0, 1), (0, 2)) over Rational Field"
        """
        # Swap the single-graded degrees in the generic representation for
        # the multidegrees.
        s = DifferentialGCAlgebra._base_repr(self)
        old = '{}'.format(self._degrees)
        new = '{}'.format(self._degrees_multi)
        return s.replace(old, new)

    def coboundaries(self, n, total=False):
        """
        The ``n``-th coboundary group of the algebra.

        This is a vector space over the base field `F`, and it is
        returned as a subspace of the vector space `F^d`, where the
        ``n``-th homogeneous component has dimension `d`.

        INPUT:

        - ``n`` -- degree

        - ``total`` (default ``False``) -- if ``True``, return the
          coboundaries in total degree ``n``

        If ``n`` is an integer rather than a multi-index, then the
        total degree is used in that case as well.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: B = A.cdg_algebra(differential={a: c})
            sage: B.coboundaries((0,2))
            Vector space of degree 1 and dimension 1 over Rational Field
            Basis matrix:
            [1]
            sage: B.coboundaries(2)
            Vector space of degree 2 and dimension 1 over Rational Field
            Basis matrix:
            [0 1]
        """
        return self._differential.coboundaries(n, total)

    def cocycles(self, n, total=False):
        r"""
        The ``n``-th cocycle group of the algebra.

        This is a vector space over the base field `F`, and it is
        returned as a subspace of the vector space `F^d`, where the
        ``n``-th homogeneous component has dimension `d`.

        INPUT:

        - ``n`` -- degree

        - ``total`` -- (default: ``False``) if ``True``, return the
          cocycles in total degree ``n``

        If ``n`` is an integer rather than a multi-index, then the
        total degree is used in that case as well.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: B = A.cdg_algebra(differential={a: c})
            sage: B.cocycles((0,1))
            Vector space of degree 1 and dimension 1 over Rational Field
            Basis matrix:
            [1]
            sage: B.cocycles((0,1), total=True)
            Vector space of degree 2 and dimension 1 over Rational Field
            Basis matrix:
            [0 1]
        """
        return self._differential.cocycles(n, total)

    def cohomology_raw(self, n, total=False):
        """
        The ``n``-th cohomology group of the algebra.

        This is a vector space over the base ring, and it is returned
        as the quotient cocycles/coboundaries.

        Compare to :meth:`cohomology`.

        INPUT:

        - ``n`` -- degree

        - ``total`` -- (default: ``False``) if ``True``, return the
          cohomology in total degree ``n``

        If ``n`` is an integer rather than a multi-index, then the
        total degree is used in that case as well.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: B = A.cdg_algebra(differential={a: c})
            sage: B.cohomology_raw((0,2))
            Vector space quotient V/W of dimension 0 over Rational Field where
            V: Vector space of degree 1 and dimension 1 over Rational Field
            Basis matrix:
            [1]
            W: Vector space of degree 1 and dimension 1 over Rational Field
            Basis matrix:
            [1]
            sage: B.cohomology_raw(1)
            Vector space quotient V/W of dimension 1 over Rational Field where
            V: Vector space of degree 2 and dimension 1 over Rational Field
            Basis matrix:
            [0 1]
            W: Vector space of degree 2 and dimension 0 over Rational Field
            Basis matrix:
            []
        """
        return self._differential.cohomology_raw(n, total)

    def cohomology(self, n, total=False):
        """
        The ``n``-th cohomology group of the algebra.

        This is a vector space over the base ring, defined as the
        quotient cocycles/coboundaries. The elements of the quotient
        are lifted to the vector space of cocycles, and this is
        described in terms of those lifts.

        Compare to :meth:`cohomology_raw`.

        INPUT:

        - ``n`` -- degree

        - ``total`` -- (default: ``False``) if ``True``, return the
          cohomology in total degree ``n``

        If ``n`` is an integer rather than a multi-index, then the
        total degree is used in that case as well.

        EXAMPLES::

            sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0, 1), (0,2)))
            sage: B = A.cdg_algebra(differential={a: c})
            sage: B.cohomology((0,2))
            Free module generated by {} over Rational Field
            sage: B.cohomology(1)
            Free module generated by {[b]} over Rational Field
        """
        return self._differential.cohomology(n, total)

    # Cohomology and homology coincide here; keep both names available.
    homology = cohomology

    class Element(GCAlgebra_multigraded.Element, DifferentialGCAlgebra.Element):
        """
        Element class of a commutative differential multi-graded algebra.
        """
################################################
# Main entry point
def GradedCommutativeAlgebra(ring, names=None, degrees=None, relations=None):
    r"""
    A graded commutative algebra.

    INPUT:

    There are two ways to call this. The first way defines a free
    graded commutative algebra:

    - ``ring`` -- the base field over which to work

    - ``names`` -- names of the generators. You may also use Sage's
      ``A.<x,y,...> = ...`` syntax to define the names. If no names
      are specified, the generators are named ``x0``, ``x1``, ...

    - ``degrees`` -- degrees of the generators; if this is omitted,
      the degree of each generator is 1, and if both ``names`` and
      ``degrees`` are omitted, an error is raised

    Once such an algebra has been defined, one can use its associated
    methods to take a quotient, impose a differential, etc. See the
    examples below.

    The second way takes a graded commutative algebra and imposes
    relations:

    - ``ring`` -- a graded commutative algebra

    - ``relations`` -- a list or tuple of elements of ``ring``

    EXAMPLES:

    Defining a graded commutative algebra::

        sage: GradedCommutativeAlgebra(QQ, 'x, y, z')
        Graded Commutative Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 1) over Rational Field
        sage: GradedCommutativeAlgebra(QQ, degrees=(2, 3, 4))
        Graded Commutative Algebra with generators ('x0', 'x1', 'x2') in degrees (2, 3, 4) over Rational Field

    As usual in Sage, the ``A.<...>`` notation defines both the
    algebra and the generator names::

        sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1, 1, 2))
        sage: x^2
        0
        sage: y*x # Odd classes anticommute.
        -x*y
        sage: z*y # z is central since it is in degree 2.
        y*z
        sage: (x*y*z**3).degree()
        8
        sage: A.basis(3) # basis of homogeneous degree 3 elements
        [x*z, y*z]

    Defining a quotient::

        sage: I = A.ideal(x*z)
        sage: AQ = A.quotient(I)
        sage: AQ
        Graded Commutative Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) with relations [x*z] over Rational Field
        sage: AQ.basis(3)
        [y*z]

    Note that ``AQ`` has no specified differential. This is reflected in
    its print representation: ``AQ`` is described as a "graded commutative
    algebra" -- the word "differential" is missing. Also, it has no
    default ``differential``::

        sage: AQ.differential() # py2
        Traceback (most recent call last):
        ...
        TypeError: differential() takes exactly 2 arguments (1 given)
        sage: AQ.differential() # py3
        Traceback (most recent call last):
        ...
        TypeError: differential() missing 1 required positional argument:
        'diff'

    Now we add a differential to ``AQ``::

        sage: B = AQ.cdg_algebra({z:y*z})
        sage: B
        Commutative Differential Graded Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) with relations [x*z] over Rational Field with differential:
            x --> 0
            y --> 0
            z --> y*z
        sage: B.differential()
        Differential of Commutative Differential Graded Algebra with generators ('x', 'y', 'z') in degrees (1, 1, 2) with relations [x*z] over Rational Field
          Defn: x --> 0
                y --> 0
                z --> y*z
        sage: B.cohomology(1)
        Free module generated by {[x], [y]} over Rational Field
        sage: B.cohomology(2)
        Free module generated by {[x*y]} over Rational Field

    We compute algebra generators for cohomology in a range of
    degrees. This cohomology algebra appears to be finitely
    generated::

        sage: B.cohomology_generators(15)
        {1: [x, y]}

    We can construct multi-graded rings as well. We work in characteristic 2
    for a change, so the algebras here are honestly commutative::

        sage: C.<a,b,c,d> = GradedCommutativeAlgebra(GF(2), degrees=((1,0), (1,1), (0,2), (0,3)))
        sage: D = C.cdg_algebra(differential={a:c, b:d})
        sage: D
        Commutative Differential Graded Algebra with generators ('a', 'b', 'c', 'd') in degrees ((1, 0), (1, 1), (0, 2), (0, 3)) over Finite Field of size 2 with differential:
            a --> c
            b --> d
            c --> 0
            d --> 0

    We can examine ``D`` using both total degrees and multidegrees.
    Use tuples, lists, vectors, or elements of additive
    abelian groups to specify degrees::

        sage: D.basis(3) # basis in total degree 3
        [a^3, a*b, a*c, d]
        sage: D.basis((1,2)) # basis in degree (1,2)
        [a*c]
        sage: D.basis([1,2])
        [a*c]
        sage: D.basis(vector([1,2]))
        [a*c]
        sage: G = AdditiveAbelianGroup([0,0]); G
        Additive abelian group isomorphic to Z + Z
        sage: D.basis(G(vector([1,2])))
        [a*c]

    At this point, ``a``, for example, is an element of ``C``. We can
    redefine it so that it is instead an element of ``D`` in several
    ways, for instance using :meth:`gens` method::

        sage: a, b, c, d = D.gens()
        sage: a.differential()
        c

    Or the :meth:`inject_variables` method::

        sage: D.inject_variables()
        Defining a, b, c, d
        sage: (a*b).differential()
        b*c + a*d
        sage: (a*b*c**2).degree()
        (2, 5)

    Degrees are returned as elements of additive abelian groups::

        sage: (a*b*c**2).degree() in G
        True
        sage: (a*b*c**2).degree(total=True)  # total degree
        7
        sage: D.cohomology(4)
        Free module generated by {[a^4], [b^2]} over Finite Field of size 2
        sage: D.cohomology((2,2))
        Free module generated by {[b^2]} over Finite Field of size 2

    TESTS:

    We need to specify either name or degrees::

        sage: GradedCommutativeAlgebra(QQ)
        Traceback (most recent call last):
        ...
        ValueError: You must specify names or degrees
    """
    def _looks_multigraded(degs):
        # A multidegree is itself iterable (tuple, list, vector, ...);
        # a plain integer degree is not.  Probing every entry with
        # ``list`` catches mixed or malformed specifications too.
        try:
            for d in degs:
                list(d)
        except TypeError:
            return False
        return True

    if degrees and _looks_multigraded(degrees):
        return GCAlgebra_multigraded(ring, names=names, degrees=degrees)
    return GCAlgebra(ring, names=names, degrees=degrees)
################################################
# Morphisms
class GCAlgebraMorphism(RingHomomorphism_im_gens):
    """
    Create a morphism between two :class:`graded commutative algebras <GCAlgebra>`.

    INPUT:

    - ``parent`` -- the parent homset

    - ``im_gens`` -- the images, in the codomain, of the generators of
      the domain

    - ``check`` -- boolean (default: ``True``); check whether the
      proposed map is actually an algebra map; if the domain and
      codomain have differentials, also check that the map respects
      those.

    EXAMPLES::

        sage: A.<x,y> = GradedCommutativeAlgebra(QQ)
        sage: H = Hom(A,A)
        sage: f = H([y,x])
        sage: f
        Graded Commutative Algebra endomorphism of Graded Commutative Algebra with generators ('x', 'y') in degrees (1, 1) over Rational Field
          Defn: (x, y) --> (y, x)
        sage: f(x*y)
        -x*y
    """
    def __init__(self, parent, im_gens, check=True):
        r"""
        TESTS:

        The entries in ``im_gens`` must lie in the codomain::

            sage: A.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: B.<a,b> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: H = Hom(A,A)
            sage: H([x,b])
            Traceback (most recent call last):
            ...
            ValueError: not all elements of im_gens are in the codomain

        Note that morphisms do not need to respect the grading;
        whether they do can be tested with the method
        :meth:`is_graded`::

            sage: A.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: H = Hom(A,A)
            sage: f = H([x,x])
            sage: f
            Graded Commutative Algebra endomorphism of Graded Commutative Algebra with generators ('x', 'y') in degrees (1, 2) over Rational Field
              Defn: (x, y) --> (x, x)
            sage: f.is_graded()
            False
            sage: TestSuite(f).run(skip="_test_category")

        Since `x^2=0` but `y^2 \neq 0`, the following does not define a valid morphism::

            sage: H([y,y])
            Traceback (most recent call last):
            ...
            ValueError: the proposed morphism does not respect the relations

        This is okay in characteristic two since then `x^2 \neq 0`::

            sage: A2.<x,y> = GradedCommutativeAlgebra(GF(2), degrees=(1,2))
            sage: H2 = Hom(A2,A2)
            sage: H2([y,y])
            Graded Commutative Algebra endomorphism of Graded Commutative Algebra with generators ('x', 'y') in degrees (1, 2) over Finite Field of size 2
              Defn: (x, y) --> (y, y)

        The "nc-relations" `a*b = -b*a`, for `a` and `b` in odd
        degree, are checked first, and we can see this when using more
        generators::

            sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
            sage: Hom(A,A)([x,z,z])
            Traceback (most recent call last):
            ...
            ValueError: the proposed morphism does not respect the nc-relations

        Other relations::

            sage: B.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1))
            sage: D = B.quotient(B.ideal(x*y))
            sage: H = Hom(D,D)
            sage: D.inject_variables()
            Defining x, y, z
            sage: H([x,z,z])
            Traceback (most recent call last):
            ...
            ValueError: the proposed morphism does not respect the relations

        The morphisms must respect the differentials, when present::

            sage: B.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1))
            sage: C = B.cdg_algebra({z: x*y})
            sage: C.inject_variables()
            Defining x, y, z
            sage: H = Hom(C,C)
            sage: H([x,z,z])
            Traceback (most recent call last):
            ...
            ValueError: the proposed morphism does not respect the differentials

        In the case of only one generator, the cover ring is a polynomial ring,
        hence the noncommutativity relations should not be checked::

            sage: A.<e1> = GradedCommutativeAlgebra(QQ)
            sage: A.cover_ring()
            Multivariate Polynomial Ring in e1 over Rational Field
            sage: A.hom([2*e1])
            Graded Commutative Algebra endomorphism of Graded Commutative Algebra with generators ('e1',) in degrees (1,) over Rational Field
              Defn: (e1,) --> (2*e1,)
        """
        domain = parent.domain()
        codomain = parent.codomain()
        # We use check=False here because checking of nc-relations is
        # not implemented in RingHomomorphism_im_gens.__init__.
        # We check these relations below.
        RingHomomorphism_im_gens.__init__(self, parent=parent,
                                          im_gens=im_gens,
                                          check=False)
        self._im_gens = tuple(im_gens)
        # Now check that the relations are respected.
        if check:
            if any(x not in codomain for x in im_gens):
                raise ValueError('not all elements of im_gens are in '
                                 'the codomain')
            R = domain.cover_ring()
            from_R = dict(zip(R.gens(), im_gens))
            # A single generator gives a commutative polynomial cover ring
            # with no ``free_algebra``/nc-relations to verify.
            if hasattr(R, 'free_algebra'):
                from_free = dict(zip(R.free_algebra().gens(), im_gens))
                # First check the nc-relations: x*y=-y*x for x, y in odd
                # degrees. These are in the form of a dictionary, with
                # typical entry left:right.
                for left in R.relations():
                    zero = left.subs(from_free) - R.relations()[left].subs(from_R)
                    if zero:
                        raise ValueError('the proposed morphism does not respect '
                                         'the nc-relations')
            # Now check any extra relations, including x**2=0 for x in
            # odd degree. These are defined by a list of generators of
            # the defining ideal.
            for g in domain.defining_ideal().gens():
                zero = g.subs(from_R)
                if zero:
                    raise ValueError('the proposed morphism does not respect '
                                     'the relations')
            # If the domain and codomain have differentials, check
            # those, too.
            if (isinstance(domain, DifferentialGCAlgebra)
                    and isinstance(codomain, DifferentialGCAlgebra)):
                dom_diff = domain.differential()
                cod_diff = codomain.differential()
                if any(cod_diff(self(g)) != self(dom_diff(g))
                       for g in domain.gens()):
                    raise ValueError('the proposed morphism does not respect '
                                     'the differentials')

    def _call_(self, x):
        """
        Evaluate this morphism on ``x``.

        INPUT:

        - ``x`` -- an element of the domain

        EXAMPLES::

            sage: A.<x,y> = GradedCommutativeAlgebra(GF(2))
            sage: H = Hom(A,A)
            sage: g = H([y,y])
            sage: g(x)
            y
            sage: g(x*y)
            y^2
            sage: B.<x,y,z> = GradedCommutativeAlgebra(QQ)
            sage: H = Hom(B,B)
            sage: f = H([y,x,x])
            sage: f(x)
            y
            sage: f(3*x*y)
            -3*x*y
            sage: f(y*z)
            0
            sage: f(1)
            1
        """
        codomain = self.codomain()
        result = codomain.zero()
        # Substitute generator images monomial by monomial: ``mono`` is an
        # exponent tuple, so each term is the product of image**exponent.
        for mono, coeff in x.dict().items():
            term = prod([gen**y for (y, gen) in zip(mono, self.im_gens())],
                        codomain.one())
            result += coeff * term
        return result

    def is_graded(self, total=False):
        """
        Return ``True`` if this morphism is graded.

        That is, return ``True`` if `f(x)` is zero, or if `f(x)` is
        homogeneous and has the same degree as `x`, for each generator
        `x`.

        INPUT:

        - ``total`` (optional, default ``False``) -- if ``True``, use
          the total degree to determine whether the morphism is graded
          (relevant only in the multigraded case)

        EXAMPLES::

            sage: C.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,1,2))
            sage: H = Hom(C,C)
            sage: H([a, b, a*b + 2*a]).is_graded()
            False
            sage: H([a, b, a*b]).is_graded()
            True
            sage: A.<w,x> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (1,0)))
            sage: B.<y,z> = GradedCommutativeAlgebra(QQ, degrees=((1,0), (0,1)))
            sage: H = Hom(A,B)
            sage: H([y,0]).is_graded()
            True
            sage: H([z,z]).is_graded()
            False
            sage: H([z,z]).is_graded(total=True)
            True
        """
        return all(not y  # zero is always allowed as an image
                   or (y.is_homogeneous()
                       and x.degree(total=total) == y.degree(total=total))
                   for (x, y) in zip(self.domain().gens(), self.im_gens()))

    def _repr_type(self):
        """
        EXAMPLES::

            sage: B.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1))
            sage: C = B.cdg_algebra({z: x*y})
            sage: Hom(B,B)([z,y,x])._repr_type()
            'Graded Commutative Algebra'
            sage: C.inject_variables()
            Defining x, y, z
            sage: Hom(C,C)([x,0,0])._repr_type()
            'Commutative Differential Graded Algebra'
        """
        # Mention the differential only when both ends carry one.
        if (isinstance(self.domain(), DifferentialGCAlgebra)
                and isinstance(self.codomain(), DifferentialGCAlgebra)):
            return "Commutative Differential Graded Algebra"
        return "Graded Commutative Algebra"

    def _repr_defn(self):
        """
        EXAMPLES::

            sage: A.<x,y> = GradedCommutativeAlgebra(QQ)
            sage: Hom(A,A)([y,x])._repr_defn()
            '(x, y) --> (y, x)'
        """
        gens = self.domain().gens()
        return "{} --> {}".format(gens, self._im_gens)
################################################
# Homsets
class GCAlgebraHomset(RingHomset_generic):
    """
    Set of morphisms between two graded commutative algebras.

    .. NOTE::

        Homsets (and thus morphisms) have only been implemented when
        the base fields are the same for the domain and codomain.

    EXAMPLES::

        sage: A.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
        sage: H = Hom(A,A)
        sage: H([x,y]) == H.identity()
        True
        sage: H([x,x]) == H.identity()
        False
        sage: A.<w,x> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
        sage: B.<y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1))
        sage: H = Hom(A,B)
        sage: H([y,0])
        Graded Commutative Algebra morphism:
          From: Graded Commutative Algebra with generators ('w', 'x') in degrees (1, 2) over Rational Field
          To:   Graded Commutative Algebra with generators ('y', 'z') in degrees (1, 1) over Rational Field
          Defn: (w, x) --> (y, 0)
        sage: H([y,y*z])
        Graded Commutative Algebra morphism:
          From: Graded Commutative Algebra with generators ('w', 'x') in degrees (1, 2) over Rational Field
          To:   Graded Commutative Algebra with generators ('y', 'z') in degrees (1, 1) over Rational Field
          Defn: (w, x) --> (y, y*z)
    """
    @cached_method
    def zero(self):
        """
        Construct the "zero" morphism of this homset: the map sending each
        generator to zero.

        EXAMPLES::

            sage: A.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: B.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(1,1,1))
            sage: zero = Hom(A,B).zero()
            sage: zero(x) == zero(y) == 0
            True
        """
        # One zero image per domain generator.
        return GCAlgebraMorphism(self, [self.codomain().zero()]
                                 * self.domain().ngens())

    @cached_method
    def identity(self):
        """
        Construct the identity morphism of this homset.

        EXAMPLES::

            sage: A.<x,y> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: H = Hom(A,A)
            sage: H([x,y]) == H.identity()
            True
            sage: H([x,x]) == H.identity()
            False
        """
        if self.domain() != self.codomain():
            raise TypeError('identity map is only defined for '
                            'endomorphism sets')
        return GCAlgebraMorphism(self, self.domain().gens())

    def __call__(self, im_gens, check=True):
        """
        Create a homomorphism.

        INPUT:

        - ``im_gens`` -- the images of the generators of the domain

        EXAMPLES::

            sage: A.<w,x> = GradedCommutativeAlgebra(QQ, degrees=(1,2))
            sage: B.<y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,1))
            sage: H = Hom(A,B)
            sage: H([y,0])
            Graded Commutative Algebra morphism:
              From: Graded Commutative Algebra with generators ('w', 'x') in degrees (1, 2) over Rational Field
              To:   Graded Commutative Algebra with generators ('y', 'z') in degrees (1, 1) over Rational Field
              Defn: (w, x) --> (y, 0)
            sage: H([y,y*z])
            Graded Commutative Algebra morphism:
              From: Graded Commutative Algebra with generators ('w', 'x') in degrees (1, 2) over Rational Field
              To:   Graded Commutative Algebra with generators ('y', 'z') in degrees (1, 1) over Rational Field
              Defn: (w, x) --> (y, y*z)
        """
        from sage.categories.map import Map
        # Passing an existing map delegates to the coercion machinery;
        # anything else is treated as a list of generator images.
        if isinstance(im_gens, Map):
            return self._coerce_impl(im_gens)
        else:
            return GCAlgebraMorphism(self, im_gens, check=check)
################################################
# Miscellaneous utility classes and functions
class CohomologyClass(SageObject, CachedRepresentation):
    """
    A class for representing cohomology classes.

    The wrapped object is used only for printing: ``_repr_`` and
    ``_latex_`` surround its name with brackets.

    EXAMPLES::

        sage: from sage.algebras.commutative_dga import CohomologyClass
        sage: CohomologyClass(3)
        [3]
        sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees = (2,2,3,3))
        sage: CohomologyClass(x^2+2*y*z)
        [2*y*z + x^2]
    """
    def __init__(self, x):
        """
        Store the chosen representative of the class.

        EXAMPLES::

            sage: from sage.algebras.commutative_dga import CohomologyClass
            sage: CohomologyClass(x-2)
            [x - 2]
        """
        self._x = x

    def __hash__(self):
        r"""
        Hash exactly like the wrapped representative.

        TESTS::

            sage: from sage.algebras.commutative_dga import CohomologyClass
            sage: hash(CohomologyClass(sin)) == hash(sin)
            True
        """
        return hash(self._x)

    def _repr_(self):
        """
        String form: the representative wrapped in square brackets.

        EXAMPLES::

            sage: from sage.algebras.commutative_dga import CohomologyClass
            sage: CohomologyClass(sin)
            [sin]
        """
        return f'[{self._x}]'

    def _latex_(self):
        r"""
        LaTeX form: the representative wrapped in ``\left[ \right]``.

        EXAMPLES::

            sage: from sage.algebras.commutative_dga import CohomologyClass
            sage: latex(CohomologyClass(sin))
            \left[ \sin \right]
            sage: latex(CohomologyClass(x^2))
            \left[ x^{2} \right]
        """
        from sage.misc.latex import latex
        return rf'\left[ {latex(self._x)} \right]'

    def representative(self):
        """
        Return the representative of ``self``.

        EXAMPLES::

            sage: from sage.algebras.commutative_dga import CohomologyClass
            sage: x = CohomologyClass(sin)
            sage: x.representative() == sin
            True
        """
        return self._x
@cached_function
def exterior_algebra_basis(n, degrees):
    """
    Basis of an exterior algebra in degree ``n``, where the
    generators are in degrees ``degrees``.

    INPUT:

    - ``n`` - integer

    - ``degrees`` - iterable of integers

    Return list of lists, each list representing exponents for the
    corresponding generators. (So each list consists of 0's and 1's.)

    EXAMPLES::

        sage: from sage.algebras.commutative_dga import exterior_algebra_basis
        sage: exterior_algebra_basis(1, (1,3,1))
        [[0, 0, 1], [1, 0, 0]]
        sage: exterior_algebra_basis(4, (1,3,1))
        [[0, 1, 1], [1, 1, 0]]
        sage: exterior_algebra_basis(10, (1,5,1,1))
        []
    """
    # Base cases.  ``n == 0`` must come first so that an empty generator
    # list in degree zero yields the single empty exponent vector.
    if n == 0:
        return [[0] * len(degrees)]
    if len(degrees) == 1:
        return [[1]] if degrees[0] == n else []
    if not degrees:
        return []
    # Quick rejections / acceptance by total degree.
    total = sum(degrees)
    if min(degrees) > n or total < n:
        return []
    if total == n:
        return [[1] * len(degrees)]
    # Divide and conquer: split the generators in half and combine every
    # way of distributing the degree ``n`` across the two halves.
    mid = len(degrees) // 2
    basis = []
    for k in range(n + 1):
        left = exterior_algebra_basis(k, degrees[:mid])
        right = exterior_algebra_basis(n - k, degrees[mid:])
        basis.extend(a + b for a in left for b in right)
    basis.sort()
    return basis
def total_degree(deg):
    """
    Total degree of ``deg``.

    INPUT:

    - ``deg`` - an element of a free abelian group.

    In fact, ``deg`` could be an integer, a Python int, a list, a
    tuple, a vector, etc. This function returns the sum of the
    components of ``deg``.

    EXAMPLES::

        sage: from sage.algebras.commutative_dga import total_degree
        sage: total_degree(12)
        12
        sage: total_degree(range(5))
        10
        sage: total_degree(vector(range(5)))
        10
        sage: G = AdditiveAbelianGroup((0,0))
        sage: x = G.gen(0); y = G.gen(1)
        sage: 3*x+4*y
        (3, 4)
        sage: total_degree(3*x+4*y)
        7
    """
    # An honest integer is its own total degree; anything else is
    # treated as an iterable of components to be summed.
    return deg if deg in ZZ else sum(deg)
| 36.692637 | 303 | 0.532905 |
acf83fdf7badd3d9bd890826863e311040b71cef | 3,135 | py | Python | before_commit/languages/conda.py | pre-commit-fork/pre-commit | 53283596529f4e9bc1a34b4e62051a84fda78caa | [
"MIT"
] | null | null | null | before_commit/languages/conda.py | pre-commit-fork/pre-commit | 53283596529f4e9bc1a34b4e62051a84fda78caa | [
"MIT"
] | 2 | 2022-03-31T17:06:36.000Z | 2022-03-31T17:07:15.000Z | before_commit/languages/conda.py | pre-commit-fork/pre-commit | 53283596529f4e9bc1a34b4e62051a84fda78caa | [
"MIT"
] | 1 | 2022-03-30T22:16:24.000Z | 2022-03-30T22:16:24.000Z | from __future__ import annotations
import contextlib
import os
from typing import Generator
from typing import Sequence
from before_commit.envcontext import envcontext
from before_commit.envcontext import PatchesT
from before_commit.envcontext import SubstitutionT
from before_commit.envcontext import UNSET
from before_commit.envcontext import Var
from before_commit.hook import Hook
from before_commit.languages import helpers
from before_commit.prefix import Prefix
from before_commit.util import clean_path_on_failure
from before_commit.util import cmd_output_b
# Name of the per-hook directory holding the conda environment.
ENVIRONMENT_DIR = 'conda'
# conda hooks have no meaningful "language version": reuse the shared
# default-version and health-check helpers.
get_default_version = helpers.basic_get_default_version
health_check = helpers.basic_health_check
def get_env_patch(env: str) -> PatchesT:
    """Environment-variable patches that activate the conda env at ``env``.

    On non-windows systems executables live in ``$CONDA_PREFIX/bin``; on
    Windows they can additionally be in ``$CONDA_PREFIX/Library/bin``,
    ``$CONDA_PREFIX/Scripts`` and ``$CONDA_PREFIX`` itself (the latter
    only seems to be used for ``python.exe``).
    """
    path: SubstitutionT = (os.path.join(env, 'bin'), os.pathsep, Var('PATH'))
    if os.name == 'nt':  # pragma: no cover (platform specific)
        # Prepend each extra Windows location in turn so the final PATH
        # order is: Library/bin, Scripts, env root, bin, original PATH.
        for win_dir in (
                env,
                os.path.join(env, 'Scripts'),
                os.path.join(env, 'Library', 'bin'),
        ):
            path = (win_dir, os.pathsep, *path)

    return (
        ('PYTHONHOME', UNSET),
        ('VIRTUAL_ENV', UNSET),
        ('CONDA_PREFIX', env),
        ('PATH', path),
    )
@contextlib.contextmanager
def in_env(
        prefix: Prefix,
        language_version: str,
) -> Generator[None, None, None]:
    """Context manager that puts the hook's conda env onto the environment."""
    env_root = prefix.path(
        helpers.environment_dir(ENVIRONMENT_DIR, language_version),
    )
    with envcontext(get_env_patch(env_root)):
        yield
def _conda_exe() -> str:
    """Return the conda-compatible executable name to invoke.

    Opt-in environment variables select the faster drop-in replacements;
    micromamba takes precedence over mamba when both are set.
    """
    for env_var, exe in (
        ('PRE_COMMIT_USE_MICROMAMBA', 'micromamba'),
        ('PRE_COMMIT_USE_MAMBA', 'mamba'),
    ):
        if os.environ.get(env_var):
            return exe
    return 'conda'
def install_environment(
    prefix: Prefix,
    version: str,
    additional_dependencies: Sequence[str],
) -> None:
    """Create the hook's conda environment from its environment.yml.

    *version* must be the default (conda hooks are not version-pinned).
    Any *additional_dependencies* are installed into the env afterwards.
    The env directory is removed again if creation fails partway.
    """
    helpers.assert_version_default('conda', version)
    env_dir = prefix.path(
        helpers.environment_dir(ENVIRONMENT_DIR, version),
    )
    conda_exe = _conda_exe()
    with clean_path_on_failure(env_dir):
        cmd_output_b(
            conda_exe, 'env', 'create', '-p', env_dir, '--file',
            'environment.yml', cwd=prefix.prefix_dir,
        )
        if additional_dependencies:
            cmd_output_b(
                conda_exe, 'install', '-p', env_dir, *additional_dependencies,
                cwd=prefix.prefix_dir,
            )
def run_hook(
    hook: Hook,
    file_args: Sequence[str],
    color: bool,
) -> tuple[int, bytes]:
    """Run *hook* over *file_args* with its conda env activated.

    Returns the (returncode, combined output) pair from run_xargs.
    """
    # TODO: Some rare commands need to be run using `conda run` but mostly we
    # can run them without which is much quicker and produces a better
    # output.
    # cmd = ('conda', 'run', '-p', env_dir) + hook.cmd
    with in_env(hook.prefix, hook.language_version):
        return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
| 32.319588 | 78 | 0.68134 |
acf83ff08a61cf9e14d3d5007c4e904c8e160372 | 6,132 | py | Python | fbpmp/pid/service/pid_service/tests/test_pid_shard_stage.py | peking2/fbpcs-1 | 234bc748f24046a13fbd14ee7794df5d70ab348b | [
"MIT"
] | null | null | null | fbpmp/pid/service/pid_service/tests/test_pid_shard_stage.py | peking2/fbpcs-1 | 234bc748f24046a13fbd14ee7794df5d70ab348b | [
"MIT"
] | null | null | null | fbpmp/pid/service/pid_service/tests/test_pid_shard_stage.py | peking2/fbpcs-1 | 234bc748f24046a13fbd14ee7794df5d70ab348b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import patch, MagicMock
from fbpmp.data_processing.sharding.sharding import ShardType
from fbpmp.data_processing.sharding.sharding_cpp import CppShardingService
from fbpmp.onedocker_binary_config import OneDockerBinaryConfig
from fbpmp.pcf.tests.async_utils import to_sync
from fbpmp.pid.entity.pid_instance import PIDStageStatus
from fbpmp.pid.entity.pid_stages import UnionPIDStage
from fbpmp.pid.repository.pid_instance_local import LocalPIDInstanceRepository
from fbpmp.pid.service.pid_service.pid_shard_stage import PIDShardStage
from fbpmp.pid.service.pid_service.pid_stage_input import PIDStageInput
# Minimal PID stage configuration used by the tests below.
CONFIG = {
    "s3_coordination_file": "ip_config"
}
async def async_wrapper(value):
    """Coroutine that immediately resolves to *value* (awaitable stub for mocks)."""
    return value
class TestPIDShardStage(unittest.TestCase):
    """Unit tests for PIDShardStage: readiness check, run flow and shard call."""
    @to_sync
    @patch("fbpmp.pid.repository.pid_instance.PIDInstanceRepository")
    async def test_ready(self, mock_instance_repo):
        """_ready returns True when the stage's input files exist."""
        stage = PIDShardStage(
            stage=UnionPIDStage.PUBLISHER_SHARD,
            config=CONFIG,
            instance_repository=mock_instance_repo,
            storage_svc="STORAGE",
            onedocker_svc="ONEDOCKER",
            onedocker_binary_config="OD_CONFIG",
        )
        stage_input = PIDStageInput(
            input_paths=["in"],
            output_paths=["out"],
            num_shards=123,
            instance_id="444",
        )
        with patch.object(PIDShardStage, "files_exist") as mock_fe:
            mock_fe.return_value = True
            res = await stage._ready(stage_input)
        self.assertTrue(res)
    @patch.object(
        PIDShardStage,
        "shard",
        return_value=async_wrapper(PIDStageStatus.COMPLETED),
    )
    @to_sync
    async def test_run(self, mock_shard):
        """run() updates the instance repo and fails fast on bad/missing input."""
        mock_instance_repo = LocalPIDInstanceRepository(base_dir=".")
        mock_instance_repo.read = MagicMock()
        mock_instance_repo.update = MagicMock()
        stage = PIDShardStage(
            stage=UnionPIDStage.PUBLISHER_SHARD,
            config=CONFIG,
            instance_repository=mock_instance_repo,
            storage_svc="STORAGE",
            onedocker_svc="ONEDOCKER",
            onedocker_binary_config="OD_CONFIG",
        )
        instance_id = "444"
        stage_input = PIDStageInput(
            input_paths=["in"],
            output_paths=["out"],
            num_shards=123,
            instance_id=instance_id,
        )
        # Basic test: All good
        with patch.object(PIDShardStage, "files_exist") as mock_fe:
            mock_fe.return_value = True
            stage = PIDShardStage(
                stage=UnionPIDStage.PUBLISHER_SHARD,
                config=CONFIG,
                instance_repository=mock_instance_repo,
                storage_svc="STORAGE",
                onedocker_svc="ONEDOCKER",
                onedocker_binary_config="OD_CONFIG",
            )
            status = await stage.run(stage_input)
            # instance status is updated to READY, STARTED, then COMPLETED
            mock_instance_repo.read.assert_called_with(instance_id)
            self.assertEqual(mock_instance_repo.read.call_count, 3)
            self.assertEqual(mock_instance_repo.update.call_count, 3)
        # Input not ready
        with patch.object(PIDShardStage, "files_exist") as mock_fe:
            mock_fe.return_value = False
            status = await stage.run(stage_input)
            self.assertEqual(PIDStageStatus.FAILED, status)
        # Multiple input paths (invariant exception)
        with patch.object(PIDShardStage, "files_exist") as mock_fe:
            with self.assertRaises(ValueError):
                mock_fe.return_value = True
                stage_input.input_paths = ["in1", "in2"]
                stage = PIDShardStage(
                    stage=UnionPIDStage.PUBLISHER_SHARD,
                    config=CONFIG,
                    instance_repository=mock_instance_repo,
                    storage_svc="STORAGE",
                    onedocker_svc="ONEDOCKER",
                    onedocker_binary_config="OD_CONFIG",
                )
                # NOTE(review): lines below are unreachable if the ValueError
                # is raised by run(); they document the expected success path.
                status = await stage.run(stage_input)
                self.assertEqual(PIDStageStatus.COMPLETED, status)
                mock_shard.assert_called_once_with("in1", "out", 123)
    @patch.object(CppShardingService, "shard_on_container_async")
    @patch("fbpmp.pid.repository.pid_instance.PIDInstanceRepository")
    @to_sync
    async def test_shard(self, mock_instance_repo, mock_sharder):
        """shard() delegates to CppShardingService with the expected arguments."""
        test_onedocker_binary_config = OneDockerBinaryConfig(
            tmp_directory="/test_tmp_directory/",
            binary_version="latest",
        )
        stage = PIDShardStage(
            stage=UnionPIDStage.PUBLISHER_SHARD,
            config=CONFIG,
            instance_repository=mock_instance_repo,
            storage_svc="STORAGE",
            onedocker_svc="ONEDOCKER",
            onedocker_binary_config=test_onedocker_binary_config,
        )
        test_input_path = "foo"
        test_output_path = "bar"
        test_num_shards = 1
        test_hmac_key = "CoXbp7BOEvAN9L1CB2DAORHHr3hB7wE7tpxMYm07tc0="
        shard_path = PIDShardStage.get_sharded_filepath(test_output_path, 0)
        self.assertEqual(f"{test_output_path}_0", shard_path)
        res = await stage.shard(
            test_input_path, test_output_path, test_num_shards, test_hmac_key
        )
        self.assertEqual(PIDStageStatus.COMPLETED, res)
        mock_sharder.assert_called_once_with(
            ShardType.HASHED_FOR_PID,
            test_input_path,
            output_base_path=test_output_path,
            file_start_index=0,
            num_output_files=test_num_shards,
            onedocker_svc=stage.onedocker_svc,
            binary_version=test_onedocker_binary_config.binary_version,
            tmp_directory=test_onedocker_binary_config.tmp_directory,
            hmac_key=test_hmac_key,
        )
| 38.086957 | 78 | 0.653457 |
acf8409f731e1a13d69a05a4f4ebbb8ec4a5691d | 127 | py | Python | manage.py | SeungGiJeong/lecture-aws-ec2 | f3e835f7414d00458db6d7326b1b6b288697aa4a | [
"MIT"
] | null | null | null | manage.py | SeungGiJeong/lecture-aws-ec2 | f3e835f7414d00458db6d7326b1b6b288697aa4a | [
"MIT"
] | null | null | null | manage.py | SeungGiJeong/lecture-aws-ec2 | f3e835f7414d00458db6d7326b1b6b288697aa4a | [
"MIT"
] | 2 | 2021-08-04T06:24:41.000Z | 2021-08-04T06:31:44.000Z | from app import create_app
# Build the Flask application via the app-factory pattern.
app = create_app()
if __name__ == "__main__":
    # host='0.0.0.0' listens on all interfaces so the server is reachable
    # from outside the machine (e.g. an EC2 instance); debug disabled.
    app.run(port=5000, debug=False, host='0.0.0.0')
| 18.142857 | 51 | 0.677165 |
acf8429ce9ce9ebaac4c0747dde01693f77efdd0 | 3,524 | py | Python | indy_node/test/request_handlers/conftest.py | andkononykhin/sovrin-node | d49bd91e1473d63527ea6011fcc5f158eae8211f | [
"Apache-2.0"
] | null | null | null | indy_node/test/request_handlers/conftest.py | andkononykhin/sovrin-node | d49bd91e1473d63527ea6011fcc5f158eae8211f | [
"Apache-2.0"
] | 1 | 2019-02-07T18:11:15.000Z | 2019-02-07T18:14:06.000Z | indy_node/test/request_handlers/conftest.py | andkononykhin/sovrin-node | d49bd91e1473d63527ea6011fcc5f158eae8211f | [
"Apache-2.0"
] | null | null | null | import pytest
from indy_common.constants import SCHEMA, CONFIG_LEDGER_ID, REVOC_REG_DEF, CRED_DEF_ID, REVOC_TYPE, TAG
from indy_node.persistence.idr_cache import IdrCache
from indy_node.server.request_handlers.domain_req_handlers.revoc_reg_def_handler import RevocRegDefHandler
from indy_node.server.request_handlers.domain_req_handlers.schema_handler import SchemaHandler
from indy_node.test.request_handlers.helper import get_fake_ledger, add_to_idr
from indy_node.test.request_handlers.test_schema_handler import make_schema_exist
from plenum.common.constants import KeyValueStorageType, DOMAIN_LEDGER_ID, IDR_CACHE_LABEL, POOL_LEDGER_ID
from plenum.common.request import Request
from plenum.common.util import randomString
from plenum.server.database_manager import DatabaseManager
from plenum.test.testing_utils import FakeSomething
from state.pruning_state import PruningState
from state.state import State
from storage.helper import initKeyValueStorage
from storage.kv_in_memory import KeyValueStorageInMemory
@pytest.fixture(scope="module")
def idr_cache(tconf, tdir):
    """Module-scoped identifier cache backed by a RocksDB store under tdir."""
    name = 'name'
    idr_cache = IdrCache(name,
                         initKeyValueStorage(KeyValueStorageType.Rocksdb,
                                             tdir,
                                             tconf.idrCacheDbName,
                                             db_config=tconf.db_idr_cache_db_config))
    return idr_cache
@pytest.fixture(scope="module")
def schema_handler(db_manager, write_auth_req_validator):
    """SchemaHandler under test, bound to the shared db_manager fixture."""
    return SchemaHandler(db_manager, write_auth_req_validator)
@pytest.fixture(scope="module")
def db_manager(tconf, tdir, idr_cache):
    """DatabaseManager with the idr cache plus in-memory domain/config/pool ledgers."""
    db_manager = DatabaseManager()
    db_manager.register_new_store(IDR_CACHE_LABEL, idr_cache)
    db_manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(),
                                     PruningState(KeyValueStorageInMemory()))
    db_manager.register_new_database(CONFIG_LEDGER_ID, get_fake_ledger(),
                                     PruningState(KeyValueStorageInMemory()))
    db_manager.register_new_database(POOL_LEDGER_ID, get_fake_ledger(),
                                     PruningState(KeyValueStorageInMemory()))
    return db_manager
@pytest.fixture(scope="function")
def schema_request():
    """Fresh SCHEMA request (per test) for a two-attribute 'Degree' schema."""
    identifier = randomString()
    operation = {
        'type': SCHEMA,
        'data': {
            'version': '1.0',
            'name': 'Degree',
            'attr_names': ['last_name', 'first_name'],
        },
    }
    return Request(
        identifier=identifier,
        reqId=5,
        signature="sig",
        operation=operation,
    )
@pytest.fixture(scope="module")
def revoc_reg_def_handler(db_manager, write_auth_req_validator):
    """RevocRegDefHandler under test, bound to the shared db_manager fixture."""
    return RevocRegDefHandler(db_manager, write_auth_req_validator)
@pytest.fixture(scope="module")
def revoc_reg_def_request():
    """Module-scoped REVOC_REG_DEF request with random revocation type and tag."""
    identifier = randomString()
    operation = {
        'type': REVOC_REG_DEF,
        CRED_DEF_ID: "credDefId",
        REVOC_TYPE: randomString(),
        TAG: randomString(),
    }
    return Request(
        identifier=identifier,
        reqId=5,
        signature="sig",
        operation=operation,
    )
@pytest.fixture(scope="module")
def creator(db_manager):
    """Register a fresh random identifier in the idr cache and return it."""
    identifier = randomString()
    add_to_idr(db_manager.idr_cache, identifier, None)
    return identifier
| 40.045455 | 106 | 0.648695 |
acf8444f31f965a249a384f5e5732626b76a0776 | 6,726 | py | Python | test/functional/rpc_net.py | bvbfan/ain | 71e3b3456f90a858d1325f612bd44393789d74d2 | [
"MIT"
] | null | null | null | test/functional/rpc_net.py | bvbfan/ain | 71e3b3456f90a858d1325f612bd44393789d74d2 | [
"MIT"
] | null | null | null | test/functional/rpc_net.py | bvbfan/ain | 71e3b3456f90a858d1325f612bd44393789d74d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from decimal import Decimal
from test_framework.test_framework import DefiTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
wait_until,
)
from test_framework.mininode import P2PInterface
from test_framework.messages import CAddress, msg_addr, NODE_NETWORK, NODE_WITNESS
class NetTest(DefiTestFramework):
    """Functional tests for the net-related RPCs (rpc/net.cpp)."""
    def set_test_params(self):
        # Two nodes with different min relay fees so minfeefilter differs.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001000"],["-minrelaytxfee=0.00000500"]]
    def run_test(self):
        # Order matters: later subtests assume the connection state left by
        # earlier ones.
        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinginfo()
        self._test_getaddednodeinfo()
        self._test_getpeerinfo()
        self._test_getnodeaddresses()
    def _test_connection_count(self):
        # connect_nodes_bi connects each node to the other
        assert_equal(self.nodes[0].getconnectioncount(), 2)
    def _test_getnettotals(self):
        # getnettotals totalbytesrecv and totalbytessent should be
        # consistent with getpeerinfo. Since the RPC calls are not atomic,
        # and messages might have been recvd or sent between RPC calls, call
        # getnettotals before and after and verify that the returned values
        # from getpeerinfo are bounded by those values.
        net_totals_before = self.nodes[0].getnettotals()
        peer_info = self.nodes[0].getpeerinfo()
        net_totals_after = self.nodes[0].getnettotals()
        assert_equal(len(peer_info), 2)
        peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
        peers_sent = sum([peer['bytessent'] for peer in peer_info])
        assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
        assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
        assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
        assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
        peer_info_after_ping = self.nodes[0].getpeerinfo()
        for before, after in zip(peer_info, peer_info_after_ping):
            assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
            assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
    def _test_getnetworkinginfo(self):
        # Toggling networkactive should drop and then restore connections.
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
        self.nodes[0].setnetworkactive(state=False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
        self.nodes[0].setnetworkactive(state=True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
    def _test_getaddednodeinfo(self):
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(node=ip_port, command='add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
    def _test_getpeerinfo(self):
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
        # each node reports the *other* node's min relay fee filter
        assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
        assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
    def _test_getnodeaddresses(self):
        self.nodes[0].add_p2p_connection(P2PInterface())
        # send some addresses to the node via the p2p message addr
        msg = msg_addr()
        imported_addrs = []
        for i in range(256):
            a = "123.123.123.{}".format(i)
            imported_addrs.append(a)
            addr = CAddress()
            addr.time = 100000000
            addr.nServices = NODE_NETWORK | NODE_WITNESS
            addr.ip = a
            addr.port = 8555
            msg.addrs.append(addr)
        self.nodes[0].p2p.send_and_ping(msg)
        # obtain addresses via rpc call and check they were ones sent in before
        REQUEST_COUNT = 10
        node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
        assert_equal(len(node_addresses), REQUEST_COUNT)
        for a in node_addresses:
            assert_greater_than(a["time"], 1527811200)  # 1st June 2018
            assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
            assert a["address"] in imported_addrs
            assert_equal(a["port"], 8555)
        assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)
        # addrman's size cannot be known reliably after insertion, as hash collisions may occur
        # so only test that requesting a large number of addresses returns less than that
        LARGE_REQUEST_COUNT = 10000
        node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
        assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))
# Run the functional test when executed directly.
if __name__ == '__main__':
    NetTest().main()
| 46.386207 | 134 | 0.6854 |
acf845232ebc0f7382f98a50ae32638871ee88f8 | 55,235 | py | Python | python/samples_tcod.py | mcgallag/itemdisplay | 5b69ce537fd2a336cb8adc3f2f077757212e3eaf | [
"MIT"
] | null | null | null | python/samples_tcod.py | mcgallag/itemdisplay | 5b69ce537fd2a336cb8adc3f2f077757212e3eaf | [
"MIT"
] | null | null | null | python/samples_tcod.py | mcgallag/itemdisplay | 5b69ce537fd2a336cb8adc3f2f077757212e3eaf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
This code demonstrates various usages of python-tcod.
"""
# To the extent possible under law, the libtcod maintainers have waived all
# copyright and related or neighboring rights to these samples.
# https://creativecommons.org/publicdomain/zero/1.0/
import os
import copy
import math
import random
import time
import numpy as np
import tcod
import tcod.event
# Size (in tiles) and placement of the sub-console each sample renders into.
SAMPLE_SCREEN_WIDTH = 46
SAMPLE_SCREEN_HEIGHT = 20
SAMPLE_SCREEN_X = 20
SAMPLE_SCREEN_Y = 10
# FONT = "data/fonts/consolas10x10_gs_tc.png"
FONT = "dejavu16x16_gs_tc.png"
# Root console; initialized elsewhere (outside this chunk) at startup.
root_console = None
# Off-screen console shared by all samples; order="F" gives [x, y] indexing.
sample_console = tcod.console.Console(
    SAMPLE_SCREEN_WIDTH, SAMPLE_SCREEN_HEIGHT, order="F"
)
class Sample(tcod.event.EventDispatch):
    """Base class for all demo samples.

    Handles the global key bindings (sample switching, fullscreen,
    screenshots, renderer selection, quit); subclasses override
    on_enter/on_draw and may extend ev_keydown.
    """
    def __init__(self, name: str = ""):
        # Display name shown in the samples menu.
        self.name = name
    def on_enter(self):
        """Called when this sample becomes the active one."""
        pass
    def on_draw(self):
        """Called every frame to render onto sample_console."""
        pass
    def ev_keydown(self, event: tcod.event.KeyDown):
        # cur_sample/SAMPLES/draw_samples_menu/RENDERER_KEYS are module
        # globals defined outside this chunk.
        global cur_sample
        if event.sym == tcod.event.K_DOWN:
            # cycle to the next sample
            cur_sample = (cur_sample + 1) % len(SAMPLES)
            SAMPLES[cur_sample].on_enter()
            draw_samples_menu()
        elif event.sym == tcod.event.K_UP:
            # cycle to the previous sample
            cur_sample = (cur_sample - 1) % len(SAMPLES)
            SAMPLES[cur_sample].on_enter()
            draw_samples_menu()
        elif (
            event.sym == tcod.event.K_RETURN
            and event.mod & tcod.event.KMOD_LALT
        ):
            # Alt+Enter toggles fullscreen
            tcod.console_set_fullscreen(not tcod.console_is_fullscreen())
        elif event.sym == tcod.event.K_PRINTSCREEN or event.sym == ord("p"):
            print("screenshot")
            if event.mod & tcod.event.KMOD_LALT:
                # Alt+P saves an apf file instead of a png screenshot
                tcod.console_save_apf(None, "samples.apf")
                print("apf")
            else:
                tcod.sys_save_screenshot()
                print("png")
        elif event.sym == tcod.event.K_ESCAPE:
            raise SystemExit()
        elif event.sym in RENDERER_KEYS:
            # switch the active renderer (e.g. SDL/OpenGL)
            tcod.sys_set_renderer(RENDERER_KEYS[event.sym])
            draw_renderer_menu()
    def ev_quit(self, event: tcod.event.Quit):
        """Window close button exits the program."""
        raise SystemExit()
class TrueColorSample(Sample):
    """Demo of 24-bit color: four drifting corner colors interpolated
    across the console behind random letters."""
    def __init__(self):
        self.name = "True colors"
        # corner colors
        self.colors = np.array(
            [(50, 40, 150), (240, 85, 5), (50, 35, 240), (10, 200, 130)],
            dtype=np.int16,
        )
        # color shift direction
        self.slide_dir = np.array(
            [[1, 1, 1], [-1, -1, 1], [1, -1, 1], [1, 1, -1]], dtype=np.int16
        )
        # corner indexes
        self.corners = np.array([0, 1, 2, 3])
        # sample screen interpolation mesh-grid
        self.interp_x, self.interp_y = np.mgrid[
            0 : 1 : SAMPLE_SCREEN_WIDTH * 1j, 0 : 1 : SAMPLE_SCREEN_HEIGHT * 1j
        ]
    def on_enter(self):
        # uncapped frame rate
        tcod.sys_set_fps(0)
    def on_draw(self):
        """Render one frame: drift colors, fill bg, then overlay text."""
        self.slide_corner_colors()
        self.interpolate_corner_colors()
        self.darken_background_characters()
        self.randomize_sample_conole()
        self.print_banner()
    def slide_corner_colors(self):
        """Drift one random RGB channel of each corner, bouncing at 0/255."""
        # pick random RGB channels for each corner
        rand_channels = np.random.randint(low=0, high=3, size=4)
        # shift picked color channels in the direction of slide_dir
        self.colors[self.corners, rand_channels] += (
            self.slide_dir[self.corners, rand_channels] * 5
        )
        # reverse slide_dir values when limits are reached
        self.slide_dir[self.colors[:] == 255] = -1
        self.slide_dir[self.colors[:] == 0] = 1
    def interpolate_corner_colors(self):
        """Bilinearly interpolate the four corner colors into the bg array."""
        # interpolate corner colors across the sample console
        for i in range(3):  # for each color channel
            left = (
                self.colors[2, i] - self.colors[0, i]
            ) * self.interp_y + self.colors[0, i]
            right = (
                self.colors[3, i] - self.colors[1, i]
            ) * self.interp_y + self.colors[1, i]
            sample_console.bg[:, :, i] = (right - left) * self.interp_x + left
    def darken_background_characters(self):
        """Set each cell's fg to half of its bg color."""
        # darken background characters
        sample_console.fg[:] = sample_console.bg[:]
        sample_console.fg[:] //= 2
    def randomize_sample_conole(self):
        """Fill the console with random lowercase letters. (sic: "conole")"""
        # randomize sample console characters
        sample_console.ch[:] = np.random.randint(
            low=ord("a"),
            high=ord("z") + 1,
            size=sample_console.ch.size,
            dtype=np.intc,
        ).reshape(sample_console.ch.shape)
    def print_banner(self):
        """Overlay the explanatory banner text on top of the noise."""
        # print text on top of samples
        sample_console.print_box(
            x=1,
            y=5,
            width=sample_console.width - 2,
            height=sample_console.height - 1,
            string="The Doryen library uses 24 bits colors, for both "
            "background and foreground.",
            fg=tcod.white,
            bg=tcod.grey,
            bg_blend=tcod.BKGND_MULTIPLY,
            alignment=tcod.CENTER,
        )
class OffscreenConsoleSample(Sample):
    """Demo of blitting an off-screen console with partial alpha over a
    frozen screenshot of the previous sample."""
    def __init__(self):
        self.name = "Offscreen console"
        # half-size console that gets blitted around the screen
        self.secondary = tcod.console.Console(
            sample_console.width // 2, sample_console.height // 2
        )
        # holds a copy of the screen captured in on_enter
        self.screenshot = tcod.console.Console(
            sample_console.width, sample_console.height
        )
        self.counter = 0
        # current blit position and movement direction (bounces at edges)
        self.x = 0
        self.y = 0
        self.xdir = 1
        self.ydir = 1
        self.secondary.draw_frame(
            0,
            0,
            sample_console.width // 2,
            sample_console.height // 2,
            "Offscreen console",
            False,
            fg=tcod.white,
            bg=tcod.black,
        )
        self.secondary.print_box(
            1,
            2,
            sample_console.width // 2 - 2,
            sample_console.height // 2,
            "You can render to an offscreen console and blit in on another "
            "one, simulating alpha transparency.",
            fg=tcod.white,
            bg=None,
            alignment=tcod.CENTER,
        )
    def on_enter(self):
        self.counter = time.perf_counter()
        tcod.sys_set_fps(0)
        # get a "screenshot" of the current sample screen
        sample_console.blit(dest=self.screenshot)
    def on_draw(self):
        # move the secondary console once per second, bouncing off the edges
        if time.perf_counter() - self.counter >= 1:
            self.counter = time.perf_counter()
            self.x += self.xdir
            self.y += self.ydir
            if self.x == sample_console.width / 2 + 5:
                self.xdir = -1
            elif self.x == -5:
                self.xdir = 1
            if self.y == sample_console.height / 2 + 5:
                self.ydir = -1
            elif self.y == -5:
                self.ydir = 1
        # restore the background, then blit the panel at 75% bg alpha
        self.screenshot.blit(sample_console)
        self.secondary.blit(
            sample_console,
            self.x,
            self.y,
            0,
            0,
            sample_console.width // 2,
            sample_console.height // 2,
            1.0,
            0.75,
        )
class LineDrawingSample(Sample):
    """Demo of line drawing with the different background blend flags."""
    # Display names indexed by the low byte of the blend flag.
    FLAG_NAMES = [
        "BKGND_NONE",
        "BKGND_SET",
        "BKGND_MULTIPLY",
        "BKGND_LIGHTEN",
        "BKGND_DARKEN",
        "BKGND_SCREEN",
        "BKGND_COLOR_DODGE",
        "BKGND_COLOR_BURN",
        "BKGND_ADD",
        "BKGND_ADDALPHA",
        "BKGND_BURN",
        "BKGND_OVERLAY",
        "BKGND_ALPHA",
    ]
    def __init__(self):
        self.name = "Line drawing"
        self.mk_flag = tcod.BKGND_SET
        # current blend flag, cycled with ENTER
        self.bk_flag = tcod.BKGND_SET
        self.bk = tcod.console.Console(
            sample_console.width, sample_console.height, order="F"
        )
        # initialize the colored background
        self.bk.bg[:, :, 0] = np.linspace(0, 255, self.bk.width)[:, np.newaxis]
        self.bk.bg[:, :, 2] = np.linspace(0, 255, self.bk.height)
        self.bk.bg[:, :, 1] = (
            self.bk.bg[:, :, 0].astype(int) + self.bk.bg[:, :, 2]
        ) / 2
        self.bk.ch[:] = ord(" ")
    def ev_keydown(self, event: tcod.event.KeyDown):
        # ENTER cycles through the blend modes; everything else is inherited.
        if event.sym in (tcod.event.K_RETURN, tcod.event.K_KP_ENTER):
            self.bk_flag += 1
            if (self.bk_flag & 0xFF) > tcod.BKGND_ALPH:
                self.bk_flag = tcod.BKGND_NONE
        else:
            super().ev_keydown(event)
    def on_enter(self):
        tcod.sys_set_fps(0)
    def on_draw(self):
        alpha = 0.0
        if (self.bk_flag & 0xFF) == tcod.BKGND_ALPH:
            # for the alpha mode, update alpha every frame
            alpha = (1.0 + math.cos(time.time() * 2)) / 2.0
            self.bk_flag = tcod.BKGND_ALPHA(alpha)
        elif (self.bk_flag & 0xFF) == tcod.BKGND_ADDA:
            # for the add alpha mode, update alpha every frame
            alpha = (1.0 + math.cos(time.time() * 2)) / 2.0
            self.bk_flag = tcod.BKGND_ADDALPHA(alpha)
        self.bk.blit(sample_console)
        # oscillating horizontal grey bar, 3 cells tall
        recty = int(
            (sample_console.height - 2) * ((1.0 + math.cos(time.time())) / 2.0)
        )
        for x in range(sample_console.width):
            col = [x * 255 // sample_console.width] * 3
            tcod.console_set_char_background(
                sample_console, x, recty, col, self.bk_flag
            )
            tcod.console_set_char_background(
                sample_console, x, recty + 1, col, self.bk_flag
            )
            tcod.console_set_char_background(
                sample_console, x, recty + 2, col, self.bk_flag
            )
        # rotating line endpoints
        angle = time.time() * 2.0
        cos_angle = math.cos(angle)
        sin_angle = math.sin(angle)
        xo = int(sample_console.width // 2 * (1 + cos_angle))
        yo = int(
            sample_console.height // 2 + sin_angle * sample_console.width // 2
        )
        xd = int(sample_console.width // 2 * (1 - cos_angle))
        yd = int(
            sample_console.height // 2 - sin_angle * sample_console.width // 2
        )
        # draw the line
        # in python the easiest way is to use the line iterator
        for x, y in tcod.line_iter(xo, yo, xd, yd):
            if (
                0 <= x < sample_console.width
                and 0 <= y < sample_console.height
            ):
                tcod.console_set_char_background(
                    sample_console, x, y, tcod.light_blue, self.bk_flag
                )
        sample_console.print(
            2,
            2,
            "%s (ENTER to change)" % self.FLAG_NAMES[self.bk_flag & 0xFF],
            fg=tcod.white,
            bg=None,
        )
class NoiseSample(Sample):
    """Demo of the tcod noise generators (perlin/simplex/wavelet, with
    fbm/turbulence variants) rendered as a scrolling 2x-resolution image."""
    # [name, algorithm, implementation] per selectable mode (keys 1-9).
    NOISE_OPTIONS = [  # [name, algorithm, implementation],
        ["perlin noise", tcod.NOISE_PERLIN, tcod.noise.SIMPLE],
        ["simplex noise", tcod.NOISE_SIMPLEX, tcod.noise.SIMPLE],
        ["wavelet noise", tcod.NOISE_WAVELET, tcod.noise.SIMPLE],
        ["perlin fbm", tcod.NOISE_PERLIN, tcod.noise.FBM],
        ["perlin turbulence", tcod.NOISE_PERLIN, tcod.noise.TURBULENCE],
        ["simplex fbm", tcod.NOISE_SIMPLEX, tcod.noise.FBM],
        ["simplex turbulence", tcod.NOISE_SIMPLEX, tcod.noise.TURBULENCE],
        ["wavelet fbm", tcod.NOISE_WAVELET, tcod.noise.FBM],
        ["wavelet turbulence", tcod.NOISE_WAVELET, tcod.noise.TURBULENCE],
    ]
    def __init__(self):
        self.name = "Noise"
        # index into NOISE_OPTIONS
        self.func = 0
        self.dx = 0.0
        self.dy = 0.0
        self.octaves = 4.0
        self.zoom = 3.0
        self.hurst = tcod.NOISE_DEFAULT_HURST
        self.lacunarity = tcod.NOISE_DEFAULT_LACUNARITY
        self.noise = self.get_noise()
        # rendered at 2x and blitted with blit_2x for sub-cell resolution
        self.img = tcod.image_new(
            SAMPLE_SCREEN_WIDTH * 2, SAMPLE_SCREEN_HEIGHT * 2
        )
    @property
    def algorithm(self):
        # noise algorithm of the currently selected option
        return self.NOISE_OPTIONS[self.func][1]
    @property
    def implementation(self):
        # SIMPLE / FBM / TURBULENCE of the currently selected option
        return self.NOISE_OPTIONS[self.func][2]
    def get_noise(self):
        """Build a 2D Noise object from the current parameters."""
        return tcod.noise.Noise(
            2,
            self.algorithm,
            self.implementation,
            self.hurst,
            self.lacunarity,
            self.octaves,
            seed=None,
        )
    def on_enter(self):
        tcod.sys_set_fps(0)
    def on_draw(self):
        # scroll the sample area over time
        self.dx = time.perf_counter() * 0.25
        self.dy = time.perf_counter() * 0.25
        for y in range(2 * sample_console.height):
            for x in range(2 * sample_console.width):
                f = [
                    self.zoom * x / (2 * sample_console.width) + self.dx,
                    self.zoom * y / (2 * sample_console.height) + self.dy,
                ]
                value = self.noise.get_point(*f)
                # map noise from [-1, 1] to a blue-tinted 0-255 gradient
                c = int((value + 1.0) / 2.0 * 255)
                c = max(0, min(c, 255))
                self.img.put_pixel(x, y, (c // 2, c // 2, c))
        # darkened info panel; taller when fbm/turbulence params are shown
        rectw = 24
        recth = 13
        if self.implementation == tcod.noise.SIMPLE:
            recth = 10
        self.img.blit_2x(sample_console, 0, 0)
        sample_console.draw_rect(
            2,
            2,
            rectw,
            recth,
            ch=0,
            fg=None,
            bg=tcod.grey,
            bg_blend=tcod.BKGND_MULTIPLY,
        )
        sample_console.fg[2 : 2 + rectw, 2 : 2 + recth] = (
            sample_console.fg[2 : 2 + rectw, 2 : 2 + recth] * tcod.grey / 255
        )
        # menu of selectable noise modes, highlighting the active one
        for curfunc in range(len(self.NOISE_OPTIONS)):
            text = "%i : %s" % (curfunc + 1, self.NOISE_OPTIONS[curfunc][0])
            if curfunc == self.func:
                sample_console.print(
                    2, 2 + curfunc, text, fg=tcod.white, bg=tcod.light_blue
                )
            else:
                sample_console.print(
                    2, 2 + curfunc, text, fg=tcod.grey, bg=None
                )
        sample_console.print(
            2, 11, "Y/H : zoom (%2.1f)" % self.zoom, fg=tcod.white, bg=None
        )
        if self.implementation != tcod.noise.SIMPLE:
            sample_console.print(
                2,
                12,
                "E/D : hurst (%2.1f)" % self.hurst,
                fg=tcod.white,
                bg=None,
            )
            sample_console.print(
                2,
                13,
                "R/F : lacunarity (%2.1f)" % self.lacunarity,
                fg=tcod.white,
                bg=None,
            )
            sample_console.print(
                2,
                14,
                "T/G : octaves (%2.1f)" % self.octaves,
                fg=tcod.white,
                bg=None,
            )
    def ev_keydown(self, event: tcod.event.KeyDown):
        # 1-9 select the mode; letter pairs tweak parameters (rebuilding the
        # noise object where the parameter is fixed at construction time).
        if ord("9") >= event.sym >= ord("1"):
            self.func = event.sym - ord("1")
            self.noise = self.get_noise()
        elif event.sym == ord("e"):
            self.hurst += 0.1
            self.noise = self.get_noise()
        elif event.sym == ord("d"):
            self.hurst -= 0.1
            self.noise = self.get_noise()
        elif event.sym == ord("r"):
            self.lacunarity += 0.5
            self.noise = self.get_noise()
        elif event.sym == ord("f"):
            self.lacunarity -= 0.5
            self.noise = self.get_noise()
        elif event.sym == ord("t"):
            self.octaves += 0.5
            self.noise.octaves = self.octaves
        elif event.sym == ord("g"):
            self.octaves -= 0.5
            self.noise.octaves = self.octaves
        elif event.sym == ord("y"):
            self.zoom += 0.2
        elif event.sym == ord("h"):
            self.zoom -= 0.2
        else:
            super().ev_keydown(event)
#############################################
# field of view sample
#############################################
# Background colors for map cells, inside and outside the field of view.
DARK_WALL = (0, 0, 100)
LIGHT_WALL = (130, 110, 50)
DARK_GROUND = (50, 50, 150)
LIGHT_GROUND = (200, 180, 50)
SAMPLE_MAP = [
"##############################################",
"####################### #################",
"##################### # ###############",
"###################### ### ###########",
"################## ##### ####",
"################ ######## ###### ####",
"############### #################### ####",
"################ ###### ##",
"######## ####### ###### # # # ##",
"######## ###### ### ##",
"######## ##",
"#### ###### ### # # # ##",
"#### ### ########## #### ##",
"#### ### ########## ###########=##########",
"#### ################## ##### #####",
"#### ### #### ##### #####",
"#### # #### #####",
"######## # #### ##### #####",
"######## ##### ####################",
"##############################################",
]
SAMPLE_MAP = np.array([list(line) for line in SAMPLE_MAP]).transpose()
# Display labels indexed by the tcod FOV algorithm number.
FOV_ALGO_NAMES = [
    "BASIC ",
    "DIAMOND ",
    "SHADOW ",
    "PERMISSIVE0",
    "PERMISSIVE1",
    "PERMISSIVE2",
    "PERMISSIVE3",
    "PERMISSIVE4",
    "PERMISSIVE5",
    "PERMISSIVE6",
    "PERMISSIVE7",
    "PERMISSIVE8",
    "RESTRICTIVE",
]
# Torch radius in tiles; the squared form avoids a sqrt per cell.
TORCH_RADIUS = 10
SQUARED_TORCH_RADIUS = TORCH_RADIUS * TORCH_RADIUS
class FOVSample(Sample):
def __init__(self):
self.name = "Field of view"
self.px = 20
self.py = 10
self.recompute = True
self.torch = False
self.map = None
self.noise = None
self.torchx = 0.0
self.light_walls = True
self.algo_num = 0
# 1d noise for the torch flickering
self.noise = tcod.noise_new(1, 1.0, 1.0)
self.map = tcod.map.Map(
SAMPLE_SCREEN_WIDTH, SAMPLE_SCREEN_HEIGHT, order="F"
)
self.map.walkable[:] = SAMPLE_MAP[:] == " "
self.map.transparent[:] = self.map.walkable[:] | (SAMPLE_MAP == "=")
self.light_map_bg = np.full(
SAMPLE_MAP.shape + (3,), LIGHT_GROUND, dtype=np.uint8
)
self.light_map_bg[SAMPLE_MAP[:] == "#"] = LIGHT_WALL
self.dark_map_bg = np.full(
SAMPLE_MAP.shape + (3,), DARK_GROUND, dtype=np.uint8
)
self.dark_map_bg[SAMPLE_MAP[:] == "#"] = DARK_WALL
def draw_ui(self):
sample_console.print(
1,
1,
"IJKL : move around\n"
"T : torch fx %s\n"
"W : light walls %s\n"
"+-: algo %s"
% (
"on " if self.torch else "off",
"on " if self.light_walls else "off",
FOV_ALGO_NAMES[self.algo_num],
),
fg=tcod.white,
bg=None,
)
def on_enter(self):
tcod.sys_set_fps(60)
# we draw the foreground only the first time.
# during the player movement, only the @ is redrawn.
# the rest impacts only the background color
# draw the help text & player @
sample_console.clear()
self.draw_ui()
tcod.console_put_char(
sample_console, self.px, self.py, "@", tcod.BKGND_NONE
)
# draw windows
sample_console.ch[np.where(SAMPLE_MAP == "=")] = tcod.CHAR_DHLINE
sample_console.fg[np.where(SAMPLE_MAP == "=")] = tcod.black
def on_draw(self):
dx = 0.0
dy = 0.0
di = 0.0
if self.recompute:
self.recompute = False
self.map.compute_fov(
self.px,
self.py,
TORCH_RADIUS if self.torch else 0,
self.light_walls,
self.algo_num,
)
sample_console.bg[:] = self.dark_map_bg[:]
if self.torch:
# slightly change the perlin noise parameter
self.torchx += 0.1
# randomize the light position between -1.5 and 1.5
tdx = [self.torchx + 20.0]
dx = tcod.noise_get(self.noise, tdx, tcod.NOISE_SIMPLEX) * 1.5
tdx[0] += 30.0
dy = tcod.noise_get(self.noise, tdx, tcod.NOISE_SIMPLEX) * 1.5
di = 0.2 * tcod.noise_get(
self.noise, [self.torchx], tcod.NOISE_SIMPLEX
)
# where_fov = np.where(self.map.fov[:])
mgrid = np.mgrid[:SAMPLE_SCREEN_WIDTH, :SAMPLE_SCREEN_HEIGHT]
# get squared distance
light = (mgrid[0] - self.px + dx) ** 2 + (
mgrid[1] - self.py + dy
) ** 2
light = light.astype(np.float16)
visible = (light < SQUARED_TORCH_RADIUS) & self.map.fov[:]
light[...] = SQUARED_TORCH_RADIUS - light
light[...] /= SQUARED_TORCH_RADIUS
light[...] += di
light[...] = light.clip(0, 1)
light[~visible] = 0
sample_console.bg[...] = (
self.light_map_bg.astype(np.float16) - self.dark_map_bg
) * light[..., np.newaxis] + self.dark_map_bg
else:
where_fov = np.where(self.map.fov[:])
sample_console.bg[where_fov] = self.light_map_bg[where_fov]
    def ev_keydown(self, event: tcod.event.KeyDown):
        """Handle movement (IJKL), torch/wall toggles and FOV algorithm keys."""
        MOVE_KEYS = {
            ord("i"): (0, -1),
            ord("j"): (-1, 0),
            ord("k"): (0, 1),
            ord("l"): (1, 0),
        }
        FOV_SELECT_KEYS = {ord("-"): -1, ord("="): 1}
        if event.sym in MOVE_KEYS:
            x, y = MOVE_KEYS[event.sym]
            # only move onto open ground
            if SAMPLE_MAP[self.px + x, self.py + y] == " ":
                tcod.console_put_char(
                    sample_console, self.px, self.py, " ", tcod.BKGND_NONE
                )
                self.px += x
                self.py += y
                tcod.console_put_char(
                    sample_console, self.px, self.py, "@", tcod.BKGND_NONE
                )
                self.recompute = True
        elif event.sym == ord("t"):
            self.torch = not self.torch
            self.draw_ui()
            self.recompute = True
        elif event.sym == ord("w"):
            self.light_walls = not self.light_walls
            self.draw_ui()
            self.recompute = True
        elif event.sym in FOV_SELECT_KEYS:
            self.algo_num += FOV_SELECT_KEYS[event.sym]
            self.algo_num %= tcod.NB_FOV_ALGORITHMS
            self.draw_ui()
            self.recompute = True
        else:
            super().ev_keydown(event)
class PathfindingSample(Sample):
    """Demo of the A* and Dijkstra pathfinding toolkits.

    A creature ('@') walks toward a destination ('+') that can be moved
    with IJKL or the mouse; TAB switches between A* and Dijkstra.
    """
    def __init__(self):
        self.name = "Path finding"
        # creature position
        self.px = 20
        self.py = 10
        # destination position
        self.dx = 24
        self.dy = 1
        self.map = None
        self.path = None
        self.dijk_dist = 0.0
        self.using_astar = True
        self.dijk = None
        self.recalculate = False
        # cooldown between creature steps, in seconds
        self.busy = 0.0
        # glyph previously under the '+' marker, restored when it moves
        self.oldchar = " "
        self.map = tcod.map_new(SAMPLE_SCREEN_WIDTH, SAMPLE_SCREEN_HEIGHT)
        for y in range(SAMPLE_SCREEN_HEIGHT):
            for x in range(SAMPLE_SCREEN_WIDTH):
                if SAMPLE_MAP[x, y] == " ":
                    # ground
                    tcod.map_set_properties(self.map, x, y, True, True)
                elif SAMPLE_MAP[x, y] == "=":
                    # window
                    tcod.map_set_properties(self.map, x, y, True, False)
        self.path = tcod.path_new_using_map(self.map)
        self.dijk = tcod.dijkstra_new(self.map)
    def on_enter(self):
        """Draw the static help text, markers and window glyphs once."""
        tcod.sys_set_fps(60)
        # we draw the foreground only the first time.
        # during the player movement, only the @ is redrawn.
        # the rest impacts only the background color
        # draw the help text & player @
        sample_console.clear()
        sample_console.ch[self.dx, self.dy] = ord("+")
        sample_console.fg[self.dx, self.dy] = tcod.white
        sample_console.ch[self.px, self.py] = ord("@")
        sample_console.fg[self.px, self.py] = tcod.white
        sample_console.print(
            1,
            1,
            "IJKL / mouse :\nmove destination\nTAB : A*/dijkstra",
            fg=tcod.white,
            bg=None,
        )
        sample_console.print(1, 4, "Using : A*", fg=tcod.white, bg=None)
        # draw windows
        for y in range(SAMPLE_SCREEN_HEIGHT):
            for x in range(SAMPLE_SCREEN_WIDTH):
                if SAMPLE_MAP[x, y] == "=":
                    tcod.console_put_char(
                        sample_console, x, y, tcod.CHAR_DHLINE, tcod.BKGND_NONE
                    )
        self.recalculate = True
    def on_draw(self):
        """Recompute the path when needed, render it and step the creature."""
        if self.recalculate:
            if self.using_astar:
                tcod.path_compute(
                    self.path, self.px, self.py, self.dx, self.dy
                )
            else:
                self.dijk_dist = 0.0
                # compute dijkstra grid (distance from px,py)
                tcod.dijkstra_compute(self.dijk, self.px, self.py)
                # get the maximum distance (needed for rendering)
                for y in range(SAMPLE_SCREEN_HEIGHT):
                    for x in range(SAMPLE_SCREEN_WIDTH):
                        d = tcod.dijkstra_get_distance(self.dijk, x, y)
                        if d > self.dijk_dist:
                            self.dijk_dist = d
                # compute path from px,py to dx,dy
                tcod.dijkstra_path_set(self.dijk, self.dx, self.dy)
            self.recalculate = False
            self.busy = 0.2
        # draw the dungeon
        for y in range(SAMPLE_SCREEN_HEIGHT):
            for x in range(SAMPLE_SCREEN_WIDTH):
                if SAMPLE_MAP[x, y] == "#":
                    tcod.console_set_char_background(
                        sample_console, x, y, DARK_WALL, tcod.BKGND_SET
                    )
                else:
                    tcod.console_set_char_background(
                        sample_console, x, y, DARK_GROUND, tcod.BKGND_SET
                    )
        # draw the path
        if self.using_astar:
            for i in range(tcod.path_size(self.path)):
                x, y = tcod.path_get(self.path, i)
                tcod.console_set_char_background(
                    sample_console, x, y, LIGHT_GROUND, tcod.BKGND_SET
                )
        else:
            # shade every walkable tile by its dijkstra distance
            for y in range(SAMPLE_SCREEN_HEIGHT):
                for x in range(SAMPLE_SCREEN_WIDTH):
                    if SAMPLE_MAP[x, y] != "#":
                        tcod.console_set_char_background(
                            sample_console,
                            x,
                            y,
                            tcod.color_lerp(
                                LIGHT_GROUND,
                                DARK_GROUND,
                                0.9
                                * tcod.dijkstra_get_distance(self.dijk, x, y)
                                / self.dijk_dist,
                            ),
                            tcod.BKGND_SET,
                        )
            for i in range(tcod.dijkstra_size(self.dijk)):
                x, y = tcod.dijkstra_get(self.dijk, i)
                tcod.console_set_char_background(
                    sample_console, x, y, LIGHT_GROUND, tcod.BKGND_SET
                )
        # move the creature
        self.busy -= tcod.sys_get_last_frame_length()
        if self.busy <= 0.0:
            self.busy = 0.2
            if self.using_astar:
                if not tcod.path_is_empty(self.path):
                    tcod.console_put_char(
                        sample_console, self.px, self.py, " ", tcod.BKGND_NONE
                    )
                    self.px, self.py = tcod.path_walk(self.path, True)
                    tcod.console_put_char(
                        sample_console, self.px, self.py, "@", tcod.BKGND_NONE
                    )
            else:
                if not tcod.dijkstra_is_empty(self.dijk):
                    tcod.console_put_char(
                        sample_console, self.px, self.py, " ", tcod.BKGND_NONE
                    )
                    self.px, self.py = tcod.dijkstra_path_walk(self.dijk)
                    tcod.console_put_char(
                        sample_console, self.px, self.py, "@", tcod.BKGND_NONE
                    )
                    self.recalculate = True
    def ev_keydown(self, event: tcod.event.KeyDown):
        """Move the destination marker with IJKL; TAB toggles the algorithm."""
        if event.sym == ord("i") and self.dy > 0:
            # destination move north
            tcod.console_put_char(
                sample_console, self.dx, self.dy, self.oldchar, tcod.BKGND_NONE
            )
            self.dy -= 1
            self.oldchar = sample_console.ch[self.dx, self.dy]
            tcod.console_put_char(
                sample_console, self.dx, self.dy, "+", tcod.BKGND_NONE
            )
            if SAMPLE_MAP[self.dx, self.dy] == " ":
                self.recalculate = True
        elif event.sym == ord("k") and self.dy < SAMPLE_SCREEN_HEIGHT - 1:
            # destination move south
            tcod.console_put_char(
                sample_console, self.dx, self.dy, self.oldchar, tcod.BKGND_NONE
            )
            self.dy += 1
            self.oldchar = sample_console.ch[self.dx, self.dy]
            tcod.console_put_char(
                sample_console, self.dx, self.dy, "+", tcod.BKGND_NONE
            )
            if SAMPLE_MAP[self.dx, self.dy] == " ":
                self.recalculate = True
        elif event.sym == ord("j") and self.dx > 0:
            # destination move west
            tcod.console_put_char(
                sample_console, self.dx, self.dy, self.oldchar, tcod.BKGND_NONE
            )
            self.dx -= 1
            self.oldchar = sample_console.ch[self.dx, self.dy]
            tcod.console_put_char(
                sample_console, self.dx, self.dy, "+", tcod.BKGND_NONE
            )
            if SAMPLE_MAP[self.dx, self.dy] == " ":
                self.recalculate = True
        elif event.sym == ord("l") and self.dx < SAMPLE_SCREEN_WIDTH - 1:
            # destination move east
            tcod.console_put_char(
                sample_console, self.dx, self.dy, self.oldchar, tcod.BKGND_NONE
            )
            self.dx += 1
            self.oldchar = sample_console.ch[self.dx, self.dy]
            tcod.console_put_char(
                sample_console, self.dx, self.dy, "+", tcod.BKGND_NONE
            )
            if SAMPLE_MAP[self.dx, self.dy] == " ":
                self.recalculate = True
        elif event.sym == tcod.event.K_TAB:
            self.using_astar = not self.using_astar
            if self.using_astar:
                tcod.console_print(sample_console, 1, 4, "Using : A*      ")
            else:
                tcod.console_print(sample_console, 1, 4, "Using : Dijkstra")
            self.recalculate = True
        else:
            super().ev_keydown(event)
    def ev_mousemotion(self, event: tcod.event.MouseMotion):
        """Move the destination marker to the hovered sample-console tile."""
        mx = event.tile.x - SAMPLE_SCREEN_X
        my = event.tile.y - SAMPLE_SCREEN_Y
        if (
            0 <= mx < SAMPLE_SCREEN_WIDTH
            and 0 <= my < SAMPLE_SCREEN_HEIGHT
            and (self.dx != mx or self.dy != my)
        ):
            tcod.console_put_char(
                sample_console, self.dx, self.dy, self.oldchar, tcod.BKGND_NONE
            )
            self.dx = mx
            self.dy = my
            self.oldchar = sample_console.ch[self.dx, self.dy]
            tcod.console_put_char(
                sample_console, self.dx, self.dy, "+", tcod.BKGND_NONE
            )
            if SAMPLE_MAP[self.dx, self.dy] == " ":
                self.recalculate = True
#############################################
# bsp sample
#############################################
# Tunable parameters for the BSP dungeon generator (module-level so the
# BSPSample key handler can rebind them via `global`).
bsp_depth = 8
bsp_min_room_size = 4
# does a room fill a random part of the node or the maximum available space?
bsp_random_room = False
# if true, there is always a wall on north & west side of a room
bsp_room_walls = True
# draw a vertical line
def vline(m, x, y1, y2):
    """Carve column x of map m into floor between rows y1 and y2, inclusive.

    The endpoints may be given in either order.
    """
    lo, hi = min(y1, y2), max(y1, y2)
    column = m[x]
    for row in range(lo, hi + 1):
        column[row] = True
# draw a vertical line up until we reach an empty space
def vline_up(m, x, y):
    """Dig upward from (x, y), stopping at the map edge or the first
    tile that is already floor."""
    for row in range(y, -1, -1):
        if m[x][row]:
            break
        m[x][row] = True
# draw a vertical line down until we reach an empty space
def vline_down(m, x, y):
    """Dig downward from (x, y), stopping at the bottom map edge or the
    first tile that is already floor."""
    for row in range(y, SAMPLE_SCREEN_HEIGHT):
        if m[x][row]:
            break
        m[x][row] = True
# draw a horizontal line
def hline(m, x1, y, x2):
    """Carve row y of map m into floor between columns x1 and x2, inclusive.

    The endpoints may be given in either order.
    """
    lo, hi = min(x1, x2), max(x1, x2)
    for col in range(lo, hi + 1):
        m[col][y] = True
# draw a horizontal line left until we reach an empty space
def hline_left(m, x, y):
    """Dig leftward from (x, y), stopping at the map edge or the first
    tile that is already floor."""
    for col in range(x, -1, -1):
        if m[col][y]:
            break
        m[col][y] = True
# draw a horizontal line right until we reach an empty space
def hline_right(m, x, y):
    """Dig rightward from (x, y), stopping at the right map edge or the
    first tile that is already floor."""
    for col in range(x, SAMPLE_SCREEN_WIDTH):
        if m[col][y]:
            break
        m[col][y] = True
# build the dungeon from the bsp nodes (called once per node, leaves first)
def traverse_node(bsp_map, node):
    """Carve a room for a leaf node, or resize an internal node to fit its
    children and connect them with a corridor.

    Mutates *bsp_map* (a 2D boolean floor map indexed [x][y]) in place.
    Always returns True.
    """
    if not node.children:
        # calculate the room size
        if bsp_room_walls:
            node.width -= 1
            node.height -= 1
        if bsp_random_room:
            new_width = random.randint(
                min(node.width, bsp_min_room_size), node.width
            )
            new_height = random.randint(
                min(node.height, bsp_min_room_size), node.height
            )
            node.x += random.randint(0, node.width - new_width)
            node.y += random.randint(0, node.height - new_height)
            node.width, node.height = new_width, new_height
        # dig the room
        for x in range(node.x, node.x + node.width):
            for y in range(node.y, node.y + node.height):
                bsp_map[x][y] = True
    else:
        # resize the node to fit its sons
        # NOTE(review): this branch uses node.w/node.h while the leaf branch
        # uses node.width/node.height -- confirm both names exist on the
        # BSP node type before relying on either.
        left, right = node.children
        node.x = min(left.x, right.x)
        node.y = min(left.y, right.y)
        node.w = max(left.x + left.w, right.x + right.w) - node.x
        node.h = max(left.y + left.h, right.y + right.h) - node.y
        # create a corridor between the two lower nodes
        if node.horizontal:
            # vertical corridor
            if left.x + left.w - 1 < right.x or right.x + right.w - 1 < left.x:
                # no overlapping zone. we need a Z shaped corridor
                x1 = random.randint(left.x, left.x + left.w - 1)
                x2 = random.randint(right.x, right.x + right.w - 1)
                y = random.randint(left.y + left.h, right.y)
                vline_up(bsp_map, x1, y - 1)
                hline(bsp_map, x1, y, x2)
                vline_down(bsp_map, x2, y + 1)
            else:
                # straight vertical corridor
                minx = max(left.x, right.x)
                maxx = min(left.x + left.w - 1, right.x + right.w - 1)
                x = random.randint(minx, maxx)
                vline_down(bsp_map, x, right.y)
                vline_up(bsp_map, x, right.y - 1)
        else:
            # horizontal corridor
            if left.y + left.h - 1 < right.y or right.y + right.h - 1 < left.y:
                # no overlapping zone. we need a Z shaped corridor
                y1 = random.randint(left.y, left.y + left.h - 1)
                y2 = random.randint(right.y, right.y + right.h - 1)
                x = random.randint(left.x + left.w, right.x)
                hline_left(bsp_map, x - 1, y1)
                vline(bsp_map, x, y1, y2)
                hline_right(bsp_map, x + 1, y2)
            else:
                # straight horizontal corridor
                miny = max(left.y, right.y)
                maxy = min(left.y + left.h - 1, right.y + right.h - 1)
                y = random.randint(miny, maxy)
                hline_left(bsp_map, right.x - 1, y)
                hline_right(bsp_map, right.x, y)
    return True
class BSPSample(Sample):
    """Demo of the BSP toolkit: generates and renders a BSP-based dungeon."""
    def __init__(self):
        self.name = "Bsp toolkit"
        self.bsp = tcod.bsp.BSP(
            1, 1, SAMPLE_SCREEN_WIDTH - 1, SAMPLE_SCREEN_HEIGHT - 1
        )
        # boolean floor map indexed [x][y]; True means walkable ground
        self.bsp_map = np.zeros(
            (SAMPLE_SCREEN_WIDTH, SAMPLE_SCREEN_HEIGHT), dtype=bool, order="F"
        )
        self.bsp_generate()
    def bsp_generate(self):
        """Re-split the BSP tree using the current module-level settings,
        then rebuild the floor map."""
        self.bsp.children = ()
        if bsp_room_walls:
            # +1 so each room can keep a wall on its north/west side
            self.bsp.split_recursive(
                bsp_depth,
                bsp_min_room_size + 1,
                bsp_min_room_size + 1,
                1.5,
                1.5,
            )
        else:
            self.bsp.split_recursive(
                bsp_depth, bsp_min_room_size, bsp_min_room_size, 1.5, 1.5
            )
        self.bsp_refresh()
    def bsp_refresh(self):
        """Rebuild the floor map by carving rooms and corridors from the tree.

        A deep copy is traversed because traverse_node mutates node geometry.
        """
        self.bsp_map[...] = False
        for node in copy.deepcopy(self.bsp).inverted_level_order():
            traverse_node(self.bsp_map, node)
    def on_draw(self):
        """Draw the help text and render the generated dungeon."""
        sample_console.clear()
        rooms = "OFF"
        if bsp_random_room:
            rooms = "ON"
        sample_console.print(
            1,
            1,
            "ENTER : rebuild bsp\n"
            "SPACE : rebuild dungeon\n"
            "+-: bsp depth %d\n"
            "*/: room size %d\n"
            "1 : random room size %s" % (bsp_depth, bsp_min_room_size, rooms),
            fg=tcod.white,
            bg=None,
        )
        if bsp_random_room:
            walls = "OFF"
            if bsp_room_walls:
                walls = "ON"
            sample_console.print(
                1, 6, "2 : room walls %s" % walls, fg=tcod.white, bg=None
            )
        # render the level
        for y in range(SAMPLE_SCREEN_HEIGHT):
            for x in range(SAMPLE_SCREEN_WIDTH):
                color = DARK_GROUND if self.bsp_map[x][y] else DARK_WALL
                tcod.console_set_char_background(
                    sample_console, x, y, color, tcod.BKGND_SET
                )
    def ev_keydown(self, event: tcod.event.KeyDown):
        """Handle generation hotkeys: ENTER, SPACE, +/-, *//, 1, 2."""
        global bsp_random_room, bsp_room_walls, bsp_depth, bsp_min_room_size
        if event.sym in (tcod.event.K_RETURN, tcod.event.K_KP_ENTER):
            self.bsp_generate()
        elif event.sym == ord(" "):
            self.bsp_refresh()
        elif event.sym in (tcod.event.K_EQUALS, tcod.event.K_KP_PLUS):
            bsp_depth += 1
            self.bsp_generate()
        elif event.sym in (tcod.event.K_MINUS, tcod.event.K_KP_MINUS):
            bsp_depth = max(1, bsp_depth - 1)
            self.bsp_generate()
        elif event.sym in (tcod.event.K_8, tcod.event.K_KP_MULTIPLY):
            bsp_min_room_size += 1
            self.bsp_generate()
        elif event.sym in (tcod.event.K_SLASH, tcod.event.K_KP_DIVIDE):
            bsp_min_room_size = max(2, bsp_min_room_size - 1)
            self.bsp_generate()
        elif event.sym in (tcod.event.K_1, tcod.event.K_KP_1):
            bsp_random_room = not bsp_random_room
            if not bsp_random_room:
                bsp_room_walls = True
            self.bsp_refresh()
        elif event.sym in (tcod.event.K_2, tcod.event.K_KP_2):
            bsp_room_walls = not bsp_room_walls
            self.bsp_refresh()
        else:
            super().ev_keydown(event)
class ImageSample(Sample):
    """Demo of the image toolkit: animated blitting and color-channel splits."""
    def __init__(self):
        self.name = "Image toolkit"
        self.img = tcod.image_load("data/img/skull.png")
        # black pixels of the skull become transparent when blitted
        self.img.set_key_color(tcod.black)
        self.circle = tcod.image_load("data/img/circle.png")
    def on_enter(self):
        # uncapped frame rate for the smoothest animation
        tcod.sys_set_fps(0)
    def on_draw(self):
        """Animate the skull; alternate circle blit modes every second."""
        sample_console.clear()
        x = sample_console.width / 2 + math.cos(time.time()) * 10.0
        y = sample_console.height / 2
        scalex = 0.2 + 1.8 * (1.0 + math.cos(time.time() / 2)) / 2.0
        scaley = scalex
        angle = time.perf_counter()
        if int(time.time()) % 2:
            # split the color channels of circle.png
            # the red channel
            sample_console.draw_rect(0, 3, 15, 15, 0, None, (255, 0, 0))
            self.circle.blit_rect(
                sample_console, 0, 3, -1, -1, tcod.BKGND_MULTIPLY
            )
            # the green channel
            sample_console.draw_rect(15, 3, 15, 15, 0, None, (0, 255, 0))
            self.circle.blit_rect(
                sample_console, 15, 3, -1, -1, tcod.BKGND_MULTIPLY
            )
            # the blue channel
            sample_console.draw_rect(30, 3, 15, 15, 0, None, (0, 0, 255))
            self.circle.blit_rect(
                sample_console, 30, 3, -1, -1, tcod.BKGND_MULTIPLY
            )
        else:
            # render circle.png with normal blitting
            self.circle.blit_rect(sample_console, 0, 3, -1, -1, tcod.BKGND_SET)
            self.circle.blit_rect(
                sample_console, 15, 3, -1, -1, tcod.BKGND_SET
            )
            self.circle.blit_rect(
                sample_console, 30, 3, -1, -1, tcod.BKGND_SET
            )
        self.img.blit(
            sample_console, x, y, tcod.BKGND_SET, scalex, scaley, angle
        )
class MouseSample(Sample):
    """Demo of mouse input: displays motion and button state."""
    def __init__(self):
        self.name = "Mouse support"
        # most recent motion event, displayed by on_draw
        self.motion = tcod.event.MouseMotion()
        # left/middle/right button pressed flags
        self.lbut = self.mbut = self.rbut = 0
        self.log = []
    def on_enter(self):
        tcod.mouse_move(320, 200)
        tcod.mouse_show_cursor(True)
        tcod.sys_set_fps(60)
    def ev_mousemotion(self, event: tcod.event.MouseMotion):
        # keep only the latest motion event
        self.motion = event
    def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown):
        if event.button == tcod.event.BUTTON_LEFT:
            self.lbut = True
        elif event.button == tcod.event.BUTTON_MIDDLE:
            self.mbut = True
        elif event.button == tcod.event.BUTTON_RIGHT:
            self.rbut = True
    def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp):
        if event.button == tcod.event.BUTTON_LEFT:
            self.lbut = False
        elif event.button == tcod.event.BUTTON_MIDDLE:
            self.mbut = False
        elif event.button == tcod.event.BUTTON_RIGHT:
            self.rbut = False
    def on_draw(self):
        """Display the current mouse position, motion and button states."""
        sample_console.clear(bg=tcod.grey)
        sample_console.print(
            1,
            1,
            "Mouse position : %4dx%4d\n"
            "Mouse cell : %4dx%4d\n"
            "Mouse movement : %4dx%4d\n"
            "Left button : %s\n"
            "Right button : %s\n"
            "Middle button : %s\n"
            % (
                self.motion.pixel.x,
                self.motion.pixel.y,
                self.motion.tile.x,
                self.motion.tile.y,
                self.motion.tile_motion.x,
                self.motion.tile_motion.y,
                ("OFF", "ON")[self.lbut],
                ("OFF", "ON")[self.rbut],
                ("OFF", "ON")[self.mbut],
            ),
            fg=tcod.light_yellow,
            bg=None,
        )
        sample_console.print(
            1,
            10,
            "1 : Hide cursor\n2 : Show cursor",
            fg=tcod.light_yellow,
            bg=None,
        )
    def ev_keydown(self, event: tcod.event.KeyDown):
        if event.sym == ord("1"):
            tcod.mouse_show_cursor(False)
        elif event.sym == ord("2"):
            tcod.mouse_show_cursor(True)
        else:
            super().ev_keydown(event)
class NameGeneratorSample(Sample):
    """Demo of the name generator: cycles generator sets, printing samples.

    Config files are loaded lazily on the first on_draw call.
    """
    def __init__(self):
        self.name = "Name generator"
        self.curset = 0
        self.nbsets = 0
        self.delay = 0.0
        self.names = []
        self.sets = None
    def on_enter(self):
        tcod.sys_set_fps(60)
    def on_draw(self):
        """Lazily load namegen config files, then draw and grow the list."""
        if self.nbsets == 0:
            # parse all *.cfg files in data/namegen
            # NOTE(review): find(".cfg") > 0 also matches names like
            # "x.cfg.bak"; kept as-is to preserve behavior.
            for file in os.listdir("data/namegen"):
                if file.find(".cfg") > 0:
                    tcod.namegen_parse(os.path.join("data/namegen", file))
            # get the sets list
            self.sets = tcod.namegen_get_sets()
            self.nbsets = len(self.sets)
        # keep only the 15 most recent names on screen
        while len(self.names) > 15:
            self.names.pop(0)
        sample_console.clear(bg=tcod.grey)
        sample_console.print(
            1,
            1,
            "%s\n\n+ : next generator\n- : prev generator"
            % self.sets[self.curset],
            fg=tcod.white,
            bg=None,
        )
        for i in range(len(self.names)):
            sample_console.print(
                SAMPLE_SCREEN_WIDTH - 2,
                2 + i,
                self.names[i],
                fg=tcod.white,
                bg=None,
                alignment=tcod.RIGHT,
            )
        # generate a new name every half second
        self.delay += tcod.sys_get_last_frame_length()
        if self.delay > 0.5:
            self.delay -= 0.5
            self.names.append(tcod.namegen_generate(self.sets[self.curset]))
    def ev_keydown(self, event: tcod.event.KeyDown):
        """Cycle through generator sets with the +/- keys."""
        if event.sym == ord("="):
            self.curset += 1
            if self.curset == self.nbsets:
                self.curset = 0
            self.names.append("======")
        elif event.sym == ord("-"):
            self.curset -= 1
            if self.curset < 0:
                self.curset = self.nbsets - 1
            self.names.append("======")
        else:
            super().ev_keydown(event)
#############################################
# python fast render sample
#############################################
numpy_available = True
use_numpy = numpy_available # default option
SCREEN_W = SAMPLE_SCREEN_WIDTH
SCREEN_H = SAMPLE_SCREEN_HEIGHT
HALF_W = SCREEN_W // 2
HALF_H = SCREEN_H // 2
RES_U = 80 # texture resolution
RES_V = 80
TEX_STRETCH = 5 # texture stretching with tunnel depth
SPEED = 15
LIGHT_BRIGHTNESS = (
    3.5
) # brightness multiplier for all lights (changes their radius)
LIGHTS_CHANCE = 0.07 # chance of a light appearing
MAX_LIGHTS = 6
MIN_LIGHT_STRENGTH = 0.2
LIGHT_UPDATE = (
    0.05
) # how much the ambient light changes to reflect current light sources
AMBIENT_LIGHT = 0.8 # brightness of tunnel texture
# the coordinates of all tiles in the screen, as numpy arrays.
# example: (4x3 pixels screen)
# xc = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]
# yc = [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]
if numpy_available:
    (xc, yc) = np.meshgrid(range(SCREEN_W), range(SCREEN_H))
    # translate coordinates of all pixels to center
    xc = xc - HALF_W
    yc = yc - HALF_H
noise2d = tcod.noise_new(2, 0.5, 2.0)
if numpy_available: # the texture starts empty
    texture = np.zeros((RES_U, RES_V))
class Light:
    """A point light travelling through the tunnel.

    Holds a 3D position (x, y, z), an RGB color (r, g, b) and a
    strength between 0 and 1 that defines its brightness.
    """
    def __init__(self, x, y, z, r, g, b, strength):
        self.x = x
        self.y = y
        self.z = z
        self.r = r
        self.g = g
        self.b = b
        self.strength = strength
class FastRenderSample(Sample):
    """Demo of fast full-console rendering with NumPy: a scrolling tunnel
    with moving colored lights.

    Animation state is kept in module-level globals so it survives between
    on_enter/on_draw calls.
    """
    def __init__(self):
        self.name = "Python fast render"
    def on_enter(self):
        """Reset the tunnel animation state."""
        global frac_t, abs_t, lights, tex_r, tex_g, tex_b
        tcod.sys_set_fps(0)
        sample_console.clear() # render status message
        sample_console.print(
            1, SCREEN_H - 3, "Renderer: NumPy", fg=tcod.white, bg=None
        )
        # time is represented in number of pixels of the texture, start later
        # in time to initialize texture
        frac_t = RES_V - 1
        abs_t = RES_V - 1
        lights = [] # lights list, and current color of the tunnel texture
        tex_r, tex_g, tex_b = 0, 0, 0
    def on_draw(self):
        """Advance the tunnel texture, spawn/move lights and compose the
        final background colors into sample_console."""
        global use_numpy, frac_t, abs_t, lights, tex_r, tex_g, tex_b, xc, yc
        global texture, texture2, brightness2, R2, G2, B2
        time_delta = tcod.sys_get_last_frame_length() * SPEED # advance time
        frac_t += time_delta # increase fractional (always < 1.0) time
        abs_t += time_delta # increase absolute elapsed time
        # integer time units that passed this frame (number of texture pixels
        # to advance)
        int_t = int(frac_t)
        frac_t -= int_t # keep this < 1.0
        # change texture color according to presence of lights (basically, sum
        # them to get ambient light and smoothly change the current color into
        # that)
        ambient_r = AMBIENT_LIGHT * sum(
            light.r * light.strength for light in lights
        )
        ambient_g = AMBIENT_LIGHT * sum(
            light.g * light.strength for light in lights
        )
        ambient_b = AMBIENT_LIGHT * sum(
            light.b * light.strength for light in lights
        )
        alpha = LIGHT_UPDATE * time_delta
        tex_r = tex_r * (1 - alpha) + ambient_r * alpha
        tex_g = tex_g * (1 - alpha) + ambient_g * alpha
        tex_b = tex_b * (1 - alpha) + ambient_b * alpha
        if int_t >= 1:
            # roll texture (ie, advance in tunnel) according to int_t
            # can't roll more than the texture's size (can happen when
            # time_delta is large)
            int_t = int_t % RES_V
            # new pixels are based on absolute elapsed time
            int_abs_t = int(abs_t)
            texture = np.roll(texture, -int_t, 1)
            # replace new stretch of texture with new values
            for v in range(RES_V - int_t, RES_V):
                for u in range(0, RES_U):
                    tex_v = (v + int_abs_t) / float(RES_V)
                    texture[u, v] = tcod.noise_get_fbm(
                        noise2d, [u / float(RES_U), tex_v], 32.0
                    ) + tcod.noise_get_fbm(
                        noise2d, [1 - u / float(RES_U), tex_v], 32.0
                    )
        # squared distance from center,
        # clipped to sensible minimum and maximum values
        sqr_dist = xc ** 2 + yc ** 2
        sqr_dist = sqr_dist.clip(1.0 / RES_V, RES_V ** 2)
        # one coordinate into the texture, represents depth in the tunnel
        v = TEX_STRETCH * float(RES_V) / sqr_dist + frac_t
        v = v.clip(0, RES_V - 1)
        # another coordinate, represents rotation around the tunnel
        u = np.mod(RES_U * (np.arctan2(yc, xc) / (2 * np.pi) + 0.5), RES_U)
        # retrieve corresponding pixels from texture
        brightness = texture[u.astype(int), v.astype(int)] / 4.0 + 0.5
        # use the brightness map to compose the final color of the tunnel
        R = brightness * tex_r
        G = brightness * tex_g
        B = brightness * tex_b
        # create new light source
        if (
            random.random() <= time_delta * LIGHTS_CHANCE
            and len(lights) < MAX_LIGHTS
        ):
            x = random.uniform(-0.5, 0.5)
            y = random.uniform(-0.5, 0.5)
            strength = random.uniform(MIN_LIGHT_STRENGTH, 1.0)
            color = tcod.Color(0, 0, 0) # create bright colors with random hue
            hue = random.uniform(0, 360)
            tcod.color_set_hsv(color, hue, 0.5, strength)
            lights.append(
                Light(x, y, TEX_STRETCH, color.r, color.g, color.b, strength)
            )
        # eliminate lights that are going to be out of view
        lights = [
            light for light in lights if light.z - time_delta > 1.0 / RES_V
        ]
        for light in lights: # render lights
            # move light's Z coordinate with time, then project its XYZ
            # coordinates to screen-space
            light.z -= float(time_delta) / TEX_STRETCH
            xl = light.x / light.z * SCREEN_H
            yl = light.y / light.z * SCREEN_H
            # calculate brightness of light according to distance from viewer
            # and strength, then calculate brightness of each pixel with
            # inverse square distance law
            light_brightness = (
                LIGHT_BRIGHTNESS
                * light.strength
                * (1.0 - light.z / TEX_STRETCH)
            )
            brightness = light_brightness / ((xc - xl) ** 2 + (yc - yl) ** 2)
            # make all pixels shine around this light
            R += brightness * light.r
            G += brightness * light.g
            B += brightness * light.b
        # truncate values
        R = R.clip(0, 255)
        G = G.clip(0, 255)
        B = B.clip(0, 255)
        # fill the screen with these background colors
        sample_console.bg.transpose(2, 1, 0)[...] = (R, G, B)
#############################################
# main loop
#############################################
# F-key -> renderer constant mapping used by the top-level key handler.
RENDERER_KEYS = {
    tcod.event.K_F1: tcod.RENDERER_GLSL,
    tcod.event.K_F2: tcod.RENDERER_OPENGL,
    tcod.event.K_F3: tcod.RENDERER_SDL,
    tcod.event.K_F4: tcod.RENDERER_SDL2,
    tcod.event.K_F5: tcod.RENDERER_OPENGL2,
}
# Menu labels, indexed by renderer number.
RENDERER_NAMES = (
    "F1 GLSL ",
    "F2 OPENGL ",
    "F3 SDL ",
    "F4 SDL2 ",
    "F5 OPENGL2",
)
# All sample instances, in menu order.
SAMPLES = (
    TrueColorSample(),
    OffscreenConsoleSample(),
    LineDrawingSample(),
    NoiseSample(),
    FOVSample(),
    PathfindingSample(),
    BSPSample(),
    ImageSample(),
    MouseSample(),
    NameGeneratorSample(),
    FastRenderSample(),
)
# Index of the currently active sample.
cur_sample = 0
def main():
    """Initialize the root console and run the samples browser main loop."""
    global cur_sample, root_console
    tcod.console_set_custom_font(
        FONT, tcod.FONT_TYPE_GREYSCALE | tcod.FONT_LAYOUT_TCOD
    )
    root_console = tcod.console_init_root(
        80, 50, "python-tcod samples", False, tcod.RENDERER_SDL2, order="F"
    )
    credits_end = False
    SAMPLES[cur_sample].on_enter()
    draw_samples_menu()
    draw_renderer_menu()
    while not tcod.console_is_window_closed():
        root_console.clear()
        draw_samples_menu()
        draw_renderer_menu()
        # render credits
        if not credits_end:
            credits_end = tcod.console_credits_render(60, 43, 0)
        # render the sample
        SAMPLES[cur_sample].on_draw()
        sample_console.blit(root_console, SAMPLE_SCREEN_X, SAMPLE_SCREEN_Y)
        draw_stats()
        tcod.console_flush()
        handle_events()
def handle_events():
    """Dispatch all pending input events to the active sample."""
    for event in tcod.event.get():
        SAMPLES[cur_sample].dispatch(event)
def draw_samples_menu():
    """Render the sample list in the lower-left corner of the root console,
    highlighting the currently selected sample."""
    total = len(SAMPLES)
    for index, sample in enumerate(SAMPLES):
        selected = index == cur_sample
        fg = tcod.white if selected else tcod.grey
        bg = tcod.light_blue if selected else tcod.black
        label = " %s" % sample.name.ljust(19)
        root_console.print(
            2,
            46 - (total - index),
            label,
            fg,
            bg,
            alignment=tcod.LEFT,
        )
def draw_stats():
    """Print frame-time/FPS and elapsed-time statistics, right-aligned."""
    root_console.print(
        79,
        46,
        " last frame : %3d ms (%3d fps)"
        % (tcod.sys_get_last_frame_length() * 1000.0, tcod.sys_get_fps()),
        fg=tcod.grey,
        bg=None,
        alignment=tcod.RIGHT,
    )
    root_console.print(
        79,
        47,
        "elapsed : %8d ms %4.2fs"
        % (time.perf_counter() * 1000, time.perf_counter()),
        fg=tcod.grey,
        bg=None,
        alignment=tcod.RIGHT,
    )
def draw_renderer_menu():
    """Render the renderer menu, highlighting the active renderer."""
    current_renderer = tcod.sys_get_renderer()
    root_console.print(
        42,
        46 - (tcod.NB_RENDERERS + 1),
        "Renderer :",
        fg=tcod.grey,
        bg=tcod.black,
    )
    for i, name in enumerate(RENDERER_NAMES):
        if i == current_renderer:
            fg = tcod.white
            bg = tcod.light_blue
        else:
            fg = tcod.grey
            bg = tcod.black
        root_console.print(42, 46 - tcod.NB_RENDERERS + i, name, fg, bg)
# Run the samples browser when executed as a script.
if __name__ == "__main__":
    main()
| 34.180074 | 79 | 0.517154 |
acf846a2a74d6c25993fe84592d1e5f6d5877e81 | 918 | py | Python | sicwebapp/users/models.py | Dheerajdoppalapudi/Summer-Internship-Club-Website | 9ffa0863d0c86ac895fd0900649c43cf56c8cb59 | [
"MIT"
] | 1 | 2022-01-19T10:51:51.000Z | 2022-01-19T10:51:51.000Z | sicwebapp/users/models.py | Dheerajdoppalapudi/Summer-Internship-Club-Website | 9ffa0863d0c86ac895fd0900649c43cf56c8cb59 | [
"MIT"
] | null | null | null | sicwebapp/users/models.py | Dheerajdoppalapudi/Summer-Internship-Club-Website | 9ffa0863d0c86ac895fd0900649c43cf56c8cb59 | [
"MIT"
] | 3 | 2022-01-18T18:30:35.000Z | 2022-01-20T08:15:05.000Z | from django.db import models
from django.contrib.auth.models import User
# Choices for Event.event_registration: which kind of link the event stores.
EVENT_CHOICES = (
    ('Registration Link', 'Registration Link'),
    ('Recording Link', 'Recording Link'),
)
class Profile(models.Model):
    """One-to-one profile extension of Django's built-in User model."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # avatar image; falls back to a bundled default picture
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')
    def __str__(self):
        return f'{self.user.username} Profile'
class Event(models.Model):
    """A club event that stores either a registration link or a recording link."""
    event_name = models.CharField(max_length=100, blank=False)
    description = models.TextField(blank=False)
    # which kind of link event_link_OR_registration_link holds (EVENT_CHOICES)
    event_registration = models.CharField(max_length=50, choices=EVENT_CHOICES, default='Recording Link')
    date = models.DateField(null=True)
    event_link_OR_registration_link = models.CharField(max_length=200)
    image = models.ImageField(upload_to='images/', blank=True, null=True)
    def __str__(self):
        return f'{self.event_name}'
| 34 | 105 | 0.730937 |
acf8479bd2b175e64fbfc6e04d6cce20e29fa6d8 | 463 | py | Python | examples/example.py | kuz/pyactivetwo | 5a145b1b49d0ab6ddf5ea01e7820ea856a53ebe5 | [
"MIT"
] | 12 | 2015-11-17T08:07:30.000Z | 2021-07-07T16:47:57.000Z | examples/example.py | kuz/pyactivetwo | 5a145b1b49d0ab6ddf5ea01e7820ea856a53ebe5 | [
"MIT"
] | null | null | null | examples/example.py | kuz/pyactivetwo | 5a145b1b49d0ab6ddf5ea01e7820ea856a53ebe5 | [
"MIT"
] | 6 | 2017-02-02T04:17:28.000Z | 2022-01-14T21:44:19.000Z | """
Python BioSemi ActiveTwo: an example how to read and visualize raw signal
Copyright 2015, Ilya Kuzovkin
Licensed under MIT
"""
from pyactivetwo import ActiveTwo
# NOTE(review): this example is Python 2 (uses a "print" statement below).
if __name__ == '__main__':
    # initialize the device
    device = ActiveTwo(host='127.0.0.1', sfreq=512, port=778, nchannels=32, tcpsamples=4)
    # read 30 seconds of signal and print out the data
    for run in range(30):
        rawdata = device.read(duration=1.0)
        print rawdata
| 23.15 | 89 | 0.699784 |
acf8482ba55f989034360279ecf9b121ab97c483 | 1,436 | py | Python | Leetcode/Python Solutions/IntersectionofTwoArraysII.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 1 | 2020-01-06T02:21:56.000Z | 2020-01-06T02:21:56.000Z | Leetcode/Python Solutions/IntersectionofTwoArraysII.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | null | null | null | Leetcode/Python Solutions/IntersectionofTwoArraysII.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 3 | 2021-02-22T17:41:01.000Z | 2022-01-13T05:03:19.000Z | """
LeetCode Problem: 350. Intersection of Two Arrays II
Link: https://leetcode.com/problems/intersection-of-two-arrays-ii/
Language: Python
Written by: Mostofa Adib Shakib
"""
"""
1) Brute Force: Uses two arrays
Time complexity: O(n^2)
2) Hashmap
Time complexity: O(n)
"""
# This solution uses hashmap
class Solution(object):
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of nums1 and nums2.

        Counts elements of the longer list in a dict, then collects
        matches while walking the shorter list, so the result order
        follows the shorter list.  O(m + n) time, O(m) space.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        longer, shorter = (
            (nums1, nums2) if len(nums1) >= len(nums2) else (nums2, nums1)
        )
        counts = {}
        for value in longer:
            counts[value] = counts.get(value, 0) + 1
        common = []
        for value in shorter:
            remaining = counts.get(value, 0)
            if remaining > 0:
                counts[value] = remaining - 1
                common.append(value)
        return common
# This is a brute force solution
class Solution(object):
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of nums1 and nums2 (brute force).

        Fixes over the original: works on a copy so the caller's list is
        no longer mutated, and uses list.remove instead of the redundant
        nums2.pop(nums2.index(i)) double scan.  Still O(n*m) time.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        if len(nums2) > len(nums1):
            nums1, nums2 = nums2, nums1
        # copy so the caller's argument is not destroyed
        remaining = list(nums2)
        result = []
        for value in nums1:
            if value in remaining:
                remaining.remove(value)
                result.append(value)
        return result
| 19.405405 | 66 | 0.475627 |
acf8485494cd966dda4f243db67788e84ff36685 | 647 | py | Python | rec_to_nwb/processing/tools/validate_parameters.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | [
"Apache-2.0"
] | 8 | 2020-05-29T13:48:35.000Z | 2021-11-19T04:24:48.000Z | rec_to_nwb/processing/tools/validate_parameters.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | [
"Apache-2.0"
] | 8 | 2020-07-13T00:42:35.000Z | 2020-11-16T16:17:12.000Z | rec_to_nwb/processing/tools/validate_parameters.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | [
"Apache-2.0"
] | 1 | 2020-08-28T01:34:35.000Z | 2020-08-28T01:34:35.000Z | from rec_to_nwb.processing.exceptions.not_equal_param_length_exception import NotEqualParamLengthException
from rec_to_nwb.processing.exceptions.none_param_exception import NoneParamException
def validate_parameters_not_none(class_name, *args):
for arg in args:
if arg is None:
raise NoneParamException('None parameter passed to ' + class_name)
def validate_parameters_equal_length(class_name, *args):
previous_arg = args[0]
for arg in args:
if len(arg) != len(previous_arg):
raise NotEqualParamLengthException('Parameters lengths are not equal in ' + class_name)
previous_arg = arg
| 38.058824 | 106 | 0.755796 |
acf84ab8688ca81c0caabaf5b9b5de97ac6f1dc1 | 1,019 | py | Python | class_with_useless_args.py | csitedexperts/Python2.xPractice | fd03ceebe920a76ca47aec3651a69ba5a1cc412b | [
"Apache-2.0"
] | 3 | 2019-04-13T04:00:42.000Z | 2020-10-02T01:14:42.000Z | class_with_useless_args.py | csitedexperts/Python2.xPractice | fd03ceebe920a76ca47aec3651a69ba5a1cc412b | [
"Apache-2.0"
] | null | null | null | class_with_useless_args.py | csitedexperts/Python2.xPractice | fd03ceebe920a76ca47aec3651a69ba5a1cc412b | [
"Apache-2.0"
] | 2 | 2019-03-30T18:55:32.000Z | 2020-05-10T16:30:34.000Z |
class Add2Numbers():
num1 = 0
num2 = 0
sum = 0
def __init__(self):
pass
# this __init__ coould not be used
def EnterAndAddAny2Numbers(self, num1, num2):
print "Inside the EnterAndAddAny2Numbers()"
self.num1 = float(raw_input("Enter num1: "))
self.num2 = float(raw_input("Enter num2: "))
print "num1 = %r" %self.num1
print "num2 = %r" %self.num2
sum = float(self.num1) + float(self.num2)
print "sum = %r" %sum
#sum = num1 + num2)
# Demo script (Python 2): set the attributes directly, then call the method.
n = Add2Numbers()
n.num1 = 100
n.num2 = 200
sum = n.num1 + n.num2
print "\nWithout calling the method .... "
print "num1 = %s" %n.num1
print "num2 = %s" %n.num2
print "sum = %s" %sum
print "\nBut this is more convenient, just calling the methods ... "
n.EnterAndAddAny2Numbers(0, 0)
print """
\nNote that the given 0, 0 arguments have not been used here...
But ...
n.EnterAndAddAny2Numbers()
... would cause an error
"""
| 22.152174 | 68 | 0.570167 |
acf84b637f00932a3251e79c2132a4267eee17f0 | 5,292 | py | Python | deepstream/python_apps/camera_to_pravega.py | jdmaguire/gstreamer-pravega | 5d684c1699284632fbb5333bb30e3aa52a140a5b | [
"Apache-2.0"
] | 7 | 2021-04-27T07:53:20.000Z | 2022-03-28T09:33:40.000Z | deepstream/python_apps/camera_to_pravega.py | mcne65/BlockyMusic | 16da5c3461296ac2dd81f3f1a21b5f32353fce6a | [
"Apache-2.0"
] | 13 | 2021-04-29T01:00:07.000Z | 2021-08-04T23:49:51.000Z | deepstream/python_apps/camera_to_pravega.py | mcne65/BlockyMusic | 16da5c3461296ac2dd81f3f1a21b5f32353fce6a | [
"Apache-2.0"
] | 1 | 2021-07-30T21:54:45.000Z | 2021-07-30T21:54:45.000Z | #!/usr/bin/env python3
#
# Copyright (c) Dell Inc., or its subsidiaries. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import argparse
import logging
import os
import sys
import time
import traceback
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
def main():
    """Capture video from a Jetson camera, H.264-encode it, mux it into an
    MPEG transport stream and write it to Pravega via the pravegasink
    GStreamer element.
    """
    parser = argparse.ArgumentParser(description='Record video from a camera to a Pravega stream')
    parser.add_argument('--bitrate_kilobytes_per_sec', type=float, default=1000.0)
    parser.add_argument('--controller', default='192.168.1.123:9090')
    parser.add_argument('--log_level', type=int, default=logging.INFO, help='10=DEBUG,20=INFO')
    # parser.add_argument('--no_create_scope', dest='create_scope', action='store_false')
    # parser.add_argument('--no_create_stream', dest='create_stream', action='store_false')
    parser.add_argument('--scope', default='examples')
    parser.add_argument('--stream', default='jetsoncamera1')
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)
    logging.info('args=%s' % str(args))

    # Set GStreamer plugin path
    # gst_plugin_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "target/debug")
    # logging.debug("gst_plugin_dir=%s" % gst_plugin_dir)
    # os.environ["GST_PLUGIN_PATH"] = gst_plugin_dir

    # Set GStreamer log level
    os.environ["GST_DEBUG"] = "pravegasink:INFO"

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    logging.info("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        raise Exception('Unable to create Pipeline')

    # Source element for reading from the Jetson Nano camera
    logging.info("Creating Source")
    source = Gst.ElementFactory.make("nvarguscamerasrc", "source")
    if not source:
        raise Exception('Unable to create nvarguscamerasrc')
    # Pin the camera output format so downstream caps negotiation is fixed.
    caps_source = Gst.ElementFactory.make("capsfilter", "caps_source")
    if not caps_source:
        raise Exception('Unable to create capsfilter')
    caps_source.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM),width=1280,height=720,framerate=30/1,format=NV12"))

    # videoconvert to make sure a superset of raw formats are supported
    logging.info("Creating Video Converter")
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "vidconvsrc")
    if not vidconvsrc:
        raise Exception('Unable to create videoconvert')

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvidconv", "nvvidconvsrc")
    if not nvvidconvsrc:
        raise Exception('Unable to create nvvidconv')
    # NOTE(review): caps_vidconvsrc is created and configured below but is
    # never added to the pipeline nor linked -- confirm whether it was
    # meant to sit between nvvidconvsrc and the encoder.
    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "caps_vidconvsrc")
    if not caps_vidconvsrc:
        raise Exception('Unable to create capsfilter')
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))

    # Hardware H.264 encoder in constant-bitrate mode.
    video_encoder = Gst.ElementFactory.make("nvv4l2h264enc", "video_encoder")
    if not video_encoder:
        raise Exception('Unable to create nvv4l2h264enc')
    video_encoder.set_property("maxperf-enable", 1)
    video_encoder.set_property("preset-level", 1)
    video_encoder.set_property("control-rate", 1)
    # Convert kilobytes/s from the CLI into bits/s for the encoder property.
    bitrate_bits_per_sec = int(8000 * args.bitrate_kilobytes_per_sec)
    video_encoder.set_property("bitrate", bitrate_bits_per_sec)

    mpegtsmux = Gst.ElementFactory.make("mpegtsmux", "mpegtsmux")
    if not mpegtsmux:
        raise Exception('Unable to create mpegtsmux')

    # Sink writes the muxed stream to scope/stream on the Pravega controller.
    pravegasink = Gst.ElementFactory.make("pravegasink", "pravegasink")
    if not pravegasink:
        raise Exception('Unable to create pravegasink')
    pravegasink.set_property('controller', args.controller)
    pravegasink.set_property('stream', '%s/%s' % (args.scope, args.stream))

    logging.info("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps_source)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(video_encoder)
    pipeline.add(mpegtsmux)
    pipeline.add(pravegasink)

    # we link the elements together
    logging.info("Linking elements in the Pipeline")
    source.link(caps_source)
    caps_source.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(video_encoder)
    video_encoder.link(mpegtsmux)
    mpegtsmux.link(pravegasink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # start play back and listen to events
    logging.info("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        logging.error(traceback.format_exc())
        # Cleanup GStreamer elements
        pipeline.set_state(Gst.State.NULL)
        raise


if __name__ == '__main__':
    main()
| 36.75 | 135 | 0.720522 |
acf84c41c160e52c77f8eb43101069862020ddc0 | 3,209 | py | Python | jenkins_jobs/xml_config.py | unbreakab1e/jenkins-job-builder | d5a60242a9fbc6b9c87ac93887ac1a69f88939dc | [
"Apache-2.0"
] | null | null | null | jenkins_jobs/xml_config.py | unbreakab1e/jenkins-job-builder | d5a60242a9fbc6b9c87ac93887ac1a69f88939dc | [
"Apache-2.0"
] | null | null | null | jenkins_jobs/xml_config.py | unbreakab1e/jenkins-job-builder | d5a60242a9fbc6b9c87ac93887ac1a69f88939dc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage Jenkins XML config file output.
import hashlib
import pkg_resources
from xml.dom import minidom
import xml.etree.ElementTree as XML
from jenkins_jobs import errors
__all__ = [
"XmlJobGenerator",
"XmlJob"
]
def remove_ignorable_whitespace(node):
"""Remove insignificant whitespace from XML nodes
It should only remove whitespace in between elements and sub elements.
This should be safe for Jenkins due to how it's XML serialization works
but may not be valid for other XML documents. So use this method with
caution outside of this specific library.
"""
# strip tail whitespace if it's not significant
if node.tail and node.tail.strip() == "":
node.tail = None
for child in node:
# only strip whitespace from the text node if there are subelement
# nodes as this means we are removing leading whitespace before such
# sub elements. Otherwise risk removing whitespace from an element
# that only contains whitespace
if node.text and node.text.strip() == "":
node.text = None
remove_ignorable_whitespace(child)
class XmlJob(object):
def __init__(self, xml, name):
self.xml = xml
self.name = name
def md5(self):
return hashlib.md5(self.output()).hexdigest()
def output(self):
out = minidom.parseString(XML.tostring(self.xml, encoding='UTF-8'))
return out.toprettyxml(indent=' ', encoding='utf-8')
class XmlJobGenerator(object):
""" This class is responsible for generating Jenkins Configuration XML from
a compatible intermediate representation of Jenkins Jobs.
"""
def __init__(self, registry):
self.registry = registry
def generateXML(self, jobdict_list):
xml_jobs = []
for job in jobdict_list:
xml_jobs.append(self.__getXMLForJob(job))
return xml_jobs
def __getXMLForJob(self, data):
kind = data.get('project-type', 'freestyle')
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.projects', name=kind):
Mod = ep.load()
mod = Mod(self.registry)
xml = mod.root_xml(data)
self.__gen_xml(xml, data)
job = XmlJob(xml, data['name'])
return job
raise errors.JenkinsJobsException("Unrecognized project type: '%s'"
% kind)
def __gen_xml(self, xml, data):
for module in self.registry.modules:
if hasattr(module, 'gen_xml'):
module.gen_xml(xml, data)
| 32.414141 | 79 | 0.664693 |
acf84ca81a1277de46a6d2d528f064efda9abb08 | 2,662 | py | Python | scripts/rebalance_uni_optimism.py | SBfin/UniStrategy | 0380a8f49a0b4eace72a56a2d7831100bdf4d276 | [
"Unlicense"
] | 2 | 2021-11-10T17:41:11.000Z | 2022-03-17T08:46:05.000Z | scripts/rebalance_uni_optimism.py | i001962/UniStrategy | 0380a8f49a0b4eace72a56a2d7831100bdf4d276 | [
"Unlicense"
] | null | null | null | scripts/rebalance_uni_optimism.py | i001962/UniStrategy | 0380a8f49a0b4eace72a56a2d7831100bdf4d276 | [
"Unlicense"
] | 1 | 2021-11-10T17:41:05.000Z | 2021-11-10T17:41:05.000Z | from brownie import accounts, UniStrategy, UniVault, project, Contract
from brownie.network.gas.strategies import ExponentialScalingStrategy
import os
import math
"""
REBALANCE VAULT
ON OPTIMISM
"""
def main():
FACTORY="0x1F98431c8aD98523631AE4a59f267346ea31F984"
UniswapV3Core = project.load("Uniswap/uniswap-v3-core@1.0.0")
factory = UniswapV3Core.interface.IUniswapV3Factory(FACTORY)
eth = Contract("0x7Dd703927F7BD4972b78F64A43A48aC5e9185954")
decimal0 = 1e18
dai = Contract("0xA6d0aE178b75b5BECfC909Aa408611cbc1a30170")
decimal1 = 1e18
strategy = UniStrategy.at("0x6b91e1A4f29543cD35dB0796b46ED51ef2202f77")
vault = UniVault.at("0xa919F8Dd481dE4050F660738c0052a17d62c1d09")
pool = UniswapV3Core.interface.IUniswapV3Pool(factory.getPool(eth, dai, 3000))
keeper = accounts.load("deployer")
user = keeper
balance = keeper.balance()
price = (1.0001 ** pool.slot0()[1]) #price = (pool.slot0()[0] / (1 << 96))**2
min_sqrt = int(math.sqrt(price*0.9) * (1 << 96)) # sqrt(100) * 1*(2**96) For positive and negative x values, x << y is equivalent to x * 2**y
max_sqrt = int(math.sqrt(price*2) * (1 << 96))
gas_strategy = ExponentialScalingStrategy("50 gwei", "1000 gwei")
print("price is " + str(price) + "\n"
+ "min_sqrt is " + str(min_sqrt) + "\n" +
"max_sqrt is " + str(max_sqrt))
"""
eth.approve(vault, 100e18, {"from": user})
usdc.approve(vault, 10000e18, {"from": user})
"""
#vault.deposit(0.5*1e18, 90000*1e6, 0, 0, user, {"from": user})
balance0 = vault.getBalance0() / decimal0
balance1 = vault.getBalance1() / decimal1
value0 = (vault.getBalance0() / decimal0)*(price * (decimal0 - decimal1))
value1 = vault.getBalance1() / decimal1
print("In vault: \n" +
"eth Q \n" + str(balance0) + "\n" +
"usdc Q \n" + str(balance1) + "\n"
"eth value \n" + str(value0) + "\n" +
"usdc value \n" + str(value1) + "\n")
amount = (int(balance0*((value0-(value1))/2)/(value0)))*decimal0 if value0 > value1 else (int(balance1*((value0-(value1))/2)/(value0)))*decimal1
print("amount to swap " + str(amount))
sqrt = max_sqrt - 1 if amount < 0 else min_sqrt
print(str(sqrt))
try:
strategy.rebalance(amount, sqrt, {"from": keeper, "gas_price": gas_strategy})
print("Rebalanced!")
except ValueError as e:
print(e)
print(f"Gas used: {(balance - keeper.balance()) / 1e18:.4f} ETH")
print(f"New balance: {keeper.balance() / 1e18:.4f} ETH")
print(f"Vault eth: {vault.getBalance0() / 1e18:.4f} ETH")
print(f"Vault usdc: {vault.getBalance1() / 1e6:.4f} DAI")
| 40.333333 | 148 | 0.648009 |
acf84ceec5e4486b0657ae5b75875ca51d35ef47 | 4,262 | gyp | Python | chrome_elf/chrome_elf.gyp | iplo/Chain | 8bc8943d66285d5258fffc41bed7c840516c4422 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | chrome_elf/chrome_elf.gyp | JasonEric/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z | chrome_elf/chrome_elf.gyp | JasonEric/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/util/version.gypi',
'../build/win_precompile.gypi',
'blacklist.gypi',
],
'targets': [
{
'target_name': 'chrome_elf',
'type': 'shared_library',
'include_dirs': [
'..',
],
'sources': [
'chrome_elf.def',
'chrome_elf_main.cc',
'chrome_elf_main.h',
],
'dependencies': [
'blacklist',
'chrome_elf_lib',
],
'msvs_settings': {
'VCLinkerTool': {
'BaseAddress': '0x01c20000',
# Set /SUBSYSTEM:WINDOWS.
'SubSystem': '2',
'AdditionalDependencies!': [
'user32.lib',
],
'IgnoreDefaultLibraryNames': [
'user32.lib',
],
},
},
},
{
'target_name': 'chrome_elf_unittests_exe',
'product_name': 'chrome_elf_unittests',
'type': 'executable',
'sources': [
'blacklist/test/blacklist_test.cc',
'create_file/chrome_create_file_unittest.cc',
'elf_imports_unittest.cc',
'ntdll_cache_unittest.cc',
],
'include_dirs': [
'..',
'<(SHARED_INTERMEDIATE_DIR)',
],
'dependencies': [
'chrome_elf_lib',
'../base/base.gyp:base',
'../base/base.gyp:run_all_unittests',
'../base/base.gyp:test_support_base',
'../sandbox/sandbox.gyp:sandbox',
'../testing/gtest.gyp:gtest',
'blacklist',
'blacklist_test_dll_1',
'blacklist_test_dll_2',
'blacklist_test_dll_3',
'blacklist_test_main_dll',
],
'conditions': [
['component=="shared_library"', {
# In component builds, all targets depend on chrome_redirects by
# default. Remove it here so we are able to test it.
'dependencies!': [
'../chrome_elf/chrome_elf.gyp:chrome_redirects',
],
}],
],
},
{
# A dummy target to ensure that chrome_elf.dll and chrome.exe gets built
# when building chrome_elf_unittests.exe without introducing an
# explicit runtime dependency.
'target_name': 'chrome_elf_unittests',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:chrome',
'chrome_elf',
'chrome_elf_unittests_exe',
],
},
{
'target_name': 'chrome_elf_lib',
'type': 'static_library',
'include_dirs': [
'..',
],
'sources': [
'chrome_elf_constants.cc',
'chrome_elf_constants.h',
'chrome_elf_types.h',
'create_file/chrome_create_file.cc',
'create_file/chrome_create_file.h',
'ntdll_cache.cc',
'ntdll_cache.h',
],
'conditions': [
['component=="shared_library"', {
# In component builds, all targets depend on chrome_redirects by
# default. Remove it here to avoid a circular dependency.
'dependencies!': [
'../chrome_elf/chrome_elf.gyp:chrome_redirects',
],
}],
],
},
], # targets
'conditions': [
['component=="shared_library"', {
'targets': [
{
'target_name': 'chrome_redirects',
'type': 'shared_library',
'include_dirs': [
'..',
],
'sources': [
'chrome_redirects.def',
],
'dependencies': [
'chrome_elf_lib',
],
'msvs_settings': {
'VCLinkerTool': {
'BaseAddress': '0x01c10000',
# Set /SUBSYSTEM:WINDOWS.
'SubSystem': '2',
},
},
'conditions': [
['component=="shared_library"', {
# In component builds, all targets depend on chrome_redirects by
# default. Remove it here to avoid a circular dependency.
'dependencies!': [
'../chrome_elf/chrome_elf.gyp:chrome_redirects',
],
}],
],
},
],
}],
],
}
| 27.496774 | 78 | 0.513609 |
acf84daa41b28f95a710764bf1f0797572541ddb | 1,416 | py | Python | catkin_ws/build/srrg2_map_server/catkin_generated/generate_cached_setup.py | laaners/progetto-labiagi_pick_e_delivery | 3453bfbc1dd7562c78ba06c0f79b069b0a952c0e | [
"MIT"
] | null | null | null | catkin_ws/build/srrg2_map_server/catkin_generated/generate_cached_setup.py | laaners/progetto-labiagi_pick_e_delivery | 3453bfbc1dd7562c78ba06c0f79b069b0a952c0e | [
"MIT"
] | null | null | null | catkin_ws/build/srrg2_map_server/catkin_generated/generate_cached_setup.py | laaners/progetto-labiagi_pick_e_delivery | 3453bfbc1dd7562c78ba06c0f79b069b0a952c0e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/alessiohu/Desktop/progetto-labiagi/catkin_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/alessiohu/Desktop/progetto-labiagi/catkin_ws/devel/.private/srrg2_map_server/env.sh')
output_filename = '/home/alessiohu/Desktop/progetto-labiagi/catkin_ws/build/srrg2_map_server/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| 45.677419 | 127 | 0.747881 |
acf84eae060e3e2d8e853938c0153b63a42ec502 | 894 | py | Python | ensemble_submits/ensemble.py | tecufly/TextClassify | aa479ae0941c008602631c50124d8c07d159bfb1 | [
"Apache-2.0"
] | 216 | 2019-11-27T05:32:00.000Z | 2022-01-30T05:01:29.000Z | ensemble_submits/ensemble.py | linhaow/TextClassification | aa479ae0941c008602631c50124d8c07d159bfb1 | [
"Apache-2.0"
] | 2 | 2019-12-03T14:39:51.000Z | 2020-01-05T09:05:06.000Z | ensemble_submits/ensemble.py | linhaow/TextClassification | aa479ae0941c008602631c50124d8c07d159bfb1 | [
"Apache-2.0"
] | 49 | 2019-11-27T05:32:02.000Z | 2022-02-06T08:29:30.000Z | import pandas as pd
import numpy as np
#vote 文件
submits_path='./submits'
#需要进行vote的文件
submits = ['0.82414645.csv','0.8172323.csv','0.81546885000.csv']
#vote时文件的权重
file_weight = [3,2,2]
#vote时标签的权重
label_weight =[1,1,1]
files = []
data = []
for f in submits:
if 'csv' in f:
files.append(f)
data.append(pd.read_csv(submits_path+f).values)
print(len(files))
output = np.zeros([len(data[0]), 3])
for i in range(len(data)):
for j in range(len(data[0])):
if data[i][j][1] == 0:
output[j][0] += file_weight[i]*label_weight
elif data[i][j][1] == 1:
output[j][1] += file_weight[i]*label_weight
elif data[i][j][1] == 2:
output[j][2] += file_weight[i]*label_weight
#读取提交模板,需要设置
submit = pd.read_csv('sub_teample.csv')
submit['label'] = np.argmax(output, axis = 1)
submit.to_csv('submit.csv',index=None)
| 24.162162 | 64 | 0.608501 |
acf84edbd7e5525f102f1f5c2c2081113f9644ca | 2,230 | py | Python | locations/spiders/sierra.py | radarlabs/alltheplaces | 0291b8d58dc66270cc288351eeba3d183bdc4297 | [
"MIT"
] | null | null | null | locations/spiders/sierra.py | radarlabs/alltheplaces | 0291b8d58dc66270cc288351eeba3d183bdc4297 | [
"MIT"
] | null | null | null | locations/spiders/sierra.py | radarlabs/alltheplaces | 0291b8d58dc66270cc288351eeba3d183bdc4297 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class SierraSpider(scrapy.Spider):
name = "sierra"
allowed_domains = ["sierra.com"]
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}
def start_requests(self):
yield scrapy.Request(url='https://www.sierra.com/lp2/retail-stores/', headers=self.headers)
def parse_hours(self, hours):
opening_hours = OpeningHours()
for hour in hours:
try:
opening_hours.add_range(day=hour["dayOfWeek"].replace("http://schema.org/", "")[:2],
open_time=hour["opens"],
close_time=hour["closes"],
time_format="%H:%M:%S")
except:
continue # closed or no time range given
return opening_hours.as_opening_hours()
def parse_store(self, response):
data = json.loads(response.xpath('//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()').extract_first())
properties = {
'name': data["name"],
'ref': data["branchCode"],
'addr_full': data["address"]["streetAddress"],
'city': data["address"]["addressLocality"],
'state': data["address"]["addressRegion"],
'postcode': data["address"]["postalCode"],
'phone': data.get("telephone"),
'website': data.get("url") or response.url,
'lat': float(data["geo"]["latitude"]),
'lon': float(data["geo"]["longitude"]),
}
hours = self.parse_hours(data['openingHoursSpecification'])
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
def parse(self, response):
urls = response.xpath('//li[contains(@class, "item")]//h4/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), headers=self.headers, callback=self.parse_store)
| 36.557377 | 145 | 0.576233 |
acf84f5021f549fe327944055322b93c56431b25 | 2,444 | py | Python | var/spack/repos/builtin/packages/intel-daal/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/intel-daal/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/intel-daal/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class IntelDaal(IntelPackage):
"""Intel Data Analytics Acceleration Library."""
homepage = "https://software.intel.com/en-us/daal"
version('2019.0.117', 'd42fb6c3e8b31b1288049e89df37f2e8',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/13577/l_daal_2019.0.117.tgz")
version('2018.3.222', 'e688825c563e357b7b626ece610d6a85',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/13007/l_daal_2018.3.222.tgz")
version('2018.2.199', 'd015ff34a87a18922736b5fba0d0b0e0',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12727/l_daal_2018.2.199.tgz")
version('2018.1.163', '12a9586734a03a956095440161fd741a',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12414/l_daal_2018.1.163.tgz")
version('2018.0.128', '5779e670f67c33cc1c6cdcdca5e4636e',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12072/l_daal_2018.0.128.tgz")
version('2017.4.239', 'b47e9b92d948ee312e8a98170a1c0640',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12148/l_daal_2017.4.239.tgz")
version('2017.3.196', '93221eaeb560917a129d42fb2cf02500',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11546/l_daal_2017.3.196.tgz")
version('2017.2.174', 'f067d5d7b0f70914fba1f78da0361065',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11308/l_daal_2017.2.174.tgz")
version('2017.1.132', '56eef8cc45219f92a27de03ae914eba4',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/10983/l_daal_2017.1.132.tgz")
version('2017.0.098', 'b4eb234de12beff4a5cba4b81ea60673',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9664/l_daal_2017.0.098.tgz")
version('2016.3.210', 'ad747c0dd97dace4cad03cf2266cad28',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9099/l_daal_2016.3.210.tgz")
version('2016.2.181', 'aad2aa70e5599ebfe6f85b29d8719d46',
url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/8687/l_daal_2016.2.181.tgz")
provides('daal')
| 61.1 | 109 | 0.732406 |
acf85054ba01bd71196dcee47758cff584ada189 | 1,209 | py | Python | h2o-py/tests/testdir_algos/deeplearning/pyunit_metricsDeeplearning.py | kyoren/https-github.com-h2oai-h2o-3 | 77b27109c84c4739f9f1b7a3078f8992beefc813 | [
"Apache-2.0"
] | 1 | 2016-09-30T05:58:18.000Z | 2016-09-30T05:58:18.000Z | h2o-py/tests/testdir_algos/deeplearning/pyunit_metricsDeeplearning.py | kyoren/https-github.com-h2oai-h2o-3 | 77b27109c84c4739f9f1b7a3078f8992beefc813 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_algos/deeplearning/pyunit_metricsDeeplearning.py | kyoren/https-github.com-h2oai-h2o-3 | 77b27109c84c4739f9f1b7a3078f8992beefc813 | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.insert(1,"../../../")
import h2o, tests
def deep_learning_metrics_test():
# connect to existing cluster
df = h2o.import_file(path=tests.locate("smalldata/logreg/prostate.csv"))
df.drop("ID") # remove ID
df['CAPSULE'] = df['CAPSULE'].asfactor() # make CAPSULE categorical
vol = df['VOL']
vol[vol == 0] = float("nan") # 0 VOL means 'missing'
r = vol.runif() # random train/test split
train = df[r < 0.8]
test = df[r >= 0.8]
# See that the data is ready
train.describe()
train.head()
train.tail()
test.describe()
test.head()
test.tail()
# Run DeepLearning
print "Train a Deeplearning model: "
dl = h2o.deeplearning(x = train[1:],
y = train['CAPSULE'],
epochs = 100,
hidden = [10, 10, 10],
loss = 'CrossEntropy')
print "Binomial Model Metrics: "
print
dl.show()
dl.model_performance(test).show()
if __name__ == "__main__":
tests.run_test(sys.argv, deep_learning_metrics_test)
| 28.116279 | 76 | 0.514475 |
acf85123cbae1a2a02ab7d7baf5cab0ed491bc95 | 1,154 | py | Python | lale/__init__.py | vishalbelsare/lale | 654ca29ec0234b478d26724a25df28b28f5c0bc0 | [
"Apache-2.0"
] | null | null | null | lale/__init__.py | vishalbelsare/lale | 654ca29ec0234b478d26724a25df28b28f5c0bc0 | [
"Apache-2.0"
] | null | null | null | lale/__init__.py | vishalbelsare/lale | 654ca29ec0234b478d26724a25df28b28f5c0bc0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

# Package version string.
__version__ = "0.5.7"

try:
    # This variable is injected in the __builtins__ by the build
    # process. It is used to not try to import rest of the lale packages when
    # it is being installed.
    __LALE_SETUP__  # type: ignore
except NameError:
    # Normal import (not running under the build process).
    __LALE_SETUP__ = False

if __LALE_SETUP__:  # type: ignore
    sys.stderr.write("Partial import of lale during the build process.\n")
    # We are not importing the rest of lale during the build
    # process.
else:
    # all other code will go here.
    from .operator_wrapper import wrap_imported_operators
acf8518ef3bb1320c6ef38fb0d3961cbd37f98b3 | 3,858 | py | Python | task5/core/features.py | tqbl/dcase2020_task5 | 413afb0ed4ed528597687ed3401d905eff42440c | [
"ISC"
] | 5 | 2020-06-22T21:42:47.000Z | 2022-03-22T23:47:26.000Z | task5/core/features.py | tqbl/dcase2020_task5 | 413afb0ed4ed528597687ed3401d905eff42440c | [
"ISC"
] | 1 | 2022-03-21T12:12:32.000Z | 2022-03-27T08:17:41.000Z | task5/core/features.py | tqbl/dcase2020_task5 | 413afb0ed4ed528597687ed3401d905eff42440c | [
"ISC"
] | 1 | 2020-06-30T01:46:33.000Z | 2020-06-30T01:46:33.000Z | import ast
from datetime import datetime
import h5py
import librosa
import numpy as np
def extract(audio_paths, extractor, output_path,
clip_duration=None, overwrite=False):
mode = 'w' if overwrite else 'a'
with h5py.File(output_path, mode) as f:
# Create/load the relevant HDF5 datasets
size = len(audio_paths)
str_dtype = h5py.string_dtype(encoding='utf-8')
timestamps = f.require_dataset('timestamps', (size,), str_dtype)
if clip_duration is None:
dtype = h5py.vlen_dtype('float32')
feats = f.require_dataset('F', (size,), dtype)
# Record shape of reference feature vector. Used to infer
# the original shape of a vector prior to flattening.
feats.attrs['shape'] = extractor.output_shape(1)[1:]
else:
shape = (size,) + extractor.output_shape(clip_duration)
feats = f.require_dataset('F', shape, dtype='float32')
indexes = dict()
for i, path in enumerate(audio_paths):
# Associate index of feature vector with file name
indexes[path.name] = i
# Skip if feature vector exists and should not be recomputed
if timestamps[i] and not overwrite:
continue
# Extract feature vector
x, sample_rate = librosa.load(
path, sr=None, duration=clip_duration)
if clip_duration is None:
feats[i] = extractor.extract(x, sample_rate).flatten()
else:
x = librosa.util.fix_length(x, sample_rate * clip_duration)
feats[i] = extractor.extract(x, sample_rate)
# Record timestamp in ISO format
timestamps[i] = datetime.now().isoformat()
# Store `indexes` dictionary as a string
f.require_dataset('indexes', (), dtype=str_dtype)
if indexes:
indexes_prev = ast.literal_eval(f['indexes'][()] or '{}')
f['indexes'][()] = str({**indexes_prev, **indexes})
def load(path, file_names=None):
with h5py.File(path, 'r') as f:
# Determine the corresponding indexes for each file name
mapping = ast.literal_eval(f['indexes'][()])
indexes = np.array([mapping[name] for name in file_names])
# Ensure indexes are in ascending order for h5py indexing
# Reverse the permutation after loading the h5py dataset subset
sort_indexes = indexes.argsort()
unsort_indexes = sort_indexes.argsort()
indexes = indexes[sort_indexes]
x = np.array(f['F'][indexes])[unsort_indexes]
shape = f['F'].attrs.get('shape')
return np.array(x), shape
class LogmelExtractor:
def __init__(self,
sample_rate=32000,
n_fft=1024,
hop_length=512,
n_mels=64,
):
self.sample_rate = sample_rate
self.n_fft = n_fft
self.hop_length = hop_length
# Create Mel filterbank matrix
self.mel_fb = librosa.filters.mel(sr=sample_rate,
n_fft=n_fft,
n_mels=n_mels,
)
def output_shape(self, clip_duration):
n_samples = clip_duration * self.sample_rate
n_frames = n_samples // self.hop_length + 1
return (n_frames, self.mel_fb.shape[0])
def extract(self, x, sample_rate):
# Resample to target sample rate
x = librosa.resample(x, sample_rate, self.sample_rate)
# Compute mel-scaled spectrogram
D = librosa.stft(x, n_fft=self.n_fft, hop_length=self.hop_length)
S = np.dot(self.mel_fb, np.abs(D) ** 2).T
# Apply log non-linearity
return librosa.power_to_db(S, ref=0., top_db=None)
| 37.096154 | 75 | 0.587869 |
acf852b9b66d6708f8c7681372f28bef7fcd8f25 | 1,426 | py | Python | tests/natural_bm/test_models.py | alexhunterlang/natural_bm | b2a1cb15694f4f3a80a3a1cc6f8423892563806d | [
"MIT"
] | 1 | 2018-06-07T00:54:17.000Z | 2018-06-07T00:54:17.000Z | tests/natural_bm/test_models.py | alexhunterlang/natural_bm | b2a1cb15694f4f3a80a3a1cc6f8423892563806d | [
"MIT"
] | null | null | null | tests/natural_bm/test_models.py | alexhunterlang/natural_bm | b2a1cb15694f4f3a80a3a1cc6f8423892563806d | [
"MIT"
] | null | null | null | #%%
import pytest
from natural_bm import initializers, optimizers, training
from natural_bm.models import Model
from natural_bm.datasets import random
from natural_bm.callbacks import History
from natural_bm.utils_testing import nnet_for_testing
#%%
# NOTE: the dbm tests are slow, so I am leaving it out for now
#@pytest.mark.parametrize('nnet_type', ['rbm', 'dbm', 'dbm_complex'],
# ids=['rbm', 'dbm', 'dbm_complex'])
@pytest.mark.parametrize('nnet_type', ['rbm'], ids=['rbm'])
def test_models(nnet_type):
batch_size = 6
n_epoch = 1
data = random.Random('probability')
nnet = nnet_for_testing(nnet_type)
nnet = initializers.init_standard(nnet, data)
optimizer = optimizers.SGD()
trainer = training.CD(nnet, nb_pos_steps=2, nb_neg_steps=2)
model = Model(nnet, optimizer, trainer)
# test train_on_batch
out = model.train_on_batch(data.train.data)
assert out.size == 1
# predict_on_batch
out = model.predict_on_batch(data.valid.data)
assert out.size == 1
# test fit
out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size)
assert isinstance(out, History)
# test validation data
out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size,
validation_data=data.valid.data)
assert isinstance(out, History)
#%%
if __name__ == '__main__':
pytest.main([__file__])
| 28.52 | 76 | 0.692847 |
acf854448c9338ea84217349a884d85a8d9991e2 | 5,327 | py | Python | selfdrive/controls/lib/longcontrol.py | mohammedx49/kishOPescalade | 24fde10504325b049d0c03a493f62edf10f9a353 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/longcontrol.py | mohammedx49/kishOPescalade | 24fde10504325b049d0c03a493f62edf10f9a353 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/longcontrol.py | mohammedx49/kishOPescalade | 24fde10504325b049d0c03a493f62edf10f9a353 | [
"MIT"
] | null | null | null | from cereal import log
from common.numpy_fast import clip, interp
from selfdrive.controls.lib.pid import PIController
from selfdrive.kegman_conf import KegmanConf
# Project-specific user settings, loaded once at import time.
kegman = KegmanConf()

LongCtrlState = log.ControlsState.LongControlState

# Speed thresholds for the stop/start state machine.
# NOTE(review): units are presumably m/s, matching v_ego elsewhere — confirm.
STOPPING_EGO_SPEED = 0.5
MIN_CAN_SPEED = 0.3  # TODO: parametrize this in car interface
STOPPING_TARGET_SPEED = MIN_CAN_SPEED + 0.01
STARTING_TARGET_SPEED = 0.5

# Gas/brake output above this (i.e. brake mostly released) hands control
# back to the PID state when starting.
BRAKE_THRESHOLD_TO_PID = 0.2

STOPPING_BRAKE_RATE = 0.2  # brake_travel/s while trying to stop
STARTING_BRAKE_RATE = 0.8  # brake_travel/s while releasing on restart
BRAKE_STOPPING_TARGET = float(kegman.conf['brakeStoppingTarget'])  # apply at least this amount of brake to maintain the vehicle stationary

_MAX_SPEED_ERROR_BP = [0., 30.]  # speed breakpoints
_MAX_SPEED_ERROR_V = [1.5, .8]  # max positive v_pid error VS actual speed; this avoids controls windup due to slow pedal resp

# Control loop frequency; update() is expected to run at this rate (Hz).
RATE = 100.0
def long_control_state_trans(active, long_control_state, v_ego, v_target, v_pid,
                             output_gb, brake_pressed, cruise_standstill):
    """Update longitudinal control state machine.

    Args:
        active: whether longitudinal control is engaged; when False the
            machine always transitions to ``LongCtrlState.off``.
        long_control_state: current ``LongCtrlState`` value.
        v_ego: current vehicle speed.
        v_target: target speed.
        v_pid: current PID setpoint speed.
        output_gb: last combined gas/brake output (negative means braking).
        brake_pressed: whether the driver is pressing the brake pedal.
        cruise_standstill: whether cruise control reports standstill.

    Returns:
        The next ``LongCtrlState`` value.
    """
    # Enter stopping when at cruise standstill at low speed, or when crawling
    # with both the PID setpoint and target below the stopping threshold, or
    # when the driver brakes at crawling speed.
    stopping_condition = (v_ego < 2.0 and cruise_standstill) or \
                         (v_ego < STOPPING_EGO_SPEED and
                          ((v_pid < STOPPING_TARGET_SPEED and
                            v_target < STOPPING_TARGET_SPEED) or
                           brake_pressed))

    starting_condition = v_target > STARTING_TARGET_SPEED and not cruise_standstill

    if not active:
        long_control_state = LongCtrlState.off
    elif long_control_state == LongCtrlState.off:
        # Fix: the original nested a redundant `if active:` here; `active`
        # is always True on this path, so transition unconditionally.
        long_control_state = LongCtrlState.pid
    elif long_control_state == LongCtrlState.pid:
        if stopping_condition:
            long_control_state = LongCtrlState.stopping
    elif long_control_state == LongCtrlState.stopping:
        if starting_condition:
            long_control_state = LongCtrlState.starting
    elif long_control_state == LongCtrlState.starting:
        if stopping_condition:
            long_control_state = LongCtrlState.stopping
        elif output_gb >= -BRAKE_THRESHOLD_TO_PID:
            # Brake sufficiently released; hand control back to the PID loop.
            long_control_state = LongCtrlState.pid

    return long_control_state
class LongControl():
    """Longitudinal (gas/brake) controller: a PID loop wrapped in the
    stop/start state machine provided by long_control_state_trans()."""

    def __init__(self, CP, compute_gb):
        self.long_control_state = LongCtrlState.off  # initialized to off
        self.pid = PIController((CP.longitudinalTuning.kpBP, CP.longitudinalTuning.kpV),
                                (CP.longitudinalTuning.kiBP, CP.longitudinalTuning.kiV),
                                rate=RATE,
                                sat_limit=0.8,
                                convert=compute_gb)
        self.v_pid = 0.0
        self.last_output_gb = 0.0

    def reset(self, v_pid):
        """Reset PID controller and change setpoint"""
        self.pid.reset()
        self.v_pid = v_pid

    def update(self, active, v_ego, brake_pressed, standstill, cruise_standstill, v_cruise, v_target, v_target_future, a_target, CP):
        """Update longitudinal control. This updates the state machine and runs a PID loop

        Returns (final_gas, final_brake), each clipped to the car's
        speed-dependent actuation limits.
        """
        # Actuation limits
        gas_max = interp(v_ego, CP.gasMaxBP, CP.gasMaxV)
        brake_max = interp(v_ego, CP.brakeMaxBP, CP.brakeMaxV)

        # Update state machine
        output_gb = self.last_output_gb
        self.long_control_state = long_control_state_trans(active, self.long_control_state, v_ego,
                                                           v_target_future, self.v_pid, output_gb,
                                                           brake_pressed, cruise_standstill)

        v_ego_pid = max(v_ego, MIN_CAN_SPEED)  # Without this we get jumps, CAN bus reports 0 when speed < 0.3

        if self.long_control_state == LongCtrlState.off:
            # Controller disengaged: track current speed and output nothing.
            self.v_pid = v_ego_pid
            self.pid.reset()
            output_gb = 0.

        # tracking objects and driving
        elif self.long_control_state == LongCtrlState.pid:
            self.v_pid = v_target
            self.pid.pos_limit = gas_max
            self.pid.neg_limit = - brake_max

            # Toyota starts braking more when it thinks you want to stop
            # Freeze the integrator so we don't accelerate to compensate, and don't allow positive acceleration
            prevent_overshoot = not CP.stoppingControl and v_ego < 1.5 and v_target_future < 0.7
            deadzone = interp(v_ego_pid, CP.longitudinalTuning.deadzoneBP, CP.longitudinalTuning.deadzoneV)

            output_gb = self.pid.update(self.v_pid, v_ego_pid, speed=v_ego_pid, deadzone=deadzone, feedforward=a_target, freeze_integrator=prevent_overshoot)

            if prevent_overshoot:
                output_gb = min(output_gb, 0.0)

        # Intention is to stop, switch to a different brake control until we stop
        elif self.long_control_state == LongCtrlState.stopping:
            # Keep applying brakes until the car is stopped
            if not standstill or output_gb > -BRAKE_STOPPING_TARGET:
                output_gb -= STOPPING_BRAKE_RATE / RATE
            output_gb = clip(output_gb, -brake_max, gas_max)

            self.v_pid = v_ego
            self.pid.reset()

        # Intention is to move again, release brake fast before handing control to PID
        elif self.long_control_state == LongCtrlState.starting:
            if output_gb < -0.2:
                output_gb += STARTING_BRAKE_RATE / RATE
            self.v_pid = v_ego
            self.pid.reset()

        self.last_output_gb = output_gb
        # Split the single gas/brake signal into separate actuator commands.
        final_gas = clip(output_gb, 0., gas_max)
        final_brake = -clip(output_gb, -brake_max, 0.)

        return final_gas, final_brake
| 40.052632 | 151 | 0.698892 |
acf85461e13da88b65843598f5386dd4527ad04a | 669 | py | Python | migrations/versions/42b0e8a2e8cb_.py | ant31/support-rotation | 16a706a2c2e186459fde71c77de347570073f40e | [
"MIT"
] | 1 | 2021-12-14T09:53:57.000Z | 2021-12-14T09:53:57.000Z | migrations/versions/42b0e8a2e8cb_.py | ant31/support-rotation | 16a706a2c2e186459fde71c77de347570073f40e | [
"MIT"
] | 13 | 2021-05-25T09:42:33.000Z | 2021-07-30T02:17:33.000Z | migrations/versions/42b0e8a2e8cb_.py | ant31/support-rotation | 16a706a2c2e186459fde71c77de347570073f40e | [
"MIT"
] | 1 | 2021-05-31T21:59:28.000Z | 2021-05-31T21:59:28.000Z | """empty message
Revision ID: 42b0e8a2e8cb
Revises: 586dedba0986
Create Date: 2021-12-11 18:32:13.801314
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '42b0e8a2e8cb'  # this migration's id
down_revision = '586dedba0986'  # the migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``last_turn`` timestamp column to ``user_chores``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user_chores', sa.Column('last_turn', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the ``last_turn`` column from ``user_chores``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user_chores', 'last_turn')
    # ### end Alembic commands ###
acf855d946c06a92ad989b9634674a305753702f | 7,551 | py | Python | eventsourcing/infrastructure/sequenceditemmapper.py | gerbyzation/eventsourcing | a9e9ecf123af658762832cf97a9f00f8f7064393 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/infrastructure/sequenceditemmapper.py | gerbyzation/eventsourcing | a9e9ecf123af658762832cf97a9f00f8f7064393 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/infrastructure/sequenceditemmapper.py | gerbyzation/eventsourcing | a9e9ecf123af658762832cf97a9f00f8f7064393 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
from json import JSONDecodeError
from typing import Any, Dict, Generic, NamedTuple, Optional, Tuple, Type
from eventsourcing.infrastructure.sequenceditem import (
SequencedItem,
SequencedItemFieldNames,
)
from eventsourcing.utils.cipher.aes import AESCipher
from eventsourcing.utils.topic import get_topic, reconstruct_object, resolve_topic
from eventsourcing.utils.transcoding import ObjectJSONDecoder, ObjectJSONEncoder
from eventsourcing.whitehead import TEvent
class AbstractSequencedItemMapper(Generic[TEvent], ABC):
    """Interface for mapping between domain events and sequenced items
    (the stored, serialised representation of events)."""

    def __init__(self, **kwargs: Any):
        """
        Initialises mapper.
        """

    @abstractmethod
    def item_from_event(self, domain_event: TEvent) -> NamedTuple:
        """
        Constructs and returns a sequenced item for given domain event.
        """

    @abstractmethod
    def event_from_item(self, sequenced_item: NamedTuple) -> TEvent:
        """
        Constructs and returns a domain event for given sequenced item.
        """

    @abstractmethod
    def json_dumps(self, o: object) -> bytes:
        """
        Encodes given object as JSON.
        """

    @abstractmethod
    def json_loads(self, s: str) -> object:
        """
        Decodes given JSON as object.
        """

    @abstractmethod
    def event_from_topic_and_state(self, topic: str, state: bytes) -> TEvent:
        """
        Resolves topic to an event class, decodes state, and constructs an event.
        """

    @abstractmethod
    def event_from_notification(self, notification):
        """
        Reconstructs domain event from an event notification.

        :param notification: The event notification.
        :return: A domain event.
        """
class SequencedItemMapper(AbstractSequencedItemMapper[TEvent]):
    """
    Uses JSON to transcode domain events.

    Serialisation pipeline is JSON-encode, then optionally compress,
    then optionally encrypt; deserialisation reverses those steps.
    """

    def __init__(
        self,
        sequenced_item_class: Optional[Type[NamedTuple]] = None,
        sequence_id_attr_name: Optional[str] = None,
        position_attr_name: Optional[str] = None,
        json_encoder_class: Optional[Type[ObjectJSONEncoder]] = None,
        sort_keys: bool = False,
        json_decoder_class: Optional[Type[ObjectJSONDecoder]] = None,
        cipher: Optional[AESCipher] = None,
        compressor: Any = None,
        other_attr_names: Tuple[str, ...] = (),
    ):
        if sequenced_item_class is not None:
            self.sequenced_item_class = sequenced_item_class
        else:
            # Default stored-record type.
            self.sequenced_item_class = SequencedItem  # type: ignore
        self.json_encoder_class = json_encoder_class or ObjectJSONEncoder
        self.json_encoder = self.json_encoder_class(sort_keys=sort_keys)
        self.json_decoder_class = json_decoder_class or ObjectJSONDecoder
        self.json_decoder = self.json_decoder_class()
        self.cipher = cipher
        self.compressor = compressor
        # Field-name mapping derived from the sequenced item class; explicit
        # attr-name arguments override the derived names.
        self.field_names = SequencedItemFieldNames(self.sequenced_item_class)
        self.sequence_id_attr_name = (
            sequence_id_attr_name or self.field_names.sequence_id
        )
        self.position_attr_name = position_attr_name or self.field_names.position
        self.other_attr_names = other_attr_names or self.field_names.other_names

    def item_from_event(self, domain_event: TEvent) -> NamedTuple:
        """
        Constructs a sequenced item from a domain event.
        """
        item_args = self.construct_item_args(domain_event)
        return self.construct_sequenced_item(item_args)

    def construct_item_args(self, domain_event: TEvent) -> Tuple:
        """
        Constructs attributes of a sequenced item from the given domain event.
        """
        # Get the sequence ID.
        sequence_id = domain_event.__dict__[self.sequence_id_attr_name]

        # Get the position in the sequence.
        position = getattr(domain_event, self.position_attr_name, None)

        # Get topic and data.
        topic, state = self.get_item_topic_and_state(
            domain_event.__class__, domain_event.__dict__
        )

        # Get the 'other' args.
        # - these are meant to be derivative of the other attributes,
        #   to populate database fields, and shouldn't affect the hash.
        other_args = tuple(
            (getattr(domain_event, name) for name in self.other_attr_names)
        )

        return (sequence_id, position, topic, state) + other_args

    def get_item_topic_and_state(
        self, domain_event_class: type, event_attrs: Dict[str, Any]
    ) -> Tuple[str, bytes]:
        """Serialises event attrs (JSON -> compress -> encrypt) and returns
        the topic of the event class with the serialised state."""
        # Get the topic from the event attrs, otherwise from the class.
        topic = get_topic(domain_event_class)

        # Serialise the event attributes.
        statebytes = self.json_dumps(event_attrs)

        # Compress plaintext bytes.
        if self.compressor:
            # Zlib reduces length by about 25% to 50%.
            statebytes = self.compressor.compress(statebytes)

        # Encrypt serialised state.
        if self.cipher:
            # Increases length by about 10%.
            statebytes = self.cipher.encrypt(statebytes)

        return topic, statebytes

    def json_dumps(self, o: object) -> bytes:
        # NOTE(review): return type relies on the configured encoder's
        # encode() producing bytes — confirm against ObjectJSONEncoder.
        return self.json_encoder.encode(o)

    def construct_sequenced_item(self, item_args: Tuple) -> NamedTuple:
        return self.sequenced_item_class(*item_args)

    def event_from_item(self, sequenced_item: NamedTuple) -> TEvent:
        """
        Reconstructs domain event from stored event topic and
        event attrs. Used in the event store when getting domain events.
        """
        assert isinstance(sequenced_item, self.sequenced_item_class), (
            self.sequenced_item_class,
            type(sequenced_item),
        )

        # Get the topic and state.
        topic = getattr(sequenced_item, self.field_names.topic)
        state = getattr(sequenced_item, self.field_names.state)

        return self.event_from_topic_and_state(topic, state)

    def event_from_topic_and_state(self, topic: str, state: bytes) -> TEvent:
        """Resolves topic, deserialises state, and rebuilds the event."""
        domain_event_class, event_attrs = self.get_event_class_and_attrs(topic, state)

        # Reconstruct domain event object.
        return reconstruct_object(domain_event_class, event_attrs)

    def get_event_class_and_attrs(
        self, topic: str, state: bytes
    ) -> Tuple[Type[TEvent], Dict]:
        """Reverses get_item_topic_and_state(): decrypt -> decompress ->
        decode -> JSON-deserialise."""
        # Resolve topic to event class.
        domain_event_class: Type[TEvent] = resolve_topic(topic)

        # Decrypt and decompress state.
        if self.cipher:
            state = self.cipher.decrypt(state)

        # Decompress plaintext bytes.
        if self.compressor:
            state = self.compressor.decompress(state)

        # Decode unicode bytes.
        statestr = state.decode("utf8")

        # Deserialize JSON.
        event_attrs: Dict = self.json_loads(statestr)

        # Return instance class and attribute values.
        return domain_event_class, event_attrs

    def json_loads(self, s: str) -> Dict:
        try:
            return self.json_decoder.decode(s)
        except JSONDecodeError:
            # Re-raise with the offending payload for easier debugging.
            raise ValueError("Couldn't load JSON string: {}".format(s))

    def event_from_notification(self, notification):
        """
        Reconstructs domain event from an event notification.

        :param notification: The event notification.
        :return: A domain event.
        """
        return self.event_from_topic_and_state(
            topic=notification[self.field_names.topic],
            state=notification[self.field_names.state],
        )
| 35.12093 | 86 | 0.663621 |
acf855fdc73ece792d12b7febf21dc0cbb3750b0 | 2,411 | py | Python | tools/parser.py | eugeneai/ssd1306 | d49b56d9f9bcc131ab6c302c43229cbedd0fa577 | [
"MIT"
] | null | null | null | tools/parser.py | eugeneai/ssd1306 | d49b56d9f9bcc131ab6c302c43229cbedd0fa577 | [
"MIT"
] | null | null | null | tools/parser.py | eugeneai/ssd1306 | d49b56d9f9bcc131ab6c302c43229cbedd0fa577 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018, Alexey Dynda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
# Fixed font converter for GLCD Creator fonts to internal ssd1306 format
#
import os
import sys
import re
# NOTE(review): this is Python 2 code (statement-form `print`); it will not
# run under Python 3 as-is.

# Command-line handling is disabled in favour of a hard-coded input file.
# if len(sys.argv) < 2:
#     print "Usage: glcd2ssd1306.py inputFile > outputFile"
#     print "Examples:"
#     print "       glcd2ssd1306.py glcdfont.c > ssd1306font.c"
#     exit(1)

# filename = sys.argv[1]
filename = "../examples/games/lode_runner/sprites.cpp"

with open(filename) as f:
    content = f.read()

index = 0

# Matches `name PROGMEM [..] = { ... };` style C array definitions;
# group 1 is the identifier, group 4 the brace contents.
#pattern = re.compile(r'(0x[0-9a-fA-F]{1,})')
#name_pattern = re.compile(r'(.*)')
name_pattern = re.compile(r'([\w\d_]+)\s*(PROGMEM\s*){0,1}(\[\s*[\w\d_]*?\s*\]|)*\s*=\s*\{(.*?)\};', re.DOTALL)
#name_pattern = re.compile(r'([\w\d_]+)\s*(PROGMEM\s*){0,1}(\[\s*[\w\d_]*?\s*\]|)*\s*=\s*\{([.\/\w\s\,{}]*?)\};')
# Matches a brace-enclosed list of 0x.. / 0B.. literals.
pattern = re.compile(r'\{(\s*(0x[0-9a-fA-F]+|0B\d+)\s*,{0,1}\s*)*?\}')
#pattern = re.compile(r'\{(\s*0B\d+\s*,{0,1}\s*)*\}')

all = name_pattern.findall(content)  # NOTE(review): `all` shadows the builtin
print all
exit(1)

# NOTE(review): everything below is unreachable debug scaffolding (the script
# exits above). Also, `re.MULTILINE` here is passed as search()'s `pos`
# argument, not as a flag — verify intent before reviving this code.
n = name_pattern.search(content, re.MULTILINE)
print n.start(), n.end()
print n.group(1), n.group(4)
m = pattern.search(content, n.start(), n.end())
print m
print m.group(0)
| 35.985075 | 114 | 0.630029 |
acf8566aaef6fad825d887681f5e399ee3806891 | 1,154 | py | Python | Engine/test_engine.py | spineki/CrazyDiamond | 14538a674c4365e4f5c5e79293cbec4c10ded45e | [
"MIT"
] | null | null | null | Engine/test_engine.py | spineki/CrazyDiamond | 14538a674c4365e4f5c5e79293cbec4c10ded45e | [
"MIT"
] | 1 | 2021-02-18T10:37:45.000Z | 2021-02-18T12:36:13.000Z | Engine/test_engine.py | spineki/CrazyDiamond | 14538a674c4365e4f5c5e79293cbec4c10ded45e | [
"MIT"
] | null | null | null | from Engine.engine import Engine
def test_react_to_keyword():
    """Engine reacts to configured keywords and their prefixes, but not to
    words that merely extend a keyword."""
    engine = Engine()
    engine.reactive_keyword = ["apple", "banana"]
    assert engine.react_to_keyword("apple") == True
    assert engine.react_to_keyword("ban") == True
    assert engine.react_to_keyword("bananana") == False
def test_print_v(capsys):
    """print_v prints only when verbose is set, but always records to the log."""
    with capsys.disabled():
        # NOTE(review): the source's indentation was lost; only the Engine
        # construction appears to run with capture disabled, since the
        # assertions below rely on capsys capturing print_v output — confirm.
        e = Engine()
    e.verbose = True
    e.print_v("test")
    captured = capsys.readouterr()
    assert captured.out == "test; \n"
    e.verbose = False
    e.print_v("second_test")
    captured = capsys.readouterr()
    assert captured.out == ""
    # logs[2:] skips entries recorded before these two print_v calls.
    logs = e.log
    assert logs[2:] == ['test; ', 'second_test; ']
def test_get_logs():
    """get_logs joins logged messages, with a configurable separator."""
    e = Engine()
    e.print_v("test1", "test2")
    e.print_v("test3")
    logs = e.get_logs()
    assert logs == "test1 test2\ntest3"
    logs = e.get_logs(sep="_")
    assert logs == "test1 test2_test3"
def test_purify_name():
    """purify_name leaves clean names untouched and replaces each special
    character with an underscore."""
    engine = Engine()
    assert engine.purify_name("test") == "test"
    assert (engine.purify_name("test>test|test<test?test!test") ==
            "test_test_test_test_test_test")
acf856e23b63891c8ad965c4c2fd181b7c43d786 | 206,283 | py | Python | brim/test/unit/test_server.py | gholt/python-brim | d21bc19fde9f8b8dfb5fdfb8ad6585494e4821c1 | [
"Apache-2.0"
] | null | null | null | brim/test/unit/test_server.py | gholt/python-brim | d21bc19fde9f8b8dfb5fdfb8ad6585494e4821c1 | [
"Apache-2.0"
] | null | null | null | brim/test/unit/test_server.py | gholt/python-brim | d21bc19fde9f8b8dfb5fdfb8ad6585494e4821c1 | [
"Apache-2.0"
] | null | null | null | """Tests for brim.server."""
"""Copyright and License.
Copyright 2012-2014 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import contextmanager
from pickle import dumps as pickle_dumps, loads as pickle_loads
from json import dumps as json_dumps, loads as json_loads
from StringIO import StringIO
from sys import exc_info
from unittest import main, TestCase
from uuid import uuid4
from mock import mock_open, patch
from brim import server, __version__
from brim.conf import Conf
class TestLogQuote(TestCase):
    """Tests for server._log_quote, which percent-encodes bytes for safe
    inclusion in log lines."""

    def test_log_quote(self):
        # Feed every possible byte value: printable ASCII passes through
        # unchanged except for space and '%', which are escaped along with
        # all control and high-bit bytes.
        self.assertEqual(
            server._log_quote(''.join(chr(c) for c in xrange(256))),
            '%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F'
            '%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F'
            '%20!"#$%25&\'()*+,-./0123456789:;<=>?@'
            'ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
            'abcdefghijklmnopqrstuvwxyz{|}~%7F'
            '%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F'
            '%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F'
            '%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF'
            '%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF'
            '%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF'
            '%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF'
            '%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF'
            '%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF')
class TestStats(TestCase):
    """Tests for the per-worker stats containers _BucketStats and _Stats."""

    def test_bucket_stats(self):
        # One bucket, with 'test' declared as a tracked stat name.
        bs = server._BucketStats(['testbucket'], {'test': 'worker'})
        self.assertEqual(bs.get(0, 'test'), 0)
        bs.set(0, 'test', 123)
        self.assertEqual(bs.get(0, 'test'), 123)
        bs.incr(0, 'test')
        self.assertEqual(bs.get(0, 'test'), 124)
        # Undeclared stat names read as 0 and silently ignore writes.
        self.assertEqual(bs.get(0, 'test2'), 0)
        bs.set(0, 'test2', 123)
        self.assertEqual(bs.get(0, 'test2'), 0)
        bs.incr(0, 'test2')
        self.assertEqual(bs.get(0, 'test2'), 0)
        # Out-of-range bucket indexes raise.
        self.assertRaises(IndexError, bs.get, 1, 'test')
        self.assertRaises(IndexError, bs.set, 1, 'test', 123)
        self.assertRaises(IndexError, bs.incr, 1, 'test')

    def test_null_bucket_stats(self):
        # With no buckets, all operations are no-ops regardless of whether
        # any stat names are declared.
        bs = server._BucketStats([], {})
        self.assertEqual(bs.get(0, 'test'), 0)
        bs.set(0, 'test', 123)
        self.assertEqual(bs.get(0, 'test'), 0)
        bs.incr(0, 'test')
        self.assertEqual(bs.get(0, 'test'), 0)
        bs = server._BucketStats([], {'test': 'worker'})
        self.assertEqual(bs.get(0, 'test'), 0)
        bs.set(0, 'test', 123)
        self.assertEqual(bs.get(0, 'test'), 0)
        bs.incr(0, 'test')
        self.assertEqual(bs.get(0, 'test'), 0)

    def test_stats(self):
        # _Stats is a single-bucket view onto a _BucketStats.
        bs = server._BucketStats(['testbucket'], ['test'])
        s = server._Stats(bs, 0)
        self.assertEqual(s.get('test'), 0)
        s.set('test', 123)
        self.assertEqual(s.get('test'), 123)
        s.incr('test')
        self.assertEqual(s.get('test'), 124)
        self.assertEqual(s.get('test2'), 0)
        s.set('test2', 123)
        self.assertEqual(s.get('test2'), 0)
        s.incr('test2')
        self.assertEqual(s.get('test2'), 0)
        # A view onto a nonexistent bucket surfaces the IndexError.
        s = server._Stats(bs, 1)
        self.assertRaises(IndexError, s.get, 'test')
        self.assertRaises(IndexError, s.set, 'test', 123)
        self.assertRaises(IndexError, s.incr, 'test')
class TestEventletWSGINullLogger(TestCase):
    """The null logger must accept and discard any write() arguments."""

    def test_write(self):
        server._EventletWSGINullLogger().write('abc', 'def', 'ghi')
class TestWsgiInput(TestCase):
    """Tests for server._WsgiInput, a wsgi.input wrapper that counts the
    bytes read into env['brim._bytes_in']."""

    def setUp(self):
        self.sio = StringIO('1234567890')
        self.env = {'wsgi.input': self.sio, 'brim._bytes_in': 0}
        # Third arg is the iteration chunk size.
        self.inp = server._WsgiInput(self.env, 3)

    def test_sets_as_self(self):
        # The wrapper installs itself as env['wsgi.input'].
        self.assertEqual(self.env['wsgi.input'], self.inp)

    def test_close(self):
        self.inp.close()
        exc = None
        try:
            self.inp.read()
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), 'I/O operation on closed file')

    def test_flush(self):
        self.inp.flush()

    def test_fileno(self):
        # fileno() falls through to the underlying StringIO, which has none.
        exc = None
        try:
            self.inp.fileno()
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "StringIO instance has no attribute 'fileno'")

    def test_iterator(self):
        # Iteration yields chunks of the configured size.
        self.assertEqual([c for c in self.inp], ['123', '456', '789', '0'])
        self.assertEqual(self.env['brim._bytes_in'], 10)

    def test_read(self):
        self.assertEqual(self.inp.read(4), '1234')
        self.assertEqual(self.env['brim._bytes_in'], 4)
        self.assertEqual(self.inp.read(), '567890')
        self.assertEqual(self.env['brim._bytes_in'], 10)

    def test_readline(self):
        self.sio = StringIO('1234567890\nabcdefghij\nklmnopqrst')
        self.env = {'wsgi.input': self.sio, 'brim._bytes_in': 0}
        self.inp = server._WsgiInput(self.env, 3)
        self.assertEqual(self.inp.readline(), '1234567890\n')
        self.assertEqual(self.env['brim._bytes_in'], 11)
        self.assertEqual(self.inp.readline(2), 'ab')
        self.assertEqual(self.env['brim._bytes_in'], 13)
        self.assertEqual(self.inp.readline(20), 'cdefghij\n')
        self.assertEqual(self.env['brim._bytes_in'], 22)
        self.assertEqual(self.inp.readline(), 'klmnopqrst')
        self.assertEqual(self.env['brim._bytes_in'], 32)

    def test_readlines(self):
        self.sio = StringIO('1234567890\nabcdefghij\nklmnopqrst\nuvwxyz')
        self.env = {'wsgi.input': self.sio, 'brim._bytes_in': 0}
        self.inp = server._WsgiInput(self.env, 3)
        self.assertEqual(
            self.inp.readlines(15), ['1234567890\n', 'abcdefghij\n'])
        self.assertEqual(self.env['brim._bytes_in'], 22)
        self.assertEqual(self.inp.readlines(), ['klmnopqrst\n', 'uvwxyz'])
        self.assertEqual(self.env['brim._bytes_in'], 39)
class TestWsgiOutput(TestCase):
    """_WsgiOutput wraps a response iterable and counts the bytes emitted
    into env['brim._bytes_out']."""

    def test_wsgi_output(self):
        env = {'brim._bytes_out': 0}
        o = server._WsgiOutput(['123', '456', '78', '90'], env)
        self.assertEqual(o.next(), '123')
        self.assertEqual(env['brim._bytes_out'], 3)
        self.assertEqual([c for c in o], ['456', '78', '90'])
class TestSendPidSig(TestCase):
    """Tests for server._send_pid_sig.

    setUp monkey-patches the OS-facing names on the server module (open,
    kill, time, sleep, unlink) with recording fakes; tearDown restores
    them. Individual tests re-patch as needed to simulate failures.
    """

    def setUp(self):
        self.orig_kill = server.kill
        self.orig_time = server.time
        self.orig_sleep = server.sleep
        self.orig_unlink = server.unlink
        self.open_calls = []
        # Single-element list so tests can swap in a fresh StringIO.
        self.open_retval = [StringIO('12345')]
        self.kill_calls = []
        self.time_calls = []
        self.sleep_calls = []
        self.unlink_calls = []

        @contextmanager
        def _open(*args):
            self.open_calls.append(args)
            yield self.open_retval[0]

        def _kill(*args):
            self.kill_calls.append(args)

        def _time(*args):
            # Each call advances the fake clock by one second.
            self.time_calls.append(args)
            return len(self.time_calls)

        def _sleep(*args):
            self.sleep_calls.append(args)

        def _unlink(*args):
            self.unlink_calls.append(args)

        server.open = _open
        server.kill = _kill
        server.time = _time
        server.sleep = _sleep
        server.unlink = _unlink

    def tearDown(self):
        # `open` was added to the module namespace by setUp, so delete it;
        # the others are restored to their originals.
        del server.open
        server.kill = self.orig_kill
        server.time = self.orig_time
        server.sleep = self.orig_sleep
        server.unlink = self.orig_unlink

    def test_open_not_found(self):
        # A missing pid file means "not running": (False, 0).
        @contextmanager
        def _open(*args):
            exc = IOError('testing')
            exc.errno = server.ENOENT
            raise exc

        server.open = _open
        self.assertEqual(server._send_pid_sig('some.pid', 0), (False, 0))

    def test_open_exception(self):
        # Any other open error propagates.
        @contextmanager
        def _open(*args):
            raise IOError('testing')

        server.open = _open
        exc = None
        try:
            server._send_pid_sig('some.pid', 0)
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), 'testing')

    def test_pid_file_no_int(self):
        # An unparsable pid file reads as "not running".
        self.open_retval[0] = StringIO('')
        self.assertEqual(server._send_pid_sig('some.pid', 0), (False, 0))

    def test_kill_inactive_pid(self):
        # ESRCH from kill means the pid is gone; report it with False.
        def _kill(*args):
            exc = OSError('testing')
            exc.errno = server.ESRCH
            raise exc

        server.kill = _kill
        self.open_retval[0] = StringIO('12345')
        self.assertEqual(server._send_pid_sig('some.pid', 0), (False, 12345))

    def test_kill_exception(self):
        # Any other kill error propagates.
        def _kill(*args):
            raise OSError('testing')

        server.kill = _kill
        self.open_retval[0] = StringIO('12345')
        exc = None
        try:
            server._send_pid_sig('some.pid', 0)
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), 'testing')

    def test_kill_worked(self):
        self.open_retval[0] = StringIO('12345')
        self.assertEqual(server._send_pid_sig('some.pid', 0), (True, 12345))

    def test_kill_expect_exit_timeout(self):
        # Process never dies: after PID_WAIT_TIME fake seconds an exception
        # is raised and the pid file is left in place.
        self.open_retval[0] = StringIO('12345')
        exc = None
        try:
            server._send_pid_sig('some.pid', 0, expect_exit=True)
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            '12345 did not exit after %s seconds.' % server.PID_WAIT_TIME)
        self.assertEqual(self.time_calls, [()] * (server.PID_WAIT_TIME + 1))
        self.assertEqual(
            self.sleep_calls, [(1,)] * (server.PID_WAIT_TIME - 1))
        self.assertEqual(self.unlink_calls, [])

    def test_kill_expect_exit_worked(self):
        # Process dies on the 4th liveness check; pid file gets unlinked.
        kill_calls = []

        def _kill(*args):
            kill_calls.append(args)
            if len(kill_calls) > 3:
                exc = OSError()
                exc.errno = server.ESRCH
                raise exc

        server.kill = _kill
        self.open_retval[0] = StringIO('12345')
        server._send_pid_sig('some.pid', 0, expect_exit=True)
        self.assertEqual(self.time_calls, [()] * 3)
        self.assertEqual(self.sleep_calls, [(1,)] * 2)
        self.assertEqual(self.unlink_calls, [('some.pid',)])

    def test_kill_expect_exit_kill_exception(self):
        # Unexpected kill error while waiting propagates; no unlink.
        kill_calls = []

        def _kill(*args):
            kill_calls.append(args)
            if len(kill_calls) > 3:
                raise OSError('testing')

        server.kill = _kill
        self.open_retval[0] = StringIO('12345')
        exc = None
        try:
            server._send_pid_sig('some.pid', 0, expect_exit=True)
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), 'testing')
        self.assertEqual(self.time_calls, [()] * 3)
        self.assertEqual(self.sleep_calls, [(1,)] * 2)
        self.assertEqual(self.unlink_calls, [])

    def test_kill_expect_exit_unlink_not_found(self):
        # ENOENT from unlink (pid file already gone) is tolerated.
        kill_calls = []
        unlink_calls = []

        def _kill(*args):
            kill_calls.append(args)
            if len(kill_calls) > 1:
                exc = OSError()
                exc.errno = server.ESRCH
                raise exc

        def _unlink(*args):
            unlink_calls.append(args)
            exc = OSError()
            exc.errno = server.ENOENT
            raise exc

        server.kill = _kill
        server.unlink = _unlink
        self.open_retval[0] = StringIO('12345')
        self.assertEqual(
            server._send_pid_sig('some.pid', 0, expect_exit=True),
            (True, 12345))
        self.assertEqual(unlink_calls, [('some.pid',)])

    def test_kill_expect_exit_unlink_exception(self):
        # Any other unlink error propagates.
        kill_calls = []

        def _kill(*args):
            kill_calls.append(args)
            if len(kill_calls) > 1:
                exc = OSError()
                exc.errno = server.ESRCH
                raise exc

        def _unlink(*args):
            raise OSError('testing')

        server.kill = _kill
        server.unlink = _unlink
        self.open_retval[0] = StringIO('12345')
        exc = None
        try:
            server._send_pid_sig('some.pid', 0, expect_exit=True)
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), 'testing')
class FakeServer(object):
    """Minimal stand-in for the real server object used by subserver tests;
    only exposes the two flags subservers consult."""

    def __init__(self, no_daemon=False, output=False):
        self.output = output
        self.no_daemon = no_daemon
class TestSubserver(TestCase):
_class = server.Subserver
def _get_default_confd(self):
return {}
def test_init(self):
s = FakeServer()
ss = self._class(s, 'test')
self.assertEqual(ss.server, s)
self.assertEqual(ss.name, 'test')
self.assertEqual(ss.worker_count, 1)
self.assertEqual(ss.worker_names, ['0'])
self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
return ss
def test_parse_conf_defaults(self):
ss = self._class(FakeServer(), 'test')
ss._parse_conf(Conf(self._get_default_confd()))
self.assertEqual(ss.log_name, 'brimtest')
self.assertEqual(ss.log_level, 'INFO')
self.assertEqual(ss.log_facility, 'LOG_LOCAL0')
self.assertEqual(ss.json_dumps, json_dumps)
self.assertEqual(ss.json_loads, json_loads)
return ss
def test_parse_conf_log_name(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['log_name'] = 'name'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_name, 'nametest')
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['log_name'] = 'name'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_name, 'name')
def test_parse_conf_log_level(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['log_level'] = 'DEBUG'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_level, 'DEBUG')
ss = self._class(FakeServer(), 'test')
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('brim', {})['log_level'] = 'invalid'
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid [test] log_level 'INVALID'.")
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['log_level'] = 'DEBUG'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_level, 'DEBUG')
ss = self._class(FakeServer(), 'test')
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('test', {})['log_level'] = 'invalid'
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid [test] log_level 'INVALID'.")
def test_parse_conf_log_facility(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['log_facility'] = 'LOG_LOCAL1'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_facility, 'LOG_LOCAL1')
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['log_facility'] = 'LOCAL2'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_facility, 'LOG_LOCAL2')
ss = self._class(FakeServer(), 'test')
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('brim', {})['log_facility'] = 'invalid'
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "Invalid [test] log_facility 'LOG_INVALID'.")
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['log_facility'] = 'LOG_LOCAL1'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_facility, 'LOG_LOCAL1')
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['log_facility'] = 'LOCAL2'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.log_facility, 'LOG_LOCAL2')
ss = self._class(FakeServer(), 'test')
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('test', {})['log_facility'] = 'invalid'
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "Invalid [test] log_facility 'LOG_INVALID'.")
    def test_parse_conf_json_dumps(self):
        """json_dumps resolves a dotted name to a function, from either the
        [brim] section or the subserver's own section.

        A name with no dot is rejected as invalid, and a dotted name that
        cannot be imported raises a 'Could not load function' error.
        """
        # [brim] section: valid dotted name resolves to the function itself.
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['json_dumps'] = 'pickle.dumps'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.json_dumps, pickle_dumps)
        # [brim] section: no dot in the value -> invalid.
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['json_dumps'] = 'abc'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), "Invalid [test] json_dumps value 'abc'.")
        # [brim] section: dotted but unloadable name -> load error.
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['json_dumps'] = 'pickle.blah'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load function 'pickle.blah' for [test] json_dumps.")
        # Same three cases through the subserver's own [test] section.
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['json_dumps'] = 'pickle.dumps'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.json_dumps, pickle_dumps)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['json_dumps'] = 'abc'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), "Invalid [test] json_dumps value 'abc'.")
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['json_dumps'] = 'pickle.blah'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load function 'pickle.blah' for [test] json_dumps.")
    def test_parse_conf_json_loads(self):
        """json_loads resolves a dotted name to a function, from either the
        [brim] section or the subserver's own section.

        Mirrors test_parse_conf_json_dumps: a dotless name is invalid and
        an unimportable dotted name raises a 'Could not load function' error.
        """
        # [brim] section: valid dotted name resolves to the function itself.
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['json_loads'] = 'pickle.loads'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.json_loads, pickle_loads)
        # [brim] section: no dot in the value -> invalid.
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['json_loads'] = 'abc'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), "Invalid [test] json_loads value 'abc'.")
        # [brim] section: dotted but unloadable name -> load error.
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['json_loads'] = 'pickle.blah'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load function 'pickle.blah' for [test] json_loads.")
        # Same three cases through the subserver's own [test] section.
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['json_loads'] = 'pickle.loads'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.json_loads, pickle_loads)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['json_loads'] = 'abc'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), "Invalid [test] json_loads value 'abc'.")
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['json_loads'] = 'pickle.blah'
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load function 'pickle.blah' for [test] json_loads.")
def test_privileged_start(self):
# Just makes sure the method exists [it is just "pass" by default].
self._class(FakeServer(), 'test')._privileged_start()
    def test_start(self, output=False, no_daemon=False, func_before_start=None,
                   bucket_stats=None, confd=None):
        """Parses conf, runs privileged start, then _start()s the subserver.

        Also reused as a helper by subclass tests (they call it directly
        with keyword overrides), which is why it accepts parameters and
        returns the started subserver.

        :param output: Passed through to FakeServer.
        :param no_daemon: Passed through to FakeServer.
        :param func_before_start: Optional callable invoked with the
            subserver just before _start().
        :param bucket_stats: Stats object to start with; defaults to a
            fresh server._BucketStats for one bucket.
        :param confd: Conf dict to use; defaults to _get_default_confd().
        :returns: The started subserver instance.
        """
        if bucket_stats is None:
            bucket_stats = \
                server._BucketStats(['testbucket'], {'test': 'worker'})
        ss = self._class(FakeServer(output=output, no_daemon=no_daemon),
                         'test')
        confd = confd if confd else self._get_default_confd()
        # Port 0 lets the OS pick a free port so the test can really bind.
        confd.setdefault('brim', {})['port'] = '0'
        ss._parse_conf(Conf(confd))
        ss._privileged_start()
        if func_before_start:
            func_before_start(ss)
        ss._start(bucket_stats)
        self.assertEqual(ss.bucket_stats, bucket_stats)
        return ss
class TestIPSubserver(TestSubserver):
    """Tests for server.IPSubserver, the subserver variant that adds
    ip/port listening options (certfile, keyfile, timeouts, backlog,
    eventlet hub, worker counts) on top of the base Subserver conf.

    Each test_parse_conf_* method exercises a single option through both
    the global [brim] section and the subserver's own [test] section.
    """
    _class = server.IPSubserver
    def test_parse_conf_defaults(self):
        """Checks ip/port/socket option defaults; returns the subserver so
        subclasses can extend the checks."""
        ss = TestSubserver.test_parse_conf_defaults(self)
        self.assertEqual(ss.ip, '*')
        self.assertEqual(ss.port, 80)
        self.assertEqual(ss.certfile, None)
        self.assertEqual(ss.keyfile, None)
        self.assertEqual(ss.client_timeout, 60)
        self.assertEqual(ss.concurrent_per_worker, 1024)
        self.assertEqual(ss.backlog, 4096)
        self.assertEqual(ss.listen_retry, 30)
        self.assertEqual(ss.eventlet_hub, None)
        # With no_daemon set, there are no separate workers: worker_count
        # drops to 0 and only the single '0' worker name remains.
        ss.server.no_daemon = True
        ss = self._class(ss.server, 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        self.assertEqual(ss.worker_count, 0)
        self.assertEqual(ss.worker_names, ['0'])
        return ss
    def test_parse_conf_ip(self):
        """ip may come from the [brim] or the [test] section."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['ip'] = '1.2.3.4'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.ip, '1.2.3.4')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['ip'] = '1.2.3.4'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.ip, '1.2.3.4')
    def test_parse_conf_port(self):
        """port parses as an int; a non-int value exits with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['port'] = '1234'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.port, 1234)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['port'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] port of 'abc' cannot be converted to "
            "int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['port'] = '1234'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.port, 1234)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['port'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] port of 'abc' cannot be converted to "
            "int.")
    def test_parse_conf_certfile(self):
        """certfile may come from the [brim] or the [test] section."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['certfile'] = 'file'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.certfile, 'file')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['certfile'] = 'file'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.certfile, 'file')
    def test_parse_conf_keyfile(self):
        """keyfile may come from the [brim] or the [test] section."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['keyfile'] = 'file'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.keyfile, 'file')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['keyfile'] = 'file'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.keyfile, 'file')
    def test_parse_conf_client_timeout(self):
        """client_timeout parses as an int; non-ints exit with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['client_timeout'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.client_timeout, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['client_timeout'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] client_timeout of 'abc' cannot be "
            "converted to int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['client_timeout'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.client_timeout, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['client_timeout'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] client_timeout of 'abc' cannot be "
            "converted to int.")
    def test_parse_conf_concurrent_per_worker(self):
        """concurrent_per_worker parses as an int; non-ints exit."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['concurrent_per_worker'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.concurrent_per_worker, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['concurrent_per_worker'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] concurrent_per_worker of 'abc' cannot "
            "be converted to int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['concurrent_per_worker'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.concurrent_per_worker, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['concurrent_per_worker'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] concurrent_per_worker of 'abc' cannot "
            "be converted to int.")
    def test_parse_conf_backlog(self):
        """backlog parses as an int; non-ints exit with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['backlog'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.backlog, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['backlog'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] backlog of 'abc' cannot be converted "
            "to int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['backlog'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.backlog, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['backlog'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] backlog of 'abc' cannot be converted "
            "to int.")
    def test_parse_conf_listen_retry(self):
        """listen_retry parses as an int; non-ints exit with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['listen_retry'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.listen_retry, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['listen_retry'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] listen_retry of 'abc' cannot be "
            "converted to int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['listen_retry'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.listen_retry, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['listen_retry'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] listen_retry of 'abc' cannot be "
            "converted to int.")
    def test_parse_conf_eventlet_hub(self):
        """eventlet_hub loads a hub module by short or dotted name.

        A bare name like 'epolls' resolves within eventlet.hubs; a fully
        dotted name is imported as-is; unloadable names raise.
        """
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['eventlet_hub'] = 'epolls'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.eventlet_hub.__name__, 'eventlet.hubs.epolls')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['eventlet_hub'] = 'epolls'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.eventlet_hub.__name__, 'eventlet.hubs.epolls')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['eventlet_hub'] = 'eventlet.hubs.epolls'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.eventlet_hub.__name__, 'eventlet.hubs.epolls')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['eventlet_hub'] = 'invalid'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "Could not load [test] eventlet_hub 'invalid'.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['eventlet_hub'] = 'invalid.module'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "Could not load [test] eventlet_hub 'invalid.module'.")
    def test_parse_conf_workers(self):
        """workers parses as an int and names workers '0', '1', ...

        When the server runs with no_daemon, the configured worker count
        is ignored: worker_count is 0 and only worker '0' exists.
        Non-int values exit with a message.
        """
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['workers'] = '2'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.worker_count, 2)
        self.assertEqual(ss.worker_names, ['0', '1'])
        ss = self._class(FakeServer(no_daemon=True), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['workers'] = '2'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.worker_count, 0)
        self.assertEqual(ss.worker_names, ['0'])
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['workers'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] workers of 'abc' cannot be converted "
            "to int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {}).update({'workers': '2'})
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.worker_count, 2)
        self.assertEqual(ss.worker_names, ['0', '1'])
        ss = self._class(FakeServer(no_daemon=True), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {}).update({'workers': '2'})
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.worker_count, 0)
        self.assertEqual(ss.worker_names, ['0'])
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['workers'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] workers of 'abc' cannot be converted "
            "to int.")
class AppWithInvalidInit(object):
    """Fixture app whose constructor lacks the (name, conf, next_app) args.

    Used by test_configure_wsgi_apps_invalid_init to exercise the server's
    validation of app constructor signatures.
    """

    def __init__(self):
        """Deliberately wrong signature: accepts only self."""
class AppWithInvalidCall(object):
    """Fixture app whose __call__ lacks the (env, start_response) args.

    Used by test_configure_wsgi_apps_invalid_call to exercise the server's
    validation of app __call__ signatures.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self):
        """Deliberately wrong WSGI signature: accepts only self."""
class AppWithNoCall(object):
    """Fixture app with no __call__ method at all.

    Used by test_configure_wsgi_apps_no_call to exercise the server's
    check that apps are callable.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""
class AppWithInvalidParseConf1(object):
    """Fixture app whose parse_conf classmethod lacks (name, conf) args.

    Used by test_configure_wsgi_apps_invalid_parse_conf1 to exercise the
    server's validation of the parse_conf signature.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""

    @classmethod
    def parse_conf(cls):
        """Deliberately wrong signature: accepts only cls."""
class AppWithInvalidParseConf2(object):
    """Fixture app whose parse_conf attribute is not a method.

    Used by test_configure_wsgi_apps_invalid_parse_conf2 to exercise the
    server's check that parse_conf is callable.
    """

    # Deliberately a plain string rather than a classmethod.
    parse_conf = 'blah'

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""
class AppWithNoParseConf(object):
    """Fixture app that defines no parse_conf at all.

    Used by test_configure_wsgi_apps_no_parse_conf to show the server
    falls back to handing the app the full Conf.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""
class AppWithParseConf(object):
    """Fixture app with a well-formed parse_conf classmethod.

    Used by test_configure_wsgi_apps_with_parse_conf; the server stores
    parse_conf's return value as the app's conf.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""

    @classmethod
    def parse_conf(cls, name, conf):
        """Return a sentinel conf dict the tests can recognize."""
        return {'ok': True}
class AppWithInvalidStatsConf1(object):
    """Fixture app whose stats_conf classmethod lacks (name, conf) args.

    Used by test_configure_wsgi_apps_invalid_stats_conf1 to exercise the
    server's validation of the stats_conf signature.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""

    @classmethod
    def stats_conf(cls):
        """Deliberately wrong signature: accepts only cls."""
class AppWithInvalidStatsConf2(object):
    """Fixture app whose stats_conf attribute is not a method.

    Used by test_configure_wsgi_apps_invalid_stats_conf2 to exercise the
    server's check that stats_conf is callable.
    """

    # Deliberately a plain string rather than a classmethod.
    stats_conf = 'blah'

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""
class AppWithNoStatsConf(object):
    """Fixture app that defines no stats_conf at all.

    Used by test_configure_wsgi_apps_no_stats_conf; the server keeps only
    its own default stats keys.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Valid WSGI signature; does nothing."""
class AppWithStatsConf(object):
    """Fixture app with working request handling and a stats_conf.

    Requests to /exception raise; everything else gets an empty 200.
    stats_conf contributes the extra 'ok' stat the tests look for.
    """

    def __init__(self, name, conf, next_app):
        """Standard app construction arguments; nothing is stored."""

    def __call__(self, env, start_response):
        """Raise for the /exception path; otherwise send an empty 200."""
        path = env['PATH_INFO']
        if path == '/exception':
            raise Exception('testing')
        start_response('200 OK', [('Content-Length', '0')])
        return []

    @classmethod
    def stats_conf(cls, name, conf):
        """Declare one summed stat named 'ok'."""
        return [('ok', 'sum')]
class FakeLogger(object):
    """In-memory logger double that records the args of every call, one
    list per log level, so tests can assert on what was logged."""

    def __init__(self):
        # One recording list per supported log level.
        for level in ('debug', 'info', 'notice', 'error', 'exception'):
            setattr(self, level + '_calls', [])

    def debug(self, *args):
        """Record a debug() call's arguments."""
        self.debug_calls.append(args)

    def info(self, *args):
        """Record an info() call's arguments."""
        self.info_calls.append(args)

    def notice(self, *args):
        """Record a notice() call's arguments."""
        self.notice_calls.append(args)

    def error(self, *args):
        """Record an error() call's arguments."""
        self.error_calls.append(args)

    def exception(self, *args):
        """Record an exception() call's arguments plus sys.exc_info()."""
        self.exception_calls.append((args, exc_info()))
class PropertyObject(object):
    """Empty class; instances simply hold whatever attributes tests assign
    (used e.g. as a stand-in for the wsgi module and its HttpProtocol)."""
class TestWSGISubserver(TestIPSubserver):
_class = server.WSGISubserver
def test_init(self):
ss = TestIPSubserver.test_init(self)
self.assertEqual(ss.stats_conf.get('request_count'), 'sum')
self.assertEqual(ss.stats_conf.get('status_2xx_count'), 'sum')
self.assertEqual(ss.stats_conf.get('status_3xx_count'), 'sum')
self.assertEqual(ss.stats_conf.get('status_4xx_count'), 'sum')
self.assertEqual(ss.stats_conf.get('status_5xx_count'), 'sum')
def test_parse_conf_defaults(self):
ss = TestIPSubserver.test_parse_conf_defaults(self)
self.assertEqual(ss.log_auth_tokens, False)
self.assertEqual(ss.log_headers, False)
self.assertEqual(ss.count_status_codes, [404, 408, 499, 501])
self.assertEqual(ss.wsgi_input_iter_chunk_size, 4096)
self.assertEqual(ss.apps, [])
    def test_parse_conf_log_auth_tokens(self):
        """log_auth_tokens parses as a boolean from [brim] or [test];
        non-boolean values exit with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['log_auth_tokens'] = 'yes'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.log_auth_tokens, True)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['log_auth_tokens'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] log_auth_tokens of 'abc' cannot be "
            "converted to boolean.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['log_auth_tokens'] = 'yes'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.log_auth_tokens, True)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['log_auth_tokens'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] log_auth_tokens of 'abc' cannot be "
            "converted to boolean.")
    def test_parse_conf_log_headers(self):
        """log_headers parses as a boolean from [brim] or [test];
        non-boolean values exit with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['log_headers'] = 'yes'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.log_headers, True)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['log_headers'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] log_headers of 'abc' cannot be "
            "converted to boolean.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['log_headers'] = 'yes'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.log_headers, True)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['log_headers'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] log_headers of 'abc' cannot be "
            "converted to boolean.")
def test_parse_conf_count_status_codes(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['count_status_codes'] = '1'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.count_status_codes, [1])
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['count_status_codes'] = '1 2 345'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.count_status_codes, [1, 2, 345])
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('brim', {})['count_status_codes'] = 'abc'
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid [test] count_status_codes 'abc'.")
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['count_status_codes'] = '1'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.count_status_codes, [1])
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['count_status_codes'] = '1 2 345'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.count_status_codes, [1, 2, 345])
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('test', {})['count_status_codes'] = 'abc'
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid [test] count_status_codes 'abc'.")
    def test_parse_conf_wsgi_input_iter_chunk_size(self):
        """wsgi_input_iter_chunk_size parses as an int from [brim] or
        [test]; non-int values exit with a message."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['wsgi_input_iter_chunk_size'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.wsgi_input_iter_chunk_size, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('brim', {})['wsgi_input_iter_chunk_size'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [brim] wsgi_input_iter_chunk_size of 'abc' "
            "cannot be converted to int.")
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['wsgi_input_iter_chunk_size'] = '123'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.wsgi_input_iter_chunk_size, 123)
        ss = self._class(FakeServer(), 'test')
        exc = None
        try:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['wsgi_input_iter_chunk_size'] = 'abc'
            ss._parse_conf(Conf(confd))
        except SystemExit as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Configuration value [test] wsgi_input_iter_chunk_size of 'abc' "
            "cannot be converted to int.")
    def test_configure_wsgi_apps(self):
        """The [test] apps list loads each named app section in order.

        Each ss.apps entry is a (name, class, parsed_conf) tuple, where
        parsed_conf comes from the app class's parse_conf.
        """
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one two'
        confd.setdefault('one', {})['call'] = 'brim.wsgi_echo.WSGIEcho'
        confd.setdefault('two', {})['call'] = 'brim.wsgi_echo.WSGIEcho'
        conf = Conf(confd)
        ss._parse_conf(conf)
        self.assertEqual(len(ss.apps), 2)
        self.assertEqual(ss.apps[0][0], 'one')
        self.assertEqual(ss.apps[1][0], 'two')
        self.assertEqual(ss.apps[0][1].__name__, 'WSGIEcho')
        self.assertEqual(ss.apps[1][1].__name__, 'WSGIEcho')
        self.assertEqual(ss.apps[0][2], ss.apps[0][1].parse_conf('one', conf))
        self.assertEqual(ss.apps[1][2], ss.apps[1][1].parse_conf('two', conf))
    def test_configure_wsgi_apps_conf_no_call(self):
        """An app section without a 'call' option makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        # 'cll' is a deliberate misspelling of 'call'.
        confd.setdefault('one', {})['cll'] = 'brim.wsgi_echo.WSGIEcho'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "App [one] not configured with 'call' option.")
    def test_configure_wsgi_apps_conf_invalid_call(self):
        """A 'call' value with no dotted path makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        # Underscores instead of dots: not a loadable module.class path.
        confd.setdefault('one', {})['call'] = 'brim_wsgi_echo_WSGIEcho'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Invalid call value 'brim_wsgi_echo_WSGIEcho' for app [one].")
    def test_configure_wsgi_apps_no_load(self):
        """A dotted 'call' path naming a missing class makes _parse_conf
        raise a 'Could not load class' error."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = 'brim.wsgi_echo.sgi_cho'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load class 'brim.wsgi_echo.sgi_cho' for app [one].")
    def test_configure_wsgi_apps_not_a_class(self):
        """A 'call' path that resolves to something non-instantiable (here
        a function) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = 'brim.server._send_pid_sig'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to instantiate 'brim.server._send_pid_sig' for "
            "app [one]. Probably not a class.")
    def test_configure_wsgi_apps_invalid_init(self):
        """An app class whose __init__ has the wrong arity (see the
        AppWithInvalidInit fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithInvalidInit'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to instantiate "
            "'brim.test.unit.test_server.AppWithInvalidInit' for app "
            "[one]. Incorrect number of args, 1, should be 4 (self, name, "
            "conf, next_app).")
    def test_configure_wsgi_apps_invalid_call(self):
        """An app class whose __call__ has the wrong arity (see the
        AppWithInvalidCall fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithInvalidCall'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to use "
            "'brim.test.unit.test_server.AppWithInvalidCall' for app "
            "[one]. Incorrect number of __call__ args, 1, should be 3 (self, "
            "env, start_response).")
    def test_configure_wsgi_apps_no_call(self):
        """An app class lacking __call__ entirely (see the AppWithNoCall
        fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithNoCall'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to use "
            "'brim.test.unit.test_server.AppWithNoCall' for app "
            "[one]. Probably no __call__ method.")
    def test_configure_wsgi_apps_invalid_parse_conf1(self):
        """An app whose parse_conf has the wrong arity (see the
        AppWithInvalidParseConf1 fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithInvalidParseConf1'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.AppWithInvalidParseConf1' "
            "for app [one]. Incorrect number of parse_conf args, 1, should be "
            "3 (cls, name, conf).")
    def test_configure_wsgi_apps_invalid_parse_conf2(self):
        """An app whose parse_conf is not callable (see the
        AppWithInvalidParseConf2 fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithInvalidParseConf2'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.AppWithInvalidParseConf2' "
            "for app [one]. parse_conf probably not a method.")
    def test_configure_wsgi_apps_no_parse_conf(self):
        """An app without parse_conf gets the full Conf as its app conf."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithNoParseConf'
        conf = Conf(confd)
        ss._parse_conf(conf)
        # apps entries are (name, class, conf); the conf falls back to the
        # whole Conf object when the app has no parse_conf.
        self.assertEqual(ss.apps[0][2], conf)
    def test_configure_wsgi_apps_with_parse_conf(self):
        """An app with a working parse_conf gets its return value stored as
        the app conf (the AppWithParseConf fixture returns {'ok': True})."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithParseConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.apps[0][2], {'ok': True})
    def test_configure_wsgi_apps_invalid_stats_conf1(self):
        """An app whose stats_conf has the wrong arity (see the
        AppWithInvalidStatsConf1 fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithInvalidStatsConf1'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.AppWithInvalidStatsConf1' "
            "for app [one]. Incorrect number of stats_conf args, 1, should be "
            "3 (cls, name, conf).")
    def test_configure_wsgi_apps_invalid_stats_conf2(self):
        """An app whose stats_conf is not callable (see the
        AppWithInvalidStatsConf2 fixture) makes _parse_conf raise."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithInvalidStatsConf2'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.AppWithInvalidStatsConf2' "
            "for app [one]. stats_conf probably not a method.")
    def test_configure_wsgi_apps_no_stats_conf(self):
        """An app without stats_conf leaves only the subserver's own
        default stats keys in place."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithNoStatsConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
        self.assertEqual(ss.stats_conf.get('request_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_2xx_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_3xx_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_4xx_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_5xx_count'), 'sum')
    def test_configure_wsgi_apps_with_stats_conf(self):
        """An app with a working stats_conf merges its stats (the
        AppWithStatsConf fixture adds 'ok') into the defaults."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['apps'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.AppWithStatsConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
        self.assertEqual(ss.stats_conf.get('request_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_2xx_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_3xx_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_4xx_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('status_5xx_count'), 'sum')
        # The app-contributed stat from AppWithStatsConf.stats_conf.
        self.assertEqual(ss.stats_conf.get('ok'), 'sum')
    def test_privileged_start(self):
        """_privileged_start binds the listening socket.

        Three cases: binding the default *:80 fails with a permission
        error (NOTE(review): assumes the test does not run as root, so
        binding port 80 must fail — confirm for the CI environment);
        binding port 0 succeeds and sets ss.sock; and the socket creation
        delegates to server.get_listening_tcp_socket with the parsed
        keyfile/certfile/retry/backlog options.
        """
        ss = self._class(FakeServer(), 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        exc = None
        try:
            ss._privileged_start()
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), 'Could not bind to *:80: [Errno 13] Permission denied')
        # Port 0 lets the OS pick a free port, so binding succeeds.
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['port'] = '0'
        ss._parse_conf(Conf(confd))
        ss._privileged_start()
        self.assertTrue(ss.sock is not None)
        # Stub out get_listening_tcp_socket to capture the exact call.
        get_listening_tcp_socket_calls = []
        def _get_listening_tcp_socket(*args, **kwargs):
            get_listening_tcp_socket_calls.append((args, kwargs))
            return 'sock'
        ss = self._class(FakeServer(), 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        get_listening_tcp_socket_orig = server.get_listening_tcp_socket
        try:
            server.get_listening_tcp_socket = _get_listening_tcp_socket
            ss._privileged_start()
        finally:
            # Always restore the real function, even if the start fails.
            server.get_listening_tcp_socket = get_listening_tcp_socket_orig
        self.assertEqual(ss.sock, 'sock')
        self.assertEqual(get_listening_tcp_socket_calls, [(('*', 80), {
            'keyfile': None, 'style': 'eventlet', 'retry': 30,
            'certfile': None, 'backlog': 4096})])
    def test_start(self, output=False):
        """_start wires up logging, stats, workers, and the WSGI protocol.

        Patches every collaborator in the ``server`` module with a
        recording stub, delegates the actual start to
        ``TestIPSubserver.test_start``, then verifies each stub saw
        exactly the expected calls.  With ``output=True`` the server
        runs in the foreground and must NOT capture stdout/stderr.
        """
        capture_exceptions_stdout_stderr_calls = []
        time_calls = []
        get_logger_calls = []
        fake_logger = FakeLogger()
        fake_wsgi = PropertyObject()
        fake_wsgi.HttpProtocol = PropertyObject()
        sustain_workers_calls = []
        shutdown_safe_calls = []
        def _capture_exceptions_stdout_stderr(*args, **kwargs):
            capture_exceptions_stdout_stderr_calls.append((args, kwargs))
        def _time(*args):
            # Monotonic fake clock: each call returns 1, 2, 3, ...
            time_calls.append(args)
            return len(time_calls)
        def _get_logger(*args):
            get_logger_calls.append(args)
            return fake_logger
        def _sustain_workers(*args, **kwargs):
            sustain_workers_calls.append((args, kwargs))
        def _shutdown_safe(*args):
            shutdown_safe_calls.append(args)
        capture_exceptions_stdout_stderr_orig = \
            server.capture_exceptions_stdout_stderr
        time_orig = server.time
        get_logger_orig = server.get_logger
        wsgi_orig = server.wsgi
        sustain_workers_orig = server.sustain_workers
        shutdown_safe_orig = server.shutdown_safe
        try:
            server.capture_exceptions_stdout_stderr = \
                _capture_exceptions_stdout_stderr
            server.time = _time
            server.get_logger = _get_logger
            server.wsgi = fake_wsgi
            server.sustain_workers = _sustain_workers
            server.shutdown_safe = _shutdown_safe
            ss = TestIPSubserver.test_start(self, output=output)
        finally:
            # Restore every patched attribute even if assertions raise.
            server.capture_exceptions_stdout_stderr = \
                capture_exceptions_stdout_stderr_orig
            server.time = time_orig
            server.get_logger = get_logger_orig
            server.wsgi = wsgi_orig
            server.sustain_workers = sustain_workers_orig
            server.shutdown_safe = shutdown_safe_orig
        if output:
            # Foreground mode leaves stdout/stderr alone.
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [])
        else:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [(
                (),
                {'exceptions': ss._capture_exception,
                 'stdout_func': ss._capture_stdout,
                 'stderr_func': ss._capture_stderr})])
        self.assertEqual(time_calls, [()])
        self.assertEqual(get_logger_calls, [(
            ss.name, ss.log_name, ss.log_level, ss.log_facility,
            ss.server.no_daemon)])
        self.assertEqual(sustain_workers_calls, [
            ((1, ss._wsgi_worker), {'logger': fake_logger})])
        self.assertEqual(shutdown_safe_calls, [(ss.sock,)])
        self.assertEqual(ss.worker_id, -1)
        self.assertEqual(ss.start_time, 1)
        self.assertEqual(ss.logger, fake_logger)
        # Every counted status code must have been registered as a
        # 'sum' stat during start.
        for code in ss.count_status_codes:
            key = 'status_%d_count' % code
            self.assertEqual(
                ss.stats_conf.get(key), 'sum',
                'key %r value %r != %r' % (key, ss.stats_conf.get(key), 'sum'))
        self.assertEqual(
            fake_wsgi.HttpProtocol.default_request_version, 'HTTP/1.0')
        # Eventlet's own per-request logging is disabled...
        self.assertEqual(fake_wsgi.HttpProtocol.log_request('blah'), None)
        self.assertEqual(fake_logger.error_calls, [])
        # ...while protocol-level messages are routed to our logger.
        fake_wsgi.HttpProtocol.log_message(None, 'test message')
        self.assertEqual(
            fake_logger.error_calls, [('WSGI ERROR: test message',)])
        self.assertEqual(fake_wsgi.WRITE_TIMEOUT, ss.client_timeout)
    def test_start_with_output(self):
        """Repeat test_start in foreground mode (no output capture)."""
        self.test_start(output=True)
    def test_wsgi_worker(self, no_setproctitle=False, no_daemon=False,
                         with_apps=False, raises=False):
        """_wsgi_worker runs the eventlet WSGI server with the app chain.

        Parameterized for several modes: missing setproctitle, foreground
        (no_daemon) mode, a configured two-app pipeline, and the three
        ways ``wsgi.server`` can raise (EINVAL socket errors are
        swallowed; anything else propagates).
        """
        setproctitle_calls = []
        use_hub_calls = []
        fake_wsgi = PropertyObject()
        fake_wsgi.HttpProtocol = PropertyObject()
        server_calls = []
        def _setproctitle(*args):
            setproctitle_calls.append(args)
        def _sustain_workers(*args, **kwargs):
            pass
        def _use_hub(*args):
            use_hub_calls.append(args)
        def _server(*args, **kwargs):
            # Stand-in for wsgi.server; optionally raises to exercise
            # the worker's error handling paths.
            server_calls.append((args, kwargs))
            if raises == 'socket einval':
                err = server.socket_error('test socket einval')
                err.errno = server.EINVAL
                raise err
            elif raises == 'socket other':
                raise server.socket_error('test socket other')
            elif raises == 'other':
                raise Exception('test other')
        def _time():
            return 1
        setproctitle_orig = server.setproctitle
        sustain_workers_orig = server.sustain_workers
        use_hub_orig = server.use_hub
        wsgi_orig = server.wsgi
        time_orig = server.time
        fake_wsgi.server = _server
        exc = None
        try:
            # None simulates the setproctitle package being absent.
            server.setproctitle = None if no_setproctitle else _setproctitle
            server.sustain_workers = _sustain_workers
            server.use_hub = _use_hub
            server.wsgi = fake_wsgi
            server.time = _time
            ss = self._class(FakeServer(no_daemon=no_daemon, output=True),
                             'test')
            if with_apps:
                confd = self._get_default_confd()
                confd.setdefault('test', {})['port'] = '0'
                confd['test']['apps'] = 'one two'
                confd.setdefault('one', {})['call'] = 'brim.wsgi_echo.WSGIEcho'
                confd.setdefault('two', {})['call'] = 'brim.wsgi_echo.WSGIEcho'
            else:
                confd = self._get_default_confd()
                confd.setdefault('test', {})['port'] = '0'
            ss._parse_conf(Conf(confd))
            ss._privileged_start()
            bs = server._BucketStats(['0'], {'start_time': 'worker'})
            ss._start(bs)
            ss._wsgi_worker(0)
        except Exception as err:
            exc = err
        finally:
            # Restore all patched server-module attributes.
            server.setproctitle = setproctitle_orig
            server.sustain_workers = sustain_workers_orig
            server.use_hub = use_hub_orig
            server.wsgi = wsgi_orig
            server.time = time_orig
        if no_setproctitle or no_daemon:
            self.assertEqual(setproctitle_calls, [])
        else:
            self.assertEqual(setproctitle_calls, [('0:test:brimd',)])
        self.assertEqual(ss.worker_id, 0)
        self.assertEqual(ss.bucket_stats.get(ss.worker_id, 'start_time'), 1)
        if no_daemon:
            self.assertEqual(use_hub_calls, [])
        else:
            self.assertEqual(use_hub_calls, [(None,)])
        if with_apps:
            # The app chain links one -> two -> the subserver itself.
            self.assertEqual(ss.first_app.__class__.__name__, 'WSGIEcho')
            self.assertEqual(ss.first_app.name, 'one')
            self.assertEqual(ss.first_app.next_app.name, 'two')
            self.assertEqual(ss.first_app.next_app.next_app, ss)
        else:
            self.assertEqual(ss.first_app, ss)
        self.assertEqual(len(server_calls), 1)
        self.assertEqual(len(server_calls[0]), 2)
        self.assertEqual(len(server_calls[0][0]), 3)
        null_logger = server_calls[0][0][2]
        self.assertEqual(
            null_logger.__class__.__name__, '_EventletWSGINullLogger')
        pool = server_calls[0][1]['custom_pool']
        self.assertEqual(pool.size, ss.concurrent_per_worker)
        self.assertEqual(server_calls, [(
            (ss.sock, ss._wsgi_entry, null_logger),
            {'minimum_chunk_size': 4096,
             'custom_pool': pool})])
        if raises == 'socket einval':
            # EINVAL (e.g. socket shutdown during stop) is swallowed.
            self.assertEqual(exc, None)
        elif raises == 'socket other':
            self.assertEqual(str(exc), 'test socket other')
        elif raises == 'other':
            self.assertEqual(str(exc), 'test other')
        else:
            self.assertEqual(exc, None)
    def test_wsgi_worker_no_setproctitle(self):
        """Repeat test_wsgi_worker with setproctitle unavailable."""
        self.test_wsgi_worker(no_setproctitle=True)
    def test_wsgi_worker_no_daemon(self):
        """Repeat test_wsgi_worker in foreground (no-daemon) mode."""
        self.test_wsgi_worker(no_daemon=True)
    def test_wsgi_worker_with_apps(self):
        """Repeat test_wsgi_worker with a configured two-app pipeline."""
        self.test_wsgi_worker(with_apps=True)
    def test_wsgi_worker_raises_socket_einval(self):
        """An EINVAL socket error from wsgi.server must be swallowed."""
        self.test_wsgi_worker(raises='socket einval')
    def test_wsgi_worker_raises_socket_other(self):
        """Non-EINVAL socket errors from wsgi.server must propagate."""
        self.test_wsgi_worker(raises='socket other')
    def test_wsgi_worker_raises_other(self):
        """Non-socket exceptions from wsgi.server must propagate."""
        self.test_wsgi_worker(raises='other')
    def test_wsgi_entry(self, with_app=False, raises=False, with_txn=None):
        """_wsgi_entry decorates the env, runs the app chain, and logs.

        Parameterized over: a configured echo app vs. the default 404,
        exceptions raised before the first body byte ('start') vs. midway
        through the body ('body'), and an incoming X-Txn header vs. a
        generated txn id.
        """
        ss = self._class(FakeServer(output=True), 'test')
        if with_app:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['port'] = '0'
            confd['test']['apps'] = 'one'
            confd.setdefault('one', {})['call'] = 'brim.wsgi_echo.WSGIEcho'
        else:
            confd = self._get_default_confd()
            confd.setdefault('test', {})['port'] = '0'
        ss._parse_conf(Conf(confd))
        ss._privileged_start()
        bs = server._BucketStats(['0'], {'start_time': 'worker'})
        def _sustain_workers(*args, **kwargs):
            pass
        def _server(*args, **kwargs):
            pass
        sustain_workers_orig = server.sustain_workers
        wsgi_orig = server.wsgi
        try:
            # Run a worker once (with wsgi.server stubbed out) just so
            # the subserver is fully initialized for _wsgi_entry.
            server.sustain_workers = _sustain_workers
            server.wsgi = PropertyObject()
            server.wsgi.HttpProtocol = PropertyObject()
            server.wsgi.server = _server
            ss._start(bs)
            ss._wsgi_worker(0)
        finally:
            server.sustain_workers = sustain_workers_orig
            server.wsgi = wsgi_orig
        start_response_calls = []
        log_request_calls = []
        uuid4_instance = uuid4()
        def _start_response(*args, **kwargs):
            start_response_calls.append((args, kwargs))
        def _uuid4():
            # Fixed uuid so the generated txn id is predictable.
            return uuid4_instance
        def _time():
            return 1
        def _log_request(*args, **kwargs):
            log_request_calls.append((args, kwargs))
        def _app_with_body_exception(env, start_response):
            # App that fails after yielding part of the body; _wsgi_entry
            # must log the exception but cannot change the status line.
            start_response('200 OK', [('Content-Length', '10')])
            yield 'partial'
            raise Exception('body exception')
        uuid4_orig = server.uuid4
        time_orig = server.time
        env = {'PATH_INFO': '/echo', 'wsgi.input': StringIO('test value')}
        if with_txn:
            env['HTTP_X_TXN'] = with_txn
        try:
            server.uuid4 = _uuid4
            server.time = _time
            ss._log_request = _log_request
            if raises == 'start':
                # A non-callable first_app raises before any response.
                ss.first_app = 'i will raise an exception'
            elif raises == 'body':
                ss.first_app = _app_with_body_exception
            ss.logger = FakeLogger()
            content = ''.join(ss._wsgi_entry(env, _start_response))
        finally:
            server.uuid4 = uuid4_orig
            server.time = time_orig
        # _wsgi_entry must annotate the env with all the brim.* keys.
        self.assertEqual(env.get('brim'), ss)
        self.assertEqual(env.get('brim.start'), 1)
        self.assertEqual(env.get('brim.stats').bucket_stats, ss.bucket_stats)
        self.assertEqual(env.get('brim.stats').bucket_id, ss.worker_id)
        self.assertEqual(env.get('brim.logger'), ss.logger)
        if with_txn:
            self.assertEqual(env.get('brim.txn'), with_txn)
        else:
            self.assertEqual(env.get('brim.txn'), uuid4_instance.hex)
        if with_app:
            self.assertEqual(env.get('brim._bytes_in'), 10)
            self.assertEqual(env.get('brim._bytes_out'), 10)
        else:
            self.assertEqual(env.get('brim._bytes_in'), 0)
            if raises == 'body':
                # Only 'partial' (7 bytes) made it out before the raise.
                self.assertEqual(env.get('brim._bytes_out'), 7)
            elif raises == 'start':
                self.assertEqual(env.get('brim._bytes_out'), 0)
            else:
                self.assertEqual(env.get('brim._bytes_out'), 14)
        # wsgi.input must be wrapped so reads are counted.
        wi = env.get('wsgi.input')
        self.assertEqual(wi.__class__.__name__, '_WsgiInput')
        self.assertEqual(wi.env, env)
        self.assertEqual(wi.iter_chunk_size, ss.wsgi_input_iter_chunk_size)
        self.assertEqual(env.get('brim.log_info'), [])
        self.assertEqual(env.get('brim.json_dumps'), ss.json_dumps)
        self.assertEqual(env.get('brim.json_loads'), ss.json_loads)
        if raises:
            if raises == 'start':
                self.assertEqual(
                    env.get('brim._start_response'),
                    ('500 Internal Server Error', [('Content-Length', '26')],
                     None))
                self.assertEqual(content, '500 Internal Server Error\n')
            else:
                self.assertEqual(
                    env.get('brim._start_response'),
                    ('200 OK', [('Content-Length', '10')], None))
                self.assertEqual(content, 'partial')
            self.assertEqual(ss.logger.debug_calls, [])
            self.assertEqual(ss.logger.info_calls, [])
            self.assertEqual(ss.logger.notice_calls, [])
            self.assertEqual(ss.logger.error_calls, [])
            # Exactly one exception logged, with the original exc_info.
            self.assertEqual(len(ss.logger.exception_calls), 1)
            self.assertEqual(len(ss.logger.exception_calls[0]), 2)
            self.assertEqual(
                ss.logger.exception_calls[0][0], ('WSGI EXCEPTION:',))
            self.assertEqual(len(ss.logger.exception_calls[0][1]), 3)
            if raises == 'start':
                self.assertEqual(
                    str(ss.logger.exception_calls[0][1][1]),
                    "'str' object is not callable")
            else:
                self.assertEqual(
                    str(ss.logger.exception_calls[0][1][1]), 'body exception')
        elif with_app:
            self.assertEqual(
                env.get('brim._start_response'),
                ('200 OK', [('Content-Length', '10')], None))
            self.assertEqual(content, 'test value')
            self.assertEqual(ss.logger.debug_calls, [])
            self.assertEqual(ss.logger.info_calls, [])
            self.assertEqual(ss.logger.notice_calls, [])
            self.assertEqual(ss.logger.error_calls, [])
            self.assertEqual(ss.logger.exception_calls, [])
        else:
            # No apps configured: the subserver itself answers 404.
            self.assertEqual(
                env.get('brim._start_response'), (
                    '404 Not Found',
                    [('Content-Length', '14'), ('Content-Type', 'text/plain')],
                    None))
            self.assertEqual(content, '404 Not Found\n')
            self.assertEqual(ss.logger.debug_calls, [])
            self.assertEqual(ss.logger.info_calls, [])
            self.assertEqual(ss.logger.notice_calls, [])
            self.assertEqual(ss.logger.error_calls, [])
            self.assertEqual(ss.logger.exception_calls, [])
        # Every request, successful or not, is handed to _log_request.
        self.assertEqual(log_request_calls, [((env,), {})])
    def test_wsgi_entry_with_apps(self):
        """Repeat test_wsgi_entry with an echo app configured."""
        self.test_wsgi_entry(with_app=True)
    def test_wsgi_entry_raises_start_exception(self):
        """Repeat test_wsgi_entry with the app failing before response."""
        self.test_wsgi_entry(raises='start')
    def test_wsgi_entry_raises_body_exception(self):
        """Repeat test_wsgi_entry with the app failing mid-body."""
        self.test_wsgi_entry(raises='body')
    def test_wsgi_entry_with_passed_txn(self):
        """Repeat test_wsgi_entry with an incoming X-Txn header."""
        self.test_wsgi_entry(with_txn='passed_txn')
def _log_request_build(self, start=1330037777.77):
return {
'REQUEST_METHOD': 'GET',
'PATH_INFO': '/path',
'SERVER_PROTOCOL': 'HTTP/1.1',
'brim.start': start,
'brim.txn': 'abcdef',
'brim._start_response': (
'200 OK', [('Content-Length', '10')], None),
'brim._bytes_in': 0,
'brim._bytes_out': 10}
    def _log_request_execute(self, env, end=1330037779.89,
                             log_auth_tokens=False, log_headers=False):
        """Run _log_request(env) on a fresh subserver and return it.

        Fakes ``server.time``/``server.gmtime`` so the logged request
        duration and timestamp are deterministic; callers inspect the
        returned subserver's FakeLogger and bucket stats.
        """
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.log_auth_tokens = log_auth_tokens
        ss.log_headers = log_headers
        ss.bucket_stats = server._BucketStats(['test'], {
            'request_count': 'sum', 'status_2xx_count': 'sum',
            'status_200_count': 'sum', 'status_201_count': 'sum',
            'status_3xx_count': 'sum', 'status_4xx_count': 'sum',
            'status_5xx_count': 'sum'})
        ss.worker_id = 0
        time_orig = server.time
        gmtime_orig = server.gmtime
        def _time():
            # Fixed "now" so the elapsed time is always end - brim.start.
            return end
        def _gmtime():
            return gmtime_orig(end)
        try:
            server.time = _time
            server.gmtime = _gmtime
            ss._log_request(env)
        finally:
            server.time = time_orig
            server.gmtime = gmtime_orig
        return ss
def test_log_request_no_start_response(self):
env = self._log_request_build()
del env['brim._start_response']
env['brim._bytes_out'] = 0
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 499 - - - - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_minimal(self):
env = self._log_request_build()
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_3xx(self):
env = self._log_request_build()
env['brim._start_response'] = \
('301 Test', [('Content-Length', '10')], None)
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 301 10 - - - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_4xx(self):
env = self._log_request_build()
env['brim._start_response'] = \
('404 Test', [('Content-Length', '10')], None)
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 404 10 - - - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_5xx(self):
env = self._log_request_build()
env['brim._start_response'] = \
('503 Test', [('Content-Length', '10')], None)
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 1)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 503 10 - - - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_exception(self):
env = self._log_request_build()
del env['PATH_INFO']
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(len(ss.logger.exception_calls), 1)
self.assertEqual(len(ss.logger.exception_calls[0]), 2)
self.assertEqual(
ss.logger.exception_calls[0][0], ('WSGI EXCEPTION:',))
self.assertEqual(len(ss.logger.exception_calls[0][1]), 3)
self.assertEqual(
str(ss.logger.exception_calls[0][1][1]), "'PATH_INFO'")
self.assertEqual(ss.logger.txn, None)
def test_log_request_path_quoted_requoted(self):
env = self._log_request_build()
env['PATH_INFO'] = '/path%20%2Ftest'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path%20/test HTTP/1.1 200 10 - - - '
'abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_query(self):
env = self._log_request_build()
env['QUERY_STRING'] = 'param1=value1+value2¶m2'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path?param1=value1%20value2¶m2 '
'HTTP/1.1 200 10 - - - abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_cluster_client(self):
env = self._log_request_build()
env['HTTP_X_CLUSTER_CLIENT_IP'] = '1.2.3.4'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.4 - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - '
'abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_forwarded_for(self):
env = self._log_request_build()
env['HTTP_X_FORWARDED_FOR'] = '1.2.3.4, 1.2.3.5'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.4 - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - '
'abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_cluster_client_forwarded_for(self):
env = self._log_request_build()
env['HTTP_X_CLUSTER_CLIENT_IP'] = '1.2.3.4'
env['HTTP_X_FORWARDED_FOR'] = '1.2.3.5, 1.2.3.6'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.4 - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - '
'abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_remote_addr(self):
env = self._log_request_build()
env['REMOTE_ADDR'] = '1.2.3.4'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.4 1.2.3.4 - - 20120223T225619Z GET /path HTTP/1.1 200 10 - '
'- - abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_remote_addr_cluster_client(self):
env = self._log_request_build()
env['REMOTE_ADDR'] = '1.2.3.4'
env['HTTP_X_CLUSTER_CLIENT_IP'] = '1.2.3.5'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.5 1.2.3.4 - - 20120223T225619Z GET /path HTTP/1.1 200 10 - '
'- - abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_remote_addr_forwarded_for(self):
env = self._log_request_build()
env['REMOTE_ADDR'] = '1.2.3.4'
env['HTTP_X_FORWARDED_FOR'] = '1.2.3.5, 1.2.3.6'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.5 1.2.3.4 - - 20120223T225619Z GET /path HTTP/1.1 200 10 - '
'- - abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_remote_addr_cluster_client_forwarded_for(self):
env = self._log_request_build()
env['REMOTE_ADDR'] = '1.2.3.4'
env['HTTP_X_CLUSTER_CLIENT_IP'] = '1.2.3.5'
env['HTTP_X_FORWARDED_FOR'] = '1.2.3.6, 1.2.3.7'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'1.2.3.5 1.2.3.4 - - 20120223T225619Z GET /path HTTP/1.1 200 10 - '
'- - abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_headers(self):
env = self._log_request_build()
env['HTTP_CONTENT_TYPE'] = 'text/plain'
env['HTTP_X_TEST'] = 'test value'
ss = self._log_request_execute(env, log_headers=True)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - - - headers: '
'X-Test:test%20value%0AContent-Type:text/plain',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_client_disconnect(self):
env = self._log_request_build()
env['brim._client_disconnect'] = True
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 disconnect - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_goofy_code(self):
env = self._log_request_build()
env['brim._start_response'] = \
('2xx OK', [('Content-Length', '10')], None)
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 - 10 - - - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_auth_token(self):
env = self._log_request_build()
env['HTTP_X_AUTH_TOKEN'] = 'authtoken'
ss = self._log_request_execute(env, log_auth_tokens=True)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - authtoken - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - '
'abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_bytes_in(self):
env = self._log_request_build()
env['brim._bytes_in'] = 123
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 123 - - '
'abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_referer(self):
env = self._log_request_build()
env['HTTP_REFERER'] = 'http://some.host/path%20/test?maybe=query+value'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - '
'http://some.host/path%2520/test?maybe=query+value - abcdef '
'2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_user_agent(self):
env = self._log_request_build()
env['HTTP_USER_AGENT'] = 'Some User Agent (v1.0)'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - '
'Some%20User%20Agent%20(v1.0) abcdef 2.12000 - - -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_authenticated_user(self):
env = self._log_request_build()
env['brim.authenticated_user'] = 'someuser'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - someuser -',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_wsgi_source(self):
env = self._log_request_build()
env['brim.wsgi_source'] = 'somesource'
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - - somesource',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_additional_info(self):
env = self._log_request_build()
env['brim.log_info'] = ['test:', 'one', 'two']
ss = self._log_request_execute(env)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - - - test: one two',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_log_request_additional_info_and_headers(self):
env = self._log_request_build()
env['brim.log_info'] = ['test:', 'one', 'two']
env['HTTP_CONTENT_TYPE'] = 'text/plain'
ss = self._log_request_execute(env, log_headers=True)
self.assertEqual(ss.bucket_stats.get(0, 'request_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_2xx_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_200_count'), 1)
self.assertEqual(ss.bucket_stats.get(0, 'status_201_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_3xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_4xx_count'), 0)
self.assertEqual(ss.bucket_stats.get(0, 'status_5xx_count'), 0)
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [(
'- - - - 20120223T225619Z GET /path HTTP/1.1 200 10 - - - abcdef '
'2.12000 - - - test: one two headers: Content-Type:text/plain',)])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
self.assertEqual(ss.logger.txn, None)
def test_capture_exception(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_exception(*exc_info())
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(
ss.logger.error_calls,
[("UNCAUGHT EXCEPTION: wid:123 None ['None']",)])
self.assertEqual(ss.logger.exception_calls, [])
ss.logger = FakeLogger()
try:
raise Exception('test')
except Exception:
ss._capture_exception(*exc_info())
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(len(ss.logger.error_calls), 1)
self.assertEqual(len(ss.logger.error_calls[0]), 1)
e = ss.logger.error_calls[0][0]
self.assertTrue(e.startswith(
"UNCAUGHT EXCEPTION: wid:123 Exception: test ['Traceback (most "
"recent call last):', ' File "))
self.assertTrue(e.endswith(
'\', " raise Exception(\'test\')", \'Exception: test\']'))
self.assertEqual(ss.logger.exception_calls, [])
def test_capture_stdout(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_stdout('one\ntwo three\nfour\n')
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [
('STDOUT: wid:123 one',), ('STDOUT: wid:123 two three',),
('STDOUT: wid:123 four',)])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
def test_capture_stderr(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_stderr('one\ntwo three\nfour\n')
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [
('STDERR: wid:123 one',), ('STDERR: wid:123 two three',),
('STDERR: wid:123 four',)])
self.assertEqual(ss.logger.exception_calls, [])
def test_clone_env(self):
ss = self._class(FakeServer(output=True), 'test')
newenv = ss.clone_env({
'brim': 1,
'brim.json_dumps': 2,
'brim.json_loads': 3,
'brim.logger': 4,
'brim.stats': 5,
'brim.txn': 6,
'SERVER_NAME': 7,
'SERVER_PORT': 8,
'SERVER_PROTOCOL': 9,
'OTHER': 10,
'REQUEST_PATH': 'request_path'})
self.assertEqual(newenv.get('brim'), 1)
self.assertEqual(newenv.get('brim.json_dumps'), 2)
self.assertEqual(newenv.get('brim.json_loads'), 3)
self.assertEqual(newenv.get('brim.logger'), 4)
self.assertEqual(newenv.get('brim.stats'), 5)
self.assertEqual(newenv.get('brim.txn'), 6)
self.assertEqual(newenv.get('SERVER_NAME'), 7)
self.assertEqual(newenv.get('SERVER_PORT'), 8)
self.assertEqual(newenv.get('SERVER_PROTOCOL'), 9)
self.assertEqual(newenv.get('OTHER'), None)
self.assertEqual(newenv.get('HTTP_REFERER'), 'request_path')
self.assertEqual(newenv.get('HTTP_USER_AGENT'), 'clone_env')
self.assertEquals(len(newenv), 11)
    def test_get_response(self):
        """get_response returns (status, headers, excinfo, body_iter).

        Three scenarios: no app matches (default 404), a next_app that
        returns an empty 204, and a next_app that returns a 200 with a body.
        """
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss._parse_conf(Conf({}))
        ss.worker_id = 0
        ss.bucket_stats = server._BucketStats(['test'], {
            'request_count': 'sum', 'status_2xx_count': 'sum',
            'status_200_count': 'sum', 'status_201_count': 'sum',
            'status_3xx_count': 'sum', 'status_4xx_count': 'sum',
            'status_5xx_count': 'sum'})
        ss.first_app = ss
        # No app configured for /test: expect the built-in 404 response.
        status_line, headers_iteritems, excinfo, content_iter = \
            ss.get_response({
                'REQUEST_METHOD': 'GET',
                'PATH_INFO': '/test',
                'SERVER_PROTOCOL': 'HTTP/1.1',
                'wsgi.input': StringIO('test value')})
        self.assertEqual(status_line, '404 Not Found')
        self.assertEqual(dict(headers_iteritems), {
            'Content-Length': '14', 'Content-Type': 'text/plain'})
        self.assertEqual(excinfo, None)
        self.assertEqual(''.join(content_iter), '404 Not Found\n')
        # An explicit next_app with an empty body.
        def fake_app(env, start_response):
            start_response(
                '204 No Content',
                [('Content-Length', '0'), ('Content-Type', 'text/plain')])
            return []
        status_line, headers_iteritems, excinfo, content_iter = \
            ss.get_response({
                'REQUEST_METHOD': 'GET',
                'PATH_INFO': '/test',
                'SERVER_PROTOCOL': 'HTTP/1.1',
                'wsgi.input': StringIO('test value')}, next_app=fake_app)
        self.assertEqual(status_line, '204 No Content')
        self.assertEqual(dict(headers_iteritems), {
            'Content-Length': '0', 'Content-Type': 'text/plain'})
        self.assertEqual(excinfo, None)
        self.assertEqual(''.join(content_iter), '')
        # An explicit next_app with a non-empty body.
        def fake_app2(env, start_response):
            start_response(
                '200 OK',
                [('Content-Length', '7'), ('Content-Type', 'text/plain')])
            return ['200 OK']
        status_line, headers_iteritems, excinfo, content_iter = \
            ss.get_response({
                'REQUEST_METHOD': 'GET',
                'PATH_INFO': '/test',
                'SERVER_PROTOCOL': 'HTTP/1.1',
                'wsgi.input': StringIO('test value')}, next_app=fake_app2)
        self.assertEqual(status_line, '200 OK')
        self.assertEqual(dict(headers_iteritems), {
            'Content-Length': '7', 'Content-Type': 'text/plain'})
        self.assertEqual(excinfo, None)
        self.assertEqual(''.join(content_iter), '200 OK')
class TCPWithInvalidInit(object):
    """Fixture: __init__ lacks the (name, conf) args TCP handlers need."""
    def __init__(self):
        pass
class TCPWithInvalidCall(object):
    """Fixture: __call__ lacks the (subserver, stats, sock, ip, port) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self):
        pass
class TCPWithNoCall(object):
    """Fixture: valid __init__ but no __call__ method at all."""
    def __init__(self, name, conf):
        pass
class TCPWithInvalidParseConf1(object):
    """Fixture: parse_conf classmethod lacks the (name, conf) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
    @classmethod
    def parse_conf(cls):
        pass
class TCPWithInvalidParseConf2(object):
    """Fixture: parse_conf is a plain attribute, not a method."""
    parse_conf = 'blah'
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
class TCPWithNoParseConf(object):
    """Fixture: valid handler with no parse_conf; gets the raw conf."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
class TCPWithParseConf(object):
    """Fixture: valid handler whose parse_conf returns {'ok': True}."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
    @classmethod
    def parse_conf(cls, name, conf):
        return {'ok': True}
class TCPWithInvalidStatsConf1(object):
    """Fixture: stats_conf classmethod lacks the (name, conf) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
    @classmethod
    def stats_conf(cls):
        pass
class TCPWithInvalidStatsConf2(object):
    """Fixture: stats_conf is a plain attribute, not a method."""
    stats_conf = 'blah'
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
class TCPWithNoStatsConf(object):
    """Fixture: valid handler with no stats_conf; defaults apply."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
class TCPWithStatsConf(object):
    """Fixture: valid handler whose stats_conf adds an 'ok' sum stat."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, ip, port):
        pass
    @classmethod
    def stats_conf(cls, name, conf):
        return [('ok', 'sum')]
class TestTCPSubserver(TestIPSubserver):
    """Tests for server.TCPSubserver: conf parsing, handler resolution,
    privileged socket binding, worker startup, and output capture.

    Handler validation error paths use the TCPWith* fixture classes
    defined above.
    """
    _class = server.TCPSubserver
    def _get_default_confd(self):
        return {'test': {'call': 'brim.tcp_echo.TCPEcho'}}
    def test_init(self):
        ss = TestIPSubserver.test_init(self)
        self.assertEqual(ss.stats_conf.get('connection_count'), 'sum')
    def test_parse_conf_defaults(self):
        ss = TestIPSubserver.test_parse_conf_defaults(self)
        self.assertEqual(ss.handler.__name__, 'TCPEcho')
    def test_parse_conf_no_call(self):
        ss = self._class(FakeServer(), 'test')
        conf = Conf({})
        exc = None
        try:
            ss._parse_conf(conf)
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "[test] not configured with 'call' option.")
    def test_parse_conf_invalid_call(self):
        ss = self._class(FakeServer(), 'test')
        conf = Conf({'test': {'call': 'invalid'}})
        exc = None
        try:
            ss._parse_conf(conf)
        except Exception as err:
            exc = err
        self.assertEqual(str(exc), "Invalid call value 'invalid' for [test].")
    def test_configure_handler(self):
        ss = self._class(FakeServer(), 'test')
        conf = Conf(self._get_default_confd())
        ss._parse_conf(conf)
        self.assertEqual(ss.handler.__name__, 'TCPEcho')
        self.assertEqual(ss.handler_conf, ss.handler.parse_conf('test', conf))
    def test_configure_handler_no_call_option(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['cll'] = confd['test']['call']
        del confd['test']['call']
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "[test] not configured with 'call' option.")
    def test_configure_handler_invalid_call_option(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim_tcp_echo_TCPEcho'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "Invalid call value 'brim_tcp_echo_TCPEcho' for [test].")
    def test_configure_handler_no_load(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.tcp_echo.cp_echo'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load class 'brim.tcp_echo.cp_echo' for [test].")
    def test_configure_handler_not_a_class(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.server._send_pid_sig'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to instantiate 'brim.server._send_pid_sig' for "
            "[test]. Probably not a class.")
    def test_configure_handler_invalid_init(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithInvalidInit'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to instantiate "
            "'brim.test.unit.test_server.TCPWithInvalidInit' for [test]. "
            "Incorrect number of args, 1, should be 3 (self, name, "
            "parsed_conf).")
    def test_configure_handler_invalid_call(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithInvalidCall'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to use "
            "'brim.test.unit.test_server.TCPWithInvalidCall' for [test]. "
            "Incorrect number of __call__ args, 1, should be 6 (self, "
            "subserver, stats, sock, ip, port).")
    def test_configure_handler_no_call(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithNoCall'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to use "
            "'brim.test.unit.test_server.TCPWithNoCall' for [test]. Probably "
            "no __call__ method.")
    def test_configure_handler_invalid_parse_conf1(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = \
            'brim.test.unit.test_server.TCPWithInvalidParseConf1'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.TCPWithInvalidParseConf1' "
            "for [test]. Incorrect number of parse_conf args, 1, should be 3 "
            "(cls, name, conf).")
    def test_configure_handler_invalid_parse_conf2(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = \
            'brim.test.unit.test_server.TCPWithInvalidParseConf2'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.TCPWithInvalidParseConf2' "
            "for [test]. parse_conf probably not a method.")
    def test_configure_handler_no_parse_conf(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithNoParseConf'
        conf = Conf(confd)
        ss._parse_conf(conf)
        self.assertEqual(ss.handler_conf, conf)
    def test_configure_handler_with_parse_conf(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithParseConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.handler_conf, {'ok': True})
    def test_configure_handler_invalid_stats_conf1(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = \
            'brim.test.unit.test_server.TCPWithInvalidStatsConf1'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.TCPWithInvalidStatsConf1' "
            "for [test]. Incorrect number of stats_conf args, 1, should be 3 "
            "(cls, name, conf).")
    def test_configure_handler_invalid_stats_conf2(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = \
            'brim.test.unit.test_server.TCPWithInvalidStatsConf2'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use 'brim.test.unit.test_server.TCPWithInvalidStatsConf2' "
            "for [test]. stats_conf probably not a method.")
    def test_configure_handler_no_stats_conf(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithNoStatsConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
        self.assertEqual(ss.stats_conf.get('connection_count'), 'sum')
    def test_configure_handler_with_stats_conf(self):
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd['test']['call'] = 'brim.test.unit.test_server.TCPWithStatsConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
        self.assertEqual(ss.stats_conf.get('connection_count'), 'sum')
        self.assertEqual(ss.stats_conf.get('ok'), 'sum')
    def test_privileged_start(self):
        # Binding the default port 80 as an unprivileged test process
        # must fail with EACCES.
        ss = self._class(FakeServer(), 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        exc = None
        try:
            ss._privileged_start()
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), 'Could not bind to *:80: [Errno 13] Permission denied')
        # Port 0 lets the OS pick a free port, so binding succeeds.
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['port'] = '0'
        ss._parse_conf(Conf(confd))
        ss._privileged_start()
        self.assertTrue(ss.sock is not None)
        # With get_listening_tcp_socket faked, verify the exact call args.
        get_listening_tcp_socket_calls = []
        def _get_listening_tcp_socket(*args, **kwargs):
            get_listening_tcp_socket_calls.append((args, kwargs))
            return 'sock'
        ss = self._class(FakeServer(), 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        get_listening_tcp_socket_orig = server.get_listening_tcp_socket
        try:
            server.get_listening_tcp_socket = _get_listening_tcp_socket
            ss._privileged_start()
        finally:
            server.get_listening_tcp_socket = get_listening_tcp_socket_orig
        self.assertEqual(ss.sock, 'sock')
        self.assertEqual(get_listening_tcp_socket_calls, [(('*', 80), {
            'keyfile': None, 'style': 'eventlet', 'retry': 30,
            'certfile': None, 'backlog': 4096})])
    def test_start(self, output=False):
        # Swap the server module's collaborators for recording fakes so
        # _start's wiring can be asserted without real workers or sockets.
        capture_exceptions_stdout_stderr_calls = []
        time_calls = []
        get_logger_calls = []
        fake_logger = FakeLogger()
        sustain_workers_calls = []
        shutdown_safe_calls = []
        def _capture_exceptions_stdout_stderr(*args, **kwargs):
            capture_exceptions_stdout_stderr_calls.append((args, kwargs))
        def _time(*args):
            time_calls.append(args)
            return len(time_calls)
        def _get_logger(*args):
            get_logger_calls.append(args)
            return fake_logger
        def _sustain_workers(*args, **kwargs):
            sustain_workers_calls.append((args, kwargs))
        def _shutdown_safe(*args):
            shutdown_safe_calls.append(args)
        capture_exceptions_stdout_stderr_orig = \
            server.capture_exceptions_stdout_stderr
        time_orig = server.time
        get_logger_orig = server.get_logger
        sustain_workers_orig = server.sustain_workers
        shutdown_safe_orig = server.shutdown_safe
        try:
            server.capture_exceptions_stdout_stderr = \
                _capture_exceptions_stdout_stderr
            server.time = _time
            server.get_logger = _get_logger
            server.sustain_workers = _sustain_workers
            server.shutdown_safe = _shutdown_safe
            ss = TestIPSubserver.test_start(self, output=output)
        finally:
            server.capture_exceptions_stdout_stderr = \
                capture_exceptions_stdout_stderr_orig
            server.time = time_orig
            server.get_logger = get_logger_orig
            server.sustain_workers = sustain_workers_orig
            server.shutdown_safe = shutdown_safe_orig
        if output:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [])
        else:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [((), {
                'exceptions': ss._capture_exception,
                'stdout_func': ss._capture_stdout,
                'stderr_func': ss._capture_stderr})])
        self.assertEqual(time_calls, [()])
        self.assertEqual(get_logger_calls, [(
            ss.name, ss.log_name, ss.log_level, ss.log_facility,
            ss.server.no_daemon)])
        self.assertEqual(sustain_workers_calls, [
            ((1, ss._tcp_worker), {'logger': fake_logger})])
        self.assertEqual(shutdown_safe_calls, [(ss.sock,)])
        self.assertEqual(ss.worker_id, -1)
        self.assertEqual(ss.start_time, 1)
        self.assertEqual(ss.logger, fake_logger)
        self.assertEqual(fake_logger.error_calls, [])
        self.assertEqual(ss.handler.__class__.__name__, 'TCPEcho')
    def test_start_with_output(self):
        self.test_start(output=True)
    def test_tcp_worker(self, no_setproctitle=False, no_daemon=False,
                        raises=False):
        # The fake accept() returns one connection then raises
        # 'additional accept' so the worker's accept loop terminates;
        # `raises` selects which error _spawn_n simulates instead.
        setproctitle_calls = []
        use_hub_calls = []
        spawn_n_calls = []
        GreenPool_calls = []
        accept_calls = []
        def _setproctitle(*args):
            setproctitle_calls.append(args)
        def _use_hub(*args):
            use_hub_calls.append(args)
        def _spawn_n(*args):
            spawn_n_calls.append(args)
            if raises == 'socket einval':
                err = server.socket_error('test socket einval')
                err.errno = server.EINVAL
                raise err
            elif raises == 'socket other':
                raise server.socket_error('test socket other')
            elif raises == 'other':
                raise Exception('test other')
        def _GreenPool(*args, **kwargs):
            GreenPool_calls.append((args, kwargs))
            rv = PropertyObject()
            rv.spawn_n = _spawn_n
            rv.waitall = lambda *a: None
            return rv
        def _accept(*args):
            accept_calls.append(args)
            if len(accept_calls) == 1:
                return 'sock', ('ip', 'port')
            raise Exception('additional accept')
        def _sustain_workers(*args, **kwargs):
            pass
        def _time():
            return 1
        setproctitle_orig = server.setproctitle
        use_hub_orig = server.use_hub
        GreenPool_orig = server.GreenPool
        sustain_workers_orig = server.sustain_workers
        time_orig = server.time
        exc = None
        try:
            server.setproctitle = None if no_setproctitle else _setproctitle
            server.use_hub = _use_hub
            server.GreenPool = _GreenPool
            server.sustain_workers = _sustain_workers
            server.time = _time
            ss = self._class(
                FakeServer(no_daemon=no_daemon, output=True), 'test')
            confd = self._get_default_confd()
            confd['test']['port'] = '0'
            ss._parse_conf(Conf(confd))
            ss._privileged_start()
            bs = server._BucketStats(['0'], {'start_time': 'worker'})
            ss._start(bs)
            ss.sock.accept = _accept
            ss._tcp_worker(0)
        except Exception as err:
            exc = err
        finally:
            server.setproctitle = setproctitle_orig
            server.use_hub = use_hub_orig
            server.GreenPool = GreenPool_orig
            server.sustain_workers = sustain_workers_orig
            server.time = time_orig
        if no_setproctitle or no_daemon:
            self.assertEqual(setproctitle_calls, [])
        else:
            self.assertEqual(setproctitle_calls, [('0:test:brimd',)])
        self.assertEqual(ss.worker_id, 0)
        self.assertEqual(ss.bucket_stats.get(ss.worker_id, 'start_time'), 1)
        if no_daemon:
            self.assertEqual(use_hub_calls, [])
        else:
            self.assertEqual(use_hub_calls, [(None,)])
        self.assertEqual(ss.handler.__class__.__name__, 'TCPEcho')
        self.assertEqual(
            GreenPool_calls, [((), {'size': ss.concurrent_per_worker})])
        self.assertEqual(len(spawn_n_calls), 1)
        self.assertEqual(len(spawn_n_calls[0]), 6)
        self.assertEqual(spawn_n_calls[0][0].__class__.__name__, 'TCPEcho')
        self.assertEqual(spawn_n_calls[0][1], ss)
        self.assertEqual(spawn_n_calls[0][2].bucket_stats, ss.bucket_stats)
        self.assertEqual(spawn_n_calls[0][3], 'sock')
        self.assertEqual(spawn_n_calls[0][4], 'ip')
        self.assertEqual(spawn_n_calls[0][5], 'port')
        if raises:
            self.assertEqual(accept_calls, [()])
        else:
            self.assertEqual(accept_calls, [(), ()])
        if raises == 'socket einval':
            # EINVAL from spawn means shutdown; swallowed silently.
            self.assertEqual(exc, None)
        elif raises == 'socket other':
            self.assertEqual(str(exc), 'test socket other')
        elif raises == 'other':
            self.assertEqual(str(exc), 'test other')
        else:
            self.assertEqual(str(exc), 'additional accept')
    def test_tcp_worker_no_setproctitle(self):
        self.test_tcp_worker(no_setproctitle=True)
    def test_tcp_worker_no_daemon(self):
        self.test_tcp_worker(no_daemon=True)
    def test_tcp_worker_raises_socket_einval(self):
        self.test_tcp_worker(raises='socket einval')
    def test_tcp_worker_raises_socket_other(self):
        self.test_tcp_worker(raises='socket other')
    def test_tcp_worker_raises_other(self):
        self.test_tcp_worker(raises='other')
    def test_capture_exception(self):
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        ss._capture_exception(*exc_info())
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(
            ss.logger.error_calls,
            [("UNCAUGHT EXCEPTION: tid:123 None ['None']",)])
        self.assertEqual(ss.logger.exception_calls, [])
        ss.logger = FakeLogger()
        try:
            raise Exception('test')
        except Exception:
            ss._capture_exception(*exc_info())
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(len(ss.logger.error_calls), 1)
        self.assertEqual(len(ss.logger.error_calls[0]), 1)
        e = ss.logger.error_calls[0][0]
        self.assertTrue(e.startswith(
            "UNCAUGHT EXCEPTION: tid:123 Exception: test ['Traceback (most "
            "recent call last):', ' File "))
        self.assertTrue(e.endswith(
            '\', " raise Exception(\'test\')", \'Exception: test\']'))
        self.assertEqual(ss.logger.exception_calls, [])
    def test_capture_stdout(self):
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        ss._capture_stdout('one\ntwo three\nfour\n')
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [
            ('STDOUT: tid:123 one',), ('STDOUT: tid:123 two three',),
            ('STDOUT: tid:123 four',)])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(ss.logger.error_calls, [])
        self.assertEqual(ss.logger.exception_calls, [])
    def test_capture_stderr(self):
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        ss._capture_stderr('one\ntwo three\nfour\n')
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(ss.logger.error_calls, [
            ('STDERR: tid:123 one',), ('STDERR: tid:123 two three',),
            ('STDERR: tid:123 four',)])
        self.assertEqual(ss.logger.exception_calls, [])
class UDPWithInvalidInit(object):
    """Fixture: __init__ lacks the (name, conf) args UDP handlers need."""
    def __init__(self):
        pass
class UDPWithInvalidCall(object):
    """Fixture: __call__ lacks (subserver, stats, sock, datagram, ip, port)."""
    def __init__(self, name, conf):
        pass
    def __call__(self):
        pass
class UDPWithNoCall(object):
    """Fixture: valid __init__ but no __call__ method at all."""
    def __init__(self, name, conf):
        pass
class UDPWithInvalidParseConf1(object):
    """Fixture: parse_conf classmethod lacks the (name, conf) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
    @classmethod
    def parse_conf(cls):
        pass
class UDPWithInvalidParseConf2(object):
    """Fixture: parse_conf is a plain attribute, not a method."""
    parse_conf = 'blah'
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
class UDPWithNoParseConf(object):
    """Fixture: valid handler with no parse_conf; gets the raw conf."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
class UDPWithParseConf(object):
    """Fixture: valid handler whose parse_conf returns {'ok': True}."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
    @classmethod
    def parse_conf(cls, name, conf):
        return {'ok': True}
class UDPWithInvalidStatsConf1(object):
    """Fixture: stats_conf classmethod lacks the (name, conf) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
    @classmethod
    def stats_conf(cls):
        pass
class UDPWithInvalidStatsConf2(object):
    """Fixture: stats_conf is a plain attribute, not a method."""
    stats_conf = 'blah'
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
class UDPWithNoStatsConf(object):
    """Fixture: valid handler with no stats_conf; defaults apply."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
class UDPWithStatsConf(object):
    """Fixture: valid handler whose stats_conf adds an 'ok' sum stat."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats, sock, datagram, ip, port):
        pass
    @classmethod
    def stats_conf(cls, name, conf):
        return [('ok', 'sum')]
class TestUDPSubserver(TestIPSubserver):
_class = server.UDPSubserver
def _get_default_confd(self):
return {'test': {'call': 'brim.udp_echo.UDPEcho'}}
def test_init(self):
ss = TestIPSubserver.test_init(self)
self.assertEqual(ss.stats_conf.get('datagram_count'), 'sum')
def test_parse_conf_defaults(self):
ss = TestIPSubserver.test_parse_conf_defaults(self)
self.assertEqual(ss.handler.__name__, 'UDPEcho')
self.assertEqual(ss.max_datagram_size, 65536)
def test_parse_conf_max_datagram_size(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('brim', {})['max_datagram_size'] = '123'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.max_datagram_size, 123)
ss = self._class(FakeServer(), 'test')
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('brim', {})['max_datagram_size'] = 'abc'
ss._parse_conf(Conf(confd))
except SystemExit as err:
exc = err
self.assertEqual(
str(exc),
"Configuration value [brim] max_datagram_size of 'abc' cannot be "
"converted to int.")
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd.setdefault('test', {})['max_datagram_size'] = '123'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.max_datagram_size, 123)
ss = self._class(FakeServer(), 'test')
exc = None
try:
confd = self._get_default_confd()
confd.setdefault('test', {})['max_datagram_size'] = 'abc'
ss._parse_conf(Conf(confd))
except SystemExit as err:
exc = err
self.assertEqual(
str(exc),
"Configuration value [test] max_datagram_size of 'abc' cannot be "
"converted to int.")
def test_parse_conf_no_call(self):
ss = self._class(FakeServer(), 'test')
conf = Conf({})
exc = None
try:
ss._parse_conf(conf)
except Exception as err:
exc = err
self.assertEqual(
str(exc), "[test] not configured with 'call' option.")
def test_parse_conf_invalid_call(self):
ss = self._class(FakeServer(), 'test')
conf = Conf({'test': {'call': 'invalid'}})
exc = None
try:
ss._parse_conf(conf)
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid call value 'invalid' for [test].")
def test_configure_handler(self):
ss = self._class(FakeServer(), 'test')
conf = Conf(self._get_default_confd())
ss._parse_conf(conf)
self.assertEqual(ss.handler.__name__, 'UDPEcho')
self.assertEqual(ss.handler_conf, ss.handler.parse_conf('test', conf))
def test_configure_handler_no_call_option(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['cll'] = confd['test']['call']
del confd['test']['call']
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "[test] not configured with 'call' option.")
def test_configure_handler_invalid_call(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim_udp_echo_UDPEcho'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "Invalid call value 'brim_udp_echo_UDPEcho' for [test].")
def test_configure_handler_no_load(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.udp_echo.cp_echo'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Could not load class 'brim.udp_echo.cp_echo' for [test].")
def test_configure_handler_not_a_class(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.server._send_pid_sig'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to instantiate 'brim.server._send_pid_sig' for "
"[test]. Probably not a class.")
def test_configure_handler_invalid_init(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithInvalidInit'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to instantiate "
"'brim.test.unit.test_server.UDPWithInvalidInit' for [test]. "
"Incorrect number of args, 1, should be 3 (self, name, "
"parsed_conf).")
def test_configure_handler_invalid_call_option(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithInvalidCall'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to use "
"'brim.test.unit.test_server.UDPWithInvalidCall' for [test]. "
"Incorrect number of __call__ args, 1, should be 7 (self, "
"subserver, stats, sock, datagram, ip, port).")
def test_configure_handler_no_call(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithNoCall'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Would not be able to use "
"'brim.test.unit.test_server.UDPWithNoCall' for [test]. Probably "
"no __call__ method.")
def test_configure_handler_invalid_parse_conf1(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.UDPWithInvalidParseConf1'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.UDPWithInvalidParseConf1' "
"for [test]. Incorrect number of parse_conf args, 1, should be 3 "
"(cls, name, conf).")
def test_configure_handler_invalid_parse_conf2(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.UDPWithInvalidParseConf2'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.UDPWithInvalidParseConf2' "
"for [test]. parse_conf probably not a method.")
def test_configure_handler_no_parse_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithNoParseConf'
conf = Conf(confd)
ss._parse_conf(conf)
self.assertEqual(ss.handler_conf, conf)
def test_configure_handler_with_parse_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithParseConf'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.handler_conf, {'ok': True})
def test_configure_handler_invalid_stats_conf1(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.UDPWithInvalidStatsConf1'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.UDPWithInvalidStatsConf1' "
"for [test]. Incorrect number of stats_conf args, 1, should be 3 "
"(cls, name, conf).")
def test_configure_handler_invalid_stats_conf2(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = \
'brim.test.unit.test_server.UDPWithInvalidStatsConf2'
exc = None
try:
ss._parse_conf(Conf(confd))
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Cannot use 'brim.test.unit.test_server.UDPWithInvalidStatsConf2' "
"for [test]. stats_conf probably not a method.")
def test_configure_handler_no_stats_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithNoStatsConf'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
self.assertEqual(ss.stats_conf.get('datagram_count'), 'sum')
def test_configure_handler_with_stats_conf(self):
ss = self._class(FakeServer(), 'test')
confd = self._get_default_confd()
confd['test']['call'] = 'brim.test.unit.test_server.UDPWithStatsConf'
ss._parse_conf(Conf(confd))
self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
self.assertEqual(ss.stats_conf.get('datagram_count'), 'sum')
self.assertEqual(ss.stats_conf.get('ok'), 'sum')
    def test_privileged_start(self):
        """_privileged_start binds the UDP socket (or raises on failure).

        Covers three cases: permission failure binding the privileged
        default port 80, success on an ephemeral port ('0'), and the exact
        arguments passed to server.get_listening_udp_socket.
        """
        ss = self._class(FakeServer(), 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        exc = None
        try:
            # Default conf wants *:80, which an unprivileged test run
            # cannot bind.
            ss._privileged_start()
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), 'Could not bind to *:80: [Errno 13] Permission denied')
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('brim', {})['port'] = '0'
        ss._parse_conf(Conf(confd))
        ss._privileged_start()
        self.assertTrue(ss.sock is not None)
        get_listening_udp_socket_calls = []
        def _get_listening_udp_socket(*args, **kwargs):
            # Records the call and returns a placeholder "socket".
            get_listening_udp_socket_calls.append((args, kwargs))
            return 'sock'
        ss = self._class(FakeServer(), 'test')
        ss._parse_conf(Conf(self._get_default_confd()))
        get_listening_udp_socket_orig = server.get_listening_udp_socket
        try:
            server.get_listening_udp_socket = _get_listening_udp_socket
            ss._privileged_start()
        finally:
            # Always restore the real module-level helper.
            server.get_listening_udp_socket = get_listening_udp_socket_orig
        self.assertEqual(ss.sock, 'sock')
        self.assertEqual(
            get_listening_udp_socket_calls,
            [(('*', 80), {'style': 'eventlet', 'retry': 30})])
    def test_start(self, output=False):
        """_start wires capture/logging/stats and sustains one UDP worker.

        Patches five server module helpers with recording stubs, delegates
        to the shared TestIPSubserver.test_start, then checks exactly how
        each helper was called.  With output=True, stdio capture is
        expected to be skipped.
        """
        capture_exceptions_stdout_stderr_calls = []
        time_calls = []
        get_logger_calls = []
        fake_logger = FakeLogger()
        sustain_workers_calls = []
        shutdown_safe_calls = []
        # Recording stubs for the patched server module helpers.
        def _capture_exceptions_stdout_stderr(*args, **kwargs):
            capture_exceptions_stdout_stderr_calls.append((args, kwargs))
        def _time(*args):
            time_calls.append(args)
            return len(time_calls)
        def _get_logger(*args):
            get_logger_calls.append(args)
            return fake_logger
        def _sustain_workers(*args, **kwargs):
            sustain_workers_calls.append((args, kwargs))
        def _shutdown_safe(*args):
            shutdown_safe_calls.append(args)
        capture_exceptions_stdout_stderr_orig = \
            server.capture_exceptions_stdout_stderr
        time_orig = server.time
        get_logger_orig = server.get_logger
        sustain_workers_orig = server.sustain_workers
        shutdown_safe_orig = server.shutdown_safe
        try:
            server.capture_exceptions_stdout_stderr = \
                _capture_exceptions_stdout_stderr
            server.time = _time
            server.get_logger = _get_logger
            server.sustain_workers = _sustain_workers
            server.shutdown_safe = _shutdown_safe
            ss = TestIPSubserver.test_start(self, output=output)
        finally:
            # Restore all patched module helpers.
            server.capture_exceptions_stdout_stderr = \
                capture_exceptions_stdout_stderr_orig
            server.time = time_orig
            server.get_logger = get_logger_orig
            server.sustain_workers = sustain_workers_orig
            server.shutdown_safe = shutdown_safe_orig
        if output:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [])
        else:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [((), {
                'exceptions': ss._capture_exception,
                'stdout_func': ss._capture_stdout,
                'stderr_func': ss._capture_stderr})])
        self.assertEqual(time_calls, [()])
        self.assertEqual(get_logger_calls, [(
            ss.name, ss.log_name, ss.log_level, ss.log_facility,
            ss.server.no_daemon)])
        self.assertEqual(sustain_workers_calls, [
            ((1, ss._udp_worker), {'logger': fake_logger})])
        self.assertEqual(shutdown_safe_calls, [(ss.sock,)])
        self.assertEqual(ss.worker_id, -1)
        self.assertEqual(ss.start_time, 1)
        self.assertEqual(ss.logger, fake_logger)
        self.assertEqual(fake_logger.error_calls, [])
        self.assertEqual(ss.handler.__class__.__name__, 'UDPEcho')
    def test_start_with_output(self):
        """Re-run test_start with output=True (no stdio capture expected)."""
        self.test_start(output=True)
    def test_udp_worker(self, no_setproctitle=False, no_daemon=False,
                        raises=False):
        """_udp_worker spawns one handler per datagram on a GreenPool.

        Parameterized driver reused by several tests below:
        no_setproctitle simulates the setproctitle package being absent,
        no_daemon runs in foreground mode, and raises selects which
        exception the spawned handler raises ('socket einval' should be
        swallowed; the others should propagate).
        """
        setproctitle_calls = []
        use_hub_calls = []
        spawn_n_calls = []
        GreenPool_calls = []
        recvfrom_calls = []
        # Recording stubs for the patched server module helpers.
        def _setproctitle(*args):
            setproctitle_calls.append(args)
        def _use_hub(*args):
            use_hub_calls.append(args)
        def _spawn_n(*args):
            spawn_n_calls.append(args)
            if raises == 'socket einval':
                err = server.socket_error('test socket einval')
                err.errno = server.EINVAL
                raise err
            elif raises == 'socket other':
                raise server.socket_error('test socket other')
            elif raises == 'other':
                raise Exception('test other')
        def _GreenPool(*args, **kwargs):
            GreenPool_calls.append((args, kwargs))
            rv = PropertyObject()
            rv.spawn_n = _spawn_n
            rv.waitall = lambda *a: None
            return rv
        def _recvfrom(*args):
            # First call yields one datagram; any further call raises so
            # the worker loop terminates.
            recvfrom_calls.append(args)
            if len(recvfrom_calls) == 1:
                return 'datagram', ('ip', 'port')
            raise Exception('additional recvfrom')
        def _sustain_workers(*args, **kwargs):
            pass
        def _time():
            return 1
        setproctitle_orig = server.setproctitle
        use_hub_orig = server.use_hub
        GreenPool_orig = server.GreenPool
        sustain_workers_orig = server.sustain_workers
        time_orig = server.time
        exc = None
        try:
            server.setproctitle = None if no_setproctitle else _setproctitle
            server.use_hub = _use_hub
            server.GreenPool = _GreenPool
            server.sustain_workers = _sustain_workers
            server.time = _time
            ss = self._class(
                FakeServer(no_daemon=no_daemon, output=True), 'test')
            confd = self._get_default_confd()
            confd['test']['port'] = '0'
            ss._parse_conf(Conf(confd))
            ss._privileged_start()
            bs = server._BucketStats(['0'], {'start_time': 'worker'})
            ss._start(bs)
            ss.sock.recvfrom = _recvfrom
            ss._udp_worker(0)
        except Exception as err:
            exc = err
        finally:
            # Restore all patched module helpers.
            server.setproctitle = setproctitle_orig
            server.use_hub = use_hub_orig
            server.GreenPool = GreenPool_orig
            server.sustain_workers = sustain_workers_orig
            server.time = time_orig
        if no_setproctitle or no_daemon:
            self.assertEqual(setproctitle_calls, [])
        else:
            self.assertEqual(setproctitle_calls, [('0:test:brimd',)])
        self.assertEqual(ss.worker_id, 0)
        self.assertEqual(ss.bucket_stats.get(ss.worker_id, 'start_time'), 1)
        if no_daemon:
            self.assertEqual(use_hub_calls, [])
        else:
            self.assertEqual(use_hub_calls, [(None,)])
        self.assertEqual(
            GreenPool_calls, [((), {'size': ss.concurrent_per_worker})])
        # Exactly one handler spawn with (handler, subserver, stats, sock,
        # datagram, ip, port).
        self.assertEqual(len(spawn_n_calls), 1)
        self.assertEqual(len(spawn_n_calls[0]), 7)
        self.assertEqual(spawn_n_calls[0][0].__class__.__name__, 'UDPEcho')
        self.assertEqual(spawn_n_calls[0][1], ss)
        self.assertEqual(spawn_n_calls[0][2].bucket_stats, ss.bucket_stats)
        self.assertEqual(spawn_n_calls[0][3], ss.sock)
        self.assertEqual(spawn_n_calls[0][4], 'datagram')
        self.assertEqual(spawn_n_calls[0][5], 'ip')
        self.assertEqual(spawn_n_calls[0][6], 'port')
        if raises:
            self.assertEqual(recvfrom_calls, [(ss.max_datagram_size,)])
        else:
            self.assertEqual(recvfrom_calls, [
                (ss.max_datagram_size,), (ss.max_datagram_size,)])
        if raises == 'socket einval':
            self.assertEqual(exc, None)
        elif raises == 'socket other':
            self.assertEqual(str(exc), 'test socket other')
        elif raises == 'other':
            self.assertEqual(str(exc), 'test other')
        else:
            self.assertEqual(str(exc), 'additional recvfrom')
    def test_udp_worker_no_setproctitle(self):
        """_udp_worker when the setproctitle package is unavailable."""
        self.test_udp_worker(no_setproctitle=True)
    def test_start_no_daemon(self):
        """_udp_worker in no-daemon mode (no use_hub / setproctitle)."""
        self.test_udp_worker(no_daemon=True)
    def test_start_raises_socket_einval(self):
        """A socket EINVAL from the worker is swallowed (clean shutdown)."""
        self.test_udp_worker(raises='socket einval')
    def test_start_raises_socket_other(self):
        """Other socket errors propagate out of the worker."""
        self.test_udp_worker(raises='socket other')
    def test_start_raises_other(self):
        """Non-socket exceptions propagate out of the worker."""
        self.test_udp_worker(raises='other')
    def test_capture_exception(self):
        """_capture_exception logs uncaught exceptions with a uid: prefix.

        First with no active exception (logs the None placeholders), then
        with a real raised exception whose formatted traceback is checked
        by prefix and suffix.
        """
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        # No exception is active, so exc_info() is (None, None, None).
        ss._capture_exception(*exc_info())
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(
            ss.logger.error_calls,
            [("UNCAUGHT EXCEPTION: uid:123 None ['None']",)])
        self.assertEqual(ss.logger.exception_calls, [])
        ss.logger = FakeLogger()
        try:
            raise Exception('test')
        except Exception:
            ss._capture_exception(*exc_info())
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(len(ss.logger.error_calls), 1)
        self.assertEqual(len(ss.logger.error_calls[0]), 1)
        e = ss.logger.error_calls[0][0]
        # The traceback's file/line portion varies, so only check the
        # fixed prefix and suffix of the logged message.
        self.assertTrue(e.startswith(
            "UNCAUGHT EXCEPTION: uid:123 Exception: test ['Traceback (most "
            "recent call last):', ' File "))
        self.assertTrue(e.endswith(
            '\', " raise Exception(\'test\')", \'Exception: test\']'))
        self.assertEqual(ss.logger.exception_calls, [])
def test_capture_stdout(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_stdout('one\ntwo three\nfour\n')
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [
('STDOUT: uid:123 one',), ('STDOUT: uid:123 two three',),
('STDOUT: uid:123 four',)])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [])
self.assertEqual(ss.logger.exception_calls, [])
def test_capture_stderr(self):
ss = self._class(FakeServer(output=True), 'test')
ss.logger = FakeLogger()
ss.worker_id = 123
ss._capture_stderr('one\ntwo three\nfour\n')
self.assertEqual(ss.logger.debug_calls, [])
self.assertEqual(ss.logger.info_calls, [])
self.assertEqual(ss.logger.notice_calls, [])
self.assertEqual(ss.logger.error_calls, [
('STDERR: uid:123 one',), ('STDERR: uid:123 two three',),
('STDERR: uid:123 four',)])
self.assertEqual(ss.logger.exception_calls, [])
class DaemonWithInvalidInit(object):
    """Daemon stub whose __init__ lacks the required (name, conf) args."""
    def __init__(self):
        pass
class DaemonWithInvalidCall(object):
    """Daemon stub whose __call__ lacks the (subserver, stats) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self):
        pass
class DaemonWithNoCall(object):
    """Daemon stub with no __call__ method at all."""
    def __init__(self, name, conf):
        pass
class DaemonWithInvalidParseConf1(object):
    """Daemon stub whose parse_conf lacks the (name, conf) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
    @classmethod
    def parse_conf(cls):
        pass
class DaemonWithInvalidParseConf2(object):
    """Daemon stub whose parse_conf attribute is not callable."""
    # Deliberately a plain string rather than a method.
    parse_conf = 'blah'
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
class DaemonWithNoParseConf(object):
    """Daemon stub with no parse_conf; should receive the full Conf."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
class DaemonWithParseConf(object):
    """Daemon stub whose parse_conf returns a fixed parsed conf."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
    @classmethod
    def parse_conf(cls, name, conf):
        return {'ok': True}
class DaemonWithInvalidStatsConf1(object):
    """Daemon stub whose stats_conf lacks the (name, conf) args."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
    @classmethod
    def stats_conf(cls):
        pass
class DaemonWithInvalidStatsConf2(object):
    """Daemon stub whose stats_conf attribute is not callable."""
    # Deliberately a plain string rather than a method.
    stats_conf = 'blah'
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
class DaemonWithNoStatsConf(object):
    """Daemon stub with no stats_conf; only default stats should exist."""
    def __init__(self, name, conf):
        pass
    def __call__(self, subserver, stats):
        pass
class DaemonWithStatsConf(object):
    """Daemon stub recording its calls and declaring an extra stat."""
    def __init__(self, name, conf):
        # Records the (subserver, stats) pair of each __call__.
        self.calls = []
    def __call__(self, subserver, stats):
        self.calls.append((subserver, stats))
    @classmethod
    def stats_conf(cls, name, conf):
        return [('ok', 'sum')]
class TestDaemonsSubserver(TestSubserver):
    """Tests brim.server.DaemonsSubserver: daemon loading and workers.

    Extends TestSubserver, overriding _class so the shared tests run
    against DaemonsSubserver, and adds daemon-specific configuration,
    worker and capture tests.
    """
    _class = server.DaemonsSubserver
    def test_parse_conf_defaults(self):
        """Defaults: no daemons, zero workers, single worker name '0'."""
        ss = TestSubserver.test_parse_conf_defaults(self)
        self.assertEqual(ss.daemons, [])
        self.assertEqual(ss.worker_count, 0)
        self.assertEqual(ss.worker_names, ['0'])
    def test_configure_daemons(self):
        """Two configured daemons load with their names, classes, confs."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one two'
        confd.setdefault('one', {})['call'] = 'brim.daemon_sample.DaemonSample'
        confd.setdefault('two', {})['call'] = 'brim.daemon_sample.DaemonSample'
        conf = Conf(confd)
        ss._parse_conf(conf)
        self.assertEqual(len(ss.daemons), 2)
        self.assertEqual(ss.daemons[0][0], 'one')
        self.assertEqual(ss.daemons[1][0], 'two')
        self.assertEqual(ss.daemons[0][1].__name__, 'DaemonSample')
        self.assertEqual(ss.daemons[1][1].__name__, 'DaemonSample')
        self.assertEqual(
            ss.daemons[0][2], ss.daemons[0][1].parse_conf('one', conf))
        self.assertEqual(
            ss.daemons[1][2], ss.daemons[1][1].parse_conf('two', conf))
    def test_configure_daemons_conf_no_call(self):
        """A daemon section without a 'call' option is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['cll'] = 'brim.daemon_sample.DaemonSample'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc), "Daemon [one] not configured with 'call' option.")
    def test_configure_daemons_conf_invalid_call(self):
        """A daemon 'call' that is not a dotted path is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = 'brim_daemon_sample_DaemonSample'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Invalid call value 'brim_daemon_sample_DaemonSample' for daemon "
            "[one].")
    def test_configure_daemons_no_load(self):
        """A daemon class that cannot be imported is reported."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = 'brim.daemon_sample.aemon_sample'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Could not load class 'brim.daemon_sample.aemon_sample' for "
            "daemon [one].")
    def test_configure_daemons_not_a_class(self):
        """A 'call' target that is a function, not a class, is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = 'brim.server._send_pid_sig'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to instantiate 'brim.server._send_pid_sig' for "
            "daemon [one]. Probably not a class.")
    def test_configure_daemons_invalid_init(self):
        """A daemon __init__ without (self, name, conf) is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithInvalidInit'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to instantiate "
            "'brim.test.unit.test_server.DaemonWithInvalidInit' for daemon "
            "[one]. Incorrect number of args, 1, should be 3 (self, name, "
            "conf).")
    def test_configure_daemons_invalid_call(self):
        """A daemon __call__ without (self, subserver, stats) is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithInvalidCall'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to use "
            "'brim.test.unit.test_server.DaemonWithInvalidCall' for daemon "
            "[one]. Incorrect number of __call__ args, 1, should be 3 (self, "
            "subserver, stats).")
    def test_configure_daemons_no_call(self):
        """A daemon class with no __call__ method is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithNoCall'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Would not be able to use "
            "'brim.test.unit.test_server.DaemonWithNoCall' for daemon "
            "[one]. Probably no __call__ method.")
    def test_configure_daemons_invalid_parse_conf1(self):
        """A parse_conf without (cls, name, conf) args is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithInvalidParseConf1'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use "
            "'brim.test.unit.test_server.DaemonWithInvalidParseConf1' for "
            "daemon [one]. Incorrect number of parse_conf args, 1, should be "
            "3 (cls, name, conf).")
    def test_configure_daemons_invalid_parse_conf2(self):
        """A non-callable parse_conf attribute is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithInvalidParseConf2'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use "
            "'brim.test.unit.test_server.DaemonWithInvalidParseConf2' for "
            "daemon [one]. parse_conf probably not a method.")
    def test_configure_daemons_no_parse_conf(self):
        """Without parse_conf, the daemon receives the full Conf."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithNoParseConf'
        conf = Conf(confd)
        ss._parse_conf(conf)
        self.assertEqual(ss.daemons[0][2], conf)
    def test_configure_daemons_with_parse_conf(self):
        """parse_conf's return value is stored as the daemon's conf."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithParseConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.daemons[0][2], {'ok': True})
    def test_configure_daemons_invalid_stats_conf1(self):
        """A stats_conf without (cls, name, conf) args is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithInvalidStatsConf1'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use "
            "'brim.test.unit.test_server.DaemonWithInvalidStatsConf1' for "
            "daemon [one]. Incorrect number of stats_conf args, 1, should be "
            "3 (cls, name, conf).")
    def test_configure_daemons_invalid_stats_conf2(self):
        """A non-callable stats_conf attribute is rejected."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithInvalidStatsConf2'
        exc = None
        try:
            ss._parse_conf(Conf(confd))
        except Exception as err:
            exc = err
        self.assertEqual(
            str(exc),
            "Cannot use "
            "'brim.test.unit.test_server.DaemonWithInvalidStatsConf2' for "
            "daemon [one]. stats_conf probably not a method.")
    def test_configure_daemons_no_stats_conf(self):
        """Without stats_conf, only the default stats fields exist."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithNoStatsConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
    def test_configure_daemons_with_stats_conf(self):
        """stats_conf's extra fields merge into the subserver stats conf."""
        ss = self._class(FakeServer(), 'test')
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = \
            'brim.test.unit.test_server.DaemonWithStatsConf'
        ss._parse_conf(Conf(confd))
        self.assertEqual(ss.stats_conf.get('start_time'), 'worker')
        self.assertEqual(ss.stats_conf.get('ok'), 'sum')
    def test_start(self, output=False):
        """_start wires logging/capture and sustains one daemon worker."""
        capture_exceptions_stdout_stderr_calls = []
        time_calls = []
        get_logger_calls = []
        fake_logger = FakeLogger()
        sustain_workers_calls = []
        # Recording stubs for the patched server module helpers.
        def _capture_exceptions_stdout_stderr(*args, **kwargs):
            capture_exceptions_stdout_stderr_calls.append((args, kwargs))
        def _time(*args):
            time_calls.append(args)
            return len(time_calls)
        def _get_logger(*args):
            get_logger_calls.append(args)
            return fake_logger
        def _sustain_workers(*args, **kwargs):
            sustain_workers_calls.append((args, kwargs))
        confd = self._get_default_confd()
        confd.setdefault('test', {})['daemons'] = 'one'
        confd.setdefault('one', {})['call'] = 'brim.daemon_sample.DaemonSample'
        capture_exceptions_stdout_stderr_orig = \
            server.capture_exceptions_stdout_stderr
        time_orig = server.time
        get_logger_orig = server.get_logger
        sustain_workers_orig = server.sustain_workers
        try:
            server.capture_exceptions_stdout_stderr = \
                _capture_exceptions_stdout_stderr
            server.time = _time
            server.get_logger = _get_logger
            server.sustain_workers = _sustain_workers
            ss = TestSubserver.test_start(self, output=output, confd=confd)
        finally:
            # Restore all patched module helpers.
            server.capture_exceptions_stdout_stderr = \
                capture_exceptions_stdout_stderr_orig
            server.time = time_orig
            server.get_logger = get_logger_orig
            server.sustain_workers = sustain_workers_orig
        if output:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [])
        else:
            self.assertEqual(capture_exceptions_stdout_stderr_calls, [((), {
                'exceptions': ss._capture_exception,
                'stdout_func': ss._capture_stdout,
                'stderr_func': ss._capture_stderr})])
        self.assertEqual(time_calls, [()])
        self.assertEqual(get_logger_calls, [(
            ss.name, ss.log_name, ss.log_level, ss.log_facility,
            ss.server.no_daemon)])
        self.assertEqual(ss.worker_count, 1)
        self.assertEqual(sustain_workers_calls, [
            ((ss.worker_count, ss._daemon), {'logger': fake_logger})])
        self.assertEqual(ss.worker_id, -1)
        self.assertEqual(ss.start_time, 1)
        self.assertEqual(ss.logger, fake_logger)
        self.assertEqual(fake_logger.error_calls, [])
    def test_start_with_output(self):
        """Re-run test_start with output=True (no stdio capture)."""
        self.test_start(output=True)
    def test_daemon(self, no_setproctitle=False):
        """_daemon titles the process, stamps stats, calls the daemon."""
        setproctitle_calls = []
        def _setproctitle(*args):
            setproctitle_calls.append(args)
        def _time():
            return 1
        def _sustain_workers(*args, **kwargs):
            pass
        setproctitle_orig = server.setproctitle
        time_orig = server.time
        sustain_workers_orig = server.sustain_workers
        try:
            server.setproctitle = None if no_setproctitle else _setproctitle
            server.time = _time
            server.sustain_workers = _sustain_workers
            ss = self._class(FakeServer(output=True), 'test')
            confd = self._get_default_confd()
            confd.setdefault('test', {})['daemons'] = 'one'
            confd.setdefault('one', {})['call'] = \
                'brim.test.unit.test_server.DaemonWithStatsConf'
            ss._parse_conf(Conf(confd))
            ss._privileged_start()
            bs = server._BucketStats(['0'], {'start_time': 'worker'})
            ss._start(bs)
            daemon = ss._daemon(0)
        finally:
            # Restore all patched module helpers.
            server.setproctitle = setproctitle_orig
            server.time = time_orig
            server.sustain_workers = sustain_workers_orig
        if no_setproctitle:
            self.assertEqual(setproctitle_calls, [])
        else:
            self.assertEqual(setproctitle_calls, [('one:test:brimd',)])
        self.assertEqual(ss.worker_id, 0)
        self.assertEqual(ss.bucket_stats.get(ss.worker_id, 'start_time'), 1)
        self.assertEqual(ss.daemons[0][0], 'one')
        self.assertEqual(ss.daemons[0][1].__name__, 'DaemonWithStatsConf')
        self.assertEqual(ss.daemons[0][2].store, {
            'test': {'daemons': 'one'},
            'one': {'call': 'brim.test.unit.test_server.DaemonWithStatsConf'}})
        self.assertEqual(len(daemon.calls), 1)
        self.assertEqual(len(daemon.calls[0]), 2)
        self.assertEqual(daemon.calls[0][0], ss)
        self.assertEqual(daemon.calls[0][1].bucket_stats, ss.bucket_stats)
    def test_daemon_no_setproctitle(self):
        """_daemon still works when setproctitle is unavailable."""
        self.test_daemon(no_setproctitle=True)
    def test_capture_exception(self):
        """_capture_exception logs uncaught exceptions with a did: prefix."""
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        # No exception is active, so exc_info() is (None, None, None).
        ss._capture_exception(*exc_info())
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(
            ss.logger.error_calls,
            [("UNCAUGHT EXCEPTION: did:123 None ['None']",)])
        self.assertEqual(ss.logger.exception_calls, [])
        ss.logger = FakeLogger()
        try:
            raise Exception('test')
        except Exception:
            ss._capture_exception(*exc_info())
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(len(ss.logger.error_calls), 1)
        self.assertEqual(len(ss.logger.error_calls[0]), 1)
        e = ss.logger.error_calls[0][0]
        # The traceback's file/line portion varies, so only check the
        # fixed prefix and suffix of the logged message.
        self.assertTrue(e.startswith(
            "UNCAUGHT EXCEPTION: did:123 Exception: test ['Traceback (most "
            "recent call last):', ' File "))
        self.assertTrue(e.endswith(
            '\', " raise Exception(\'test\')", \'Exception: test\']'))
        self.assertEqual(ss.logger.exception_calls, [])
    def test_capture_stdout(self):
        """_capture_stdout logs each line at info with a did: prefix."""
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        ss._capture_stdout('one\ntwo three\nfour\n')
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [
            ('STDOUT: did:123 one',), ('STDOUT: did:123 two three',),
            ('STDOUT: did:123 four',)])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(ss.logger.error_calls, [])
        self.assertEqual(ss.logger.exception_calls, [])
    def test_capture_stderr(self):
        """_capture_stderr logs each line at error with a did: prefix."""
        ss = self._class(FakeServer(output=True), 'test')
        ss.logger = FakeLogger()
        ss.worker_id = 123
        ss._capture_stderr('one\ntwo three\nfour\n')
        self.assertEqual(ss.logger.debug_calls, [])
        self.assertEqual(ss.logger.info_calls, [])
        self.assertEqual(ss.logger.notice_calls, [])
        self.assertEqual(ss.logger.error_calls, [
            ('STDERR: did:123 one',), ('STDERR: did:123 two three',),
            ('STDERR: did:123 four',)])
        self.assertEqual(ss.logger.exception_calls, [])
@patch('__builtin__.open', mock_open(read_data='12345'))
class TestServer(TestCase):
    def setUp(self):
        """Patch the server module's side-effecting helpers with recorders.

        Saves each original, installs stubs that append their call args to
        per-test lists (with configurable return values), and builds a
        Server wired to in-memory stdio streams.
        """
        # Save the originals so tearDown can restore them.
        self.orig_read_conf = server.read_conf
        self.orig_fork = server.fork
        self.orig_sleep = server.sleep
        self.orig_send_pid_sig = server._send_pid_sig
        self.orig_droppriv = server.droppriv
        self.orig_get_logger = server.get_logger
        self.orig_capture_exceptions_stdout_stderr = \
            server.capture_exceptions_stdout_stderr
        # Per-test call recorders and configurable return values.
        self.read_conf_calls = []
        self.conf = Conf({})
        self.fork_calls = []
        self.fork_retval = [12345]
        self.sleep_calls = []
        self.send_pid_sig_calls = []
        self.send_pid_sig_retval = [True, 12345]
        self.droppriv_calls = []
        self.get_logger_calls = []
        self.capture_calls = []
        def _read_conf(*args):
            self.read_conf_calls.append(args)
            return self.conf
        def _fork(*args):
            # Pops queued return values until one remains, which then
            # repeats for every later call.
            self.fork_calls.append(args)
            if len(self.fork_retval) > 1:
                return self.fork_retval.pop(0)
            return self.fork_retval[0]
        def _sleep(*args):
            self.sleep_calls.append(args)
        def _send_pid_sig(*args, **kwargs):
            self.send_pid_sig_calls.append((args, kwargs))
            return self.send_pid_sig_retval
        def _droppriv(*args):
            self.droppriv_calls.append(args)
        def _get_logger(*args):
            self.get_logger_calls.append(args)
            return FakeLogger()
        def _capture_exceptions_stdout_stderr(*args, **kwargs):
            self.capture_calls.append((args, kwargs))
        server.read_conf = _read_conf
        server.fork = _fork
        server.sleep = _sleep
        server._send_pid_sig = _send_pid_sig
        server.droppriv = _droppriv
        server.get_logger = _get_logger
        server.capture_exceptions_stdout_stderr = \
            _capture_exceptions_stdout_stderr
        # Server under test, wired to in-memory stdio.
        self.stdin = StringIO()
        self.stdout = StringIO()
        self.stderr = StringIO()
        self.serv = server.Server([], self.stdin, self.stdout, self.stderr)
    def tearDown(self):
        """Restore the server module attributes patched by setUp."""
        server.read_conf = self.orig_read_conf
        server.fork = self.orig_fork
        server.sleep = self.orig_sleep
        server._send_pid_sig = self.orig_send_pid_sig
        server.droppriv = self.orig_droppriv
        server.get_logger = self.orig_get_logger
        server.capture_exceptions_stdout_stderr = \
            self.orig_capture_exceptions_stdout_stderr
def test_uses_standard_items_by_default(self):
serv = server.Server()
self.assertEqual(serv.args, server.sys_argv[1:])
self.assertEqual(serv.stdin, server.sys_stdin)
self.assertEqual(serv.stdout, server.sys_stdout)
self.assertEqual(serv.stderr, server.sys_stderr)
def test_main(self):
self.conf = Conf({'brim': {'port': '0'}, 'wsgi': {}})
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
server_calls = []
def _server(*args, **kwargs):
server_calls.append((args, kwargs))
orig_wsgi_server = server.wsgi.server
try:
server.wsgi.server = _server
self.assertEqual(self.serv.main(), 0)
finally:
server.wsgi.server = orig_wsgi_server
self.assertEqual(len(server_calls), 1)
    def test_args_exception(self):
        # A non-sequence args value makes argument parsing raise; main()
        # should swallow the exception, return 1, and write the message to
        # stderr only. The two accepted messages are different CPython 2.x
        # wordings for subscripting an int -- presumably to cover multiple
        # interpreter versions; TODO confirm on the interpreters in use.
        self.serv.args = [123]
        self.assertEqual(self.serv.main(), 1)
        self.assertEqual(self.stdout.getvalue(), '')
        self.assertTrue(
            self.stderr.getvalue() in [
                "'int' object is unsubscriptable\n",
                "'int' object has no attribute '__getitem__'\n"],
            repr(self.stderr.getvalue()))
def test_args_help1(self):
self.serv.args = ['-?']
try:
self.assertEqual(self.serv.main(), 0)
except SystemExit:
pass
self.assertTrue('Usage: ' in self.stdout.getvalue())
self.assertTrue("Command (defaults to 'no-daemon'):" in
self.stdout.getvalue())
self.assertTrue('Options:' in self.stdout.getvalue())
self.assertEqual(self.stderr.getvalue(), '')
def test_args_help2(self):
self.serv.args = ['-h']
try:
self.assertEqual(self.serv.main(), 0)
except SystemExit:
pass
self.assertTrue('Usage: ' in self.stdout.getvalue())
self.assertTrue("Command (defaults to 'no-daemon'):" in
self.stdout.getvalue())
self.assertTrue('Options:' in self.stdout.getvalue())
self.assertEqual(self.stderr.getvalue(), '')
def test_args_help3(self):
self.serv.args = ['--help']
try:
self.assertEqual(self.serv.main(), 0)
except SystemExit:
pass
self.assertTrue('Usage: ' in self.stdout.getvalue())
self.assertTrue("Command (defaults to 'no-daemon'):" in
self.stdout.getvalue())
self.assertTrue('Options:' in self.stdout.getvalue())
self.assertEqual(self.stderr.getvalue(), '')
def test_args_default_conf(self):
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.read_conf_calls, [(server.DEFAULT_CONF_FILES,)])
def test_args_override_conf1(self):
self.serv.args = ['-c', 'one.conf']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.read_conf_calls, [(['one.conf'],)])
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_args_override_conf2(self):
self.serv.args = ['-c', 'one.conf', '--conf', 'two.conf']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.read_conf_calls, [(['one.conf', 'two.conf'],)])
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_args_default_pid_file(self):
self.conf.files = ['ok.conf']
self.serv.args = ['status']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '12345 is running\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.serv.pid_file, '/var/run/brimd.pid')
def test_args_override_pid_file1(self):
self.serv.args = ['-p', 'pidfile']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.serv.pid_file, 'pidfile')
def test_args_override_pid_file2(self):
self.serv.args = ['--pid-file', 'pidfile']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.serv.pid_file, 'pidfile')
def test_args_no_pid_file(self):
self.conf.files = ['ok.conf']
self.serv.args = ['-p', '-', 'status']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(
self.stderr.getvalue(),
'pid_file not in use so status cannot be used.\n')
self.assertEqual(self.serv.pid_file, '-')
def test_args_default_nodaemon_output(self):
self.assertEqual(self.serv.main(), 1)
self.assertTrue(self.serv.output)
def test_args_default_start_output(self):
self.serv.args = ['start']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv.main(), 1)
self.assertFalse(self.serv.output)
def test_args_override_output1(self):
self.serv.args = ['start', '-o']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv.main(), 1)
self.assertTrue(self.serv.output)
def test_args_override_output2(self):
self.serv.args = ['start', '--output']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv.main(), 1)
self.assertTrue(self.serv.output)
def test_version(self):
self.serv.args = ['--version']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(
self.stdout.getvalue(), 'Brim.Net Core Server %s\n' % __version__)
self.assertEqual(self.stderr.getvalue(), '')
def test_parser_error(self):
self.serv.args = ['--invalid']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(
self.stderr.getvalue(), 'no such option: --invalid\n')
def test_too_many_commands(self):
self.serv.args = ['one', 'two']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(
self.stderr.getvalue(),
'Too many commands given; only one allowed.\n')
def test_default_command_no_daemon1(self):
self.assertEqual(self.serv.main(), 1)
self.assertTrue(self.serv.no_daemon)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_default_command_no_daemon2(self):
self.serv.args = ['start']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv.main(), 1)
self.assertFalse(self.serv.no_daemon)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_start_already_running(self):
self.conf.files = ['ok.conf']
self.serv.args = ['start']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '12345 already running\n')
self.assertEqual(self.stderr.getvalue(), '')
def test_start_no_conf(self):
self.serv.args = ['start']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_start_has_conf(self):
self.conf.files = ['ok.conf']
self.serv.args = ['start']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv._parse_args(), self.conf)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
def test_start_has_conf_no_pid_file_in_use(self):
self.conf.files = ['ok.conf']
self.serv.args = ['-p', '-', 'start']
self.send_pid_sig_retval[0] = False
self.assertEqual(self.serv._parse_args(), self.conf)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
def test_restart_no_conf(self):
self.serv.args = ['restart']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_restart_has_conf(self):
self.conf.files = ['ok.conf']
self.serv.args = ['restart']
self.assertEqual(self.serv._parse_args(), self.conf)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.fork_calls, [()])
def test_restart_has_conf_no_pid_file_in_use(self):
self.conf.files = ['ok.conf']
self.serv.args = ['-p', '-', 'restart']
exc = None
try:
self.serv._parse_args()
except Exception as err:
exc = err
self.assertEqual(
str(exc), 'pid_file not in use so restart cannot be used.')
def test_restart_has_conf_fork_side(self):
self.fork_retval[0] = 0
self.conf.files = ['ok.conf']
self.serv.args = ['restart']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.fork_calls, [()])
self.assertEqual(self.send_pid_sig_calls, [
((self.serv.pid_file, 0), {}),
((self.serv.pid_file, server.SIGHUP),
{'expect_exit': True, 'pid_override': 12345})])
def test_reload_no_conf(self):
self.serv.args = ['reload']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_reload_has_conf(self):
self.conf.files = ['ok.conf']
self.serv.args = ['reload']
self.assertEqual(self.serv._parse_args(), self.conf)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.fork_calls, [()])
def test_reload_has_conf_no_pid_file_in_use(self):
self.conf.files = ['ok.conf']
self.serv.args = ['-p', '-', 'reload']
exc = None
try:
self.serv._parse_args()
except Exception as err:
exc = err
self.assertEqual(
str(exc), 'pid_file not in use so reload cannot be used.')
def test_reload_has_conf_fork_side(self):
self.fork_retval[0] = 0
self.conf.files = ['ok.conf']
self.serv.args = ['reload']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.fork_calls, [()])
self.assertEqual(self.send_pid_sig_calls, [
((self.serv.pid_file, 0), {}),
((self.serv.pid_file, server.SIGHUP),
{'expect_exit': True, 'pid_override': 12345})])
def test_force_reload_no_conf(self):
self.serv.args = ['force-reload']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_force_reload_has_conf(self):
self.conf.files = ['ok.conf']
self.serv.args = ['force-reload']
self.assertEqual(self.serv._parse_args(), self.conf)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.fork_calls, [()])
def test_force_reload_has_conf_fork_side(self):
self.fork_retval[0] = 0
self.conf.files = ['ok.conf']
self.serv.args = ['force-reload']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.fork_calls, [()])
self.assertEqual(self.send_pid_sig_calls, [
((self.serv.pid_file, 0), {}),
((self.serv.pid_file, server.SIGHUP),
{'expect_exit': True, 'pid_override': 12345})])
def test_shutdown(self):
self.conf.files = ['ok.conf']
self.serv.args = ['shutdown']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.send_pid_sig_calls, [
((self.serv.pid_file, server.SIGHUP), {'expect_exit': True})])
def test_shutdown_no_pid_file_in_use(self):
self.conf.files = ['ok.conf']
self.serv.args = ['-p', '-', 'shutdown']
exc = None
try:
self.serv._parse_args()
except Exception as err:
exc = err
self.assertEqual(
str(exc), 'pid_file not in use so shutdown cannot be used.')
def test_stop(self):
self.conf.files = ['ok.conf']
self.serv.args = ['stop']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(self.send_pid_sig_calls, [
((self.serv.pid_file, server.SIGTERM), {'expect_exit': True})])
def test_stop_no_pid_file_in_use(self):
self.conf.files = ['ok.conf']
self.serv.args = ['-p', '-', 'stop']
exc = None
try:
self.serv._parse_args()
except Exception as err:
exc = err
self.assertEqual(
str(exc), 'pid_file not in use so stop cannot be used.')
def test_status_running(self):
self.conf.files = ['ok.conf']
self.serv.args = ['status']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '12345 is running\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(
self.send_pid_sig_calls, [((self.serv.pid_file, 0), {})])
def test_status_not_running(self):
self.conf.files = ['ok.conf']
self.send_pid_sig_retval[0] = False
self.serv.args = ['status']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), '12345 is not running\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(
self.send_pid_sig_calls, [((self.serv.pid_file, 0), {})])
def test_status_not_running_no_pid(self):
self.conf.files = ['ok.conf']
self.send_pid_sig_retval[0] = False
self.send_pid_sig_retval[1] = 0
self.serv.args = ['status']
self.assertEqual(self.serv.main(), 0)
self.assertEqual(self.stdout.getvalue(), 'not running\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(
self.send_pid_sig_calls, [((self.serv.pid_file, 0), {})])
def test_no_daemon_no_conf(self):
self.serv.args = ['no-daemon']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), 'No configuration found.\n')
def test_no_daemon_has_conf(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.assertEqual(self.serv._parse_args(), self.conf)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(self.stderr.getvalue(), '')
def test_unknown_command(self):
self.conf.files = ['ok.conf']
self.serv.args = ['unknown']
self.assertEqual(self.serv.main(), 1)
self.assertEqual(self.stdout.getvalue(), '')
self.assertEqual(
self.stderr.getvalue(), "Unknown command 'unknown'.\n")
def test_parse_conf_default(self):
self.conf.files = ['ok.conf']
self.serv.args = ['start']
self.serv._parse_args()
self.serv._parse_conf(Conf({
'wsgi': {'port': '1234'},
'wsgi#name1': {},
'wsgi#name2': {},
'tcp': {'call': 'brim.test.unit.test_server.TCPWithStatsConf'},
'tcp#name1': {
'call': 'brim.test.unit.test_server.TCPWithStatsConf'},
'tcp#name2': {
'call': 'brim.test.unit.test_server.TCPWithStatsConf'},
'udp': {'call': 'brim.test.unit.test_server.UDPWithStatsConf'},
'udp#name1': {
'call': 'brim.test.unit.test_server.UDPWithStatsConf'},
'udp#name2': {
'call': 'brim.test.unit.test_server.UDPWithStatsConf'},
'daemons': {}}))
self.assertEqual(self.serv.user, None)
self.assertEqual(self.serv.group, None)
self.assertEqual(self.serv.umask, 0022)
self.assertEqual(self.serv.log_name, 'brim')
self.assertEqual(self.serv.log_level, 'INFO')
self.assertEqual(self.serv.log_facility, 'LOG_LOCAL0')
self.assertEqual(sorted(s.name for s in self.serv.subservers), [
'daemons', 'tcp', 'tcp#name1', 'tcp#name2', 'udp', 'udp#name1',
'udp#name2', 'wsgi', 'wsgi#name1', 'wsgi#name2'])
# Just verifies subserver._parse_conf was called.
wsgi = [s for s in self.serv.subservers if s.name == 'wsgi'][0]
self.assertEqual(wsgi.port, 1234)
def test_parse_conf_sets_error_handler(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
conf = Conf({'brim': {'test': 'abc'}})
# Asserts conf.error is still the default behavior of SystemExit.
exc = None
try:
conf.get_int('brim', 'test', 0)
except SystemExit as err:
exc = err
self.assertEqual(
str(exc),
"Configuration value [brim] test of 'abc' cannot be converted to "
"int.")
self.serv._parse_conf(conf)
# Asserts conf.error is now the new behavior of just raising Exception.
exc = None
try:
conf.get_int('brim', 'test', 0)
except Exception as err:
exc = err
self.assertEqual(
str(exc),
"Configuration value [brim] test of 'abc' cannot be converted to "
"int.")
def test_parse_conf_user(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(Conf({'brim': {'user': 'name'}}))
self.assertEqual(self.serv.user, 'name')
def test_parse_conf_group(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(Conf({'brim': {'group': 'name'}}))
self.assertEqual(self.serv.group, 'name')
def test_parse_conf_umask(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(Conf({'brim': {'umask': '0777'}}))
self.assertEqual(self.serv.umask, 0777)
exc = None
try:
self.serv._parse_conf(Conf({'brim': {'umask': 'abc'}}))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid umask value 'abc'.")
exc = None
try:
self.serv._parse_conf(Conf({'brim': {'umask': '99'}}))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid umask value '99'.")
def test_parse_conf_log_name(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(Conf({'brim': {'log_name': 'name'}}))
self.assertEqual(self.serv.log_name, 'name')
def test_parse_conf_log_level(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(Conf({'brim': {'log_level': 'DEBUG'}}))
self.assertEqual(self.serv.log_level, 'DEBUG')
exc = None
try:
self.serv._parse_conf(Conf({'brim': {'log_level': 'invalid'}}))
except Exception as err:
exc = err
self.assertEqual(str(exc), "Invalid [brim] log_level 'INVALID'.")
def test_parse_conf_log_facility(self):
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(
Conf({'brim': {'log_facility': 'LOG_LOCAL1'}}))
self.assertEqual(self.serv.log_facility, 'LOG_LOCAL1')
self.serv._parse_conf(Conf({'brim': {'log_facility': 'LOCAL2'}}))
self.assertEqual(self.serv.log_facility, 'LOG_LOCAL2')
exc = None
try:
self.serv._parse_conf(
Conf({'brim': {'log_facility': 'invalid'}}))
except Exception as err:
exc = err
self.assertEqual(
str(exc), "Invalid [brim] log_facility 'LOG_INVALID'.")
def test_start(self):
self.conf = Conf({'brim': {'port': '0'}, 'wsgi': {}})
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(self.conf)
subserv = self.serv.subservers[0]
subserv._parse_conf(self.conf)
sustain_workers_calls = []
def _sustain_workers(*args, **kwargs):
sustain_workers_calls.append((args, kwargs))
orig_sustain_workers = server.sustain_workers
try:
server.sustain_workers = _sustain_workers
self.serv._start()
finally:
server.sustain_workers = orig_sustain_workers
# Since we're in no-daemon, Server didn't call sustain_workers, but the
# wsgi subserver did.
self.assertEqual(sustain_workers_calls, [
((0, subserv._wsgi_worker), {'logger': subserv.logger})])
def test_start_no_subservers(self):
self.conf = Conf({'brim': {'port': '0'}})
self.conf.files = ['ok.conf']
self.serv.args = ['no-daemon']
self.serv._parse_args()
self.serv._parse_conf(self.conf)
sustain_workers_calls = []
def _sustain_workers(*args, **kwargs):
sustain_workers_calls.append((args, kwargs))
orig_sustain_workers = server.sustain_workers
exc = None
try:
server.sustain_workers = _sustain_workers
self.serv._start()
except Exception as err:
exc = err
finally:
server.sustain_workers = orig_sustain_workers
self.assertEqual(str(exc), 'No subservers configured.')
self.assertEqual(sustain_workers_calls, [])
def test_start_daemoned_parent_side(self):
self.conf = Conf({'brim': {'port': '0'}, 'wsgi': {}})
self.conf.files = ['ok.conf']
self.serv.args = ['start']
self.serv._parse_args()
self.serv._parse_conf(self.conf)
subserv = self.serv.subservers[0]
subserv._parse_conf(self.conf)
sustain_workers_calls = []
open_retval = [StringIO()]
open_calls = []
def _sustain_workers(*args, **kwargs):
sustain_workers_calls.append((args, kwargs))
@contextmanager
def _open(*args):
open_calls.append(args)
yield open_retval[0]
orig_sustain_workers = server.sustain_workers
try:
server.sustain_workers = _sustain_workers
server.open = _open
self.serv._start()
finally:
server.sustain_workers = orig_sustain_workers
del server.open
self.assertEqual(sustain_workers_calls, [])
self.assertEqual(open_calls, [('/var/run/brimd.pid', 'w')])
self.assertEqual(open_retval[0].getvalue(), '12345\n')
def test_start_daemoned_child_side(self):
self.conf = Conf({'brim': {'port': '0'}, 'wsgi': {}})
self.conf.files = ['ok.conf']
self.serv.args = ['start']
self.serv._parse_args()
self.serv._parse_conf(self.conf)
subserv = self.serv.subservers[0]
subserv._parse_conf(self.conf)
sustain_workers_calls = []
self.fork_retval[0] = 0
def _sustain_workers(*args, **kwargs):
sustain_workers_calls.append((args, kwargs))
orig_sustain_workers = server.sustain_workers
try:
server.sustain_workers = _sustain_workers
self.serv._start()
finally:
server.sustain_workers = orig_sustain_workers
self.assertEqual(sustain_workers_calls, [
((1, self.serv._start_subserver), {'logger': self.serv.logger})])
self.assertEqual(self.capture_calls, [
((), {'exceptions': self.serv._capture_exception,
'stdout_func': self.serv._capture_stdout,
'stderr_func': self.serv._capture_stderr})])
def test_start_daemoned_child_side_console_mode(self):
self.conf = Conf({'brim': {'port': '0'}, 'wsgi': {}})
self.conf.files = ['ok.conf']
self.serv.args = ['-o', 'start']
self.serv._parse_args()
self.serv._parse_conf(self.conf)
subserv = self.serv.subservers[0]
subserv._parse_conf(self.conf)
sustain_workers_calls = []
self.fork_retval[0] = 0
def _sustain_workers(*args, **kwargs):
sustain_workers_calls.append((args, kwargs))
orig_sustain_workers = server.sustain_workers
try:
server.sustain_workers = _sustain_workers
self.serv._start()
finally:
server.sustain_workers = orig_sustain_workers
self.assertEqual(sustain_workers_calls, [
((1, self.serv._start_subserver), {'logger': self.serv.logger})])
self.assertEqual(self.capture_calls, [])
def test_start_subserver(self, no_setproctitle=False):
self.conf = Conf({'brim': {'port': '0'}, 'wsgi': {}})
self.conf.files = ['ok.conf']
self.serv.args = ['start']
self.serv._parse_args()
self.serv._parse_conf(self.conf)
subserv = self.serv.subservers[0]
subserv._parse_conf(self.conf)
sustain_workers_calls = []
self.fork_retval[0] = 0
def _sustain_workers(*args, **kwargs):
sustain_workers_calls.append((args, kwargs))
orig_sustain_workers = server.sustain_workers
try:
server.sustain_workers = _sustain_workers
self.serv._start()
finally:
server.sustain_workers = orig_sustain_workers
self.assertEqual(sustain_workers_calls, [
((1, self.serv._start_subserver), {'logger': self.serv.logger})])
self.assertEqual(self.capture_calls, [
((), {'exceptions': self.serv._capture_exception,
'stdout_func': self.serv._capture_stdout,
'stderr_func': self.serv._capture_stderr})])
setproctitle_calls = []
start_calls = []
def _setproctitle(*args):
setproctitle_calls.append(args)
def _start(*args):
start_calls.append(args)
setproctitle_orig = server.setproctitle
try:
server.setproctitle = None if no_setproctitle else _setproctitle
subserv._start = _start
self.serv._start_subserver(0)
finally:
server.setproctitle = setproctitle_orig
if no_setproctitle:
self.assertEqual(setproctitle_calls, [])
else:
self.assertEqual(setproctitle_calls, [('wsgi:brimd',)])
self.assertEqual(start_calls, [(self.serv.bucket_stats[0],)])
def test_start_subserver_no_setproctitle(self):
self.test_start_subserver(no_setproctitle=True)
def test_capture_exception(self):
self.serv.logger = FakeLogger()
self.serv._capture_exception()
self.assertEqual(
self.serv.logger.error_calls,
[("UNCAUGHT EXCEPTION: main None ['None']",)])
self.serv.logger = FakeLogger()
try:
raise Exception('testing')
except Exception:
self.serv._capture_exception(*exc_info())
self.assertEqual(len(self.serv.logger.error_calls), 1)
self.assertEqual(len(self.serv.logger.error_calls[0]), 1)
self.assertTrue(self.serv.logger.error_calls[0][0].startswith(
'UNCAUGHT EXCEPTION: main Exception: testing [\'Traceback '
'(most recent call last):\''))
self.assertTrue(self.serv.logger.error_calls[0][0].endswith(
' raise Exception(\'testing\')", \'Exception: testing\']'))
def test_capture_stdout(self):
self.serv.logger = FakeLogger()
self.serv._capture_stdout('one\ntwo\nthree\n')
self.assertEqual(self.serv.logger.info_calls, [
('STDOUT: main one',), ('STDOUT: main two',),
('STDOUT: main three',)])
self.serv.logger = FakeLogger()
self.serv._capture_stdout('one\ntwo\nthree\n')
self.assertEqual(self.serv.logger.info_calls, [
('STDOUT: main one',), ('STDOUT: main two',),
('STDOUT: main three',)])
def test_capture_stderr(self):
self.serv.logger = FakeLogger()
self.serv._capture_stderr('one\ntwo\nthree\n')
self.assertEqual(self.serv.logger.error_calls, [
('STDERR: main one',), ('STDERR: main two',),
('STDERR: main three',)])
self.serv.logger = FakeLogger()
self.serv._capture_stderr('one\ntwo\nthree\n')
self.assertEqual(self.serv.logger.error_calls, [
('STDERR: main one',), ('STDERR: main two',),
('STDERR: main three',)])
if __name__ == '__main__':
main()
| 38.22179 | 79 | 0.601024 |
acf856edc3d95b2c75885d08a90659c3336bf149 | 1,042 | py | Python | __Courses__/Python - Introduction to Python Programming - Udacity/C5. Scripting/Scripting with Raw Input.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Courses__/Python - Introduction to Python Programming - Udacity/C5. Scripting/Scripting with Raw Input.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Courses__/Python - Introduction to Python Programming - Udacity/C5. Scripting/Scripting with Raw Input.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | """
We can get raw input from the user with the built-in function input, which takes in an optional string argument that you can use to specify a message to show to the user when asking for input.
"""
name = input("Enter your name: ")
print("Hello there, {}!".format(name.title()))
# Enter your name: hyuk
# Hello there, Hyuk!
"""
This prompts the user to enter a name and then uses the input in a greeting. The input function takes in whatever the user types and stores it as a string. If you want to interpret their input as something other than a string, like an integer, as in the example below, you need to wrap the result with the new type to convert it from a string.
"""
num = int(input("Enter an integer: "))
print("hello " * num)
# Enter an integer: 5
# hello hello hello hello hello
"""
We can also interpret user input as a Python expression using the built-in function eval. This function evaluates a string as a line of Python.
"""
result = eval(input("Enter an expression: "))
print(result)
# Enter an expression: 2*3
# 6
| 38.592593 | 343 | 0.732246 |
acf857300b98cff01107939de2aa4670753ff091 | 910 | py | Python | app/__init__.py | HunterLC/FARSystem | a8b91fcd1914e84dd2ec2b8321c51627779bb89b | [
"Apache-2.0"
] | null | null | null | app/__init__.py | HunterLC/FARSystem | a8b91fcd1914e84dd2ec2b8321c51627779bb89b | [
"Apache-2.0"
] | null | null | null | app/__init__.py | HunterLC/FARSystem | a8b91fcd1914e84dd2ec2b8321c51627779bb89b | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy() # 实例化
from .models import *
from app.views.account import account_blue
from app.views.actor import actor_blue
from app.views.film import film_blue
from app.views.recommend import recommend_blue
from app.views.setting import setting_blue
def init_app():
    """Create and configure the Flask application.

    Binds the shared SQLAlchemy instance to the app and registers all
    feature blueprints (actor, film, account, recommend, setting), then
    returns the configured app.
    """
    app = Flask(__name__, template_folder='../templates', static_folder='../static')
    # NOTE(review): secret key is hard-coded in source; consider loading it
    # from configuration or the environment instead of committing it.
    app.secret_key = "liu_chang98hunter_lc25"
    app.config.from_object('settings.DevelopmentConfig')
    # Register the module-level SQLAlchemy instance with this app.
    db.init_app(app)
    # Register blueprints, one per feature area, each under its own prefix.
    app.register_blueprint(actor_blue, url_prefix='/actor')
    app.register_blueprint(film_blue, url_prefix='/film')
    app.register_blueprint(account_blue, url_prefix='/account')
    app.register_blueprint(recommend_blue, url_prefix='/recommend')
    app.register_blueprint(setting_blue, url_prefix='/setting')
    return app
| 29.354839 | 84 | 0.764835 |
acf8578c4f0f991e46109af13faf3914ddfc8e15 | 7,015 | py | Python | test/integ/test_key_pair_authentication.py | fermezz/snowflake-connector-python | bc9616ad568b23cb8a931d2d590041f6bac1cff9 | [
"Apache-2.0"
] | null | null | null | test/integ/test_key_pair_authentication.py | fermezz/snowflake-connector-python | bc9616ad568b23cb8a931d2d590041f6bac1cff9 | [
"Apache-2.0"
] | null | null | null | test/integ/test_key_pair_authentication.py | fermezz/snowflake-connector-python | bc9616ad568b23cb8a931d2d590041f6bac1cff9 | [
"Apache-2.0"
] | 1 | 2021-03-25T14:00:15.000Z | 2021-03-25T14:00:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import uuid
import pytest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa, rsa
import snowflake.connector
@pytest.mark.skipolddriver
def test_different_key_length(is_public_test, request, conn_cnx, db_parameters):
if is_public_test:
pytest.skip("This test requires ACCOUNTADMIN privilege to set the public key")
test_user = "python_test_keypair_user_" + str(uuid.uuid4()).replace("-", "_")
db_config = {
"protocol": db_parameters["protocol"],
"account": db_parameters["account"],
"user": test_user,
"host": db_parameters["host"],
"port": db_parameters["port"],
"database": db_parameters["database"],
"schema": db_parameters["schema"],
"timezone": "UTC",
}
def fin():
with conn_cnx() as cnx:
cnx.cursor().execute(
"""
use role accountadmin
"""
)
cnx.cursor().execute(
"""
drop user if exists {user}
""".format(
user=test_user
)
)
request.addfinalizer(fin)
testcases = [2048, 4096, 8192]
with conn_cnx() as cnx:
cursor = cnx.cursor()
cursor.execute(
"""
use role accountadmin
"""
)
cursor.execute("create user " + test_user)
for key_length in testcases:
private_key_der, public_key_der_encoded = generate_key_pair(key_length)
cnx.cursor().execute(
"""
alter user {user} set rsa_public_key='{public_key}'
""".format(
user=test_user, public_key=public_key_der_encoded
)
)
db_config["private_key"] = private_key_der
with snowflake.connector.connect(**db_config) as _:
pass
@pytest.mark.skipolddriver
def test_multiple_key_pair(is_public_test, request, conn_cnx, db_parameters):
if is_public_test:
pytest.skip("This test requires ACCOUNTADMIN privilege to set the public key")
test_user = "python_test_keypair_user_" + str(uuid.uuid4()).replace("-", "_")
db_config = {
"protocol": db_parameters["protocol"],
"account": db_parameters["account"],
"user": test_user,
"host": db_parameters["host"],
"port": db_parameters["port"],
"database": db_parameters["database"],
"schema": db_parameters["schema"],
"timezone": "UTC",
}
def fin():
with conn_cnx() as cnx:
cnx.cursor().execute(
"""
use role accountadmin
"""
)
cnx.cursor().execute(
"""
drop user if exists {user}
""".format(
user=test_user
)
)
request.addfinalizer(fin)
private_key_one_der, public_key_one_der_encoded = generate_key_pair(2048)
private_key_two_der, public_key_two_der_encoded = generate_key_pair(2048)
with conn_cnx() as cnx:
cnx.cursor().execute(
"""
use role accountadmin
"""
)
cnx.cursor().execute(
"""
create user {user}
""".format(
user=test_user
)
)
cnx.cursor().execute(
"""
alter user {user} set rsa_public_key='{public_key}'
""".format(
user=test_user, public_key=public_key_one_der_encoded
)
)
db_config["private_key"] = private_key_one_der
with snowflake.connector.connect(**db_config) as _:
pass
# assert exception since different key pair is used
db_config["private_key"] = private_key_two_der
# although specifying password,
# key pair authentication should used and it should fail since we don't do fall back
db_config["password"] = "fake_password"
with pytest.raises(snowflake.connector.errors.DatabaseError) as exec_info:
snowflake.connector.connect(**db_config)
assert exec_info.value.errno == 250001
assert exec_info.value.sqlstate == "08001"
assert "JWT token is invalid" in exec_info.value.msg
with conn_cnx() as cnx:
cnx.cursor().execute(
"""
use role accountadmin
"""
)
cnx.cursor().execute(
"""
alter user {user} set rsa_public_key_2='{public_key}'
""".format(
user=test_user, public_key=public_key_two_der_encoded
)
)
with snowflake.connector.connect(**db_config) as _:
pass
def test_bad_private_key(db_parameters):
db_config = {
"protocol": db_parameters["protocol"],
"account": db_parameters["account"],
"user": db_parameters["user"],
"host": db_parameters["host"],
"port": db_parameters["port"],
"database": db_parameters["database"],
"schema": db_parameters["schema"],
"timezone": "UTC",
}
dsa_private_key = dsa.generate_private_key(key_size=2048, backend=default_backend())
dsa_private_key_der = dsa_private_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
encrypted_rsa_private_key_der = rsa.generate_private_key(
key_size=2048, public_exponent=65537, backend=default_backend()
).private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(b"abcd"),
)
bad_private_key_test_cases = [
"abcd",
1234,
b"abcd",
dsa_private_key_der,
encrypted_rsa_private_key_der,
]
for private_key in bad_private_key_test_cases:
db_config["private_key"] = private_key
with pytest.raises(snowflake.connector.errors.ProgrammingError) as exec_info:
snowflake.connector.connect(**db_config)
assert exec_info.value.errno == 251008
def generate_key_pair(key_length):
    """Create an RSA key pair for key-pair authentication tests.

    Returns a tuple ``(private_key_der, public_key_der_encoded)`` where the
    private key is unencrypted PKCS#8 DER bytes and the public key is the
    base64 body of its PEM encoding with the header/footer lines stripped.
    """
    key = rsa.generate_private_key(
        backend=default_backend(), public_exponent=65537, key_size=key_length
    )
    private_der = key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    pem_text = (
        key.public_key()
        .public_bytes(
            serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo
        )
        .decode("utf-8")
    )
    # Drop the "-----BEGIN/END PUBLIC KEY-----" lines plus the empty entry
    # produced by the trailing newline, keeping only the base64 payload.
    pem_lines = pem_text.split("\n")
    public_der_encoded = "".join(pem_lines[1:-2])
    return private_der, public_der_encoded
| 29.599156 | 88 | 0.616108 |
acf8590b8b8c57397512bf64e25ef0e46fb82e8b | 4,013 | py | Python | HWK2_dna_analysis.py | radhikakerur/Programming-For-BI-A | 42459d6127431e9f73f5b1b9200f13e9ef049415 | [
"MIT"
] | null | null | null | HWK2_dna_analysis.py | radhikakerur/Programming-For-BI-A | 42459d6127431e9f73f5b1b9200f13e9ef049415 | [
"MIT"
] | null | null | null | HWK2_dna_analysis.py | radhikakerur/Programming-For-BI-A | 42459d6127431e9f73f5b1b9200f13e9ef049415 | [
"MIT"
] | null | null | null | # Homework 2: DNA analysis
# This program reads DNA sequencer output and computes statistics, such as
# the GC content. Run it from the command line like this:
#   python dna_analysis.py myfile.fastq
###########################################################################
### Libraries
###
# The sys module supports reading files, command-line arguments, etc.
import sys

###########################################################################
### Read the nucleotides into a variable named seq
###
# You need to specify a file name
if len(sys.argv) < 2:
    print("You must supply a file name as an argument when running this program.")
    sys.exit(2)

# The file name specified on the command line, as a string.
filename = sys.argv[1]
# Bug fix: open the file named on the command line instead of the hard-coded
# absolute path ('/Users/.../sample_1.fastq') that only existed on the
# original author's machine and silently ignored the argument.
inputfile = open(filename)

# In FASTQ format every record spans 4 lines and the nucleotide sequence is
# the 2nd line of each record (lines 2, 6, 10, ...).
seq = ""
linenum = 0
for line in inputfile:
    linenum = linenum + 1
    if linenum % 4 == 2:
        # Remove the newline characters from the end of the line.
        seq = seq + line.rstrip()
inputfile.close()

###########################################################################
### Compute statistics
###
# Count each nucleotide in a single pass; str.count runs in C and replaces
# the two hand-written counting loops of the original version.
total_count = len(seq)  # every character, including any ambiguous bases
g_count = seq.count('G')
c_count = seq.count('C')
a_count = seq.count('A')
t_count = seq.count('T')
gc_count = g_count + c_count
at_count = a_count + t_count
sum_count = gc_count + at_count  # only the unambiguous A/T/G/C bases

# Guard against empty/sequence-free input instead of crashing on division.
if sum_count == 0:
    print("No A/T/G/C nucleotides found; cannot compute GC statistics.")
    sys.exit(1)

# Fractions are computed over the unambiguous bases only (sum_count), which
# matches the corrected behaviour of the original script.
gc_content = float(gc_count) / sum_count
at_content = float(at_count) / sum_count

# Print the answers
print('GC-content:', gc_content)
print('AT-content:', at_content)
print('G count:', g_count)
print('C count:', c_count)
print('A count:', a_count)
print('T count:', t_count)
print('Sum count:', sum_count)
print('Total count:', total_count)
print('seq count:', len(seq))
print('AT/GC Ratio:', (a_count + t_count) / (g_count + c_count))

# Classify the sequence by its GC fraction.
if gc_content > 0.6:
    print('GC Classification: high GC content')
elif gc_content < 0.4:
    print('GC Classification: low GC content')
else:
    print('GC Classification: moderate GC content')
| 27.675862 | 133 | 0.607027 |
acf85c4d88e8cb6fa290fe3a0175860f27a518b7 | 686 | py | Python | app/core/migrations/0003_ingredient.py | EddieRosas/recipe-app-api | 956b0985edf0741e2510a7717c87b4f2c9903f73 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | EddieRosas/recipe-app-api | 956b0985edf0741e2510a7717c87b4f2c9903f73 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | EddieRosas/recipe-app-api | 956b0985edf0741e2510a7717c87b4f2c9903f73 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-07-21 02:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.15: creates the ``Ingredient`` model.
    # Must run after 0002_tag, which established this app's prior schema.
    dependencies = [
        ('core', '0002_tag'),
    ]
    # NOTE(review): ``max_length=225`` looks like a typo for 255 — confirm
    # against the project's other models; changing it needs a new migration.
    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                # Standard auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=225)),
                # Owning user; deleting the user cascades to their ingredients.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.583333 | 118 | 0.618076 |
acf85d3dabbaadf316ffda7237a7646398111b4a | 12,111 | py | Python | git_rebase_update.py | 2youyou2/depot_tools | 8b94108e684872a89f7108f51ba74f01220d64fa | [
"BSD-3-Clause"
] | 7 | 2018-09-26T11:10:40.000Z | 2020-12-19T13:32:12.000Z | git_rebase_update.py | 2youyou2/depot_tools | 8b94108e684872a89f7108f51ba74f01220d64fa | [
"BSD-3-Clause"
] | null | null | null | git_rebase_update.py | 2youyou2/depot_tools | 8b94108e684872a89f7108f51ba74f01220d64fa | [
"BSD-3-Clause"
] | 4 | 2020-03-27T07:49:45.000Z | 2020-11-17T02:46:42.000Z | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool to update all branches to have the latest changes from their upstreams.
"""
import argparse
import collections
import logging
import sys
import textwrap
import os
from fnmatch import fnmatch
from pprint import pformat
import git_common as git
STARTING_BRANCH_KEY = 'depot-tools.rebase-update.starting-branch'
STARTING_WORKDIR_KEY = 'depot-tools.rebase-update.starting-workdir'
def find_return_branch_workdir():
  """Determine the branch and working directory to restore afterwards.

  Both values are persisted in git config so they survive an interrupted
  rebase-update and are reused by the next invocation.
  """
  saved_branch = git.get_config(STARTING_BRANCH_KEY)
  saved_workdir = git.get_config(STARTING_WORKDIR_KEY)
  if saved_branch:
    return saved_branch, saved_workdir
  # First invocation: record where we are now so a later run can return here.
  saved_workdir = os.getcwd()
  git.set_config(STARTING_WORKDIR_KEY, saved_workdir)
  saved_branch = git.current_branch()
  if saved_branch != 'HEAD':
    git.set_config(STARTING_BRANCH_KEY, saved_branch)
  return saved_branch, saved_workdir
def fetch_remotes(branch_tree):
  """Fetches all remotes which are needed to update |branch_tree|."""
  # |branch_tree| maps branch -> upstream; only remotes that some upstream
  # actually lives on are fetched (plus all tags, if an upstream is a tag).
  fetch_tags = False
  remotes = set()
  tag_set = git.tags()
  fetchspec_map = {}
  # Build a dest-refspec-pattern -> remote-name map from the repo's
  # 'remote.<name>.fetch' config entries.
  all_fetchspec_configs = git.get_config_regexp(r'^remote\..*\.fetch')
  for fetchspec_config in all_fetchspec_configs:
    # Each entry looks like: 'remote.origin.fetch +refs/heads/*:refs/remotes/origin/*'
    key, _, fetchspec = fetchspec_config.partition(' ')
    dest_spec = fetchspec.partition(':')[2]
    remote_name = key.split('.')[1]
    fetchspec_map[dest_spec] = remote_name
  for parent in branch_tree.itervalues():
    if parent in tag_set:
      fetch_tags = True
    else:
      # Match the parent's full symbolic ref against each destination pattern
      # to discover which remote tracks it.
      full_ref = git.run('rev-parse', '--symbolic-full-name', parent)
      for dest_spec, remote_name in fetchspec_map.iteritems():
        if fnmatch(full_ref, dest_spec):
          remotes.add(remote_name)
          break
  fetch_args = []
  if fetch_tags:
    # Need to fetch all because we don't know what remote the tag comes from :(
    # TODO(iannucci): assert that the tags are in the remote fetch refspec
    fetch_args = ['--all']
  else:
    fetch_args.append('--multiple')
    fetch_args.extend(remotes)
  # TODO(iannucci): Should we fetch git-svn?
  if not fetch_args: # pragma: no cover
    print 'Nothing to fetch.'
  else:
    git.run_with_stderr('fetch', *fetch_args, stdout=sys.stdout,
                        stderr=sys.stderr)
def remove_empty_branches(branch_tree):
  """Delete branches whose tree is identical to their parent's, reparenting
  any children (and grand-children) onto that parent first."""
  tag_set = git.tags()
  # Lazily check out the root branch once, only if a deletion happens.
  ensure_root_checkout = git.once(lambda: git.run('checkout', git.root()))
  deletions = {}    # branch -> deletion order
  reparents = {}    # branch -> (order, new parent, original parent)
  downstreams = collections.defaultdict(list)  # parent -> child branches
  # Walk leaves-first so a branch's children are known before it is visited.
  for branch, parent in git.topo_iter(branch_tree, top_down=False):
    downstreams[parent].append(branch)
    # If branch and parent have the same tree, then branch has to be marked
    # for deletion and its children and grand-children reparented to parent.
    if git.hash_one(branch+":") == git.hash_one(parent+":"):
      ensure_root_checkout()
      logging.debug('branch %s merged to %s', branch, parent)
      # Mark branch for deletion while remembering the ordering, then add all
      # its children as grand-children of its parent and record reparenting
      # information if necessary.
      deletions[branch] = len(deletions)
      for down in downstreams[branch]:
        if down in deletions:
          continue
        # Record the new and old parent for down, or update such a record
        # if it already exists. Keep track of the ordering so that reparenting
        # happen in topological order.
        downstreams[parent].append(down)
        if down not in reparents:
          reparents[down] = (len(reparents), parent, branch)
        else:
          order, _, old_parent = reparents[down]
          reparents[down] = (order, parent, old_parent)
  # Apply all reparenting recorded, in order.
  for branch, value in sorted(reparents.iteritems(), key=lambda x:x[1][0]):
    _, parent, old_parent = value
    if parent in tag_set:
      # Tags aren't branches, so point the upstream config at the tag ref.
      git.set_branch_config(branch, 'remote', '.')
      git.set_branch_config(branch, 'merge', 'refs/tags/%s' % parent)
      print ('Reparented %s to track %s [tag] (was tracking %s)'
             % (branch, parent, old_parent))
    else:
      git.run('branch', '--set-upstream-to', parent, branch)
      print ('Reparented %s to track %s (was tracking %s)'
             % (branch, parent, old_parent))
  # Apply all deletions recorded, in order.
  for branch, _ in sorted(deletions.iteritems(), key=lambda x: x[1]):
    print git.run('branch', '-d', branch)
def rebase_branch(branch, parent, start_hash):
  """Rebase |branch| onto |parent|, carrying the commits after |start_hash|.

  Returns True when the branch ends up cleanly on its parent (including the
  already-up-to-date case); returns False when a real merge conflict is left
  for the user, in which case the working copy is deliberately left
  mid-rebase.
  """
  logging.debug('considering %s(%s) -> %s(%s) : %s',
                branch, git.hash_one(branch), parent, git.hash_one(parent),
                start_hash)
  # If parent has FROZEN commits, don't base branch on top of them. Instead,
  # base branch on top of whatever commit is before them.
  back_ups = 0
  orig_parent = parent
  while git.run('log', '-n1', '--format=%s',
                parent, '--').startswith(git.FREEZE):
    back_ups += 1
    parent = git.run('rev-parse', parent+'~')
  if back_ups:
    logging.debug('Backed parent up by %d from %s to %s',
                  back_ups, orig_parent, parent)
  # Nothing to do if the (possibly backed-up) parent is already the recorded
  # merge base.
  if git.hash_one(parent) != start_hash:
    # Try a plain rebase first
    print 'Rebasing:', branch
    rebase_ret = git.rebase(parent, start_hash, branch, abort=True)
    if not rebase_ret.success:
      # TODO(iannucci): Find collapsible branches in a smarter way?
      print "Failed! Attempting to squash", branch, "...",
      sys.stdout.flush()
      # Squash the branch onto a scratch branch; if the squashed result is
      # identical to the parent, the branch carried no real changes.
      squash_branch = branch+"_squash_attempt"
      git.run('checkout', '-b', squash_branch)
      git.squash_current_branch(merge_base=start_hash)
      # Try to rebase the branch_squash_attempt branch to see if it's empty.
      squash_ret = git.rebase(parent, start_hash, squash_branch, abort=True)
      empty_rebase = git.hash_one(squash_branch) == git.hash_one(parent)
      git.run('checkout', branch)
      git.run('branch', '-D', squash_branch)
      if squash_ret.success and empty_rebase:
        print 'Success!'
        git.squash_current_branch(merge_base=start_hash)
        git.rebase(parent, start_hash, branch)
      else:
        print "Failed!"
        print
        # rebase and leave in mid-rebase state.
        # This second rebase attempt should always fail in the same
        # way that the first one does. If it magically succeeds then
        # something very strange has happened.
        second_rebase_ret = git.rebase(parent, start_hash, branch)
        if second_rebase_ret.success: # pragma: no cover
          print "Second rebase succeeded unexpectedly!"
          print "Please see: http://crbug.com/425696"
          print "First rebased failed with:"
          print rebase_ret.stderr
        else:
          print "Here's what git-rebase (squashed) had to say:"
          print
          print squash_ret.stdout
          print squash_ret.stderr
          print textwrap.dedent(
          """\
          Squashing failed. You probably have a real merge conflict.
          Your working copy is in mid-rebase. Either:
           * completely resolve like a normal git-rebase; OR
           * abort the rebase and mark this branch as dormant:
               git config branch.%s.dormant true
          And then run `git rebase-update` again to resume.
          """ % branch)
          return False
  else:
    print '%s up-to-date' % branch
  # Refresh the recorded merge base now that the branch sits on its parent.
  git.remove_merge_base(branch)
  git.get_or_create_merge_base(branch)
  return True
def main(args=None):
  """Entry point for `git rebase-update`; returns a process exit code (0 on
  success, 1 if any branch could not be cleanly rebased)."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--verbose', '-v', action='store_true')
  parser.add_argument('--keep-going', '-k', action='store_true',
                      help='Keep processing past failed rebases.')
  parser.add_argument('--no_fetch', '--no-fetch', '-n',
                      action='store_true',
                      help='Skip fetching remotes.')
  parser.add_argument(
      '--current', action='store_true', help='Only rebase the current branch.')
  parser.add_argument('branches', nargs='*',
                      help='Branches to be rebased. All branches are assumed '
                           'if none specified.')
  opts = parser.parse_args(args)
  if opts.verbose: # pragma: no cover
    logging.getLogger().setLevel(logging.DEBUG)
  # TODO(iannucci): snapshot all branches somehow, so we can implement
  # `git rebase-update --undo`.
  # * Perhaps just copy packed-refs + refs/ + logs/ to the side?
  # * commit them to a secret ref?
  # * Then we could view a summary of each run as a
  # `diff --stat` on that secret ref.
  if git.in_rebase():
    # TODO(iannucci): Be able to resume rebase with flags like --continue,
    # etc.
    print (
        'Rebase in progress. Please complete the rebase before running '
        '`git rebase-update`.'
    )
    return 1
  # Remember where to return to afterwards (persisted in git config so an
  # interrupted run resumes with the same target).
  return_branch, return_workdir = find_return_branch_workdir()
  os.chdir(git.run('rev-parse', '--show-toplevel'))
  if git.current_branch() == 'HEAD':
    if git.run('status', '--porcelain'):
      print 'Cannot rebase-update with detached head + uncommitted changes.'
      return 1
  else:
    git.freeze() # just in case there are any local changes.
  branches_to_rebase = set(opts.branches)
  if opts.current:
    branches_to_rebase.add(git.current_branch())
  skipped, branch_tree = git.get_branch_tree()
  if branches_to_rebase:
    skipped = set(skipped).intersection(branches_to_rebase)
  for branch in skipped:
    print 'Skipping %s: No upstream specified' % branch
  if not opts.no_fetch:
    fetch_remotes(branch_tree)
  # Record every branch's merge base up front, before any rebasing happens.
  merge_base = {}
  for branch, parent in branch_tree.iteritems():
    merge_base[branch] = git.get_or_create_merge_base(branch, parent)
  logging.debug('branch_tree: %s' % pformat(branch_tree))
  logging.debug('merge_base: %s' % pformat(merge_base))
  retcode = 0
  unrebased_branches = []
  # Rebase each branch starting with the root-most branches and working
  # towards the leaves.
  for branch, parent in git.topo_iter(branch_tree):
    # Only rebase specified branches, unless none specified.
    if branches_to_rebase and branch not in branches_to_rebase:
      continue
    if git.is_dormant(branch):
      print 'Skipping dormant branch', branch
    else:
      ret = rebase_branch(branch, parent, merge_base[branch])
      if not ret:
        retcode = 1
        if opts.keep_going:
          print '--keep-going set, continuing with next branch.'
          unrebased_branches.append(branch)
          if git.in_rebase():
            git.run_with_retcode('rebase', '--abort')
            if git.in_rebase(): # pragma: no cover
              print 'Failed to abort rebase. Something is really wrong.'
              break
        else:
          break
  if unrebased_branches:
    print
    print 'The following branches could not be cleanly rebased:'
    for branch in unrebased_branches:
      print '  %s' % branch
  # On full success: prune branches that merged into their parents and
  # restore the user's original branch and working directory.
  if not retcode:
    remove_empty_branches(branch_tree)
    # return_branch may not be there any more.
    if return_branch in git.branches():
      git.run('checkout', return_branch)
      git.thaw()
    else:
      root_branch = git.root()
      if return_branch != 'HEAD':
        print (
            "%r was merged with its parent, checking out %r instead."
            % (return_branch, root_branch)
        )
      git.run('checkout', root_branch)
    # return_workdir may also not be there any more.
    if return_workdir:
      try:
        os.chdir(return_workdir)
      except OSError as e:
        print (
            "Unable to return to original workdir %r: %s"
            % (return_workdir, e)
        )
    git.set_config(STARTING_BRANCH_KEY, '')
    git.set_config(STARTING_WORKDIR_KEY, '')
  return retcode
# Script entry point: exit with main()'s return code; treat Ctrl-C as exit 1.
if __name__ == '__main__': # pragma: no cover
  try:
    sys.exit(main())
  except KeyboardInterrupt:
    sys.stderr.write('interrupted\n')
    sys.exit(1)
| 34.504274 | 79 | 0.659896 |
acf8601124e1ec0262624be7bb6311a9e0854d4b | 2,102 | py | Python | base_python/tests/test_run_config.py | cogment/cogment-verse | b7e3eddac57021ec77912a5d38e09f8202dc352f | [
"Apache-2.0"
] | 23 | 2021-10-01T01:33:15.000Z | 2022-03-10T18:18:50.000Z | base_python/tests/test_run_config.py | cogment/cogment-verse | b7e3eddac57021ec77912a5d38e09f8202dc352f | [
"Apache-2.0"
] | 35 | 2021-11-06T04:37:07.000Z | 2022-03-18T18:05:28.000Z | base_python/tests/test_run_config.py | cogment/cogment-verse | b7e3eddac57021ec77912a5d38e09f8202dc352f | [
"Apache-2.0"
] | 4 | 2021-12-14T15:24:50.000Z | 2022-01-17T11:06:34.000Z | # Copyright 2021 AI Redefined Inc. <dev+cogment@ai-r.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import yaml
from data_pb2 import RunConfig
from google.protobuf.json_format import MessageToDict, ParseDict
# for pytest fixtures
# pylint: disable=redefined-outer-name
@pytest.fixture
def config_dict():
    """Return a representative run configuration parsed from YAML.

    The dict mirrors the fields of the RunConfig protobuf message so
    test_config can round-trip it through ParseDict/MessageToDict.
    """
    config_str = """
    environment:
      specs:
        implementation: gym/CartPole-v0
        num_players: 1
        observation_space:
          properties:
            - box:
                shape: [4]
        action_space:
          properties:
            - discrete:
                num: 2
      config:
        render_width: 256
        flatten: True
        framestack: 1
    epsilon_min: 0.1
    epsilon_steps: 100000
    target_net_update_schedule: 1000
    learning_rate: 1.0e-4
    lr_warmup_steps: 10000
    demonstration_count: 0
    total_trial_count: 10000
    model_publication_interval: 1000
    model_archive_interval: 4000 # Archive every 4000 training steps
    batch_size: 256
    min_replay_buffer_size: 1000
    max_parallel_trials: 4
    model_kwargs: {}
    max_replay_buffer_size: 100000
    aggregate_by_actor: False
    replay_buffer_config:
      observation_dtype: float32
      action_dtype: int8
    agent_implementation: rainbowtorch
    """
    return yaml.safe_load(config_str)
def test_config(config_dict):
    """Round-trip the YAML config through the RunConfig protobuf message."""
    # ParseDict raises if the dict contains fields unknown to RunConfig,
    # so merely reaching the assertions already validates the schema.
    message = ParseDict(config_dict, RunConfig())
    round_tripped = MessageToDict(message, preserving_proto_field_name=True)
    for key in ("environment", "replay_buffer_config"):
        assert key in round_tripped
    assert "observation_dtype" in round_tripped["replay_buffer_config"]
| 28.794521 | 74 | 0.72883 |
acf860a084dfb527cf834928b519a4de130c5544 | 2,353 | py | Python | py/riscv/AddressTable.py | nujgnayuf/force-riscv | 4c1e837263c80145359eb48e3cd62766c13f9cc4 | [
"Apache-2.0"
] | null | null | null | py/riscv/AddressTable.py | nujgnayuf/force-riscv | 4c1e837263c80145359eb48e3cd62766c13f9cc4 | [
"Apache-2.0"
] | null | null | null | py/riscv/AddressTable.py | nujgnayuf/force-riscv | 4c1e837263c80145359eb48e3cd62766c13f9cc4 | [
"Apache-2.0"
] | 1 | 2020-06-17T09:37:45.000Z | 2020-06-17T09:37:45.000Z | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base.Sequence import Sequence
import inspect
import sys
import os
## AddressTableRISCV
# Provide available address for exceptions handlers.
class AddressTableRISCV(Sequence):
    """Reserves a GPR that exception handlers use as the address-table index."""

    def __init__(self, gen_thread):
        super().__init__(gen_thread)
        # GPR index backing the address table; chosen lazily in generate().
        self.table_index = None

    def generate(self, **kwargs):
        """Pick (or accept via 'table_index') the index GPR, reserve it, return it."""
        index = kwargs.get('table_index')
        if index is None:
            # Registers 0-2 are excluded — presumably to keep zero/ra/sp
            # available (TODO confirm against the register conventions).
            index = self.getRandomGPR(exclude="0,1,2")
        self.table_index = index
        self.reserveRegisterByIndex(64, index, "GPR", "ReadWrite")
        return index

    def tableIndex(self):
        """Return the reserved GPR index (None before generate())."""
        return self.table_index

    def getAddress(self, reg_index):
        # TODO: not implemented yet.
        pass
## AddressTableManagerRISCV class
class AddressTableManagerRISCV(Sequence):
    """Owns the per-thread AddressTableRISCV and wires it into generation."""

    def __init__(self, gen_thread):
        super().__init__(gen_thread)
        self.table_index = None    # GPR index reserved for the address table
        self.address_table = None  # the AddressTableRISCV instance, once built

    def generate(self):
        """Build the address table and emit the InitializeAddrTables sequence."""
        self.address_table = AddressTableRISCV(self.genThread)
        # Reuse a previously chosen index (e.g. from a shallow copy), if any.
        self.table_index = self.address_table.generate(table_index=self.table_index)
        info = {
            "table_index": self.table_index,
            "fast_mode": self.genThread.fastMode,
        }
        self.genSequence("InitializeAddrTables", info)

    def createShallowCopy(self, gen_thread):
        """Copy only the table index into a fresh manager for another thread."""
        clone = AddressTableManagerRISCV(gen_thread)
        clone.table_index = self.table_index
        return clone

    def addressTable(self):
        """Return the managed AddressTableRISCV (None before generate())."""
        return self.address_table
acf861d32516d5fafb252b1da1b374b1e7bac1c7 | 1,251 | py | Python | indy-diem/async_calls.py | kiva/indy-diem | c015a44b15886a2a039c3b7768cf03a6295c134e | [
"Apache-2.0"
] | null | null | null | indy-diem/async_calls.py | kiva/indy-diem | c015a44b15886a2a039c3b7768cf03a6295c134e | [
"Apache-2.0"
] | 15 | 2021-08-17T15:31:07.000Z | 2021-09-20T15:11:59.000Z | indy-diem/async_calls.py | kiva/indy-diem | c015a44b15886a2a039c3b7768cf03a6295c134e | [
"Apache-2.0"
] | null | null | null | from indy import anoncreds
async def create_master_secret(prover):
    """Create a master secret in the prover's wallet and return its id."""
    return await anoncreds.prover_create_master_secret(prover['wallet'], None)
async def create_credential_offer(issuer_wallet, cred_def_id):
    """Build a credential offer for cred_def_id from the issuer's wallet."""
    return await anoncreds.issuer_create_credential_offer(issuer_wallet, cred_def_id)
async def create_credential_req(prover):
    """Create a credential request from the prover's stored offer/def/secret."""
    return await anoncreds.prover_create_credential_req(
        prover['wallet'], prover['did'], prover['cred_offer'],
        prover['cred_def'], prover['master_secret_id'])
async def create_credential(issuer):
    """Issue a credential from the issuer's stored offer, request and values."""
    return await anoncreds.issuer_create_credential(
        issuer['wallet'], issuer['cred_offer'], issuer['cred_req'],
        issuer['cred_values'], None, None)
async def store_credential(prover):
    """Store the issued credential in the prover's wallet; return the result."""
    return await anoncreds.prover_store_credential(
        prover['wallet'], None, prover['cred_req_metadata'], prover['cred'],
        prover['cred_def'], None)
| 40.354839 | 114 | 0.635492 |
acf8635d6cee4024bb76be6bb71086b574845df4 | 333 | py | Python | examples/utils/utils.py | ephsmith/darts | 0e5b5ad184ed8e83e703e5c955156400930d8afa | [
"Apache-2.0"
] | null | null | null | examples/utils/utils.py | ephsmith/darts | 0e5b5ad184ed8e83e703e5c955156400930d8afa | [
"Apache-2.0"
] | 42 | 2021-10-04T17:11:50.000Z | 2021-12-24T15:37:41.000Z | examples/utils/utils.py | ephsmith/darts | 0e5b5ad184ed8e83e703e5c955156400930d8afa | [
"Apache-2.0"
] | null | null | null | # fix pythonpath when working locally
from os.path import dirname, basename
from os import getcwd
import sys
def fix_pythonpath_if_working_locally():
    """Add the parent path to pythonpath if current working dir is darts/examples"""
    current = getcwd()
    if basename(current) != "examples":
        return
    # Running from the examples/ folder: make the repo root importable.
    sys.path.insert(0, dirname(current))
| 27.75 | 84 | 0.723724 |
acf8637b4d3a6862f059c65b44e2b2a70a24d548 | 6,319 | py | Python | tests/test.py | notetau/pimpl-cpp-generator | e6afe37b6034e613b706b60a80e8daf93f33c1d3 | [
"MIT"
] | 6 | 2016-05-28T09:10:19.000Z | 2020-06-01T14:48:19.000Z | tests/test.py | notetau/pimpl-cpp-generator | e6afe37b6034e613b706b60a80e8daf93f33c1d3 | [
"MIT"
] | null | null | null | tests/test.py | notetau/pimpl-cpp-generator | e6afe37b6034e613b706b60a80e8daf93f33c1d3 | [
"MIT"
] | 1 | 2020-06-01T14:48:20.000Z | 2020-06-01T14:48:20.000Z | # Copyright (c) 2015 Noto, Yuta
# Released under the MIT license
# http://opensource.org/licenses/mit-license.php
import sys
import os
import pprint
sys.path.append(os.path.dirname(__file__) + '/..')
import pimplgen
from nose.tools import ok_, eq_
BASE_DIR = os.path.dirname(__file__)
def setup_module(module):
    """Module-level test setup: point libclang at the CI machine's library."""
    # static library path for travis ci (trusty)
    # NOTE(review): this hard-coded libclang-3.4 path only exists on that CI
    # image; other machines need a different library file to run these tests.
    pimplgen.cl.Config.set_compatibility_check(False)
    pimplgen.cl.Config.set_library_file('/usr/lib/x86_64-linux-gnu/libclang-3.4.so.1')
TEST_PARAMETER = [
# 0
({
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'Basic1',
'output_class': 'TBasic1'
},
{'class_decl': 'template < typename U = int > struct',
'class_name': 'Basic1',
'class_sig': 'template < typename U = int > struct Basic1',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foo', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [{'name': 'pimplvar0', 'sig': 'int pimplvar0'}, {'name': 'y', 'sig': 'float y'}, {'name': 'pimplvar1', 'sig': 'double pimplvar1'}], 'const': False, 'func_name': 'bar', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'z', 'sig': 'int z = ( 42 )'}], 'const': False, 'func_name': 'baz', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [], 'const': False, 'func_name': 'qux', 'is_void': False, 'restrict': False, 'result': 'double', 'template_args': [], 'volatile': False},
{'args': [{'name': 't', 'sig': 'T t'}], 'const': False, 'func_name': 'norf', 'is_void': False, 'restrict': False, 'result': 'T', 'template_args': [{'name': 'T', 'sig': 'typename T'}, {'name': 'N', 'sig': 'long N'}], 'volatile': False}],
'template_args': [{'name': 'U', 'sig': 'typename U = int'}]}
),
# 1
({
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'Basic2',
'output_class': 'TBasic2'
},
{'class_decl': 'struct',
'class_name': 'Basic2',
'class_sig': 'struct Basic2',
'constructor_info': [],
'func_info': [{'args': [], 'const': False, 'func_name': 'bar', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'x', 'sig': 'int x'}], 'const': False, 'func_name': 'baz', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False}],
'template_args': []}
),
# 2
(),
# 3
( {
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'Basic4',
'output_class': 'TBasic4'
},
{'class_decl': 'struct',
'class_name': 'Basic4',
'class_sig': 'struct Basic4',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foo', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foofoo', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [], 'const': False, 'func_name': 'bar', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': True},
{'args': [{'name': 'a', 'sig': 'char a'}], 'const': True, 'func_name': 'baz', 'is_void': False, 'restrict': False, 'result': 'int', 'template_args': [], 'volatile': False},
{'args': [], 'const': True, 'func_name': 'qux', 'is_void': False, 'restrict': False, 'result': 'double', 'template_args': [], 'volatile': True}],
'template_args': []}
),
# 4
( {
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'a::Basic5',
'output_class': 'TBasic5'
},
{'class_decl': 'struct',
'class_name': 'Basic5',
'class_sig': 'struct Basic5',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'float x'}], 'const': False, 'func_name': 'foo', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False}],
'template_args': []}
),
# 5
( {
'src_file': BASE_DIR + '/cppsrc/basic1.cpp',
'target_class': 'a::b::Basic6',
'output_class': 'TBasic6'
},
{'class_decl': 'struct',
'class_name': 'Basic6',
'class_sig': 'struct Basic6',
'constructor_info': [],
'func_info': [{'args': [{'name': 'x', 'sig': 'int x'}], 'const': False, 'func_name': 'foo', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'b5', 'sig': 'Basic5 & b5'}], 'const': False, 'func_name': 'bar', 'is_void': True, 'restrict': False, 'result': 'void', 'template_args': [], 'volatile': False},
{'args': [{'name': 'other', 'sig': 'const Basic6 & other'}], 'const': False, 'func_name': 'operator=', 'is_void': False, 'restrict': False, 'result': 'a::b::Basic6 &', 'template_args': [], 'volatile': False}],
'template_args': []}
)
]
def check_pattern(idx):
    """Run pimplgen on test case |idx| and compare the parsed class info
    against the expected dict stored in TEST_PARAMETER."""
    case = TEST_PARAMETER[idx]
    cli = '{src_file} -t {target_class} -o {output_class}'.format(**case[0])
    generator = pimplgen.PimplGenerator(pimplgen.parse_args(cli))
    eq_(case[1], generator.parse())
# Pattern 2 has no test: TEST_PARAMETER[2] is an empty placeholder tuple.
def test_0():
    check_pattern(0)

def test_1():
    check_pattern(1)

def test_3():
    check_pattern(3)

def test_4():
    check_pattern(4)

def test_5():
    check_pattern(5)
def run_pattern(idx):
    """Debug helper (Python 2): run pimplgen on test case |idx| and dump the
    parsed class info and the generated code to stdout."""
    pattern = TEST_PARAMETER[idx]
    command = '{src_file} -t {target_class} -o {output_class}'.format(**pattern[0])
    args = pimplgen.parse_args(command)
    generator = pimplgen.PimplGenerator(args)
    class_info = generator.parse()
    # Wide pprint keeps each func_info dict on a single line for comparison.
    pprint.pprint(class_info, width=300)
    print '/////////////////////////////////////////////////////////////////////'
    # generate_code() returns two code chunks; print both separated by a blank.
    codes = generator.generate_code()
    print codes[0]
    print ''
    print codes[1]
# Manual debug entry point: configure libclang, then dump test case 5.
if __name__ == '__main__':
    setup_module(None)
    run_pattern(5)
| 47.871212 | 290 | 0.562589 |
acf863d827c31415e584fb858ea509f65ddb72f7 | 1,327 | py | Python | fit.py | foamliu/Fit_Quad | 2702e41bc6f34a720db66b79d0cf177379b15fe4 | [
"MIT"
] | null | null | null | fit.py | foamliu/Fit_Quad | 2702e41bc6f34a720db66b79d0cf177379b15fe4 | [
"MIT"
] | null | null | null | fit.py | foamliu/Fit_Quad | 2702e41bc6f34a720db66b79d0cf177379b15fe4 | [
"MIT"
] | 2 | 2019-08-08T10:03:25.000Z | 2020-02-19T12:24:05.000Z | import cv2 as cv
import imutils
import numpy as np
def fit_quad(filename):
    """Load an image and fit a quadrilateral to one of its largest contours.

    Returns ``(image, quad)``: image is the 512x512 BGR image, quad is a
    4-point approximated contour, or None if none of the ten largest
    contours approximates to four vertices.
    """
    image = cv.resize(cv.imread(filename), (512, 512))
    # Edge map: grayscale -> edge-preserving bilateral smoothing -> Canny.
    gray = cv.bilateralFilter(cv.cvtColor(image, cv.COLOR_BGR2GRAY), 11, 17, 17)
    edged = cv.Canny(gray, 30, 200)
    contours = imutils.grab_contours(
        cv.findContours(edged.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE))
    # Only the ten largest contours are considered, biggest first.
    largest = sorted(contours, key=cv.contourArea, reverse=True)[:10]
    quad = None
    for contour in largest:
        # Approximate within 1.5% of the perimeter; 4 vertices => quadrilateral.
        perimeter = cv.arcLength(contour, True)
        candidate = cv.approxPolyDP(contour, 0.015 * perimeter, True)
        if len(candidate) == 4:
            quad = candidate
            break
    return image, quad
# Batch driver: fit a quadrilateral to each of 15 input images, saving the
# resized image always and a contour-annotated copy when a quad was found.
if __name__ == '__main__':
    for i in range(15):
        filename = 'images/img_back_{}_img.png'.format(i)
        print(filename)
        image, screenCnt = fit_quad(filename)
        cv.imwrite('images/img_{}.jpg'.format(i), image)
        if screenCnt is not None:
            # Draw the detected quad in green before writing the overlay.
            cv.drawContours(image, [screenCnt], -1, (0, 255, 0), 3)
            cv.imwrite('images/out_{}.jpg'.format(i), image)
            print(np.squeeze(screenCnt, 1))
| 29.488889 | 78 | 0.62095 |
acf863f8aa39e5e88ef856e4335848bb9aa6c1fe | 32,263 | py | Python | tests/drop_packets/drop_packets.py | jleveque/sonic-mgmt | d74770a8e2fa9d7decc02064f3340570da1dfe3a | [
"Apache-2.0"
] | null | null | null | tests/drop_packets/drop_packets.py | jleveque/sonic-mgmt | d74770a8e2fa9d7decc02064f3340570da1dfe3a | [
"Apache-2.0"
] | null | null | null | tests/drop_packets/drop_packets.py | jleveque/sonic-mgmt | d74770a8e2fa9d7decc02064f3340570da1dfe3a | [
"Apache-2.0"
] | null | null | null | import logging
import os
import importlib
import netaddr
import pytest
import time
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.platform.device_utils import fanout_switch_port_lookup
from tests.common.helpers.constants import DEFAULT_NAMESPACE
from tests.common.utilities import get_inventory_files
# Names of the interface drop counters as shown by SONiC CLI.
RX_DRP = "RX_DRP"
RX_ERR = "RX_ERR"
# Column keys used when reading counters: L2 drops land in RX_DRP,
# L3 drops in RX_ERR.
L2_COL_KEY = RX_DRP
L3_COL_KEY = RX_ERR
# Global flag toggled by Mellanox-specific tests to skip counter checks.
pytest.SKIP_COUNTERS_FOR_MLNX = False
# Jinja2 template used to program the Mellanox fanout to rewrite MACs.
MELLANOX_MAC_UPDATE_SCRIPT = os.path.join(os.path.dirname(__file__), "fanout/mellanox/mlnx_update_mac.j2")
# Syslog patterns expected when a port goes oper down/up.
LOG_EXPECT_PORT_OPER_DOWN_RE = ".*Port {} oper state set from up to down.*"
LOG_EXPECT_PORT_OPER_UP_RE = ".*Port {} oper state set from down to up.*"
logger = logging.getLogger(__name__)
@pytest.fixture
def fanouthost(request, duthosts, rand_one_dut_hostname, localhost):
    """
    Fixture that allows to update Fanout configuration if there is a need to send incorrect packets.
    Added possibility to create vendor specific logic to handle fanout configuration.
    If vendor need to update Fanout configuration, 'fanouthost' fixture should load and return appropriate instance.
    This instance can be used inside test case to handle fanout configuration in vendor specific section.
    By default 'fanouthost' fixture will not instantiate any instance so it will return None, and in such case
    'fanouthost' instance should not be used in test case logic.
    """
    duthost = duthosts[rand_one_dut_hostname]
    fanout = None
    # Check that class to handle fanout config is implemented
    if "mellanox" == duthost.facts["asic_type"]:
        for file_name in os.listdir(os.path.join(os.path.dirname(__file__), "fanout")):
            # Import fanout configuration handler based on vendor name
            if "mellanox" in file_name:
                # NOTE(review): str.strip(".py") strips characters, not the
                # suffix — e.g. "proxy.py" would become "rox". Works for the
                # current file names but is fragile; verify if names change.
                module = importlib.import_module("..fanout.{0}.{0}_fanout".format(file_name.strip(".py")), __name__)
                fanout = module.FanoutHandler(duthost, localhost, get_inventory_files(request))
                if not fanout.is_mellanox:
                    fanout = None
                break
    yield fanout
    # Teardown: undo any fanout changes made during the test.
    if fanout is not None:
        fanout.restore_config()
@pytest.fixture(scope="module")
def pkt_fields(duthosts, rand_one_dut_hostname, tbinfo):
    """Collect packet field values (IPs and TCP ports) shared by the tests.

    IPv4/IPv6 destination addresses are taken from the BGP neighbor that
    appears first in minigraph; a ``Collector`` dict is returned that skips
    the calling test when the requested address family is unavailable.
    """
    duthost = duthosts[rand_one_dut_hostname]
    # Gather ansible facts
    mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
    ipv4_addr = None
    ipv6_addr = None
    # Pick all addresses belonging to the first-listed BGP neighbor; that
    # neighbor typically has one IPv4 and one IPv6 session entry.
    for item in mg_facts["minigraph_bgp"]:
        if item["name"] == mg_facts["minigraph_bgp"][0]["name"]:
            if netaddr.valid_ipv4(item["addr"]):
                ipv4_addr = item["addr"]
            else:
                ipv6_addr = item["addr"]
    class Collector(dict):
        # Dict that skips the test instead of handing out a missing address.
        def __getitem__(self, key):
            value = super(Collector, self).__getitem__(key)
            if key == "ipv4_dst" and value is None:
                pytest.skip("IPv4 address is not defined")
            elif key == "ipv6_dst" and value is None:
                pytest.skip("IPv6 address is not defined")
            return value
    test_pkt_data = Collector({
        "ipv4_dst": ipv4_addr,
        "ipv4_src": "1.1.1.1",
        "ipv6_dst": ipv6_addr,
        "ipv6_src": "ffff::101:101",
        "tcp_sport": 1234,
        "tcp_dport": 4321
        })
    return test_pkt_data
def expected_packet_mask(pkt):
    """ Return mask for sniffing packet """
    # Ignore L2 addresses and the fields a router rewrites (TTL, checksum)
    # so the mask matches the packet after one routed hop.
    exp_pkt = pkt.copy()
    exp_pkt = mask.Mask(exp_pkt)
    exp_pkt.set_do_not_care_scapy(packet.Ether, 'dst')
    exp_pkt.set_do_not_care_scapy(packet.Ether, 'src')
    exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
    exp_pkt.set_do_not_care_scapy(packet.IP, 'chksum')
    return exp_pkt
@pytest.fixture(scope="module")
def setup(duthosts, rand_one_dut_hostname, tbinfo):
    """
    Setup fixture for collecting PortChannel, VLAN and RIF port members.
    @return: Dictionary with keys:
        port_channel_members, vlan_members, rif_members, dut_to_ptf_port_map, neighbor_sniff_ports, vlans, mg_facts
    """
    duthost = duthosts[rand_one_dut_hostname]
    intf_per_namespace = {}
    port_channel_members = {}
    vlan_members = {}
    configured_vlans = []
    rif_members = []
    if tbinfo["topo"]["type"] == "ptf":
        pytest.skip("Unsupported topology {}".format(tbinfo["topo"]))
    #Gather interface facts per asic
    for ns in duthost.get_asic_namespace_list():
        intf_per_namespace[ns if ns is not DEFAULT_NAMESPACE else ''] = duthost.interface_facts(namespace=ns)['ansible_facts']['ansible_interface_facts']
    # Gather ansible facts
    mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
    # Map each physical member port to its parent PortChannel.
    for port_channel, interfaces in mg_facts['minigraph_portchannels'].items():
        for iface in interfaces["members"]:
            port_channel_members[iface] = port_channel
    # Map each VLAN member port to its VLAN name.
    for vlan_id in mg_facts["minigraph_vlans"]:
        for iface in mg_facts["minigraph_vlans"][vlan_id]["members"]:
            vlan_members[iface] = vlan_id
    # RIF members map an interface to itself (overwrites the list above).
    rif_members = {item["attachto"]: item["attachto"] for item in mg_facts["minigraph_interfaces"]}
    # Compose list of sniff ports
    neighbor_sniff_ports = []
    for dut_port, neigh in mg_facts['minigraph_neighbors'].items():
        neighbor_sniff_ports.append(mg_facts['minigraph_ptf_indices'][dut_port])
    for vlan_name, vlans_data in mg_facts["minigraph_vlans"].items():
        configured_vlans.append(int(vlans_data["vlanid"]))
    setup_information = {
        "port_channel_members": port_channel_members,
        "vlan_members": vlan_members,
        "rif_members": rif_members,
        "dut_to_ptf_port_map": mg_facts["minigraph_ptf_indices"],
        "neighbor_sniff_ports": neighbor_sniff_ports,
        "vlans": configured_vlans,
        "mg_facts": mg_facts,
        "intf_per_namespace": intf_per_namespace
    }
    return setup_information
@pytest.fixture
def rif_port_down(duthosts, rand_one_dut_hostname, setup, fanouthosts, loganalyzer):
    """Shut RIF interface and return neighbor IP address attached to this interface.

    The fanout port facing the first RIF member is shut down (the DUT log is
    checked for the oper-down message); teardown re-enables the port, checks
    for the oper-up message and waits for the link to settle.
    """
    duthost = duthosts[rand_one_dut_hostname]
    wait_after_ports_up = 30
    if not setup["rif_members"]:
        pytest.skip("RIF interface is absent")
    # list(...) instead of .keys()[0]: dict views are not subscriptable on
    # Python 3, so the original .keys()[0] raised TypeError there.
    rif_member_iface = list(setup["rif_members"])[0]
    vm_name = setup["mg_facts"]["minigraph_neighbors"][rif_member_iface].get("name", None)
    pytest_assert(vm_name, 'Neighbor not found for RIF member "{}"'.format(rif_member_iface))
    # Find the neighbor's IPv4 BGP address; this is what tests will target.
    ip_dst = None
    for item in setup["mg_facts"]["minigraph_bgp"]:
        if item["name"] == vm_name and netaddr.valid_ipv4(item["addr"]):
            ip_dst = item["addr"]
            break
    pytest_assert(ip_dst, 'Unable to find IP address for neighbor "{}"'.format(vm_name))
    fanout_neighbor, fanout_intf = fanout_switch_port_lookup(fanouthosts, duthost.hostname, rif_member_iface)
    # Shut the fanout side and verify the DUT observed the link going down.
    loganalyzer[rand_one_dut_hostname].expect_regex = [LOG_EXPECT_PORT_OPER_DOWN_RE.format(rif_member_iface)]
    with loganalyzer[rand_one_dut_hostname] as _:
        fanout_neighbor.shutdown(fanout_intf)
        time.sleep(1)
    yield ip_dst
    # Teardown: bring the port back up and give the link time to recover.
    loganalyzer[rand_one_dut_hostname].expect_regex = [LOG_EXPECT_PORT_OPER_UP_RE.format(rif_member_iface)]
    with loganalyzer[rand_one_dut_hostname] as _:
        fanout_neighbor.no_shutdown(fanout_intf)
        time.sleep(wait_after_ports_up)
@pytest.fixture(params=["port_channel_members", "vlan_members", "rif_members"])
def tx_dut_ports(request, setup):
    """Yield the member mapping of the parametrized port group; skip when empty."""
    members = setup[request.param]
    if not members:
        pytest.skip("No {} available".format(request.param))
    return members
@pytest.fixture
def ports_info(ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports):
    """
    Return:
        dut_iface - DUT interface name expected to receive packtes from PTF
        asic_index - asic which owns the dut_iface, significant on a multi-asic platform.
        ptf_tx_port_id - Port ID used by PTF for sending packets from expected PTF interface
        dst_mac - DUT interface destination MAC address
        src_mac - PTF interface source MAC address
    """
    # Local import: 'random' is not imported at module level in this file,
    # so the original random.choice(...) raised NameError at runtime.
    import random
    duthost = duthosts[rand_one_dut_hostname]
    data = {}
    # list(...) keeps this working on Python 3, where random.choice cannot
    # index a dict_keys view.
    data["dut_iface"] = random.choice(list(tx_dut_ports))
    # Check which asic owns this interface
    for ns in duthost.get_asic_namespace_list():
        if data["dut_iface"] in setup['intf_per_namespace'][ns if ns is not DEFAULT_NAMESPACE else '']:
            break
    # Get the asic index
    asic_index = duthost.get_asic_id_from_namespace(ns)
    data["asic_index"] = asic_index
    data["ptf_tx_port_id"] = setup["dut_to_ptf_port_map"][data["dut_iface"]]
    data["dst_mac"] = setup['intf_per_namespace'][ns if ns is not DEFAULT_NAMESPACE else ''][data["dut_iface"]]['macaddress']
    data["src_mac"] = ptfadapter.dataplane.ports[(0, data["ptf_tx_port_id"])].mac()
    return data
def log_pkt_params(dut_iface, mac_dst, mac_src, ip_dst, ip_src):
    """ Displays information about packet fields used in test case: mac_dst, mac_src, ip_dst, ip_src """
    # Emit one log line per field, in the same order and wording as before.
    fields = (
        ("Selected TX interface on DUT", dut_iface),
        ("Packet DST MAC", mac_dst),
        ("Packet SRC MAC", mac_src),
        ("Packet IP DST", ip_dst),
        ("Packet IP SRC", ip_src),
    )
    for label, value in fields:
        logger.info("{} - {}".format(label, value))
def send_packets(pkt, duthost, ptfadapter, ptf_tx_port_id, num_packets=1):
    """Send ``num_packets`` copies of ``pkt`` from PTF port ``ptf_tx_port_id``.

    The PTF dataplane buffer is flushed first so later sniffing only sees
    packets generated by this call.  ``duthost`` is accepted for caller
    symmetry but is not used here.
    """
    # Clear packets buffer on PTF
    ptfadapter.dataplane.flush()
    time.sleep(1)
    # Send packets
    testutils.send(ptfadapter, ptf_tx_port_id, pkt, count=num_packets)
    time.sleep(1)
def test_equal_smac_dmac_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, fanouthost, pkt_fields, ports_info):
    """
    @summary: Create a packet with equal SMAC and DMAC.

    Such a frame must be dropped at L2.  On Mellanox the fanout rewrites the
    SMAC on the wire (the PTF-side packet keeps a distinct SMAC), so the
    expected/"comparable" packet is built separately with SMAC == DMAC.
    """
    if not fanouthost:
        pytest.skip("Test case requires explicit fanout support")
    duthost = duthosts[rand_one_dut_hostname]
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["dst_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
    src_mac = ports_info["dst_mac"]
    if "mellanox" == duthost.facts["asic_type"]:
        pytest.SKIP_COUNTERS_FOR_MLNX = True
        src_mac = "00:00:00:00:00:11"
        # Prepare openflow rule
        fanouthost.update_config(template_path=MELLANOX_MAC_UPDATE_SCRIPT, match_mac=src_mac, set_mac=ports_info["dst_mac"], eth_field="eth_src")
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=src_mac, # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
    )
    comparable_pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["dst_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
    )
    do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt)
def test_multicast_smac_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, fanouthost, pkt_fields, ports_info):
    """
    @summary: Create a packet with multicast SMAC.

    A multicast source MAC is invalid and must be dropped at L2.  As with
    the equal-SMAC test, on Mellanox the fanout rewrites a unicast SMAC to
    the multicast one on the wire, so a separate comparable packet is built.
    """
    if not fanouthost:
        pytest.skip("Test case requires explicit fanout support")
    duthost = duthosts[rand_one_dut_hostname]
    multicast_smac = "01:00:5e:00:01:02"
    src_mac = multicast_smac
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], multicast_smac, pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
    if "mellanox" == duthost.facts["asic_type"]:
        pytest.SKIP_COUNTERS_FOR_MLNX = True
        src_mac = "00:00:00:00:00:11"
        # Prepare openflow rule
        fanouthost.update_config(template_path=MELLANOX_MAC_UPDATE_SCRIPT, match_mac=src_mac, set_mac=multicast_smac, eth_field="eth_src")
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=src_mac,
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
    )
    comparable_pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=multicast_smac,
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
    )
    do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt)
def test_not_expected_vlan_tag_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, pkt_fields, ports_info):
    """
    @summary: Create a VLAN tagged packet which VLAN ID does not match ingress port VLAN ID.

    Scans [start_vlan_id, upper_bound) for the first VLAN ID that is not
    configured on the DUT and uses it as the bogus tag; the frame must be
    dropped at L2.
    """
    duthost = duthosts[rand_one_dut_hostname]
    start_vlan_id = 2
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
    max_vlan_id = 1000
    # When no VLANs are configured, fall back to scanning up to max_vlan_id.
    upper_bound = max(setup["vlans"]) if setup["vlans"] else max_vlan_id
    for interim in range(start_vlan_id, upper_bound):
        if interim not in setup["vlans"]:
            vlan_id = interim
            break
    else:
        # for/else: reached only when every candidate VLAN ID is in use
        pytest.fail("Unable to generate unique not yet existed VLAN ID. Already configured VLANs range {}-{}".format(start_vlan_id,
            upper_bound))
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"],
        dl_vlan_enable=True,
        vlan_vid=vlan_id,
        )
    do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"])
def test_dst_ip_is_loopback_addr(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, pkt_fields, tx_dut_ports, ports_info):
    """
    @summary: Create a packet with loopback destination IP adress.

    127.0.0.0/8 destinations are not routable and must be dropped at L3.
    """
    duthost = duthosts[rand_one_dut_hostname]
    ip_dst = "127.0.0.1"
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], ip_dst, pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=ip_dst, # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"])
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_src_ip_is_loopback_addr(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create a packet with loopback source IP adress.

    A 127.0.0.0/8 source address is invalid on the wire and the packet must
    be dropped at L3.
    """
    duthost = duthosts[rand_one_dut_hostname]
    ip_src = "127.0.0.1"
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], ip_src)
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=ip_src, # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"])
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_dst_ip_absent(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create a packet with absent destination IP address.

    An empty ip_dst yields a malformed IP header; the DUT must drop it at L3.
    """
    duthost = duthosts[rand_one_dut_hostname]
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], "", pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst="", # VM source
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"])
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("ip_addr", ["ipv4", "ipv6"])
def test_src_ip_is_multicast_addr(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ip_addr, ports_info):
    """
    @summary: Create a packet with multicast source IP adress.

    Multicast source addresses (224.0.0.0/4 or ff00::/8) are invalid and the
    packet must be dropped at L3.  Parametrized for both address families.
    """
    duthost = duthosts[rand_one_dut_hostname]
    ip_src = None
    if ip_addr == "ipv4":
        ip_src = "224.0.0.5"
        pkt = testutils.simple_tcp_packet(
            eth_dst=ports_info["dst_mac"], # DUT port
            eth_src=ports_info["src_mac"], # PTF port
            ip_src=ip_src,
            ip_dst=pkt_fields["ipv4_dst"], # VM source
            tcp_sport=pkt_fields["tcp_sport"],
            tcp_dport=pkt_fields["tcp_dport"])
    elif ip_addr == "ipv6":
        if not pkt_fields["ipv6_dst"]:
            pytest.skip("BGP neighbour with IPv6 addr was not found")
        ip_src = "FF02:AAAA:FEE5::1:3"
        pkt = testutils.simple_tcpv6_packet(
            eth_dst=ports_info["dst_mac"], # DUT port
            eth_src=ports_info["src_mac"], # PTF port
            ipv6_src=ip_src,
            ipv6_dst=pkt_fields["ipv6_dst"], # VM source
            tcp_sport=pkt_fields["tcp_sport"],
            tcp_dport=pkt_fields["tcp_dport"])
    else:
        pytest.fail("Incorrect value specified for 'ip_addr' test parameter. Supported parameters: 'ipv4' and 'ipv6'")
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], ip_src)
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_src_ip_is_class_e(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create a packet with source IP address in class E.

    Class E (240.0.0.0/4, excluding the broadcast address) sources are
    reserved; most ASICs drop them at L3.  Broadcom is excluded because it
    forwards such packets.
    """
    duthost = duthosts[rand_one_dut_hostname]
    asic_type = duthost.facts["asic_type"]
    pytest_require("broadcom" not in asic_type, "BRCM does not drop SIP class E packets")
    # Probe both ends of the class E range.
    ip_list = ["240.0.0.1", "255.255.255.254"]
    for ip_class_e in ip_list:
        log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"],
            ip_class_e)
        pkt = testutils.simple_tcp_packet(
            eth_dst=ports_info["dst_mac"], # DUT port
            eth_src=ports_info["src_mac"], # PTF port
            ip_src=ip_class_e,
            ip_dst=pkt_fields["ipv4_dst"], # VM source
            tcp_sport=pkt_fields["tcp_sport"],
            tcp_dport=pkt_fields["tcp_dport"])
        do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("addr_type, addr_direction", [("ipv4", "src"), ("ipv6", "src"), ("ipv4", "dst"),
    ("ipv6", "dst")])
def test_ip_is_zero_addr(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, addr_type, addr_direction, ports_info):
    """
    @summary: Create a packet with "0.0.0.0" source or destination IP address.

    Parametrized over both address families and both directions; the packet
    must be dropped at L3 in every combination.
    """
    duthost = duthosts[rand_one_dut_hostname]
    zero_ipv4 = "0.0.0.0"
    zero_ipv6 = "::0"
    pkt_params = {
        "eth_dst": ports_info["dst_mac"], # DUT port
        "eth_src": ports_info["src_mac"], # PTF port
        "tcp_sport": pkt_fields["tcp_sport"],
        "tcp_dport": pkt_fields["tcp_dport"]
    }
    if addr_type == "ipv4":
        if addr_direction == "src":
            pkt_params["ip_src"] = zero_ipv4
            pkt_params["ip_dst"] = pkt_fields["ipv4_dst"] # VM source
        elif addr_direction == "dst":
            pkt_params["ip_src"] = pkt_fields["ipv4_src"] # VM source
            pkt_params["ip_dst"] = zero_ipv4
        else:
            pytest.fail("Incorrect value specified for 'addr_direction'. Supported parameters: 'src' and 'dst'")
        pkt = testutils.simple_tcp_packet(**pkt_params)
    elif addr_type == "ipv6":
        if not pkt_fields["ipv6_dst"]:
            pytest.skip("BGP neighbour with IPv6 addr was not found")
        if addr_direction == "src":
            pkt_params["ipv6_src"] = zero_ipv6
            pkt_params["ipv6_dst"] = pkt_fields["ipv6_dst"] # VM source
        elif addr_direction == "dst":
            pkt_params["ipv6_src"] = pkt_fields["ipv6_src"] # VM source
            pkt_params["ipv6_dst"] = zero_ipv6
        else:
            pytest.fail("Incorrect value specified for 'addr_direction'. Supported parameters: 'src' and 'dst'")
        pkt = testutils.simple_tcpv6_packet(**pkt_params)
    else:
        pytest.fail("Incorrect value specified for 'addr_type' test parameter. Supported parameters: 'ipv4' or 'ipv6'")
    logger.info(pkt_params)
    # Note: this test sniffs on every PTF port mapped from the DUT, not only
    # the neighbor sniff ports used by the other tests.
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports)
def test_dst_ip_link_local(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create a packet with link-local address "169.254.0.0/16".

    Link-local destinations must not be routed; Broadcom is excluded because
    it forwards such packets.
    """
    duthost = duthosts[rand_one_dut_hostname]
    asic_type = duthost.facts["asic_type"]
    pytest_require("broadcom" not in asic_type, "BRCM does not drop DIP link local packets")
    link_local_ip = "169.254.10.125"
    pkt_params = {
        "eth_dst": ports_info["dst_mac"], # DUT port
        "eth_src": ports_info["src_mac"], # PTF port
        "tcp_sport": pkt_fields["tcp_sport"],
        "tcp_dport": pkt_fields["tcp_dport"]
    }
    pkt_params["ip_src"] = pkt_fields["ipv4_src"] # VM source
    pkt_params["ip_dst"] = link_local_ip
    pkt = testutils.simple_tcp_packet(**pkt_params)
    logger.info(pkt_params)
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
# Test case is skipped, because SONiC does not have a control to adjust loop-back filter settings.
# Default SONiC behaviour is to forward the traffic, so loop-back filter does not triggers for IP packets.
# All router interfaces has attribute "sx_interface_attributes_t.loopback_enable" - enabled.
# To enable loop-back filter drops - need to disable that attribute when create RIF.
# To do this can be used SAI attribute SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION, which is not exposed to SONiC
@pytest.mark.skip(reason="SONiC can't enable loop-back filter feature")
def test_loopback_filter(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create a packet drops by loopback-filter. Loop-back filter means that route to the host
    with DST IP of received packet exists on received interface

    Permanently skipped: see the module-level comment above — SONiC exposes
    no control to enable loop-back filtering on router interfaces.
    """
    duthost = duthosts[rand_one_dut_hostname]
    ip_dst = None
    # Target the BGP address of the neighbor attached to the very interface
    # the packet arrives on, so the route points back out the ingress port.
    vm_name = setup["mg_facts"]["minigraph_neighbors"][ports_info["dut_iface"]]["name"]
    for item in setup["mg_facts"]["minigraph_bgp"]:
        if item["name"] == vm_name:
            ip_dst = item["addr"]
            break
    if ip_dst is None:
        pytest.skip("Testcase is not supported on current interface")
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], ip_dst, pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=ip_dst,
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"])
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_ip_pkt_with_expired_ttl(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create an IP packet with TTL=0.

    A router must not forward a packet whose TTL is already zero; it is
    dropped at L3.
    """
    duthost = duthosts[rand_one_dut_hostname]
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"],
        pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"], # VM IP address
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"],
        ip_ttl=0)
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("pkt_field, value", [("version", 1), ("chksum", 10), ("ihl", 1)])
def test_broken_ip_header(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, pkt_field, value, ports_info):
    """
    @summary: Create a packet with broken IP header.

    Parametrized over three corruptions: bad version, bad checksum, and an
    IHL shorter than the minimum header length.  Each must be dropped at L3.
    """
    duthost = duthosts[rand_one_dut_hostname]
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"],
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
        )
    # Corrupt the chosen IP-header field after the packet is built so scapy
    # does not recompute it.
    setattr(pkt[testutils.scapy.scapy.all.IP], pkt_field, value)
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_absent_ip_header(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
    """
    @summary: Create packets with absent IP header.

    The IP layer is stripped while the Ethernet type still claims 0x800
    (IPv4), so the frame carries TCP directly after Ethernet and must be
    dropped at L3.
    """
    duthost = duthosts[rand_one_dut_hostname]
    log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"],
        pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=ports_info["dst_mac"], # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"],
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
        )
    # Detach the TCP layer, delete IP, then re-attach TCP directly to the
    # Ethernet layer while keeping ethertype 0x800.
    tcp = pkt[testutils.scapy.scapy.all.TCP]
    del pkt[testutils.scapy.scapy.all.IP]
    pkt.type = 0x800
    pkt = pkt/tcp
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("eth_dst", ["01:00:5e:00:01:02", "ff:ff:ff:ff:ff:ff"])
def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, eth_dst, ports_info):
    """
    @summary: Create packets with multicast/broadcast ethernet dst.

    A unicast IP destination carried in a multicast or broadcast Ethernet
    frame is invalid and must be dropped at L3.  VLAN interfaces are skipped
    because broadcast/multicast frames are legitimately flooded there.
    """
    duthost = duthosts[rand_one_dut_hostname]
    if "vlan" in tx_dut_ports[ports_info["dut_iface"]].lower():
        pytest.skip("Test case is not supported on VLAN interface")
    log_pkt_params(ports_info["dut_iface"], eth_dst, ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
    pkt = testutils.simple_tcp_packet(
        eth_dst=eth_dst, # DUT port
        eth_src=ports_info["src_mac"], # PTF port
        ip_src=pkt_fields["ipv4_src"], # PTF source
        ip_dst=pkt_fields["ipv4_dst"],
        tcp_sport=pkt_fields["tcp_sport"],
        tcp_dport=pkt_fields["tcp_dport"]
        )
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("igmp_version,msg_type", [("v1", "general_query"), ("v3", "general_query"), ("v1", "membership_report"),
    ("v2", "membership_report"), ("v3", "membership_report"), ("v2", "leave_group")])
def test_non_routable_igmp_pkts(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, fanouthost, tx_dut_ports, pkt_fields, igmp_version, msg_type, ports_info):
    """
    @summary: Create an IGMP non-routable packets.

    Parametrized over IGMP versions v1-v3 and message types; each packet is
    link-local (TTL=1, IGMP protocol 2) and must not be routed by the DUT.
    """
    # IGMP Types:
    # 0x11 = Membership Query
    # 0x12 = Version 1 Membership Report
    # 0x16 = Version 2 Membership Report
    # 0x17 = Leave Group
    # IP destination address according to the RFC 2236:
    # Message Type                  Destination Group
    # ------------                  -----------------
    # General Query                 ALL-SYSTEMS (224.0.0.1)
    # Group-Specific Query          The group being queried
    # Membership Report             The group being reported
    # Leave Message                 ALL-ROUTERS (224.0.0.2)
    # TODO: fix this workaround as of now current PTF and Scapy versions do not support creation of IGMP packets
    # Temporaly created hex of IGMP packet layer by using scapy version 2.4.3.
    # Example how to get HEX of specific IGMP packets:
    # v3_membership_query = IGMPv3(type=0x11, mrcode=0, chksum=None)/scapy.contrib.igmpv3.IGMPv3mq(gaddr="224.0.0.1",
    # srcaddrs=["172.16.11.1", "10.0.0.59"], qrv=1, qqic=125, numsrc=2)
    # gr_obj = scapy.contrib.igmpv3.IGMPv3gr(rtype=1, auxdlen=0, maddr="224.2.2.4", numsrc=2, srcaddrs=["172.16.11.1",
    # "10.0.0.59"]).build()
    # v3_membership_report = IGMPv3(type=0x22, mrcode=0, chksum=None)/scapy.contrib.igmpv3.IGMPv3mr(res2=0x00, numgrp=1,
    # records=[gr_obj]).build()
    # The rest packets are build like "simple_igmp_packet" function from PTF testutils.py
    # FIXME: Need some sort of configuration for EOS and SONiC fanout hosts to
    # not drop IGMP packets before they reach the DUT
    if not fanouthost:
        pytest.skip("Test case requires explicit fanout support")
    duthost = duthosts[rand_one_dut_hostname]
    from scapy.contrib.igmp import IGMP
    Ether = testutils.scapy.Ether
    IP = testutils.scapy.IP
    if "vlan" in tx_dut_ports[ports_info["dut_iface"]].lower() and msg_type == "membership_report":
        pytest.skip("Test case is not supported on VLAN interface")
    igmp_proto = 0x02
    multicast_group_addr = "224.1.1.1"
    ethernet_dst = "01:00:5e:01:01:01"
    ip_dst = {"general_query": "224.0.0.1",
              "membership_report": multicast_group_addr}
    # v1/v2 are real scapy IGMP layers; v3 entries are pre-built raw bytes
    # (see the workaround comment above).
    igmp_types = {"v1": {"general_query": IGMP(type=0x11, gaddr="224.0.0.1"),
                         "membership_report": IGMP(type=0x12, gaddr=multicast_group_addr)},
                  "v2": {"membership_report": IGMP(type=0x16, gaddr=multicast_group_addr),
                         "leave_group": IGMP(type=0x17, gaddr=multicast_group_addr)},
                  "v3": {"general_query": "\x11\x00L2\xe0\x00\x00\x01\x01}\x00\x02\xac\x10\x0b\x01\n\x00\x00;",
                         "membership_report": "\"\x009\xa9\x00\x00\x00\x01\x01\x00\x00\x02\xe0\x02\x02\x04\xac\x10\x0b\x01\n\x00\x00;"}
    }
    if igmp_version == "v3":
        # v3: build a plain IP packet and append the raw IGMPv3 payload.
        pkt = testutils.simple_ip_packet(
            eth_dst=ethernet_dst,
            eth_src=ports_info["src_mac"],
            ip_src=pkt_fields["ipv4_src"],
            ip_dst=ip_dst[msg_type],
            ip_ttl=1,
            ip_proto=igmp_proto
        )
        del pkt["Raw"]
        pkt = pkt / igmp_types[igmp_version][msg_type]
    else:
        # v1/v2: let scapy's igmpize() fix up the IP/Ethernet fields.
        eth_layer = Ether(src=ports_info["src_mac"], dst=ethernet_dst)
        ip_layer = IP(src=pkt_fields["ipv4_src"], )
        igmp_layer = igmp_types[igmp_version][msg_type]
        assert igmp_layer.igmpize(ip=ip_layer, ether=eth_layer), "Can't create IGMP packet"
        pkt = eth_layer/ip_layer/igmp_layer
    log_pkt_params(ports_info["dut_iface"], ethernet_dst, ports_info["src_mac"], pkt.getlayer("IP").dst, pkt_fields["ipv4_src"])
    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports)
| 42.339895 | 167 | 0.680625 |
acf86519c0a4cfc2a5e23b15e0fa2af240019c07 | 3,992 | py | Python | framework/utils_test.py | saan5/chromium-dashboard | a9650cd3a2d58bee819ae218051eeb0f3e657190 | [
"Apache-2.0"
] | 2 | 2021-06-19T14:59:53.000Z | 2021-09-18T14:45:59.000Z | framework/utils_test.py | saan5/chromium-dashboard | a9650cd3a2d58bee819ae218051eeb0f3e657190 | [
"Apache-2.0"
] | null | null | null | framework/utils_test.py | saan5/chromium-dashboard | a9650cd3a2d58bee819ae218051eeb0f3e657190 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import testing_config # Must be imported before the module under test.
import mock
import werkzeug.exceptions # Flask HTTP stuff.
from framework import utils
class MockHandler(object):
    """Minimal stand-in for a request handler, used to exercise the
    utils.strip_trailing_slash decorator.

    The instance doubles as its own request object (self.request = self) so
    the decorator can read ``self.request.path`` and call ``self.redirect``.
    """

    def __init__(self, path):
        # Records of what the decorator did: either the wrapped handler was
        # called (handler_called_with) or a redirect was issued.
        self.handler_called_with = None
        self.redirected_to = None
        self.request = self
        self.path = path

    @utils.strip_trailing_slash
    def handlerMethod(self, *args):
        self.handler_called_with = args

    def redirect(self, new_path):
        self.redirected_to = new_path
class UtilsFunctionTests(unittest.TestCase):
    """Unit tests for helper functions in framework.utils."""

    def test_normalized_name(self):
        self.assertEqual('', utils.normalized_name(''))
        self.assertEqual('abc', utils.normalized_name('abc'))
        self.assertEqual('abc', utils.normalized_name('Abc'))
        self.assertEqual('abc', utils.normalized_name('ABC'))
        self.assertEqual('abc', utils.normalized_name('A BC'))
        self.assertEqual('abc', utils.normalized_name('A B/C'))
        self.assertEqual('abc', utils.normalized_name(' /A B/C /'))

    def test_format_feature_url(self):
        self.assertEqual(
            '/feature/123',
            utils.format_feature_url(123))

    @mock.patch('logging.error')
    @mock.patch('logging.warning')
    @mock.patch('time.sleep')  # Run test full speed.
    def testRetryDecorator_ExceedFailures(
            self, mock_sleep, mock_warn, mock_err):
        class Tracker(object):
            func_called = 0
        tracker = Tracker()

        # Use a function that always fails.
        @utils.retry(2, delay=1, backoff=2)
        def testFunc(tracker):
            tracker.func_called += 1
            raise Exception('Failed')

        with self.assertRaises(Exception):
            testFunc(tracker)
        # assertEqual (not the deprecated assertEquals alias, removed in
        # Python 3.12): 1 initial attempt + 2 retries = 3 calls.
        self.assertEqual(3, tracker.func_called)
        self.assertEqual(2, len(mock_sleep.mock_calls))
        self.assertEqual(2, len(mock_warn.mock_calls))
        self.assertEqual(1, len(mock_err.mock_calls))

    @mock.patch('logging.warning')
    @mock.patch('time.sleep')  # Run test full speed.
    def testRetryDecorator_EventuallySucceed(self, mock_sleep, mock_warn):
        class Tracker(object):
            func_called = 0
        tracker = Tracker()

        # Use a function that succeeds on the 2nd attempt.
        @utils.retry(2, delay=1, backoff=2)
        def testFunc(tracker):
            tracker.func_called += 1
            if tracker.func_called < 2:
                raise Exception('Failed')

        testFunc(tracker)
        self.assertEqual(2, tracker.func_called)
        self.assertEqual(1, len(mock_sleep.mock_calls))
        self.assertEqual(1, len(mock_warn.mock_calls))

    def test_strip_trailing_slash(self):
        # Without a trailing slash the handler runs normally.
        handlerInstance = MockHandler('/request/path')
        handlerInstance.handlerMethod('/request/path')
        self.assertEqual(('/request/path',), handlerInstance.handler_called_with)
        self.assertIsNone(handlerInstance.redirected_to)

        # With a trailing slash the decorator redirects instead of calling.
        handlerInstance = MockHandler('/request/path/')
        handlerInstance.handlerMethod('/request/path/')
        self.assertIsNone(handlerInstance.handler_called_with)
        self.assertEqual('/request/path', handlerInstance.redirected_to)

    def test_get_banner_time__None(self):
        """If no time specified, it returns None."""
        self.assertIsNone(utils.get_banner_time(None))

    def test_get_banner_time__tuple(self):
        """If a time tuple is specified, it returns a timestamp."""
        time_tuple = (2019, 6, 13, 18, 30)
        actual = utils.get_banner_time(time_tuple)
        self.assertEqual(1560450600, actual)
| 33.266667 | 77 | 0.72495 |
acf8679b0be483c62df3cdb8ac67ac84f88098db | 11,476 | py | Python | pytorch_keras_converter/utility/torch2keras.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | 17 | 2019-10-01T14:14:18.000Z | 2021-04-25T13:32:24.000Z | pytorch_keras_converter/utility/torch2keras.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | null | null | null | pytorch_keras_converter/utility/torch2keras.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | 2 | 2019-10-01T14:02:43.000Z | 2019-10-01T14:14:19.000Z | from . import converting_layers as c_l
from tqdm import tqdm
from .LayerRepresentation import normalizeShape
try:
import tensorflow.keras as keras
except ImportError:
try:
import keras
except ImportError:
keras = None
try:
import torch
except ImportError:
torch = None
lastProgress = 0
def kerasShape(tensor):
"""
Determine the shape of a tensor or a keras layer
Useful to check that PyTorch to Keras conversion doesn't fail
"""
if tensor is None:
return None
else:
if '_keras_shape' in dir(tensor):
if tensor._keras_shape is not None:
shape = tensor._keras_shape
# In LayerRepresentation, we leave out batch size :
shape = list(shape)
del shape[0]
shape = tuple(shape)
elif 'shape' in dir(tensor):
shape = tensor.shape.as_list()
del shape[0]
shape = tuple(shape)
elif '_shape_val' in dir(tensor):
if tensor._shape_val is not None:
kerasShape = tensor._shape_val
# In LayerRepresentation, we leave out batch size, so we
# start at 1 (not 0) :
values = range(1, len(kerasShape._dims))
shape = [kerasShape._dims[k]._value for k in values]
shape = tuple(shape)
else:
shape = None
shape = normalizeShape(shape)
return shape
def convert_torch2keras_file(model, input_size=None):
    """
    Convert a PyTorch model to text (source-code) Keras equivalents.

    Arguments:
        -model:
            the model to convert (LayerRepresentation)
        -input_size:
            currently unused; kept for signature symmetry with
            convert_torch2keras (TODO confirm before removing)

    Returns:
        the model (LayerRepresentation), with text equivalents filled in
    """
    # file=True makes createSimpleEquivalences also build the text
    # equivalents of each layer.
    createSimpleEquivalences(model, file=True)
    return model
def convert_torch2keras(model, input_size, weights=True, quiet=True):
    """
    Converts a PyTorch model to Keras.

    Arguments:
        -model:
            the model to convert (LayerRepresentation)
        -input_size:
            int, list, or tuple.
        -weights (bool):
            If weights should be converted too (may take a lot of time !)
        -quiet (bool):
            If a progress bar should appear

    Returns:
        the model (LayerRepresentation)
    """
    global lastProgress
    # Reset the shared counter used by updateProgress().
    lastProgress = 0

    # Step 1 : Compute all input and output shapes and place it on our model
    # Convert input_size into a tuple
    input_size = normalizeShape(input_size)

    if not quiet:
        # +1 accounts for the root model itself.
        progressBar = tqdm(total=model.numberOfChildren() + 1, unit='layer')
        print("\nAnalysing model...")
    else:
        progressBar = None

    findAllInputShapes(model, input_size)

    # Step 2: convert every simple layer (i.e native layers, in most cases)
    if not quiet:
        print("\nComputing equivalents layer by layer...")

    createSimpleEquivalences(model,
                             weights=weights,
                             quiet=quiet,
                             progressBar=progressBar)

    # Let's check if our model is fully converted:
    if 'keras' in model.equivalent.keys():
        return model

    # Step 3: Keras Functional API. Repeat until the root model has a
    # Keras equivalent (each pass connects one more ring of layers).
    if not quiet:
        print("\nConnecting layers together with Keras Functionnal API...")

    while 'keras' not in model.equivalent.keys():
        advancedKerasEquivalence(model,
                                 quiet=quiet,
                                 progressBar=progressBar)

    # Done!
    if not quiet:
        progressBar.close()
        print("\nDone !")

    return model
def createSimpleEquivalences(model,
                             file=False,
                             weights=True,
                             quiet=True,
                             progressBar=None):
    """
    Computes equivalent of most simple layers (native pyTorch layers,
    nn.Sequential containing only native layers...), recursing depth-first
    through the layer tree.

    Arguments:
        -model:
            A LayerRepresentation object to use
        -file (bool):
            If we want to write the equivalent in a python file
        -weights (bool):
            Also convert weights
        -quiet:
            If a progress bar should appear
        -progressBar:
            If a progress bar was already created, put it here
    """
    # Create a progress bar if necessary
    if not quiet and progressBar is None:
        progressBar = tqdm(total=model.numberOfChildren() + 1, unit='layer')

    if 'torch' in model.equivalent.keys():  # torch equivalent available
        # CONVERSION: torch -> keras
        if not model.children:  # 1st case: no children (leaf layer)
            if model.isTorchBuiltIn():
                kerasEq = None
                kerasEq = c_l.torch2kerasEquivalent(model, weights=weights)

                kerasEqTxt = None
                if file:
                    kerasEqTxt = c_l.torch2kerasEquivalent(model,
                                                           file=True,
                                                           weights=weights)
                if kerasEq is not None:
                    # keras equivalent computation succeeded!
                    model.equivalent['keras'] = kerasEq
                if kerasEqTxt is not None:
                    # keras equivalent computation succeeded!
                    model.equivalentTxt['keras'] = kerasEqTxt
            if not quiet:
                updateProgress(model, progressBar)
        else:  # 2nd case: there are children
            if not model.childrenEquivalentsCompleted('keras',
                                                      file=file):
                # There are children,
                # but all equivalents aren't computed yet: recurse first.
                for child in model.children:
                    createSimpleEquivalences(child,
                                             file=file,
                                             weights=weights,
                                             quiet=quiet,
                                             progressBar=progressBar)

            # Here, we have computed all simple layers.
            # If possible, we can still find an equivalent
            # if model is a container (sequential for example)
            success = model.childrenEquivalentsCompleted('keras')
            if model.isTorchBuiltIn() and success:
                kerasEq = c_l.torch2kerasEquivalent(model, weights=weights)
                if kerasEq is not None:
                    model.equivalent['keras'] = kerasEq
            if file:
                successTxt = model.childrenEquivalentsCompleted('keras',
                                                                file=True)
                if model.isTorchBuiltIn() and successTxt:
                    kerasEqTxt = c_l.torch2kerasEquivalent(model,
                                                           file=True,
                                                           weights=weights)
                    model.equivalentTxt['keras'] = kerasEqTxt
            if not quiet:
                updateProgress(model, progressBar)
def findAllInputShapes(model, pyTorch_input_size):
    """
    Finds input and output shapes of every layer in a model only knowing main
    input shape, by registering forward hooks and running one forward pass.

    Arguments:
        -model:
            A LayerRepresentation object of the model to analyse
        -pyTorch_input_size:
            input shape

    Raises:
        -RuntimeError:
            If provided input shape isn't compatible with the model
        -ImportError:
            If torch could not be imported at module load time
    """
    if torch is None:
        raise ImportError("Could not import torch. Conversion failed !")
    pyTorchModel = model.equivalent['torch']

    def register_hook(module):

        def hook(module, Input, Output):
            # Each submodule is matched back to its LayerRepresentation
            # through the Python object id of the torch module.
            identifier = id(module)

            # Input shape (batch dimension stripped)
            inputShape = list(Input[0].size())
            del inputShape[0]

            # Output shape (batch dimension stripped; may be a list of
            # shapes for multi-output modules)
            if isinstance(Output, (list, tuple)):
                outputShape = [
                    list(o.size())[1:] for o in Output
                ]
            else:
                outputShape = list(Output.size())
                del outputShape[0]

            inputShape = normalizeShape(inputShape)
            outputShape = normalizeShape(outputShape)

            # Saving shapes on the matching LayerRepresentation node
            selectedModel = model.getChildId(identifier, framework='torch')
            selectedModel.input_shape = inputShape
            selectedModel.output_shape = outputShape

        module.register_forward_hook(hook)

    # multiple inputs to the network
    if isinstance(pyTorch_input_size, tuple):
        pyTorch_input_size = [pyTorch_input_size]

    # NOTE(review): original comment said "batch_size of 2 for batchnorm"
    # but a batch of 1 is used here -- confirm batchnorm layers still work.
    x = [torch.rand(1, *in_size) for in_size in pyTorch_input_size]

    # register hook on every submodule
    pyTorchModel.apply(register_hook)

    # make a forward pass to trigger all the hooks
    try:
        pyTorchModel(*x)
    except RuntimeError as err:
        raise RuntimeError('Failed to analyse pyTorch model !\n{}'.format(err))
def advancedKerasEquivalence(model,
                             quiet=True,
                             progressBar=None):
    """
    Uses keras Functional API to find all remaining equivalents: propagates
    Keras input/output tensors through the layer graph, then wraps any node
    whose output is known into a keras.models.Model.

    Arguments:
        -model:
            A LayerRepresentation object to complete, or a list of
            LayerRepresentation objects to complete
        -quiet:
            If a progress bar should appear
        -progressBar:
            If a progress bar was already created, put it here
    """
    # Create a progress bar if necessary
    if not quiet and progressBar is None:
        progressBar = tqdm(total=model.numberOfChildren() + 1, unit='layer')

    if isinstance(model, list):
        # If we have to deal with a list of models, handle each in turn:
        for oneModel in model:
            advancedKerasEquivalence(oneModel,
                                     quiet=quiet,
                                     progressBar=progressBar)
    else:
        if not quiet:
            updateProgress(model, progressBar)

        notKerasEquivExist = not('keras' in model.equivalent.keys())
        kerasOutputExist = model.kerasOutput is not None

        if notKerasEquivExist and model.childrenEquivalentsCompleted('keras'):
            # All children are converted: propagate the Keras signal
            # through this node so its output tensor becomes known.
            c_l.spreadSignal(model)
            kerasOutputExist = model.kerasOutput is not None

            if kerasOutputExist:
                if model.name is not None:
                    kerasEq = keras.models.Model(inputs=model.kerasInput,
                                                 outputs=model.kerasOutput,
                                                 name=model.name)
                else:
                    kerasEq = keras.models.Model(inputs=model.kerasInput,
                                                 outputs=model.kerasOutput)
                model.equivalent['keras'] = kerasEq

        # Do the same to sub-sub-layers
        if not quiet:
            updateProgress(model, progressBar)
        advancedKerasEquivalence(model.children,
                                 quiet=quiet,
                                 progressBar=progressBar)
def updateProgress(model, progressBar):
    """
    Advance the conversion progress bar.

    The bar value is derived from how many layers of the whole model already
    have a Keras equivalent, so only the delta since the previous call is
    added (tracked through the module-level lastProgress counter).

    Arguments:
        -model:
            A LayerRepresentation object of one layer in the model being
            converted
        -progressBar:
            A ProgressBar object : the bar to update
    """
    global lastProgress
    current = model.firstParent().numberOfEquivalents(framework='keras')
    progressBar.update(current - lastProgress)
    lastProgress = current
| 32.602273 | 79 | 0.555943 |
acf8682507163bdc34a752a7aea1cd639be84e84 | 1,035 | py | Python | adminmgr/media/code/python/red3/BD_1201700162_1201701618_1201701643_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/python/red3/BD_1201700162_1201701618_1201701643_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/python/red3/BD_1201700162_1201701618_1201701643_reducer.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | #!/usr/bin/python3
#Reducer.py
import sys
import operator
# Hadoop-style reducer: aggregate (runs, balls) per stadium/batsman from
# mapper lines of the form "<stadium>/<batsman>\t<runs>/<balls>", then print
# the best batsman (highest strike rate, min 10 balls faced) per stadium.
totals = {}
for record in sys.stdin:
    key_part, score_part = record.split('\t')
    stadium, batsman = key_part.split('/')
    runs_str, balls_str = score_part.split('/')
    runs = int(runs_str, 10)     # int() tolerates the trailing newline
    balls = int(balls_str, 10)
    stadium_stats = totals.setdefault(stadium, {})
    if batsman not in stadium_stats:
        stadium_stats[batsman] = [runs, balls]
    else:
        stadium_stats[batsman][0] += runs
        # Fixed: the original incremented rec[sdn][balls][1] -- keyed by the
        # integer ball count instead of the batsman -- raising KeyError (or
        # corrupting another entry) on every repeated batsman.
        stadium_stats[batsman][1] += balls

# Strike rate (runs per 100 balls) for batsmen who faced at least 10 balls.
rates = {}
for stadium, players in totals.items():
    rates[stadium] = {}
    for batsman, (runs, balls) in players.items():
        if balls >= 10:
            rates[stadium][batsman] = [runs * 100 / balls, runs]

# Best batsman per stadium: maximum [strike_rate, runs] pair.
final = {}
for stadium, players in rates.items():
    if not players:
        # Fixed: the original crashed on max() of an empty dict when no
        # batsman at a stadium reached 10 balls.
        continue
    best = max(players.values())
    tied = [name for name, stats in players.items() if stats == best]
    # Entries tie only when both strike rate and runs match, so the original
    # (broken) runs-based tie-break -- which indexed new[stname] with a dict
    # -- was a no-op anyway; keep the first candidate in input order.
    final[stadium] = tied[0]

for stadium_name, batsman in sorted(final.items(), key=operator.itemgetter(0)):
    print('%s,%s' % (stadium_name, batsman))
acf86833efa75c0e0650da76fc2dd551d1388542 | 29,485 | py | Python | src/neighborhoods.py | thouis/ABC-Enhancer-Gene-Prediction | 8cde9c3ee62ed1589afc18a25f45da35f0473205 | [
"MIT"
] | null | null | null | src/neighborhoods.py | thouis/ABC-Enhancer-Gene-Prediction | 8cde9c3ee62ed1589afc18a25f45da35f0473205 | [
"MIT"
] | null | null | null | src/neighborhoods.py | thouis/ABC-Enhancer-Gene-Prediction | 8cde9c3ee62ed1589afc18a25f45da35f0473205 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from scipy import interpolate
import os
import os.path
from subprocess import check_call, check_output, PIPE, Popen, getoutput, CalledProcessError
from tools import *
import linecache
import traceback
import time
import pyranges as pr
pd.options.display.max_colwidth = 10000 #seems to be necessary for pandas to read long file names... strange
def load_genes(file,
               ue_file,
               chrom_sizes,
               outdir,
               expression_table_list,
               gene_id_names,
               primary_id,
               cellType,
               class_gene_file):
    """
    Load the gene annotation BED, attach expression / ubiquitous-expression /
    cell-type metadata, and write GeneList.bed to outdir.

    Arguments:
        file: gene annotation BED file
        ue_file: table whose first column lists ubiquitously expressed gene
            names, or None to skip the 'is_ue' annotation
        chrom_sizes: chromosome sizes file; genes on chromosomes absent from
            it are dropped
        outdir: output directory for GeneList.bed
        expression_table_list: list of two-column (gene id, expression)
            tables; may be empty
        gene_id_names: comma-separated names for the ';'-separated fields
            packed into the BED 'name' column
        primary_id: which of gene_id_names is the primary identifier
        cellType: label stored in the 'cellType' column
        class_gene_file: optional separate BED used for enhancer class
            assignment; defaults to the genes themselves

    Returns:
        (genes, genes_for_class_assignment) DataFrames.
    """
    bed = read_bed(file)
    genes = process_gene_bed(bed, gene_id_names, primary_id, chrom_sizes)
    genes[['chr', 'start', 'end', 'name', 'score', 'strand']].to_csv(os.path.join(outdir, "GeneList.bed"),
                                    sep='\t', index=False, header=False)

    if len(expression_table_list) > 0:
        # Add expression information
        names_list = []
        print("Using gene expression from files: {} \n".format(expression_table_list))
        for expression_table in expression_table_list:
            try:
                name = os.path.basename(expression_table)
                expr = pd.read_table(expression_table, names=[primary_id, name + '.Expression'])
                expr[name + '.Expression'] = expr[name + '.Expression'].astype(float)
                # Multiple rows may share a gene id; keep the maximum value.
                expr = expr.groupby(primary_id).max()

                genes = genes.merge(expr, how="left", right_index=True, left_on='symbol')
                names_list.append(name + '.Expression')
            except Exception as e:
                # Best effort: a malformed table is reported and skipped.
                print(e)
                traceback.print_exc()
                print("Failed on {}".format(expression_table))

        # Mean across all successfully loaded expression tables.
        genes['Expression'] = genes[names_list].mean(axis = 1)
        genes['Expression.quantile'] = genes['Expression'].rank(method='average', na_option="top", ascending=True, pct=True)
    else:
        genes['Expression'] = np.NaN

    # Ubiquitously expressed annotation
    if ue_file is not None:
        ubiq = pd.read_csv(ue_file, sep="\t")
        genes['is_ue'] = genes['name'].isin(ubiq.iloc[:,0].values.tolist())

    # cell type
    genes['cellType'] = cellType

    # genes for class assignment (may be a different, non-unique gene set)
    if class_gene_file is None:
        genes_for_class_assignment = genes
    else:
        genes_for_class_assignment = read_bed(class_gene_file)
        genes_for_class_assignment = process_gene_bed(genes_for_class_assignment, gene_id_names, primary_id, chrom_sizes, fail_on_nonunique=False)

    return genes, genes_for_class_assignment
def annotate_genes_with_features(genes,
                                 genome_sizes,
                                 skip_gene_counts=False,
                                 features={},
                                 outdir=".",
                                 force=False,
                                 use_fast_count=True,
                                 default_accessibility_feature = "",
                                 **kwargs):
    """
    Count each epigenetic feature over gene bodies and 1kb promoter windows,
    derive PromoterActivityQuantile, and write GeneList.txt to outdir.

    NOTE(review): skip_gene_counts and **kwargs are currently unused here --
    confirm before relying on them.

    Returns:
        The gene DataFrame with per-feature count columns (suffix '.TSS1Kb'
        for the promoter-window counts) and 'PromoterActivityQuantile'.
    """
    # Setup files for counting
    bounds_bed = os.path.join(outdir, "GeneList.bed")
    tss1kb = make_tss_region_file(genes, outdir, genome_sizes)
    tss1kb_file = os.path.join(outdir, "GeneList.TSS1kb.bed")

    # Count features over genes and promoters
    genes = count_features_for_bed(genes, bounds_bed, genome_sizes, features, outdir, "Genes", force=force, use_fast_count=use_fast_count)
    tsscounts = count_features_for_bed(tss1kb, tss1kb_file, genome_sizes, features, outdir, "Genes.TSS1kb", force=force, use_fast_count=use_fast_count)
    tsscounts = tsscounts.drop(['chr','start','end','score','strand'], axis=1)

    merged = genes.merge(tsscounts, on="name", suffixes=['','.TSS1Kb'])

    # Promoter activity combines H3K27ac (when available) with the default
    # accessibility feature over the promoter window; the +0.0001 offsets
    # avoid collapsing ranks for zero-count promoters.
    access_col = default_accessibility_feature + ".RPKM.quantile.TSS1Kb"

    if 'H3K27ac.RPKM.quantile.TSS1Kb' in merged.columns:
        merged['PromoterActivityQuantile'] = ((0.0001+merged['H3K27ac.RPKM.quantile.TSS1Kb'])*(0.0001+merged[access_col])).rank(method='average', na_option="top", ascending=True, pct=True)
    else:
        merged['PromoterActivityQuantile'] = ((0.0001+merged[access_col])).rank(method='average', na_option="top", ascending=True, pct=True)

    merged.to_csv(os.path.join(outdir, "GeneList.txt"),
                  sep='\t', index=False, header=True, float_format="%.6f")
    return merged
def make_tss_region_file(genes, outdir, sizes, tss_slop=500):
    """
    Given a gene file, define regions of +/- tss_slop bp around each gene's
    TSS (1kb windows by default), clipped to chromosome bounds, and write
    them sorted to GeneList.TSS1kb.bed in outdir.

    Returns the (pre-sort) window DataFrame.
    """
    sizes_pr = df_to_pyranges(read_bed(sizes + '.bed'))
    tss1kb = genes.loc[:,['chr','start','end','name','score','strand']]
    # Collapse each interval to its TSS, then expand by tss_slop on each side.
    tss1kb['start'] = genes['tss']
    tss1kb['end'] = genes['tss']
    tss1kb = df_to_pyranges(tss1kb).slack(tss_slop)
    # Clip windows that run past chromosome ends.
    tss1kb = pr.gf.genome_bounds(tss1kb, sizes_pr).df[['Chromosome','Start','End','name','score','strand']]
    tss1kb.columns = ['chr','start','end','name','score','strand']
    tss1kb_file = os.path.join(outdir, "GeneList.TSS1kb.bed")
    tss1kb.to_csv(tss1kb_file, header=False, index=False, sep='\t')

    # The TSS1kb file should be sorted in the chromosome order of the sizes
    # file (required by downstream `bedtools coverage -sorted`).
    sort_command = "bedtools sort -faidx {sizes} -i {tss1kb_file} > {tss1kb_file}.sorted; mv {tss1kb_file}.sorted {tss1kb_file}".format(**locals())
    run_command(sort_command)
    return(tss1kb)
def process_gene_bed(bed, name_cols, main_name, chrom_sizes=None, fail_on_nonunique=True):
    """
    Split the composite ';'-separated 'name' column of a gene BED into the
    columns listed in *name_cols*, set 'name' to the *main_name* column,
    compute TSS positions, and optionally filter to known chromosomes.

    Arguments:
        bed: DataFrame from read_bed()
        name_cols: comma-separated column names matching the ';' fields
        main_name: which of name_cols becomes the primary 'name'
        chrom_sizes: optional sizes file; rows on unknown chromosomes dropped
        fail_on_nonunique: assert that gene names are unique

    Returns:
        The processed DataFrame with an added 'tss' column.
    """
    # Drop optional BED12 columns when present. errors='ignore' replaces the
    # previous broad try/except that silently swallowed any exception.
    bed = bed.drop(['thickStart', 'thickEnd', 'itemRgb', 'blockCount',
                    'blockSizes', 'blockStarts'], axis=1, errors='ignore')

    assert(main_name in name_cols)
    names = bed.name.str.split(";", expand=True)
    assert(len(names.columns) == len(name_cols.split(",")))
    names.columns = name_cols.split(",")
    bed = pd.concat([bed, names], axis=1)
    bed['name'] = bed[main_name]
    # bed = bed.sort_values(by=['chr','start'])  # JN Keep original sort order
    bed['tss'] = get_tss_for_bed(bed)

    bed.drop_duplicates(inplace=True)

    # Remove genes that are not defined in chromosomes file
    if chrom_sizes is not None:
        sizes = read_bed(chrom_sizes)
        bed['chr'] = bed['chr'].astype('str')  # JN needed in case chromosomes are all integer
        bed = bed[bed['chr'].isin(set(sizes['chr'].values))]

    # Enforce that gene names should be unique
    if fail_on_nonunique:
        assert(len(set(bed['name'])) == len(bed['name'])), "Gene IDs are not unique! Failing. Please ensure unique identifiers are passed to --genes"

    return bed
def get_tss_for_bed(bed):
    """Return a Series of TSS positions: 'start' for + strand genes, 'end'
    for - strand genes."""
    assert_bed3(bed)
    on_minus_strand = bed['strand'] == "-"
    tss = bed['start'].copy()
    tss.loc[on_minus_strand] = bed.loc[on_minus_strand, 'end']
    return tss
def assert_bed3(df):
    """
    Assert that *df* is a stranded BED-like DataFrame with at least
    'chr', 'start', 'end' and 'strand' columns.

    Fixed: compare with isinstance instead of the fragile
    type(df).__name__ == "DataFrame" string check (which also rejected
    DataFrame subclasses).
    """
    assert isinstance(df, pd.DataFrame)
    assert 'chr' in df.columns
    assert 'start' in df.columns
    assert 'end' in df.columns
    assert 'strand' in df.columns
def load_enhancers(outdir=".",
                   genome_sizes="",
                   features={},
                   genes=None,
                   force=False,
                   candidate_peaks="",
                   skip_rpkm_quantile=False,
                   cellType=None,
                   tss_slop_for_class_assignment = 500,
                   use_fast_count=True,
                   default_accessibility_feature = "",
                   qnorm = None,
                   class_override_file = None):
    """
    Build the candidate-enhancer table: count features over candidate peaks,
    assign promoter/genic/intergenic classes, quantile-normalize, compute
    Activity, and write EnhancerList.txt / EnhancerList.bed to outdir.

    NOTE(review): class_override_file is accepted but currently unused (the
    override code is commented out below this function). Results are written
    to disk; the function returns None.
    """
    enhancers = read_bed(candidate_peaks)
    enhancers['chr'] = enhancers['chr'].astype('str')
    enhancers = count_features_for_bed(enhancers, candidate_peaks, genome_sizes, features, outdir, "Enhancers", skip_rpkm_quantile, force, use_fast_count)

    # cellType
    if cellType is not None:
        enhancers['cellType'] = cellType

    # Assign categories
    if genes is not None:
        print("Assigning classes to enhancers")
        enhancers = assign_enhancer_classes(enhancers, genes, tss_slop = tss_slop_for_class_assignment)

    # TO DO: Should qnorm each bam file separately (before averaging).
    # Currently qnorm being performed on the average
    enhancers = run_qnorm(enhancers, qnorm)
    enhancers = compute_activity(enhancers, default_accessibility_feature)

    enhancers.to_csv(os.path.join(outdir, "EnhancerList.txt"),
                     sep='\t', index=False, header=True, float_format="%.6f")
    enhancers[['chr', 'start', 'end', 'name']].to_csv(os.path.join(outdir, "EnhancerList.bed"),
                     sep='\t', index=False, header=False)
#Kristy's version
def assign_enhancer_classes(enhancers, genes, tss_slop=500):
    """
    Label each enhancer as 'promoter' (overlaps a TSS +/- tss_slop window),
    'genic' (overlaps a gene body), or 'intergenic' (neither), and record
    the symbols of the overlapped genes/promoters.

    Precedence: promoter overrides genic overrides intergenic (the promoter
    assignment is applied last).
    """
    # build pyranges objects for TSS windows and gene bodies
    tss_pyranges = df_to_pyranges(genes, start_col='tss', end_col='tss', start_slop=tss_slop, end_slop=tss_slop)
    gene_pyranges = df_to_pyranges(genes)

    def get_class_pyranges(enhancers, tss_pyranges = tss_pyranges, gene_pyranges = gene_pyranges):
        '''
        Takes in PyRanges objects : Enhancers, tss_pyranges, gene_pyranges
        Returns dataframe with uid (representing enhancer) and symbol of the gene/promoter that is overlapped'''
        # genes: collapse the overlapped symbols into a comma-joined set
        genic_enh = enhancers.join(gene_pyranges, suffix="_genic")
        genic_enh = genic_enh.df[['symbol','uid']].groupby('uid',as_index=False).aggregate(lambda x: ','.join(list(set(x))))

        # promoters
        promoter_enh = enhancers.join(tss_pyranges, suffix="_promoter")
        promoter_enh = promoter_enh.df[['symbol','uid']].groupby('uid',as_index=False).aggregate(lambda x: ','.join(list(set(x))))
        return genic_enh, promoter_enh

    # label everything as intergenic; 'uid' keys each enhancer row so the
    # overlap results can be matched back after the pyranges round trip
    enhancers["class"] = "intergenic"
    enhancers['uid'] = range(enhancers.shape[0])
    enh = df_to_pyranges(enhancers)
    genes, promoters = get_class_pyranges(enh)
    enhancers = enh.df.drop(['Chromosome','Start','End'], axis=1)
    # genic first, so the promoter label (applied second) takes precedence
    enhancers.loc[enhancers['uid'].isin(genes.uid), 'class'] = 'genic'
    enhancers.loc[enhancers['uid'].isin(promoters.uid), 'class'] = 'promoter'

    enhancers["isPromoterElement"] = enhancers["class"] == "promoter"
    enhancers["isGenicElement"] = enhancers["class"] == "genic"
    enhancers["isIntergenicElement"] = enhancers["class"] == "intergenic"

    # Output stats
    print("Total enhancers: {}".format(len(enhancers)))
    print("         Promoters: {}".format(sum(enhancers['isPromoterElement'])))
    print("         Genic: {}".format(sum(enhancers['isGenicElement'])))
    print("         Intergenic: {}".format(sum(enhancers['isIntergenicElement'])))

    # Add promoter/genic symbol columns (empty string when no overlap)
    enhancers = enhancers.merge(promoters.rename(columns={'symbol':'promoterSymbol'}), on='uid', how = 'left').fillna(value={'promoterSymbol':""})
    enhancers = enhancers.merge(genes.rename(columns={'symbol':'genicSymbol'}), on='uid', how = 'left').fillna(value={'genicSymbol':""})
    enhancers.drop(['uid'], axis=1, inplace=True)

    # just to keep things consistent with original code
    enhancers["name"] = enhancers.apply(lambda e: "{}|{}:{}-{}".format(e["class"], e.chr, e.start, e.end), axis=1)
    return enhancers
#TO DO: convert to pyranges
# def overrideEnhancerAnnotations(enhancers, cell_line, override_file):
# #Override enhancer class with manual annotations
# override = pandas.read_csv(override_file, sep="\t")
# override = override.loc[override['cellType'] == cell_line, :]
# if override.shape[0] > 0:
# enhancers = read_enhancers(enhancers)
# else:
# return(enhancers)
# #for each entry in the override file find the overlaps with enhancers
# #Then modify each enhancer entry appropriately
# for idx, row in override.iterrows():
# ovl_idx = enhancers.within_range(row['chr'],row['start'],row['end']).index
# enhancers.ranges.loc[ovl_idx, 'class'] = row['class']
# #Now need to update various columns derived from 'class'
# enhancers.ranges.loc[ovl_idx, 'isPromoterElement'] = row['class'] == 'promoter'
# enhancers.ranges.loc[ovl_idx, 'isGenicElement'] = row['class'] == 'genic'
# enhancers.ranges.loc[ovl_idx, 'isIntergenicElement'] = row['class'] == 'intergenic'
# enhancers.ranges.loc[ovl_idx, 'name'] = enhancers.ranges.loc[ovl_idx].apply(lambda e: "{}|{}:{}-{}".format(e["class"], e.chr, e.start, e.end), axis=1)
# return enhancers.ranges
def run_count_reads(target, output, bed_file, genome_sizes, use_fast_count):
    """Dispatch read counting to the handler matching *target*'s file type
    (BAM, gzipped tagAlign, or BigWig)."""
    if target.endswith(".bam"):
        count_bam(target, bed_file, output, genome_sizes=genome_sizes, use_fast_count=use_fast_count)
        return
    if target.endswith((".tagAlign.gz", ".tagAlign.bgz")):
        count_tagalign(target, bed_file, output, genome_sizes)
        return
    if isBigWigFile(target):
        count_bigwig(target, bed_file, output)
        return
    raise ValueError("File {} name was not in .bam, .tagAlign.gz, .bw".format(target))
def count_bam(bamfile, bed_file, output, genome_sizes, use_fast_count=True, verbose=True):
    """
    Count reads from *bamfile* overlapping each interval of *bed_file*,
    writing a 4-column bedgraph to *output*.

    Two strategies:
      1. Fast path (use_fast_count): reorder the BED to match the BAM's
         chromosome order and use `bedtools coverage -sorted` (low memory;
         assumes the BAM is sorted in the order of its own header).
      2. Fallback: `bedtools bamtobed` piped into coverage (slower, needs
         more memory), used when the fast path fails or is disabled.
    """
    completed = True

    # Fast count:
    # bamtobed uses a lot of memory. Instead reorder bed file to match ordering of the bam file.
    # Assumed .bam file is sorted in the chromosome order defined by its header.
    # Then use bedtools coverage, then sort back to expected order.
    # Requires an faidx file with chr in the same order as the bam file.
    if use_fast_count:
        temp_output = output + ".temp_sort_order"
        faidx_command = "awk 'FNR==NR {{x2[$1] = $0; next}} $1 in x2 {{print x2[$1]}}' {genome_sizes} <(samtools view -H {bamfile} | grep SQ | cut -f 2 | cut -c 4- ) > {temp_output}".format(**locals())
        command = "bedtools sort -faidx {temp_output} -i {bed_file} | bedtools coverage -g {temp_output} -counts -sorted -a stdin -b {bamfile} | awk '{{print $1 \"\\t\" $2 \"\\t\" $3 \"\\t\" $NF}}' | bedtools sort -faidx {genome_sizes} -i stdin > {output}; rm {temp_output}".format(**locals())

        # executable='/bin/bash' needed to parse the <( ) process
        # substitution in faidx_command
        p = Popen(faidx_command, stdout=PIPE, stderr=PIPE, shell=True, executable='/bin/bash')
        if verbose: print("Running: " + faidx_command)
        (stdoutdata, stderrdata) = p.communicate()
        err = str(stderrdata, 'utf-8')

        p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
        if verbose: print("Running: " + command)
        (stdoutdata, stderrdata) = p.communicate()
        err = str(stderrdata, 'utf-8')

        try:
            # Sanity check: the output must parse as a 4-column bedgraph.
            data = pd.read_table(output, header=None).loc[:,3].values
        except Exception as e:
            print("Fast count method failed to count: " + str(bamfile) + "\n")
            print(err)
            print("Trying bamtobed method ...\n")
            completed = False

    # Alternate counting method. Slower and requires more memory.
    # convert BAM to BED, filter to standard chromosomes, sort, then use the very fast bedtools coverage -sorted algorithm
    # Note: This requires that bed_file is also sorted and in same chromosome order as genome_sizes (first do bedtools sort -i bed_file -faidx genome_sizes)
    # BEDTools will error out if files are not properly sorted
    # Also requires that {genome_sizes} has a corresponding {genome_sizes}.bed file
    # NOTE(review): when use_fast_count is False, `err` is unbound here; only
    # the short-circuit on `not use_fast_count` prevents a NameError.
    if not use_fast_count or ("terminated" in err) or ("Error" in err) or ("ERROR" in err) or not completed:
        command = "bedtools bamtobed -i {bamfile} | cut -f 1-3 | bedtools intersect -wa -a stdin -b {genome_sizes}.bed | bedtools sort -i stdin -faidx {genome_sizes} | bedtools coverage -g {genome_sizes} -counts -sorted -a {bed_file} -b stdin | awk '{{print $1 \"\\t\" $2 \"\\t\" $3 \"\\t\" $NF}}' > {output}".format(**locals())
        p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
        if verbose: print("Running: " + command)
        (stdoutdata, stderrdata) = p.communicate()

        try:
            data = pd.read_table(output, header=None).loc[:,3].values
        except Exception as e:
            print(e)
            print(stderrdata)
            completed = False

        # Check for successful finish -- BEDTools can run into memory problems
        # NOTE(review): if the read above failed, `data` is unbound; the
        # short-circuit on the err checks is what usually avoids a NameError
        # at `any(data)` -- confirm this path.
        err = str(stderrdata, 'utf-8')
        if ("terminated" not in err) and ("Error" not in err) and ("ERROR" not in err) and any(data):
            print("BEDTools completed successfully. \n")
            completed = True
        else:
            print("BEDTools failed to count file: " + str(bamfile) + "\n")
            print(err)
            completed = False
def count_tagalign(tagalign, bed_file, output, genome_sizes):
    """
    Count reads from a tabix-indexed tagAlign file over each interval of
    *bed_file*, writing a 4-column bedgraph to *output*.

    Raises:
        subprocess.CalledProcessError: if the bedtools pipeline exits
            non-zero (check_call raises; it never returns a failure code).
    """
    command1 = "tabix -B {tagalign} {bed_file} | cut -f1-3".format(**locals())
    command2 = "bedtools coverage -counts -b stdin -a {bed_file} | awk '{{print $1 \"\\t\" $2 \"\\t\" $3 \"\\t\" $NF}}' ".format(**locals())
    p1 = Popen(command1, stdout=PIPE, shell=True)
    with open(output, "wb") as outfp:
        # Fixed: the old `if not p2 == 0: print(p2.stderr)` branch was dead
        # and wrong -- check_call returns 0 or raises CalledProcessError,
        # and an int has no .stderr attribute anyway.
        check_call(command2, stdin=p1.stdout, stdout=outfp, shell=True)
    # Release our handle on the pipe so p1 can receive SIGPIPE/terminate.
    p1.stdout.close()
def count_bigwig(target, bed_file, output):
    """
    Sum BigWig coverage over each interval of *bed_file* and write a
    4-column bedgraph to *output* (value = mean signal * interval length).
    """
    from pyBigWig import open as open_bigwig
    bw = open_bigwig(target)
    bed = read_bed(bed_file)
    with open(output, "wb") as outfp:
        for chr, start, end, *rest in bed.itertuples(index=False, name=None):
            try:
                # stats() may return [None] for empty regions; treat as 0.
                # max(end, start + 1) guards against zero-length intervals.
                val = bw.stats(chr, int(start), int(max(end, start + 1)), "mean")[0] or 0
            except RuntimeError:
                print("Failed on", chr, start, end)
                raise
            val *= abs(end - start)  # convert to total coverage
            # NOTE(review): rebinding `output` here shadows the filename
            # parameter (harmless after open(), but confusing).
            output = ("\t".join([chr, str(start), str(end), str(val)]) + "\n").encode('ascii')
            outfp.write(output)
def isBigWigFile(filename):
    """Return True if *filename* has a recognized BigWig extension.

    Idiom fix: str.endswith accepts a tuple, replacing the or-chain."""
    return filename.endswith((".bw", ".bigWig", ".bigwig"))
def count_features_for_bed(df, bed_file, genome_sizes, features, directory, filebase, skip_rpkm_quantile=False, force=False, use_fast_count=True):
    """Count every configured feature (each possibly backed by several BAM
    files) over *bed_file* and append count/RPM/RPKM columns to *df*, then
    average the replicates per feature."""
    for feature, bam_paths in features.items():
        start_time = time.time()
        # A single path may be given as a bare string; normalize to a list.
        if isinstance(bam_paths, str):
            bam_paths = [bam_paths]

        for bam_path in bam_paths:
            df = count_single_feature_for_bed(df, bed_file, genome_sizes, bam_path, feature, directory, filebase, skip_rpkm_quantile, force, use_fast_count)

        df = average_features(df, feature.replace('feature_', ''), bam_paths, skip_rpkm_quantile)
        elapsed = time.time() - start_time
        print("Feature " + feature + " completed in " + str(elapsed))

    return df
def count_single_feature_for_bed(df, bed_file, genome_sizes, feature_bam, feature, directory, filebase, skip_rpkm_quantile, force, use_fast_count):
    """
    Count one BAM/tagAlign/BigWig file over *bed_file*, caching the raw
    counts in a bedgraph under *directory*, and merge readCount / RPM /
    RPKM (and their quantile) columns into *df*.
    """
    orig_shape = df.shape[0]
    feature_name = feature + "." + os.path.basename(feature_bam)
    feature_outfile = os.path.join(directory, "{}.{}.CountReads.bedgraph".format(filebase, feature_name))

    # Reuse a previously computed bedgraph unless forced or it is
    # missing/empty.
    if force or (not os.path.exists(feature_outfile)) or (os.path.getsize(feature_outfile) == 0):
        print("Regenerating", feature_outfile)
        print("Counting coverage for {}".format(filebase + "." + feature_name))
        run_count_reads(feature_bam, feature_outfile, bed_file, genome_sizes, use_fast_count)
    else:
        print("Loading coverage from pre-calculated file for {}".format(filebase + "." + feature_name))

    domain_counts = read_bed(feature_outfile)
    score_column = domain_counts.columns[-1]

    total_counts = count_total(feature_bam)

    domain_counts = domain_counts[['chr', 'start', 'end', score_column]]
    featurecount = feature_name + ".readCount"
    domain_counts.rename(columns={score_column: featurecount}, inplace=True)
    domain_counts['chr'] = domain_counts['chr'].astype('str')

    df = df.merge(domain_counts.drop_duplicates())
    assert df.shape[0] == orig_shape, "Dimension mismatch"

    # RPM: reads per million (library-size normalized).
    df[feature_name + ".RPM"] = 1e6 * df[featurecount] / float(total_counts)

    if not skip_rpkm_quantile:
        df[featurecount + ".quantile"] = df[featurecount].rank() / float(len(df))
        df[feature_name + ".RPM.quantile"] = df[feature_name + ".RPM"].rank() / float(len(df))
        # RPKM additionally normalizes by interval length in kb.
        df[feature_name + ".RPKM"] = 1e3 * df[feature_name + ".RPM"] / (df.end - df.start).astype(float)
        df[feature_name + ".RPKM.quantile"] = df[feature_name + ".RPKM"].rank() / float(len(df))

    return df[~ df.duplicated()]
def average_features(df, feature, feature_bam_list, skip_rpkm_quantile):
    """Average per-replicate RPM (and optionally RPKM) columns into a single
    '<feature>.RPM' / '<feature>.RPKM' column, plus rank-based quantiles.
    Mutates and returns *df*."""
    def replicate_columns(metric):
        # e.g. 'H3K27ac.rep1.bam.RPM' for each BAM in the replicate list
        return ["{}.{}.{}".format(feature, os.path.basename(bam), metric)
                for bam in feature_bam_list]

    df[feature + '.RPM'] = df[replicate_columns('RPM')].mean(axis=1)

    if not skip_rpkm_quantile:
        n = float(len(df))
        df[feature + '.RPM.quantile'] = df[feature + '.RPM'].rank() / n
        df[feature + '.RPKM'] = df[replicate_columns('RPKM')].mean(axis=1)
        df[feature + '.RPKM.quantile'] = df[feature + '.RPKM'].rank() / n

    return df
# From /seq/lincRNA/Jesse/bin/scripts/JuicerUtilities.R
#
# Optional BED columns beyond chr/start/end, in standard BED12 order; used
# as the default extra column names by read_bed().
bed_extra_colnames = ["name", "score", "strand", "thickStart", "thickEnd", "itemRgb", "blockCount", "blockSizes", "blockStarts"]
# JN: 9/13/19: Don't assume chromosomes start with 'chr'
# chromosomes = ['chr' + str(entry) for entry in list(range(1,23)) + ['M','X','Y']] # should pass this in as an input file to specify chromosome order
def read_bed(filename, extra_colnames=bed_extra_colnames, chr=None, sort=False, skip_chr_sorting=True):
    """
    Read a BED file into a DataFrame with columns chr/start/end plus as many
    of *extra_colnames* as the file has; all-empty columns are dropped.

    Arguments:
        filename: path to the BED file
        extra_colnames: names for columns after the first three
        chr: if given, keep only rows on this chromosome
        sort: sort by (chr, start, end)
        skip_chr_sorting: when False, sort by chr only

    Returns:
        The parsed DataFrame ('chr' as an ordered Categorical).
    """
    # Skip a UCSC "track" header line if present. Fixed: use a context
    # manager so the probe handle is closed (previously leaked).
    with open(filename, "r") as probe:
        skip = 1 if ("track" in probe.readline()) else 0
    names = ["chr", "start", "end"] + extra_colnames
    result = pd.read_table(filename, names=names, header=None, skiprows=skip, comment='#')
    result = result.dropna(axis=1, how='all')  # drop empty columns
    assert result.columns[0] == "chr"

    # result['chr'] = pd.Categorical(result['chr'], chromosomes, ordered=True)
    result['chr'] = pd.Categorical(result['chr'], ordered=True)
    if chr is not None:
        result = result[result.chr == chr]
    if not skip_chr_sorting:
        result.sort_values("chr", inplace=True)
    if sort:
        result.sort_values(["chr", "start", "end"], inplace=True)
    return result
def read_bedgraph(filename):
    """Read a 4-column bedgraph (chr, start, end, score) into a DataFrame.

    Fixed: the original called read_bed without returning the result, so
    this function always returned None."""
    return read_bed(filename, extra_colnames=["score"], skip_chr_sorting=True)
def count_bam_mapped(bam_file):
    """Total mapped reads in an indexed BAM via `samtools idxstats`
    (no per-read iteration; requires a .bai index).

    Raises ValueError when the count is not positive."""
    idxstats = check_output("samtools idxstats " + bam_file, shell=True)
    lines = idxstats.decode("ascii").split("\n")
    # Column 3 of idxstats is the mapped-read count; last entry is blank.
    mapped_counts = [int(line.split("\t")[2]) for line in lines[:-1]]
    total = sum(mapped_counts)
    if not total > 0:
        raise ValueError("Error counting BAM file: count <= 0")
    return total
def count_tagalign_total(tagalign):
    """
    Total read count of a gzipped tagAlign file, restricted to the standard
    human chromosomes (chr1-22, chrX, chrY) matched by the grep pattern.
    """
    result = int(check_output("zcat {} | grep -E 'chr[1-9]|chr1[0-9]|chr2[0-2]|chrX|chrY' | wc -l".format(tagalign), shell=True))
    # NOTE(review): assert is stripped under `python -O`; consider raising a
    # ValueError instead, as count_bam_mapped does.
    assert (result > 0)
    return result
def count_bigwig_total(bw_file):
    """Total signal of a bigWig: sum over chromosomes of mean coverage times length."""
    from pyBigWig import open as open_bigwig
    bw = open_bigwig(bw_file)
    per_chrom = (length * bw.stats(chrom, 0, length, "mean")[0]
                 for chrom, length in bw.chroms().items())
    result = sum(per_chrom)
    ## BigWig could have negative values, e.g. the negative-strand GroCAP bigwigs
    assert (abs(result) > 0)
    return result
def count_total(infile):
    """Dispatch to the appropriate read/signal counter based on the file's format."""
    if infile.endswith((".tagAlign.gz", ".tagAlign.bgz")):
        return count_tagalign_total(infile)
    if infile.endswith(".bam"):
        return count_bam_mapped(infile)
    if isBigWigFile(infile):
        return count_bigwig_total(infile)
    raise RuntimeError("Did not recognize file format of: " + infile)
def parse_params_file(args):
    """Build the params dict: default accessibility feature, feature file lists, expression tables."""
    params = {}
    params["default_accessibility_feature"] = determine_accessibility_feature(args)
    params["features"] = get_features(args)
    # Comma-separated list of expression tables; empty string when none were supplied
    params["expression_table"] = args.expression_table.split(",") if args.expression_table else ''
    return params
def get_features(args):
    """Collect feature-name -> list-of-files mappings from the parsed CLI arguments."""
    features = {}
    # Built-in features: each CLI value is a comma-separated list of files
    for feature_name in ("H3K27ac", "ATAC", "DHS"):
        value = getattr(args, feature_name)
        if value:
            features[feature_name] = value.split(",")
    # Optional TSV of additional features with columns feature_name / file
    if args.supplementary_features is not None:
        supp = pd.read_csv(args.supplementary_features, sep="\t")
        for _, row in supp.iterrows():
            features[row['feature_name']] = row['file'].split(",")
    return features
def determine_accessibility_feature(args):
    """Choose the default accessibility feature (ATAC or DHS).

    BUGFIX: the original raised the "Both ... have been provided" error when
    NEITHER was provided, never checked the both-provided case, and left the
    final "At least one ..." branch unreachable.

    :raises RuntimeError: if both ATAC and DHS are given without an explicit
        default, or if neither is given
    """
    if args.default_accessibility_feature is not None:
        return args.default_accessibility_feature
    elif args.ATAC and args.DHS:
        raise RuntimeError("Both DHS and ATAC have been provided. Must set one file to be the default accessibility feature!")
    elif args.ATAC:
        return "ATAC"
    elif args.DHS:
        return "DHS"
    else:
        raise RuntimeError("At least one of ATAC or DHS must be provided!")
def compute_activity(df, access_col):
    """Compute per-region activity from accessibility and (optionally) H3K27ac signal.

    Adds 'activity_base' (from quantile-normalized columns) and
    'activity_base_no_qnorm' (from raw RPM columns) to df, modifying it in place.
    When H3K27ac is present, activity is the geometric mean of H3K27ac and
    accessibility; otherwise it is the accessibility signal alone.

    The original duplicated the whole computation for the DHS and ATAC branches;
    this version parameterizes the column names instead.

    :param access_col: "DHS" or "ATAC"
    :raises RuntimeError: for any other access_col value
    """
    if access_col == "DHS":
        norm_col = 'normalized_dhs'
    elif access_col == "ATAC":
        norm_col = 'normalized_atac'
    else:
        raise RuntimeError("At least one of ATAC or DHS must be provided!")
    rpm_col = access_col + '.RPM'
    if 'H3K27ac.RPM' in df.columns:
        # Geometric mean of H3K27ac and accessibility signal
        df['activity_base'] = np.sqrt(df['normalized_h3K27ac'] * df[norm_col])
        df['activity_base_no_qnorm'] = np.sqrt(df['H3K27ac.RPM'] * df[rpm_col])
    else:
        df['activity_base'] = df[norm_col]
        df['activity_base_no_qnorm'] = df[rpm_col]
    return df
def run_qnorm(df, qnorm, qnorm_method = "rank", separate_promoters = True):
    """Quantile-normalize epigenetic signal columns of df to a reference.

    :param df: region DataFrame with '<FEAT>.RPM' and '<FEAT>.RPM.quantile' columns
               (and a 'class' column when separate_promoters is True)
    :param qnorm: path to a TSV reference with 'enh_class', 'rank' or 'quantile',
                  and signal columns; None copies raw RPM values through unchanged
    :param qnorm_method: "rank" interpolates over reference ranks using
                         (1 - quantile) * n_regions; "quantile" interpolates
                         over reference quantiles directly
    :param separate_promoters: normalize promoter and nonpromoter regions separately
    :return: df with 'normalized_*' columns added (modified in place and returned)
    """
    if qnorm is None:
        # No reference: the normalized columns are just the raw RPM values
        if 'H3K27ac.RPM' in df.columns: df['normalized_h3K27ac'] = df['H3K27ac.RPM']
        if 'DHS.RPM' in df.columns: df['normalized_dhs'] = df['DHS.RPM']
        if 'ATAC.RPM' in df.columns: df['normalized_atac'] = df['ATAC.RPM']
    else:
        qnorm = pd.read_csv(qnorm, sep = "\t")
        nRegions = df.shape[0]
        col_dict = {'DHS.RPM' : 'normalized_dhs', 'ATAC.RPM' : 'normalized_atac', 'H3K27ac.RPM' : 'normalized_h3K27ac'}
        # Plain-set intersection: the original's `df.columns & col_dict.keys()`
        # relied on Index set-operators removed in recent pandas versions.
        for col in set(df.columns) & set(col_dict):
            #if there is no ATAC.RPM in the qnorm file, but there is ATAC.RPM in enhancers, then qnorm ATAC to DHS
            if col == 'ATAC.RPM' and 'ATAC.RPM' not in qnorm.columns:
                qnorm['ATAC.RPM'] = qnorm['DHS.RPM']
            if not separate_promoters:
                # BUGFIX: original wrote qnorm['enh_class' == "any"], i.e. qnorm[False],
                # which raises KeyError instead of selecting the "any" rows.
                qnorm = qnorm.loc[qnorm['enh_class'] == "any"]
                if qnorm_method == "rank":
                    interpfunc = interpolate.interp1d(qnorm['rank'], qnorm[col], kind='linear', fill_value='extrapolate')
                    df[col_dict[col]] = interpfunc((1 - df[col + ".quantile"]) * nRegions).clip(0)
                elif qnorm_method == "quantile":
                    interpfunc = interpolate.interp1d(qnorm['quantile'], qnorm[col], kind='linear', fill_value='extrapolate')
                    df[col_dict[col]] = interpfunc(df[col + ".quantile"]).clip(0)
            else:
                for enh_class in ['promoter','nonpromoter']:
                    this_qnorm = qnorm.loc[qnorm['enh_class'] == enh_class]
                    #Need to recompute quantiles within each class
                    if enh_class == 'promoter':
                        this_idx = df.index[np.logical_or(df['class'] == "tss", df['class'] == "promoter")]
                    else:
                        this_idx = df.index[np.logical_and(df['class'] != "tss" , df['class'] != "promoter")]
                    df.loc[this_idx, col + enh_class + ".quantile"] = df.loc[this_idx, col].rank()/len(this_idx)
                    if qnorm_method == "rank":
                        interpfunc = interpolate.interp1d(this_qnorm['rank'], this_qnorm[col], kind='linear', fill_value='extrapolate')
                        df.loc[this_idx, col_dict[col]] = interpfunc((1 - df.loc[this_idx, col + enh_class + ".quantile"]) * len(this_idx)).clip(0)
                    elif qnorm_method == "quantile":
                        interpfunc = interpolate.interp1d(this_qnorm['quantile'], this_qnorm[col], kind='linear', fill_value='extrapolate')
                        df.loc[this_idx, col_dict[col]] = interpfunc(df.loc[this_idx, col + enh_class + ".quantile"]).clip(0)
    return df
| 45.57187 | 328 | 0.642428 |
acf86957957809cdeeafd2a5903df30266b3b1fd | 2,284 | py | Python | homeassistant/components/blockchain/sensor.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 4 | 2016-12-23T10:36:36.000Z | 2021-04-22T12:38:16.000Z | homeassistant/components/blockchain/sensor.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/blockchain/sensor.py | winterscar/core | 5a55d508791aae65f16396691d014c73fb2095f0 | [
"Apache-2.0"
] | 1 | 2020-03-07T10:43:50.000Z | 2020-03-07T10:43:50.000Z | """Support for Blockchain.com sensors."""
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Attribution string exposed via the sensor's state attributes
ATTRIBUTION = "Data provided by blockchain.com"
# Configuration key for the list of watched bitcoin addresses
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
# Polling interval used by Home Assistant for this platform
SCAN_INTERVAL = timedelta(minutes=5)
# Platform configuration: a required list of address strings and an optional name
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ADDRESSES): [cv.string],
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Blockchain.com sensors."""
    addresses = config.get(CONF_ADDRESSES)
    name = config.get(CONF_NAME)
    # Reject the whole setup if any configured address fails validation
    bad_addresses = [address for address in addresses if not validate_address(address)]
    if bad_addresses:
        _LOGGER.error("Bitcoin address is not valid: %s", bad_addresses[0])
        return False
    add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(Entity):
    """Representation of a Blockchain.com sensor reporting the combined BTC balance."""
    def __init__(self, name, addresses):
        """Initialize the sensor with its display name and watched addresses."""
        self._name = name
        self.addresses = addresses
        self._state = None  # populated on first update()
        self._unit_of_measurement = "BTC"
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor (total balance across all addresses)."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement this sensor expresses itself in."""
        return self._unit_of_measurement
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON
    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor (data-source attribution)."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
    def update(self):
        """Get the latest state of the sensor via the blockchain.com API."""
        self._state = get_balance(self.addresses)
| 26.55814 | 77 | 0.689142 |
acf86996a345e576c1d294152aa6eda393b73c2f | 20,049 | py | Python | sdk/python/pulumi_aws_native/appstream/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws_native/appstream/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws_native/appstream/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module: input-argument wrapper types for AppStream resources.
__all__ = [
    'AppBlockS3LocationArgs',
    'AppBlockScriptDetailsArgs',
    'AppBlockTagArgs',
    'ApplicationS3LocationArgs',
    'ApplicationTagArgs',
    'DirectoryConfigServiceAccountCredentialsArgs',
    'EntitlementAttributeArgs',
    'FleetComputeCapacityArgs',
    'FleetDomainJoinInfoArgs',
    'FleetTagArgs',
    'FleetVpcConfigArgs',
    'ImageBuilderAccessEndpointArgs',
    'ImageBuilderDomainJoinInfoArgs',
    'ImageBuilderTagArgs',
    'ImageBuilderVpcConfigArgs',
    'StackAccessEndpointArgs',
    'StackApplicationSettingsArgs',
    'StackStorageConnectorArgs',
    'StackTagArgs',
    'StackUserSettingArgs',
]
@pulumi.input_type
class AppBlockS3LocationArgs:
    """S3 location (bucket name + object key) for an AppBlock artifact."""
    def __init__(__self__, *,
                 s3_bucket: pulumi.Input[str],
                 s3_key: pulumi.Input[str]):
        pulumi.set(__self__, "s3_bucket", s3_bucket)
        pulumi.set(__self__, "s3_key", s3_key)
    @property
    @pulumi.getter(name="s3Bucket")
    def s3_bucket(self) -> pulumi.Input[str]:
        return pulumi.get(self, "s3_bucket")
    @s3_bucket.setter
    def s3_bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_bucket", value)
    @property
    @pulumi.getter(name="s3Key")
    def s3_key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "s3_key")
    @s3_key.setter
    def s3_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_key", value)
@pulumi.input_type
class AppBlockScriptDetailsArgs:
    """Setup-script details for an AppBlock: executable path, the script's S3 location, a timeout, and optional parameters."""
    def __init__(__self__, *,
                 executable_path: pulumi.Input[str],
                 script_s3_location: pulumi.Input['AppBlockS3LocationArgs'],
                 timeout_in_seconds: pulumi.Input[int],
                 executable_parameters: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "executable_path", executable_path)
        pulumi.set(__self__, "script_s3_location", script_s3_location)
        pulumi.set(__self__, "timeout_in_seconds", timeout_in_seconds)
        if executable_parameters is not None:
            pulumi.set(__self__, "executable_parameters", executable_parameters)
    @property
    @pulumi.getter(name="executablePath")
    def executable_path(self) -> pulumi.Input[str]:
        return pulumi.get(self, "executable_path")
    @executable_path.setter
    def executable_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "executable_path", value)
    @property
    @pulumi.getter(name="scriptS3Location")
    def script_s3_location(self) -> pulumi.Input['AppBlockS3LocationArgs']:
        return pulumi.get(self, "script_s3_location")
    @script_s3_location.setter
    def script_s3_location(self, value: pulumi.Input['AppBlockS3LocationArgs']):
        pulumi.set(self, "script_s3_location", value)
    @property
    @pulumi.getter(name="timeoutInSeconds")
    def timeout_in_seconds(self) -> pulumi.Input[int]:
        return pulumi.get(self, "timeout_in_seconds")
    @timeout_in_seconds.setter
    def timeout_in_seconds(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout_in_seconds", value)
    @property
    @pulumi.getter(name="executableParameters")
    def executable_parameters(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "executable_parameters")
    @executable_parameters.setter
    def executable_parameters(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "executable_parameters", value)
@pulumi.input_type
class AppBlockTagArgs:
    """Key/value tag attached to an AppBlock resource."""
    def __init__(__self__, *,
                 tag_key: pulumi.Input[str],
                 tag_value: pulumi.Input[str]):
        pulumi.set(__self__, "tag_key", tag_key)
        pulumi.set(__self__, "tag_value", tag_value)
    @property
    @pulumi.getter(name="tagKey")
    def tag_key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "tag_key")
    @tag_key.setter
    def tag_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_key", value)
    @property
    @pulumi.getter(name="tagValue")
    def tag_value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "tag_value")
    @tag_value.setter
    def tag_value(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_value", value)
@pulumi.input_type
class ApplicationS3LocationArgs:
    """S3 location (bucket name + object key) for an Application resource object."""
    def __init__(__self__, *,
                 s3_bucket: pulumi.Input[str],
                 s3_key: pulumi.Input[str]):
        pulumi.set(__self__, "s3_bucket", s3_bucket)
        pulumi.set(__self__, "s3_key", s3_key)
    @property
    @pulumi.getter(name="s3Bucket")
    def s3_bucket(self) -> pulumi.Input[str]:
        return pulumi.get(self, "s3_bucket")
    @s3_bucket.setter
    def s3_bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_bucket", value)
    @property
    @pulumi.getter(name="s3Key")
    def s3_key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "s3_key")
    @s3_key.setter
    def s3_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_key", value)
@pulumi.input_type
class ApplicationTagArgs:
    """Key/value tag attached to an Application resource."""
    def __init__(__self__, *,
                 tag_key: pulumi.Input[str],
                 tag_value: pulumi.Input[str]):
        pulumi.set(__self__, "tag_key", tag_key)
        pulumi.set(__self__, "tag_value", tag_value)
    @property
    @pulumi.getter(name="tagKey")
    def tag_key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "tag_key")
    @tag_key.setter
    def tag_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_key", value)
    @property
    @pulumi.getter(name="tagValue")
    def tag_value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "tag_value")
    @tag_value.setter
    def tag_value(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_value", value)
@pulumi.input_type
class DirectoryConfigServiceAccountCredentialsArgs:
    """Service-account name and password credentials used by a DirectoryConfig."""
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 account_password: pulumi.Input[str]):
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "account_password", account_password)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="accountPassword")
    def account_password(self) -> pulumi.Input[str]:
        return pulumi.get(self, "account_password")
    @account_password.setter
    def account_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_password", value)
@pulumi.input_type
class EntitlementAttributeArgs:
    """Name/value attribute pair for an Entitlement."""
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class FleetComputeCapacityArgs:
    """Desired number of instances for a Fleet."""
    def __init__(__self__, *,
                 desired_instances: pulumi.Input[int]):
        pulumi.set(__self__, "desired_instances", desired_instances)
    @property
    @pulumi.getter(name="desiredInstances")
    def desired_instances(self) -> pulumi.Input[int]:
        return pulumi.get(self, "desired_instances")
    @desired_instances.setter
    def desired_instances(self, value: pulumi.Input[int]):
        pulumi.set(self, "desired_instances", value)
@pulumi.input_type
class FleetDomainJoinInfoArgs:
    """Optional directory-join info for a Fleet: directory name and OU distinguished name."""
    def __init__(__self__, *,
                 directory_name: Optional[pulumi.Input[str]] = None,
                 organizational_unit_distinguished_name: Optional[pulumi.Input[str]] = None):
        if directory_name is not None:
            pulumi.set(__self__, "directory_name", directory_name)
        if organizational_unit_distinguished_name is not None:
            pulumi.set(__self__, "organizational_unit_distinguished_name", organizational_unit_distinguished_name)
    @property
    @pulumi.getter(name="directoryName")
    def directory_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "directory_name")
    @directory_name.setter
    def directory_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "directory_name", value)
    @property
    @pulumi.getter(name="organizationalUnitDistinguishedName")
    def organizational_unit_distinguished_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "organizational_unit_distinguished_name")
    @organizational_unit_distinguished_name.setter
    def organizational_unit_distinguished_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "organizational_unit_distinguished_name", value)
@pulumi.input_type
class FleetTagArgs:
    """Key/value tag attached to a Fleet resource."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class FleetVpcConfigArgs:
    """Optional VPC settings for a Fleet: security group IDs and subnet IDs."""
    def __init__(__self__, *,
                 security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        if security_group_ids is not None:
            pulumi.set(__self__, "security_group_ids", security_group_ids)
        if subnet_ids is not None:
            pulumi.set(__self__, "subnet_ids", subnet_ids)
    @property
    @pulumi.getter(name="securityGroupIds")
    def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "security_group_ids")
    @security_group_ids.setter
    def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_group_ids", value)
    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "subnet_ids")
    @subnet_ids.setter
    def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "subnet_ids", value)
@pulumi.input_type
class ImageBuilderAccessEndpointArgs:
    """Access endpoint (endpoint type + VPC endpoint id) for an ImageBuilder."""
    def __init__(__self__, *,
                 endpoint_type: pulumi.Input[str],
                 vpce_id: pulumi.Input[str]):
        pulumi.set(__self__, "endpoint_type", endpoint_type)
        pulumi.set(__self__, "vpce_id", vpce_id)
    @property
    @pulumi.getter(name="endpointType")
    def endpoint_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "endpoint_type")
    @endpoint_type.setter
    def endpoint_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "endpoint_type", value)
    @property
    @pulumi.getter(name="vpceId")
    def vpce_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "vpce_id")
    @vpce_id.setter
    def vpce_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vpce_id", value)
@pulumi.input_type
class ImageBuilderDomainJoinInfoArgs:
    """Optional directory-join info for an ImageBuilder: directory name and OU distinguished name."""
    def __init__(__self__, *,
                 directory_name: Optional[pulumi.Input[str]] = None,
                 organizational_unit_distinguished_name: Optional[pulumi.Input[str]] = None):
        if directory_name is not None:
            pulumi.set(__self__, "directory_name", directory_name)
        if organizational_unit_distinguished_name is not None:
            pulumi.set(__self__, "organizational_unit_distinguished_name", organizational_unit_distinguished_name)
    @property
    @pulumi.getter(name="directoryName")
    def directory_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "directory_name")
    @directory_name.setter
    def directory_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "directory_name", value)
    @property
    @pulumi.getter(name="organizationalUnitDistinguishedName")
    def organizational_unit_distinguished_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "organizational_unit_distinguished_name")
    @organizational_unit_distinguished_name.setter
    def organizational_unit_distinguished_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "organizational_unit_distinguished_name", value)
@pulumi.input_type
class ImageBuilderTagArgs:
    """Key/value tag attached to an ImageBuilder resource."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ImageBuilderVpcConfigArgs:
    """Optional VPC settings for an ImageBuilder: security group IDs and subnet IDs."""
    def __init__(__self__, *,
                 security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        if security_group_ids is not None:
            pulumi.set(__self__, "security_group_ids", security_group_ids)
        if subnet_ids is not None:
            pulumi.set(__self__, "subnet_ids", subnet_ids)
    @property
    @pulumi.getter(name="securityGroupIds")
    def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "security_group_ids")
    @security_group_ids.setter
    def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_group_ids", value)
    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "subnet_ids")
    @subnet_ids.setter
    def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "subnet_ids", value)
@pulumi.input_type
class StackAccessEndpointArgs:
    """Access endpoint (endpoint type + VPC endpoint id) for a Stack."""
    def __init__(__self__, *,
                 endpoint_type: pulumi.Input[str],
                 vpce_id: pulumi.Input[str]):
        pulumi.set(__self__, "endpoint_type", endpoint_type)
        pulumi.set(__self__, "vpce_id", vpce_id)
    @property
    @pulumi.getter(name="endpointType")
    def endpoint_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "endpoint_type")
    @endpoint_type.setter
    def endpoint_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "endpoint_type", value)
    @property
    @pulumi.getter(name="vpceId")
    def vpce_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "vpce_id")
    @vpce_id.setter
    def vpce_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vpce_id", value)
@pulumi.input_type
class StackApplicationSettingsArgs:
    """Application-settings persistence toggle for a Stack: enabled flag plus optional settings group."""
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool],
                 settings_group: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "enabled", enabled)
        if settings_group is not None:
            pulumi.set(__self__, "settings_group", settings_group)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="settingsGroup")
    def settings_group(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "settings_group")
    @settings_group.setter
    def settings_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "settings_group", value)
@pulumi.input_type
class StackStorageConnectorArgs:
    """Storage connector for a Stack: required connector type plus optional domains and resource identifier."""
    def __init__(__self__, *,
                 connector_type: pulumi.Input[str],
                 domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 resource_identifier: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "connector_type", connector_type)
        if domains is not None:
            pulumi.set(__self__, "domains", domains)
        if resource_identifier is not None:
            pulumi.set(__self__, "resource_identifier", resource_identifier)
    @property
    @pulumi.getter(name="connectorType")
    def connector_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "connector_type")
    @connector_type.setter
    def connector_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "connector_type", value)
    @property
    @pulumi.getter
    def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "domains")
    @domains.setter
    def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "domains", value)
    @property
    @pulumi.getter(name="resourceIdentifier")
    def resource_identifier(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource_identifier")
    @resource_identifier.setter
    def resource_identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_identifier", value)
@pulumi.input_type
class StackTagArgs:
    """Key/value tag attached to a Stack resource."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class StackUserSettingArgs:
    """Action/permission pair controlling a user capability on a Stack."""
    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 permission: pulumi.Input[str]):
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "permission", permission)
    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: pulumi.Input[str]):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def permission(self) -> pulumi.Input[str]:
        return pulumi.get(self, "permission")
    @permission.setter
    def permission(self, value: pulumi.Input[str]):
        pulumi.set(self, "permission", value)
| 32.975329 | 114 | 0.66547 |
acf86b199a5686df0ee1c107b80c18a192074705 | 1,400 | py | Python | pinax/apps/topics/templatetags/topics_tags.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | 1 | 2019-02-12T04:45:09.000Z | 2019-02-12T04:45:09.000Z | pinax/apps/topics/templatetags/topics_tags.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | null | null | null | pinax/apps/topics/templatetags/topics_tags.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | 1 | 2019-02-12T04:45:40.000Z | 2019-02-12T04:45:40.000Z | from django import template
from django.contrib.contenttypes.models import ContentType
from topics.models import Topic
register = template.Library()


@register.inclusion_tag("topics/topic_item.html", takes_context=True)
def show_topic(context, topic):
    """Render one topic item, forwarding the current group (if any) from context."""
    current_group = context.get("group")
    return {"topic": topic, "group": current_group}
class TopicsForGroupNode(template.Node):
    """Template node that stores a group's topics in the rendering context."""

    def __init__(self, group_name, context_name):
        self.group = template.Variable(group_name)
        self.context_name = context_name

    def render(self, context):
        """Resolve the group variable; on success stash its topics under context_name."""
        try:
            group = self.group.resolve(context)
        except template.VariableDoesNotExist:
            return u""
        ctype = ContentType.objects.get_for_model(group)
        topics = Topic.objects.filter(content_type=ctype, object_id=group.id)
        context[self.context_name] = topics
        return u""
@register.tag(name="get_topics_for_group")
def do_get_topics_for_group(parser, token):
    """
    Provides the template tag {% get_topics_for_group GROUP as VARIABLE %}
    """
    bits = token.split_contents()
    if len(bits) != 4:
        # BUGFIX: the original unpacked the tag name into `_tagname` but formatted
        # the error with the undefined name `tagname`, so a template syntax error
        # raised NameError instead of the intended TemplateSyntaxError.
        raise template.TemplateSyntaxError(u"%(tagname)r tag syntax is as follows: "
            "{%% %(tagname)r GROUP as VARIABLE %%}" % {"tagname": bits[0]})
    _tagname, group_name, _as, context_name = bits
    return TopicsForGroupNode(group_name, context_name)
| 28.571429 | 84 | 0.68 |
acf86d83d3a572e93c9b995bbde103117c3eec8b | 3,760 | py | Python | wang/dataDownload.py | zhangqx/movie_recommender | 93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38 | [
"MIT"
] | 1 | 2019-12-03T16:20:01.000Z | 2019-12-03T16:20:01.000Z | wang/dataDownload.py | zhangqx/movie_recommender | 93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38 | [
"MIT"
] | null | null | null | wang/dataDownload.py | zhangqx/movie_recommender | 93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38 | [
"MIT"
] | 3 | 2019-06-24T15:59:42.000Z | 2019-12-03T16:20:02.000Z | import os
from urllib.request import urlretrieve
from tqdm import tqdm
import zipfile
import hashlib
import shutil
import pandas as pd
import re
import pickle
import tensorflow as tf
# % matplotlib inline
# % config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import time
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
import random
# Allow duplicate OpenMP runtimes to coexist — works around libiomp5
# double-load aborts when MKL-linked libraries are imported together.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def _unzip(save_path, _, database_name, data_path):
"""
Unzip wrapper with the same interface as _ungzip
:param save_path: The path of the gzip files
:param database_name: Name of database
:param data_path: Path to extract to
:param _: HACK - Used to have to same interface as _ungzip
"""
print('Extracting {}...'.format(database_name))
with zipfile.ZipFile(save_path) as zf:
zf.extractall(data_path)
def download_extract(database_name, data_path):
    """
    Download and extract database
    :param database_name: Database name (currently only 'ml-1m' is supported)
    :param data_path: Directory to download into and extract under
    :raises ValueError: if database_name is not a known dataset
    """
    DATASET_ML1M = 'ml-1m'
    if database_name == DATASET_ML1M:
        url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
        hash_code = 'c4d9eecfca2ab87c1945afe126590906'
    else:
        # BUGFIX: the original fell through here with url/hash_code/extract_path
        # undefined, crashing later with NameError for any other dataset name.
        raise ValueError('Unknown database name: {}'.format(database_name))
    extract_path = os.path.join(data_path, 'ml-1m')
    save_path = os.path.join(data_path, 'ml-1m.zip')
    extract_fn = _unzip
    if os.path.exists(extract_path):
        print('Found {} Data'.format(database_name))
        return
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    if not os.path.exists(save_path):
        with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:
            urlretrieve(
                url,
                save_path,
                pbar.hook)
    # Verify the download before extracting (close the handle promptly)
    with open(save_path, 'rb') as f:
        digest = hashlib.md5(f.read()).hexdigest()
    assert digest == hash_code, \
        '{} file is corrupted. Remove the file and try again.'.format(save_path)
    os.makedirs(extract_path)
    try:
        extract_fn(save_path, extract_path, database_name, data_path)
    except Exception as err:
        shutil.rmtree(extract_path)  # Remove extraction folder if there is an error
        raise err
    print('Done.')
    # Remove compressed data
    # os.remove(save_path)
class DLProgress(tqdm):
    """
    Handle Progress Bar while Downloading
    """
    # Blocks reported at the previous callback; class-level default, shadowed
    # per instance once hook() first assigns it.
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        """
        A hook function that will be called once on establishment of the network connection and
        once after each block read thereafter.
        :param block_num: A count of blocks transferred so far
        :param block_size: Block size in bytes
        :param total_size: The total size of the file. This may be -1 on older FTP servers which do not return
                            a file size in response to a retrieval request.
        """
        self.total = total_size
        # Advance the bar by the bytes received since the previous callback
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if __name__ == "__main__":
    data_dir = './'
    download_extract('ml-1m', data_dir)
    # Preview the user data (UserID::Gender::Age::OccupationID::Zip-code)
    users_title = ['UserID', 'Gender', 'Age', 'OccupationID', 'Zip-code']
    users = pd.read_table('./ml-1m/users.dat', sep='::', header=None, names=users_title, engine = 'python')
    print(users.head(10))
    # Preview the movie data (MovieID::Title::Genres)
    movies_title = ['MovieID', 'Title', 'Genres']
    movies = pd.read_table('./ml-1m/movies.dat', sep='::', header=None, names=movies_title, engine = 'python')
    print(movies.head(10))
    # Preview the ratings data (UserID::MovieID::Rating::timestamp)
    ratings_title = ['UserID','MovieID', 'Rating', 'timestamps']
    ratings = pd.read_table('./ml-1m/ratings.dat', sep='::', header=None, names=ratings_title, engine = 'python')
    print(ratings.head(10))
acf86e2b71478964b66d14d718f7833b7cfd1463 | 4,117 | py | Python | synthesizing_results/generic_models/synthesize_hypersearch_LR_for_a_subject.py | tufts-ml/fNIRS-mental-workload-classifiers | b5199d6184e659152d1fe650db48eba53a221186 | [
"MIT"
] | 4 | 2021-12-22T12:04:29.000Z | 2022-03-23T20:02:21.000Z | synthesizing_results/generic_models/synthesize_hypersearch_LR_for_a_subject.py | tufts-ml/fNIRS-mental-workload-classifiers | b5199d6184e659152d1fe650db48eba53a221186 | [
"MIT"
] | null | null | null | synthesizing_results/generic_models/synthesize_hypersearch_LR_for_a_subject.py | tufts-ml/fNIRS-mental-workload-classifiers | b5199d6184e659152d1fe650db48eba53a221186 | [
"MIT"
] | 4 | 2021-12-29T09:02:20.000Z | 2022-02-24T22:15:40.000Z | import os
import numpy as np
import csv
import argparse
def extract_experiment_setting(experiment_name):
print('Passed in experiment_name is {}'.format(experiment_name), flush = True)
hyper_parameter_dict = {}
#hyperparameter to extract
C = experiment_name.split('C')[-1]
#record to dict
hyper_parameter_dict['C'] = C
#print values
header = ' checking experiment '.center(100, '-')
print(header)
print('C: {}'.format(C))
print('\n')
return hyper_parameter_dict
def extract_experiment_performance(experiment_dir, experiment_name):
performance_file_fullpath = os.path.join(experiment_dir, experiment_name, 'result_analysis/performance.txt')
returned_file = None
with open(performance_file_fullpath, 'r') as f: #only read mode, do not modify
returned_file = f.read()
validation_accuracy = round(float(returned_file.split('highest validation accuracy: ')[1].split('\n')[0]), 3)
test_accuracy = returned_file.split('corresponding test accuracy: ')[1].split('\n')[0]
print('validation_accuracy: {}'.format(validation_accuracy))
print('test_accuracy: {}'.format(test_accuracy))
return returned_file, validation_accuracy, test_accuracy
def main(experiment_dir, summary_save_dir):
experiments = os.listdir(experiment_dir)
incomplete_experiment_writer = open(os.path.join(summary_save_dir, 'incomplete_experiment_list.txt'), 'w')
summary_filename = os.path.join(summary_save_dir, 'hypersearch_summary.csv')
with open(summary_filename, mode='w') as csv_file:
fieldnames = ['validation_accuracy', 'test_accuracy', 'C', 'performance_string', 'experiment_folder', 'status']
fileEmpty = os.stat(summary_filename).st_size==0
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
if fileEmpty:
writer.writeheader()
for experiment_name in experiments:
if experiment_name !='hypersearch_summary':
experiment_folder = os.path.join(experiment_dir, experiment_name)
experiment_summary = extract_experiment_setting(experiment_name)
try:
returned_file, validation_accuracy, test_accuracy = extract_experiment_performance(experiment_dir, experiment_name)
print('Able to extract performance', flush = True)
experiment_summary.update(validation_accuracy=validation_accuracy, test_accuracy=test_accuracy, performance_string=returned_file, experiment_folder=experiment_folder, status='Completed')
print('Able to update experiment_summary\n\n')
except:
print(' NOT ABLE TO PROCESS {} \n\n'.format(experiment_dir + '/' + experiment_name).center(100, '-'), flush=True)
incomplete_experiment_writer.write(f"{experiment_name}\n\n")
experiment_summary.update(validation_accuracy='NA', test_accuracy='NA', performance_string='NA', experiment_folder=experiment_folder, status='Incompleted')
writer.writerow(experiment_summary)
incomplete_experiment_writer.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(description='synthesizing hyperparameter search results')
parser.add_argument('--experiment_dir')
#parse args
args = parser.parse_args()
experiment_dir = args.experiment_dir
assert os.path.exists(experiment_dir),'The passed in experiment_dir {} does not exist'.format(experiment_dir)
summary_save_dir = os.path.join(experiment_dir, 'hypersearch_summary')
if not os.path.exists(summary_save_dir):
os.makedirs(summary_save_dir)
main(experiment_dir, summary_save_dir)
| 35.491379 | 206 | 0.639543 |
acf86e9bda75cc948825f9e52d43109b55292717 | 2,084 | py | Python | into/backends/tests/test_bcolz.py | jreback/into | 0b2a0b26d66c2cbc5fc9e66f64255cf4d4adb007 | [
"BSD-3-Clause"
] | null | null | null | into/backends/tests/test_bcolz.py | jreback/into | 0b2a0b26d66c2cbc5fc9e66f64255cf4d4adb007 | [
"BSD-3-Clause"
] | null | null | null | into/backends/tests/test_bcolz.py | jreback/into | 0b2a0b26d66c2cbc5fc9e66f64255cf4d4adb007 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from into.backends.bcolz import (create, append, convert, ctable, carray,
resource, discover)
from into.chunks import chunks
from into import into, append, convert, resource, discover
import numpy as np
from into.utils import tmpfile
import os
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
a = carray([1, 2, 3, 4])
x = np.array([1, 2])
def test_discover():
assert discover(a) == discover(a[:])
def test_convert():
assert isinstance(convert(carray, np.ones([1, 2, 3])), carray)
b = carray([1, 2, 3])
assert isinstance(convert(np.ndarray, b), np.ndarray)
def test_chunks():
c = convert(chunks(np.ndarray), a, chunksize=2)
assert isinstance(c, chunks(np.ndarray))
assert len(list(c)) == 2
assert eq(list(c)[1], [3, 4])
assert eq(convert(np.ndarray, c), a[:])
def test_append_chunks():
b = carray(x)
append(b, chunks(np.ndarray)([x, x]))
assert len(b) == len(x) * 3
def test_append_other():
b = carray(x)
append(b, convert(list, x))
assert len(b) == 2 * len(x)
def test_resource_ctable():
with tmpfile('.bcolz') as fn:
os.remove(fn)
r = resource(fn, dshape='var * {name: string[5, "ascii"], balance: int32}')
assert isinstance(r, ctable)
assert r.dtype == [('name', 'S5'), ('balance', 'i4')]
def test_resource_carray():
with tmpfile('.bcolz') as fn:
os.remove(fn)
r = resource(fn, dshape='var * int32')
assert isinstance(r, carray)
assert r.dtype == 'i4'
y = np.array([('Alice', 100), ('Bob', 200)],
dtype=[('name', 'S7'), ('amount', 'i4')])
def test_convert_numpy_to_ctable():
b = convert(ctable, y)
assert isinstance(b, ctable)
assert eq(b[:], y)
def test_resource_existing_carray():
with tmpfile('.bcolz') as fn:
os.remove(fn)
r = resource(fn, dshape=discover(y))
append(r, y)
r.flush()
r2 = resource(fn)
assert eq(r2[:], y)
| 22.901099 | 83 | 0.598369 |
acf86fb1d14b9e43bee954d6c8dd5c3463534a9e | 411 | py | Python | products/migrations/0014_productfile_name.py | anonshubh/eCommerce-rostores- | 7503e855d650556e216c42fc1c5b95a42bb9c501 | [
"Apache-2.0"
] | null | null | null | products/migrations/0014_productfile_name.py | anonshubh/eCommerce-rostores- | 7503e855d650556e216c42fc1c5b95a42bb9c501 | [
"Apache-2.0"
] | null | null | null | products/migrations/0014_productfile_name.py | anonshubh/eCommerce-rostores- | 7503e855d650556e216c42fc1c5b95a42bb9c501 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.12 on 2020-04-29 17:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0013_productfile_free'),
]
operations = [
migrations.AddField(
model_name='productfile',
name='name',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| 21.631579 | 74 | 0.608273 |
acf87028865c6a8d4eb3d6955b90fd429b3dce65 | 192 | py | Python | main.py | robmarkcole/pycom-projects | 8643de33c43421adfc410589ccd1e5f60f007a05 | [
"MIT"
] | null | null | null | main.py | robmarkcole/pycom-projects | 8643de33c43421adfc410589ccd1e5f60f007a05 | [
"MIT"
] | null | null | null | main.py | robmarkcole/pycom-projects | 8643de33c43421adfc410589ccd1e5f60f007a05 | [
"MIT"
] | 2 | 2019-07-30T12:16:17.000Z | 2022-03-29T17:43:06.000Z | # main.py -- put your code here!
import pycom
import time
from network import WLAN
pycom.heartbeat(False)
pycom.rgbled(0x00ff00) # make green
wlan = WLAN()
print(wlan.isconnected())
| 19.2 | 36 | 0.71875 |
acf8708b5538701c9aa9e431ea4b66383de92b05 | 32,545 | py | Python | env/lib/python3.9/site-packages/flask/scaffold.py | dalbonip/hmp_hunter | c19be232d5b2fff2f5e0c3a994ffed5b791b3759 | [
"MIT"
] | 4 | 2021-12-08T07:15:32.000Z | 2021-12-11T16:37:40.000Z | src/flask/scaffold.py | zheyanliu99/flask | 41aaaf7fa07f28ef15fd1d343142e96440889a8b | [
"BSD-3-Clause"
] | 4 | 2021-12-10T12:34:23.000Z | 2022-01-02T16:21:09.000Z | src/flask/scaffold.py | zheyanliu99/flask | 41aaaf7fa07f28ef15fd1d343142e96440889a8b | [
"BSD-3-Clause"
] | 4 | 2021-11-23T05:36:16.000Z | 2021-11-23T05:39:33.000Z | import importlib.util
import os
import pkgutil
import sys
import typing as t
from collections import defaultdict
from functools import update_wrapper
from json import JSONDecoder
from json import JSONEncoder
from jinja2 import FileSystemLoader
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from .cli import AppGroup
from .globals import current_app
from .helpers import get_root_path
from .helpers import locked_cached_property
from .helpers import send_from_directory
from .templating import _default_template_ctx_processor
from .typing import AfterRequestCallable
from .typing import AppOrBlueprintKey
from .typing import BeforeRequestCallable
from .typing import TeardownCallable
from .typing import TemplateContextProcessorCallable
from .typing import URLDefaultCallable
from .typing import URLValuePreprocessorCallable
if t.TYPE_CHECKING:
from .wrappers import Response
from .typing import ErrorHandlerCallable
# a singleton sentinel value for parameter defaults
_sentinel = object()
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def setupmethod(f: F) -> F:
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
if self._is_setup_finished():
raise AssertionError(
"A setup function was called after the first request "
"was handled. This usually indicates a bug in the"
" application where a module was not imported and"
" decorators or other functionality was called too"
" late.\nTo fix this make sure to import all your view"
" modules, database models, and everything related at a"
" central place before the application starts serving"
" requests."
)
return f(self, *args, **kwargs)
return t.cast(F, update_wrapper(wrapper_func, f))
class Scaffold:
"""Common behavior shared between :class:`~flask.Flask` and
:class:`~flask.blueprints.Blueprint`.
:param import_name: The import name of the module where this object
is defined. Usually :attr:`__name__` should be used.
:param static_folder: Path to a folder of static files to serve.
If this is set, a static route will be added.
:param static_url_path: URL prefix for the static route.
:param template_folder: Path to a folder containing template files.
for rendering. If this is set, a Jinja loader will be added.
:param root_path: The path that static, template, and resource files
are relative to. Typically not set, it is discovered based on
the ``import_name``.
.. versionadded:: 2.0
"""
name: str
_static_folder: t.Optional[str] = None
_static_url_path: t.Optional[str] = None
#: JSON encoder class used by :func:`flask.json.dumps`. If a
#: blueprint sets this, it will be used instead of the app's value.
json_encoder: t.Optional[t.Type[JSONEncoder]] = None
#: JSON decoder class used by :func:`flask.json.loads`. If a
#: blueprint sets this, it will be used instead of the app's value.
json_decoder: t.Optional[t.Type[JSONDecoder]] = None
def __init__(
self,
import_name: str,
static_folder: t.Optional[t.Union[str, os.PathLike]] = None,
static_url_path: t.Optional[str] = None,
template_folder: t.Optional[str] = None,
root_path: t.Optional[str] = None,
):
#: The name of the package or module that this object belongs
#: to. Do not change this once it is set by the constructor.
self.import_name = import_name
self.static_folder = static_folder # type: ignore
self.static_url_path = static_url_path
#: The path to the templates folder, relative to
#: :attr:`root_path`, to add to the template loader. ``None`` if
#: templates should not be added.
self.template_folder = template_folder
if root_path is None:
root_path = get_root_path(self.import_name)
#: Absolute path to the package on the filesystem. Used to look
#: up resources contained in the package.
self.root_path = root_path
#: The Click command group for registering CLI commands for this
#: object. The commands are available from the ``flask`` command
#: once the application has been discovered and blueprints have
#: been registered.
self.cli = AppGroup()
#: A dictionary mapping endpoint names to view functions.
#:
#: To register a view function, use the :meth:`route` decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.view_functions: t.Dict[str, t.Callable] = {}
#: A data structure of registered error handlers, in the format
#: ``{scope: {code: {class: handler}}}```. The ``scope`` key is
#: the name of a blueprint the handlers are active for, or
#: ``None`` for all requests. The ``code`` key is the HTTP
#: status code for ``HTTPException``, or ``None`` for
#: other exceptions. The innermost dictionary maps exception
#: classes to handler functions.
#:
#: To register an error handler, use the :meth:`errorhandler`
#: decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.error_handler_spec: t.Dict[
AppOrBlueprintKey,
t.Dict[t.Optional[int], t.Dict[t.Type[Exception], "ErrorHandlerCallable"]],
] = defaultdict(lambda: defaultdict(dict))
#: A data structure of functions to call at the beginning of
#: each request, in the format ``{scope: [functions]}``. The
#: ``scope`` key is the name of a blueprint the functions are
#: active for, or ``None`` for all requests.
#:
#: To register a function, use the :meth:`before_request`
#: decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.before_request_funcs: t.Dict[
AppOrBlueprintKey, t.List[BeforeRequestCallable]
] = defaultdict(list)
#: A data structure of functions to call at the end of each
#: request, in the format ``{scope: [functions]}``. The
#: ``scope`` key is the name of a blueprint the functions are
#: active for, or ``None`` for all requests.
#:
#: To register a function, use the :meth:`after_request`
#: decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.after_request_funcs: t.Dict[
AppOrBlueprintKey, t.List[AfterRequestCallable]
] = defaultdict(list)
#: A data structure of functions to call at the end of each
#: request even if an exception is raised, in the format
#: ``{scope: [functions]}``. The ``scope`` key is the name of a
#: blueprint the functions are active for, or ``None`` for all
#: requests.
#:
#: To register a function, use the :meth:`teardown_request`
#: decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.teardown_request_funcs: t.Dict[
AppOrBlueprintKey, t.List[TeardownCallable]
] = defaultdict(list)
#: A data structure of functions to call to pass extra context
#: values when rendering templates, in the format
#: ``{scope: [functions]}``. The ``scope`` key is the name of a
#: blueprint the functions are active for, or ``None`` for all
#: requests.
#:
#: To register a function, use the :meth:`context_processor`
#: decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.template_context_processors: t.Dict[
AppOrBlueprintKey, t.List[TemplateContextProcessorCallable]
] = defaultdict(list, {None: [_default_template_ctx_processor]})
#: A data structure of functions to call to modify the keyword
#: arguments passed to the view function, in the format
#: ``{scope: [functions]}``. The ``scope`` key is the name of a
#: blueprint the functions are active for, or ``None`` for all
#: requests.
#:
#: To register a function, use the
#: :meth:`url_value_preprocessor` decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.url_value_preprocessors: t.Dict[
AppOrBlueprintKey,
t.List[URLValuePreprocessorCallable],
] = defaultdict(list)
#: A data structure of functions to call to modify the keyword
#: arguments when generating URLs, in the format
#: ``{scope: [functions]}``. The ``scope`` key is the name of a
#: blueprint the functions are active for, or ``None`` for all
#: requests.
#:
#: To register a function, use the :meth:`url_defaults`
#: decorator.
#:
#: This data structure is internal. It should not be modified
#: directly and its format may change at any time.
self.url_default_functions: t.Dict[
AppOrBlueprintKey, t.List[URLDefaultCallable]
] = defaultdict(list)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.name!r}>"
def _is_setup_finished(self) -> bool:
raise NotImplementedError
@property
def static_folder(self) -> t.Optional[str]:
"""The absolute path to the configured static folder. ``None``
if no static folder is set.
"""
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
else:
return None
@static_folder.setter
def static_folder(self, value: t.Optional[t.Union[str, os.PathLike]]) -> None:
if value is not None:
value = os.fspath(value).rstrip(r"\/")
self._static_folder = value
@property
def has_static_folder(self) -> bool:
"""``True`` if :attr:`static_folder` is set.
.. versionadded:: 0.5
"""
return self.static_folder is not None
@property
def static_url_path(self) -> t.Optional[str]:
"""The URL prefix that the static route will be accessible from.
If it was not configured during init, it is derived from
:attr:`static_folder`.
"""
if self._static_url_path is not None:
return self._static_url_path
if self.static_folder is not None:
basename = os.path.basename(self.static_folder)
return f"/{basename}".rstrip("/")
return None
@static_url_path.setter
def static_url_path(self, value: t.Optional[str]) -> None:
if value is not None:
value = value.rstrip("/")
self._static_url_path = value
def get_send_file_max_age(self, filename: t.Optional[str]) -> t.Optional[int]:
"""Used by :func:`send_file` to determine the ``max_age`` cache
value for a given file path if it wasn't passed.
By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
the configuration of :data:`~flask.current_app`. This defaults
to ``None``, which tells the browser to use conditional requests
instead of a timed cache, which is usually preferable.
.. versionchanged:: 2.0
The default configuration is ``None`` instead of 12 hours.
.. versionadded:: 0.9
"""
value = current_app.send_file_max_age_default
if value is None:
return None
return int(value.total_seconds())
def send_static_file(self, filename: str) -> "Response":
"""The view function used to serve files from
:attr:`static_folder`. A route is automatically registered for
this view at :attr:`static_url_path` if :attr:`static_folder` is
set.
.. versionadded:: 0.5
"""
if not self.has_static_folder:
raise RuntimeError("'static_folder' must be set to serve static_files.")
# send_file only knows to call get_send_file_max_age on the app,
# call it here so it works for blueprints too.
max_age = self.get_send_file_max_age(filename)
return send_from_directory(
t.cast(str, self.static_folder), filename, max_age=max_age
)
@locked_cached_property
def jinja_loader(self) -> t.Optional[FileSystemLoader]:
"""The Jinja loader for this object's templates. By default this
is a class :class:`jinja2.loaders.FileSystemLoader` to
:attr:`template_folder` if it is set.
.. versionadded:: 0.5
"""
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path, self.template_folder))
else:
return None
def open_resource(self, resource: str, mode: str = "rb") -> t.IO[t.AnyStr]:
"""Open a resource file relative to :attr:`root_path` for
reading.
For example, if the file ``schema.sql`` is next to the file
``app.py`` where the ``Flask`` app is defined, it can be opened
with:
.. code-block:: python
with app.open_resource("schema.sql") as f:
conn.executescript(f.read())
:param resource: Path to the resource relative to
:attr:`root_path`.
:param mode: Open the file in this mode. Only reading is
supported, valid values are "r" (or "rt") and "rb".
"""
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
return open(os.path.join(self.root_path, resource), mode)
def _method_route(
self,
method: str,
rule: str,
options: dict,
) -> t.Callable[[F], F]:
if "methods" in options:
raise TypeError("Use the 'route' decorator to use the 'methods' argument.")
return self.route(rule, methods=[method], **options)
def get(self, rule: str, **options: t.Any) -> t.Callable[[F], F]:
"""Shortcut for :meth:`route` with ``methods=["GET"]``.
.. versionadded:: 2.0
"""
return self._method_route("GET", rule, options)
def post(self, rule: str, **options: t.Any) -> t.Callable[[F], F]:
"""Shortcut for :meth:`route` with ``methods=["POST"]``.
.. versionadded:: 2.0
"""
return self._method_route("POST", rule, options)
def put(self, rule: str, **options: t.Any) -> t.Callable[[F], F]:
"""Shortcut for :meth:`route` with ``methods=["PUT"]``.
.. versionadded:: 2.0
"""
return self._method_route("PUT", rule, options)
def delete(self, rule: str, **options: t.Any) -> t.Callable[[F], F]:
"""Shortcut for :meth:`route` with ``methods=["DELETE"]``.
.. versionadded:: 2.0
"""
return self._method_route("DELETE", rule, options)
def patch(self, rule: str, **options: t.Any) -> t.Callable[[F], F]:
"""Shortcut for :meth:`route` with ``methods=["PATCH"]``.
.. versionadded:: 2.0
"""
return self._method_route("PATCH", rule, options)
def route(self, rule: str, **options: t.Any) -> t.Callable[[F], F]:
"""Decorate a view function to register it with the given URL
rule and options. Calls :meth:`add_url_rule`, which has more
details about the implementation.
.. code-block:: python
@app.route("/")
def index():
return "Hello, World!"
See :ref:`url-route-registrations`.
The endpoint name for the route defaults to the name of the view
function if the ``endpoint`` parameter isn't passed.
The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` and
``OPTIONS`` are added automatically.
:param rule: The URL rule string.
:param options: Extra options passed to the
:class:`~werkzeug.routing.Rule` object.
"""
def decorator(f: F) -> F:
endpoint = options.pop("endpoint", None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def add_url_rule(
self,
rule: str,
endpoint: t.Optional[str] = None,
view_func: t.Optional[t.Callable] = None,
provide_automatic_options: t.Optional[bool] = None,
**options: t.Any,
) -> None:
"""Register a rule for routing incoming requests and building
URLs. The :meth:`route` decorator is a shortcut to call this
with the ``view_func`` argument. These are equivalent:
.. code-block:: python
@app.route("/")
def index():
...
.. code-block:: python
def index():
...
app.add_url_rule("/", view_func=index)
See :ref:`url-route-registrations`.
The endpoint name for the route defaults to the name of the view
function if the ``endpoint`` parameter isn't passed. An error
will be raised if a function has already been registered for the
endpoint.
The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` is
always added automatically, and ``OPTIONS`` is added
automatically by default.
``view_func`` does not necessarily need to be passed, but if the
rule should participate in routing an endpoint name must be
associated with a view function at some point with the
:meth:`endpoint` decorator.
.. code-block:: python
app.add_url_rule("/", endpoint="index")
@app.endpoint("index")
def index():
...
If ``view_func`` has a ``required_methods`` attribute, those
methods are added to the passed and automatic methods. If it
has a ``provide_automatic_methods`` attribute, it is used as the
default if the parameter is not passed.
:param rule: The URL rule string.
:param endpoint: The endpoint name to associate with the rule
and view function. Used when routing and building URLs.
Defaults to ``view_func.__name__``.
:param view_func: The view function to associate with the
endpoint name.
:param provide_automatic_options: Add the ``OPTIONS`` method and
respond to ``OPTIONS`` requests automatically.
:param options: Extra options passed to the
:class:`~werkzeug.routing.Rule` object.
"""
raise NotImplementedError
def endpoint(self, endpoint: str) -> t.Callable:
"""Decorate a view function to register it for the given
endpoint. Used if a rule is added without a ``view_func`` with
:meth:`add_url_rule`.
.. code-block:: python
app.add_url_rule("/ex", endpoint="example")
@app.endpoint("example")
def example():
...
:param endpoint: The endpoint name to associate with the view
function.
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def before_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:
"""Register a function to run before each request.
For example, this can be used to open a database connection, or
to load the logged in user from the session.
.. code-block:: python
@app.before_request
def load_user():
if "user_id" in session:
g.user = db.session.get(session["user_id"])
The function will be called without any arguments. If it returns
a non-``None`` value, the value is handled as if it was the
return value from the view, and further request handling is
stopped.
"""
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def after_request(self, f: AfterRequestCallable) -> AfterRequestCallable:
"""Register a function to run after each request to this object.
The function is called with the response object, and must return
a response object. This allows the functions to modify or
replace the response before it is sent.
If a function raises an exception, any remaining
``after_request`` functions will not be called. Therefore, this
should not be used for actions that must execute, such as to
close resources. Use :meth:`teardown_request` for that.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f: TeardownCallable) -> TeardownCallable:
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if not an
actual request was performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Teardown functions must avoid raising exceptions. If
they execute code that might fail they
will have to surround the execution of that code with try/except
statements and log any errors.
When a teardown function was called because of an exception it will
be passed an error object.
The return values of teardown functions are ignored.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def context_processor(
self, f: TemplateContextProcessorCallable
) -> TemplateContextProcessorCallable:
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def url_value_preprocessor(
self, f: URLValuePreprocessorCallable
) -> URLValuePreprocessorCallable:
"""Register a URL value preprocessor function for all view
functions in the application. These functions will be called before the
:meth:`before_request` functions.
The function can modify the values captured from the matched url before
they are passed to the view. For example, this can be used to pop a
common language code value and place it in ``g`` rather than pass it to
every view.
The function is passed the endpoint name and values dict. The return
value is ignored.
"""
self.url_value_preprocessors[None].append(f)
return f
@setupmethod
def url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions[None].append(f)
return f
@setupmethod
def errorhandler(
self, code_or_exception: t.Union[t.Type[Exception], int]
) -> t.Callable[["ErrorHandlerCallable"], "ErrorHandlerCallable"]:
"""Register a function to handle errors by code or exception class.
A decorator that is used to register a function given an
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
.. versionadded:: 0.7
Use :meth:`register_error_handler` instead of modifying
:attr:`error_handler_spec` directly, for application wide error
handlers.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code_or_exception: the code as integer for the handler, or
an arbitrary exception
"""
def decorator(f: "ErrorHandlerCallable") -> "ErrorHandlerCallable":
self.register_error_handler(code_or_exception, f)
return f
return decorator
@setupmethod
def register_error_handler(
self,
code_or_exception: t.Union[t.Type[Exception], int],
f: "ErrorHandlerCallable",
) -> None:
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
if isinstance(code_or_exception, HTTPException): # old broken behavior
raise ValueError(
"Tried to register a handler for an exception instance"
f" {code_or_exception!r}. Handlers can only be"
" registered for exception classes or HTTP error codes."
)
try:
exc_class, code = self._get_exc_class_and_code(code_or_exception)
except KeyError:
raise KeyError(
f"'{code_or_exception}' is not a recognized HTTP error"
" code. Use a subclass of HTTPException with that code"
" instead."
) from None
self.error_handler_spec[None][code][exc_class] = f
@staticmethod
def _get_exc_class_and_code(
exc_class_or_code: t.Union[t.Type[Exception], int]
) -> t.Tuple[t.Type[Exception], t.Optional[int]]:
"""Get the exception class being handled. For HTTP status codes
or ``HTTPException`` subclasses, return both the exception and
status code.
:param exc_class_or_code: Any exception class, or an HTTP status
code as an integer.
"""
exc_class: t.Type[Exception]
if isinstance(exc_class_or_code, int):
exc_class = default_exceptions[exc_class_or_code]
else:
exc_class = exc_class_or_code
assert issubclass(
exc_class, Exception
), "Custom exceptions must be subclasses of Exception."
if issubclass(exc_class, HTTPException):
return exc_class, exc_class.code
else:
return exc_class, None
def _endpoint_from_view_func(view_func: t.Callable) -> str:
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, "expected view func if endpoint is not provided."
return view_func.__name__
def _matching_loader_thinks_module_is_package(loader, mod_name):
"""Attempt to figure out if the given name is a package or a module.
:param: loader: The loader that handled the name.
:param mod_name: The name of the package or module.
"""
# Use loader.is_package if it's available.
if hasattr(loader, "is_package"):
return loader.is_package(mod_name)
cls = type(loader)
# NamespaceLoader doesn't implement is_package, but all names it
# loads must be packages.
if cls.__module__ == "_frozen_importlib" and cls.__name__ == "NamespaceLoader":
return True
# Otherwise we need to fail with an error that explains what went
# wrong.
raise AttributeError(
f"'{cls.__name__}.is_package()' must be implemented for PEP 302"
f" import hooks."
)
def _find_package_path(root_mod_name):
"""Find the path that contains the package or module."""
try:
spec = importlib.util.find_spec(root_mod_name)
if spec is None:
raise ValueError("not found")
# ImportError: the machinery told us it does not exist
# ValueError:
# - the module name was invalid
# - the module name is __main__
# - *we* raised `ValueError` due to `spec` being `None`
except (ImportError, ValueError):
pass # handled below
else:
# namespace package
if spec.origin in {"namespace", None}:
return os.path.dirname(next(iter(spec.submodule_search_locations)))
# a package (with __init__.py)
elif spec.submodule_search_locations:
return os.path.dirname(os.path.dirname(spec.origin))
# just a normal module
else:
return os.path.dirname(spec.origin)
# we were unable to find the `package_path` using PEP 451 loaders
loader = pkgutil.get_loader(root_mod_name)
if loader is None or root_mod_name == "__main__":
# import name is not found, or interactive/main module
return os.getcwd()
if hasattr(loader, "get_filename"):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, "archive"):
# zipimporter's loader.archive points to the .egg or .zip file.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook, use __file__.
filename = importlib.import_module(root_mod_name).__file__
package_path = os.path.abspath(os.path.dirname(filename))
# If the imported name is a package, filename is currently pointing
# to the root of the package, need to get the current directory.
if _matching_loader_thinks_module_is_package(loader, root_mod_name):
package_path = os.path.dirname(package_path)
return package_path
def find_package(import_name: str):
    """Find the prefix that a package is installed under, and the path
    that it would be imported from.

    The prefix is the directory containing the standard directory
    hierarchy (lib, bin, etc.). If the package is not installed to the
    system (:attr:`sys.prefix`) or a virtualenv (``site-packages``),
    ``None`` is returned.

    The path is the entry in :attr:`sys.path` that contains the package
    for import. If the package is not installed, it's assumed that the
    package was imported from the current working directory.
    """
    # Only the top-level name matters for locating the installation.
    root_mod_name = import_name.partition(".")[0]
    package_path = _find_package_path(root_mod_name)
    py_prefix = os.path.abspath(sys.prefix)

    # Installed directly under the interpreter prefix (system install).
    if package_path.startswith(py_prefix):
        return py_prefix, package_path

    site_parent, site_folder = os.path.split(package_path)

    if site_folder.lower() != "site-packages":
        # Not installed anywhere recognizable.
        return None, package_path

    # Installed to a virtualenv; figure out the prefix layout.
    parent, folder = os.path.split(site_parent)

    # Windows (prefix/lib/site-packages)
    if folder.lower() == "lib":
        return parent, package_path

    # Unix (prefix/lib/pythonX.Y/site-packages)
    if os.path.basename(parent).lower() == "lib":
        return os.path.dirname(parent), package_path

    # Something else (prefix/site-packages)
    return site_parent, package_path
| 37.408046 | 87 | 0.632816 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.