max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
pydensecrf/py_densecrf.py | MarvinTeichmann/pydensecrf | 0 | 6619751 | <reponame>MarvinTeichmann/pydensecrf
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy as scp
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral
from pydensecrf.utils import create_pairwise_gaussian
import pydensecrf.lattice as pylattice
def exp_and_normalize(features):
    """Numerically stable softmax over axis 0.

    Known as "softmax" in the deep learning literature.
    """
    # Subtracting the per-column maximum keeps np.exp from overflowing.
    shifted = features - np.max(features, axis=0)
    exponentials = np.exp(shifted)
    return exponentials / np.sum(exponentials, axis=0)
def potts_comp_update(weight, features):
    """Potts compatibility transform: scale the message and negate it."""
    scaled = weight * features
    return -scaled
class DenseCRF():
    """This is a reimplementation of DenseCRF (almost) entirely in python.

    Mean-field inference for a fully connected CRF: unary energies are
    set directly, pairwise (Potts) energies are evaluated with a
    permutohedral lattice (pydensecrf.lattice).
    """

    def __init__(self, npixels, nclasses):
        """Create a CRF over `npixels` variables with `nclasses` labels."""
        super(DenseCRF, self).__init__()
        self.npixels = npixels
        self.nclasses = nclasses
        self.kernel_list = []   # one message-passing closure per pairwise term
        self.compact_list = []  # one compatibility transform per pairwise term

    def set_unary_energy(self, unary):
        """Set the unary energy term (negative log-scores per label)."""
        self.unary = unary
        return

    def add_pairwise_energy(self, feats, compat=3,
                            kernel_type="diag", norm="symmetric"):
        """Add a pairwise energy term defined on the features `feats`.

        Arguments:
        feats -- feature matrix handed to the permutohedral lattice
        compat -- scalar Potts compatibility weight
        kernel_type -- only "diag" is implemented
        norm -- only "symmetric" is implemented
        """
        self.kernel_list.append(self._init_lattice(feats, kernel_type, norm))
        self.compact_list.append(self._init_comp(compat))

    def _init_lattice(self, feats, kernel_type, norm):
        """Build and return the message-passing closure for `feats`."""
        if not kernel_type == "diag":
            raise NotImplementedError
        if not norm == "symmetric":
            raise NotImplementedError
        lattice = pylattice.Permutohedral()
        lattice.init_filer(feats)
        # Symmetric normalisation: norm_i = 1 / sqrt(sum_j k(f_i, f_j)).
        nfeats = np.ones([1, feats.shape[1]], dtype=np.float32)
        norm = lattice.compute(nfeats)
        norm = 1 / np.sqrt(norm + 1e-20)

        def compute_lattice(inp):
            # Normalize
            norm_inp = inp * norm
            # Apply lattice
            message = lattice.compute(norm_inp)
            # Normalize
            norm_message = message * norm
            return norm_message

        return compute_lattice

    def _init_comp(self, compat):
        """Build the Potts compatibility transform for weight `compat`.

        Raises:
        NotImplementedError -- if `compat` is not a numeric scalar
            (only scalar Potts compatibilities are implemented).
        """
        # BUG FIX: the original check `type(compat) is not int and not float`
        # always evaluated to False (`not float` tests the truthiness of the
        # *type object*), so invalid compatibilities were never rejected.
        # Accept python and numpy scalars explicitly.
        if not isinstance(compat, (int, float, np.number)):
            print("Compat is {}.".format(compat))
            raise NotImplementedError
        return lambda feat: potts_comp_update(compat, feat)

    def inference(self, num_iter=5):
        """Run `num_iter` mean-field updates; return the label marginals."""
        prediction = exp_and_normalize(-self.unary)
        for i in range(num_iter):
            tmp1 = -self.unary
            for kernel, comp in zip(self.kernel_list, self.compact_list):
                tmp2 = kernel(prediction)
                tmp2 = comp(tmp2)
                tmp1 = tmp1 - tmp2
            prediction = exp_and_normalize(tmp1)
        # todo: write wrapper for matrixXF
        # no copy required.
        return prediction
def start_inference():
    """Stub: incremental inference is not implemented yet."""
    # NOTE(review): takes no `self`; presumably meant to become DenseCRF
    # methods mirroring pydensecrf's startInference/stepInference API --
    # confirm before use.
    pass


def step_inference():
    """Stub: incremental inference is not implemented yet."""
    pass
| """
The MIT License (MIT)
Copyright (c) 2017 <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy as scp
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral
from pydensecrf.utils import create_pairwise_gaussian
import pydensecrf.lattice as pylattice
def exp_and_normalize(features):
"""
Aka "softmax" in deep learning literature
"""
exp_features = np.exp(features - np.max(features, axis=0))
normalize_features = exp_features / np.sum(exp_features, axis=0)
return normalize_features
def potts_comp_update(weight, features):
return -weight * features
class DenseCRF():
"""This is a reimplementation of DenseCRF (almost) entirely in python.
"""
def __init__(self, npixels, nclasses):
super(DenseCRF, self).__init__()
self.npixels = npixels
self.nclasses = nclasses
self.kernel_list = []
self.compact_list = []
def set_unary_energy(self, unary):
self.unary = unary
return
def add_pairwise_energy(self, feats, compat=3,
kernel_type="diag", norm="symmetric"):
self.kernel_list.append(self._init_lattice(feats, kernel_type, norm))
self.compact_list.append(self._init_comp(compat))
def _init_lattice(self, feats, kernel_type, norm):
if not kernel_type == "diag":
raise NotImplementedError
if not norm == "symmetric":
raise NotImplementedError
lattice = pylattice.Permutohedral()
lattice.init_filer(feats)
nfeats = np.ones([1, feats.shape[1]], dtype=np.float32)
norm = lattice.compute(nfeats)
norm = 1 / np.sqrt(norm + 1e-20)
def compute_lattice(inp):
# Normalize
norm_inp = inp * norm
# Apply lattice
message = lattice.compute(norm_inp)
# Normalize
norm_message = message * norm
return norm_message
return compute_lattice
def _init_comp(self, compat):
if type(compat) is not int and not float:
print("Compat is {}.".format(compat))
raise NotImplementedError
return lambda feat: potts_comp_update(compat, feat)
def inference(self, num_iter=5):
prediction = exp_and_normalize(-self.unary)
for i in range(num_iter):
tmp1 = -self.unary
for kernel, comp in zip(self.kernel_list, self.compact_list):
tmp2 = kernel(prediction)
tmp2 = comp(tmp2)
tmp1 = tmp1 - tmp2
prediction = exp_and_normalize(tmp1)
# assert(False)
# todo: write wrapper for matrixXF
# no copy required.
return prediction
def start_inference():
pass
def step_inference():
pass | en | 0.702227 | The MIT License (MIT) Copyright (c) 2017 <NAME> Aka "softmax" in deep learning literature This is a reimplementation of DenseCRF (almost) entirely in python. # Normalize # Apply lattice # Normalize # assert(False) # todo: write wrapper for matrixXF # no copy required. | 2.268631 | 2 |
tests/test_http_server.py | jayqi/quickhttp | 2 | 6619752 | import shutil
from threading import Thread
from time import sleep
import pytest
import requests
import quickhttp.exceptions as exceptions
from quickhttp.http_server import (
DEFAULT_PORT_RANGE_MIN,
DEFAULT_PORT_RANGE_MAX,
is_port_available,
find_available_port,
SearchType,
run_timed_http_server,
)
KEEP_ALIVE_TIME = 3 # Duration to keep server alive for
WAIT_TIME = 2 # Duration to wait before running test, to give server time to start up
@pytest.fixture()
def timed_http_server(tmp_path, html_file):
    """Start run_timed_http_server in a daemon thread serving tmp_path.

    Copies the ``html_file`` fixture into ``tmp_path``, launches the
    server on a free port, and waits WAIT_TIME seconds so it is up before
    the test body runs.  The server shuts itself down after
    KEEP_ALIVE_TIME seconds; teardown joins the thread.

    Yields:
        (tmp_path, port) -- the served directory and the port in use.
    """
    shutil.copy(html_file, tmp_path)
    port = find_available_port()
    thread = Thread(
        target=run_timed_http_server,
        kwargs={
            "address": "127.0.0.1",
            "port": port,
            "directory": tmp_path,
            "timeout": KEEP_ALIVE_TIME,
        },
        daemon=True,  # don't block interpreter exit if the server hangs
    )
    thread.start()
    sleep(WAIT_TIME)  # give the server time to start up
    yield (tmp_path, port)
    thread.join()
def test_is_port_available(timed_http_server):
    """The server's port is busy while it lives and free after timeout."""
    _directory, port = timed_http_server
    assert not is_port_available(port)
    # Wait until the timed server has shut itself down.
    sleep(KEEP_ALIVE_TIME + WAIT_TIME)
    assert is_port_available(port)
@pytest.mark.parametrize("search_type", [member.value for member in SearchType])
def test_find_available_port(search_type):
    """Every search strategy yields a free port inside the default range."""
    port = find_available_port(search_type=search_type)
    assert is_port_available(port)
    assert DEFAULT_PORT_RANGE_MIN <= port <= DEFAULT_PORT_RANGE_MAX
def test_find_available_port_invalid_search_type():
    """An unknown search_type raises InvalidSearchTypeError."""
    bogus = "invalid_type"
    with pytest.raises(exceptions.InvalidSearchTypeError, match="Invalid search_type"):
        find_available_port(search_type=bogus)
def test_find_available_port_none_found(timed_http_server):
    """Searching a one-port range that is already occupied raises."""
    occupied_port = timed_http_server[1]
    with pytest.raises(exceptions.NoAvailablePortFoundError):
        find_available_port(range_min=occupied_port, range_max=occupied_port)
def test_run_timed_http_server(timed_http_server):
    """End-to-end check: the server serves index.html, then shuts down."""
    # Server is working
    directory, port = timed_http_server
    assert not is_port_available(port)
    response = requests.get(f"http://127.0.0.1:{port}")
    assert response.status_code == 200
    # The served page must be exactly the fixture file copied into tmp_path.
    with (directory / "index.html").open("r") as fp:
        assert response.text == fp.read()
    sleep(WAIT_TIME + KEEP_ALIVE_TIME)
    # Server is closed
    assert is_port_available(port)
    with pytest.raises(requests.exceptions.ConnectionError):
        requests.get(f"http://127.0.0.1:{port}")
| import shutil
from threading import Thread
from time import sleep
import pytest
import requests
import quickhttp.exceptions as exceptions
from quickhttp.http_server import (
DEFAULT_PORT_RANGE_MIN,
DEFAULT_PORT_RANGE_MAX,
is_port_available,
find_available_port,
SearchType,
run_timed_http_server,
)
KEEP_ALIVE_TIME = 3 # Duration to keep server alive for
WAIT_TIME = 2 # Duration to wait before running test, to give server time to start up
@pytest.fixture()
def timed_http_server(tmp_path, html_file):
shutil.copy(html_file, tmp_path)
port = find_available_port()
thread = Thread(
target=run_timed_http_server,
kwargs={
"address": "127.0.0.1",
"port": port,
"directory": tmp_path,
"timeout": KEEP_ALIVE_TIME,
},
daemon=True,
)
thread.start()
sleep(WAIT_TIME)
yield (tmp_path, port)
thread.join()
def test_is_port_available(timed_http_server):
_, port = timed_http_server
assert not is_port_available(port)
sleep(WAIT_TIME + KEEP_ALIVE_TIME)
assert is_port_available(port)
@pytest.mark.parametrize("search_type", [level.value for level in SearchType])
def test_find_available_port(search_type):
port = find_available_port(search_type=search_type)
assert is_port_available(port)
assert port >= DEFAULT_PORT_RANGE_MIN
assert port <= DEFAULT_PORT_RANGE_MAX
def test_find_available_port_invalid_search_type():
with pytest.raises(exceptions.InvalidSearchTypeError, match="Invalid search_type"):
find_available_port(search_type="invalid_type")
def test_find_available_port_none_found(timed_http_server):
directory, port = timed_http_server
with pytest.raises(exceptions.NoAvailablePortFoundError):
find_available_port(range_min=port, range_max=port)
def test_run_timed_http_server(timed_http_server):
# Server is working
directory, port = timed_http_server
assert not is_port_available(port)
response = requests.get(f"http://127.0.0.1:{port}")
assert response.status_code == 200
with (directory / "index.html").open("r") as fp:
assert response.text == fp.read()
sleep(WAIT_TIME + KEEP_ALIVE_TIME)
# Server is closed
assert is_port_available(port)
with pytest.raises(requests.exceptions.ConnectionError):
requests.get(f"http://127.0.0.1:{port}")
| en | 0.962282 | # Duration to keep server alive for # Duration to wait before running test, to give server time to start up # Server is working # Server is closed | 2.558928 | 3 |
pkgconf.py | ArobasMusic/am-conan-qt | 0 | 6619753 | import os
version = "5.15.3"
# Package version is "<qt version>-<CI build number>"; defaults to build 0
# when BUILD_NUMBER is not set in the environment.
packageVersion = "{0}-{1}".format(version, os.getenv('BUILD_NUMBER', '0'))
| import os
version = "5.15.3"
packageVersion = "{}-{}".format(version, os.getenv('BUILD_NUMBER', '0'))
| none | 1 | 1.300752 | 1 | |
yam/indexation.py | Aleksandre/YAM | 0 | 6619754 | <gh_stars>0
"""
This module contains music indexation logic.
"""
import os
from profiling import profile
from mutagen import mutagen
from content import Track
import os.path
import time
import logging
import config
import cPickle as json
class MusicIndexer:
    """Recursively scan a folder for music files and extract their tags.

    Tags are read with mutagen.  Every time a file has been processed the
    ``progressCallback`` (if given) is called with the running count.
    """
    def __init__(self, pathToIndex = None, progressCallback = None):
        # Fall back to the configured library folder when no path is given.
        self.pathToIndex = pathToIndex or config.getProperty('music_library_folder')
        self.progressCallback = progressCallback
        self.supported_audio_extensions = [".mp3", ".flac"]
        self.supported_cover_extensions = [".png", ".bmp", ".jpeg", ".jpg"]
        self.likely_artcover_name = ["front", "cover", "art", "folder", "album", ".front", ".cover", ".art", ".folder", ".album"]
    @profile
    def run(self):
        """Index the configured folder.

        Returns a (tracks, result) tuple: ``tracks`` is the list of Track
        objects built from readable files, ``result`` is a Result summary
        that is also persisted to disk via _saveResult().
        """
        print "Starting indexation of folder: ", self.pathToIndex
        startTime = time.time()
        result = Result()
        #Scan configured directory for all music files
        musicFiles = []
        rootdir = self.pathToIndex
        for root, subFolders, files in os.walk(rootdir):
            for file in files:
                filename, ext = os.path.splitext(file)
                if ext in self.supported_audio_extensions:
                    musicFiles.append(os.path.join(root,file))
        #Try to extract audio metadata from files
        i = 0
        tracks = []
        unhandled_files = []
        result.totalFileCount = len(musicFiles)
        for audioFile in musicFiles:
            try:
                metadata = mutagen.File(audioFile.strip(), easy=True)
                if metadata :
                    track = self._indexTrack(metadata, audioFile)
                    if track:
                        result.numberOfIndexedFiles +=1;
                        tracks.append(track)
                    else :
                        unhandled_files.append(audioFile)
                else :
                    # mutagen could not recognise the file format.
                    unhandled_files.append(audioFile)
            except Exception as e:
                print e
                unhandled_files.append(audioFile)
            i = i + 1
            if self.progressCallback:
                self.progressCallback(i)
        result.numberOfFilesNotImported = len(unhandled_files)
        result.filesNotImported = unhandled_files
        # NOTE(review): despite the name this value is in seconds, not ms.
        result.processRunningTimeInMS = (time.time() - startTime)
        self._saveResult(result)
        # NOTE(review): unguarded call -- raises TypeError when no
        # progressCallback was supplied (the loop above does guard it).
        self.progressCallback(result.totalFileCount)
        return tracks, result
    def getNumberOfFilesToHandle(self):
        """Scan the configured path to get how many files will be handled
        when the indexer is actually ran with the current configuration.
        """
        count = 0
        rootdir = self.pathToIndex
        for root, subFolders, files in os.walk(rootdir):
            for file in files:
                filename, ext = os.path.splitext(file)
                if ext in self.supported_audio_extensions:
                    count = count + 1
        print "The music indexer will try to index ", str(count), " file(s)."
        return count
    def resetArt(self, tracks):
        """Clear the album-cover path of every track; returns the list."""
        for track in tracks:
            track.albumCoverPath = ""
        return tracks
    def _indexTrack(self, trackData, track_path):
        """Build a Track from mutagen tag data; best-effort on errors."""
        track = Track()
        try:
            # NOTE(review): `row` is undefined in this scope -- the fallback
            # branch for an empty title raises NameError instead of
            # producing 'No Title(...)'.
            if "title" in trackData: track.title = trackData["title"][0].title() or 'No Title({0})'.format(row + 1)
            if "artist" in trackData: track.artist = trackData["artist"][0].title() or 'Unknown Artist'
            if "album" in trackData: track.albumTitle = trackData["album"][0].title() or 'Unknown {0} Album'.format(track.artist)
            track.lengthMS = trackData.info.length
            if "tracknumber" in trackData:
                trackNum = trackData["tracknumber"][0]
                track.num = trackNum
            track.filePath = track_path
            track.albumCoverPath = self._getAlbumCover(os.path.dirname(track.filePath))
        except Exception as e:
            # Return the partially filled track rather than failing the run.
            print e
            return track
        return track
    def _getAlbumCover(self, albumRootDir):
        """Pick the most likely album-cover image inside albumRootDir."""
        coverPath = ""
        #For each file found in album folder
        for _file in os.listdir(albumRootDir):
            #Don't recurse. If it's a dir, skip to next file.
            # NOTE(review): isdir() gets the bare name, not the full path,
            # so this check only works when CWD == albumRootDir -- confirm.
            if os.path.isdir(_file):
                continue
            filename, ext = os.path.splitext(_file)
            #Is the file an image ?
            if ext.lower() in self.supported_cover_extensions:
                #It is
                coverPath = os.path.join(albumRootDir , _file)
                #Is the file name make it likely to be the cover ?
                if filename.lower() in self.likely_artcover_name:
                    #Yes, my work is done, perfect match, get out.
                    break
                else:
                    #The name is weird, keep it anyway in case no other
                    #image is found.
                    # NOTE(review): joins with `ext` (e.g. '.jpg'), not
                    # `_file` -- looks like a bug; confirm.
                    coverPath = os.path.join(albumRootDir,ext)
        return coverPath
    def _saveResult(self, result):
        """Persist the Result into the reports folder; True on success.

        Note: ``json`` is an alias for cPickle (see module imports), so
        the ``.txt`` file actually holds a pickle, not JSON.
        """
        reportFolder = config.getFullFileName("reports/")
        from datetime import datetime
        now = datetime.now()
        # The file name is the current timestamp, truncated to the minute.
        resultFilename = reportFolder + "{0}.txt".format(datetime(now.year, now.month, now.day, now.hour, now.minute))
        try:
            if not os.path.exists(reportFolder):
                os.makedirs(reportFolder)
        except IOError as e:
            print "Could not create directory to save results: {0}".format(reportFolder)
            return False
        print "Saving indexation result..."
        try:
            with open(resultFilename, 'wb') as f:
                json.dump(result, f)
            return True
        except IOError as e:
            logging.debug(e)
            print e
            return False
class Result:
    """Summary of one indexation run.

    Attributes:
        totalFileCount -- number of candidate music files found
        numberOfIndexedFiles -- files whose tags were read successfully
        numberOfFilesNotImported -- files skipped because of read errors
        processRunningTimeInMS -- wall-clock duration of the run
            (actually stored in seconds by MusicIndexer.run, despite the name)
        trackIndexFilename -- where the track index was stored, if any
        filesNotImported -- paths of the files that could not be imported
    """
    def __init__(self):
        self.totalFileCount = 0
        self.numberOfIndexedFiles = 0
        self.numberOfFilesNotImported = 0
        self.processRunningTimeInMS = 0
        self.trackIndexFilename = ""
        # BUG FIX: this used to be a *class* attribute, so every Result
        # instance shared (and mutated) the same list object.
        self.filesNotImported = []
@profile
def reIndexArt():
    """Recompute the album-cover path of every track in the saved index.

    Loads the persisted track list, re-runs cover detection on each
    track's folder and saves the updated list back.
    NOTE(review): assumes the process is started from the source folder,
    since the config folder is the relative path '../config/' -- confirm.
    """
    import content, config
    config.setConfigFolder('../config/')
    indexer = MusicIndexer()
    tracks = content.load()
    for track in tracks:
        track.albumCoverPath = indexer._getAlbumCover(os.path.dirname(track.filePath)).encode("utf-8")
    content.save(tracks)
def reIndex():
    """Re-run indexation (currently only re-indexes the album art)."""
    reIndexArt()
if __name__ == '__main__':
reIndexArt() | """
This module contains music indexation logic.
"""
import os
from profiling import profile
from mutagen import mutagen
from content import Track
import os.path
import time
import logging
import config
import cPickle as json
class MusicIndexer:
"""
This class will scan a folder recursievely looking for music files.
Each file tags are extracted using mutagen.
Each time a track is read, the progressCallback method is called
"""
def __init__(self, pathToIndex = None, progressCallback = None):
self.pathToIndex = pathToIndex or config.getProperty('music_library_folder')
self.progressCallback = progressCallback
self.supported_audio_extensions = [".mp3", ".flac"]
self.supported_cover_extensions = [".png", ".bmp", ".jpeg", ".jpg"]
self.likely_artcover_name = ["front", "cover", "art", "folder", "album", ".front", ".cover", ".art", ".folder", ".album"]
@profile
def run(self):
print "Starting indexation of folder: ", self.pathToIndex
startTime = time.time()
result = Result()
#Scan configured directory for all music files
musicFiles = []
rootdir = self.pathToIndex
for root, subFolders, files in os.walk(rootdir):
for file in files:
filename, ext = os.path.splitext(file)
if ext in self.supported_audio_extensions:
musicFiles.append(os.path.join(root,file))
#Try to extract audio metadata from files
i = 0
tracks = []
unhandled_files = []
result.totalFileCount = len(musicFiles)
for audioFile in musicFiles:
try:
metadata = mutagen.File(audioFile.strip(), easy=True)
if metadata :
track = self._indexTrack(metadata, audioFile)
if track:
result.numberOfIndexedFiles +=1;
tracks.append(track)
else :
unhandled_files.append(audioFile)
else :
unhandled_files.append(audioFile)
except Exception as e:
print e
unhandled_files.append(audioFile)
i = i + 1
if self.progressCallback:
self.progressCallback(i)
result.numberOfFilesNotImported = len(unhandled_files)
result.filesNotImported = unhandled_files
result.processRunningTimeInMS = (time.time() - startTime)
self._saveResult(result)
self.progressCallback(result.totalFileCount)
return tracks, result
def getNumberOfFilesToHandle(self):
"""Scan the configured path to get how many files will be handled
when the indexer is actually ran with the current configuration.
"""
count = 0
rootdir = self.pathToIndex
for root, subFolders, files in os.walk(rootdir):
for file in files:
filename, ext = os.path.splitext(file)
if ext in self.supported_audio_extensions:
count = count + 1
print "The music indexer will try to index ", str(count), " file(s)."
return count
def resetArt(self, tracks):
for track in tracks:
track.albumCoverPath = ""
return tracks
def _indexTrack(self, trackData, track_path):
track = Track()
try:
if "title" in trackData: track.title = trackData["title"][0].title() or 'No Title({0})'.format(row + 1)
if "artist" in trackData: track.artist = trackData["artist"][0].title() or 'Unknown Artist'
if "album" in trackData: track.albumTitle = trackData["album"][0].title() or 'Unknown {0} Album'.format(track.artist)
track.lengthMS = trackData.info.length
if "tracknumber" in trackData:
trackNum = trackData["tracknumber"][0]
track.num = trackNum
track.filePath = track_path
track.albumCoverPath = self._getAlbumCover(os.path.dirname(track.filePath))
except Exception as e:
print e
return track
return track
def _getAlbumCover(self, albumRootDir):
coverPath = ""
#For each file found in album folder
for _file in os.listdir(albumRootDir):
#Don't recurse. If it's a dir, skip to next file.
if os.path.isdir(_file):
continue
filename, ext = os.path.splitext(_file)
#Is the file an image ?
if ext.lower() in self.supported_cover_extensions:
#It is
coverPath = os.path.join(albumRootDir , _file)
#print coverPath
#Is the file name make it likely to be the cover ?
if filename.lower() in self.likely_artcover_name:
#Yes, my work is done, perfect match, get out.
break
else:
#The name is weird, keep it anyway in case no other
#image is found.
coverPath = os.path.join(albumRootDir,ext)
return coverPath
def _saveResult(self, result):
reportFolder = config.getFullFileName("reports/")
from datetime import datetime
now = datetime.now()
resultFilename = reportFolder + "{0}.txt".format(datetime(now.year, now.month, now.day, now.hour, now.minute))
try:
if not os.path.exists(reportFolder):
os.makedirs(reportFolder)
except IOError as e:
print "Could not create directory to save results: {0}".format(reportFolder)
return False
print "Saving indexation result..."
try:
with open(resultFilename, 'wb') as f:
json.dump(result, f)
return True
except IOError as e:
logging.debug(e)
print e
return False
class Result:
totalFileCount = 0
numberOfIndexedFiles = 0
numberOfFilesNotImported = 0
processRunningTimeInMS = 0
trackIndexFilename = ""
filesNotImported = []
def __init__(self):
pass
@profile
def reIndexArt():
import content, config
config.setConfigFolder('../config/')
indexer = MusicIndexer()
tracks = content.load()
for track in tracks:
track.albumCoverPath = indexer._getAlbumCover(os.path.dirname(track.filePath)).encode("utf-8")
content.save(tracks)
def reIndex():
reIndexArt()
if __name__ == '__main__':
reIndexArt() | en | 0.913119 | This module contains music indexation logic. This class will scan a folder recursievely looking for music files. Each file tags are extracted using mutagen. Each time a track is read, the progressCallback method is called #Scan configured directory for all music files #Try to extract audio metadata from files Scan the configured path to get how many files will be handled when the indexer is actually ran with the current configuration. #For each file found in album folder #Don't recurse. If it's a dir, skip to next file. #Is the file an image ? #It is #print coverPath #Is the file name make it likely to be the cover ? #Yes, my work is done, perfect match, get out. #The name is weird, keep it anyway in case no other #image is found. | 2.739632 | 3 |
lambda/s3object_custom_resource.py | aws-samples/iam-roles-pipeline | 4 | 6619755 | <gh_stars>1-10
from urllib.request import build_opener, HTTPHandler, Request
import boto3
import json
from zipfile import ZipFile
s3_client = boto3.client('s3')
def handler(event, context):
    """
    Lambda entry point.

    CloudFormation Custom Resource that uploads a file to a S3 bucket.

    Custom Resource parameters:
        Target: # Target bucket where a object will be uploaded
            Bucket:
            Key: cloudformation/roles-template.zip
        ZipBody: # Whether the file content should be zipped
        Body: # File content that will be uploaded
    """
    print('Received request:', json.dumps(event, indent=4))
    request = event['RequestType']
    properties = event['ResourceProperties']
    # Both 'Target' and 'Body' are mandatory; fail the resource early.
    if not {'Target', 'Body'}.issubset(properties.keys()):
        return send_response(event, context, 'FAILED', 'Missing required parameters')
    target = properties['Target']
    try:
        if request in {'Create', 'Update'}:
            if 'Body' in properties:
                target['Body'] = properties['Body']
                if 'ZipBody' in properties.keys():
                    # Zip the body; inside the archive the entry keeps the
                    # target key's name with a .yml extension.
                    print('Zip Body before put into S3')
                    with ZipFile('/tmp/body.zip','w') as zip:
                        zip.writestr(target['Key'].replace('.zip', '.yml'), properties['Body'])
                    with open('/tmp/body.zip', 'rb') as zipfile:
                        target['Body'] = zipfile.read()
                # `target` doubles as the put_object kwargs (Bucket/Key/Body).
                s3_client.put_object(**target)
            else:
                # NOTE(review): unreachable -- 'Body' membership was already
                # enforced by the issubset() check above.
                return send_response(event, context, 'FAILED', 'Malformed body')
            return send_response(event, context, 'SUCCESS', 'Created')
        if request == 'Delete':
            s3_client.delete_object(
                Bucket=target['Bucket'],
                Key=target['Key'],
            )
            return send_response(event, context, 'SUCCESS', 'Deleted')
    except Exception as ex:
        # Any failure must still be reported back to CloudFormation,
        # otherwise the stack operation hangs until it times out.
        return send_response(event, context, 'FAILED', str(ex.args))
    return send_response(event, context, 'FAILED', f'Unexpected: {request}')
def send_response(event, context, status, message):
    """Report success/failure of the upload back to CloudFormation.

    Builds the custom-resource response document and prepares a PUT
    request to the pre-signed URL CloudFormation supplied in the event.
    """
    target = event['ResourceProperties'].get('Target', {})
    bucket = target.get('Bucket')
    key = target.get('Key')
    payload = {
        'Status': status,
        'Reason': message,
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        'PhysicalResourceId': f's3://{bucket}/{key}',
        'Data': {
            'Bucket': bucket,
            'Key': key,
        },
    }
    body = json.dumps(payload)
    request = Request(event['ResponseURL'], data=body.encode('utf-8'), method='PUT')
    # Content-Type is deliberately empty -- presumably to match the
    # pre-signed URL's signature (cfn-response convention); keep as-is.
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(body))
    opener = build_opener(HTTPHandler)
opener.open(request) | from urllib.request import build_opener, HTTPHandler, Request
import boto3
import json
from zipfile import ZipFile
s3_client = boto3.client('s3')
def handler(event, context):
"""
Lambda entry point.
CloudFormation Custom Resource that uploads a file to a S3 bucket.
Custom Resource parameters:
Target: # Target bucket where a object will be uploaded
Bucket:
Key: cloudformation/roles-template.zip
ZipBody: # Wheather the file content should be zipped
Body: # File content that will be uploaded
"""
print('Received request:', json.dumps(event, indent=4))
request = event['RequestType']
properties = event['ResourceProperties']
if not {'Target', 'Body'}.issubset(properties.keys()):
return send_response(event, context, 'FAILED', 'Missing required parameters')
target = properties['Target']
try:
if request in {'Create', 'Update'}:
if 'Body' in properties:
target['Body'] = properties['Body']
if 'ZipBody' in properties.keys():
print('Zip Body before put into S3')
with ZipFile('/tmp/body.zip','w') as zip:
zip.writestr(target['Key'].replace('.zip', '.yml'), properties['Body'])
with open('/tmp/body.zip', 'rb') as zipfile:
target['Body'] = zipfile.read()
s3_client.put_object(**target)
else:
return send_response(event, context, 'FAILED', 'Malformed body')
return send_response(event, context, 'SUCCESS', 'Created')
if request == 'Delete':
s3_client.delete_object(
Bucket=target['Bucket'],
Key=target['Key'],
)
return send_response(event, context, 'SUCCESS', 'Deleted')
except Exception as ex:
return send_response(event, context, 'FAILED', str(ex.args))
return send_response(event, context, 'FAILED', f'Unexpected: {request}')
def send_response(event, context, status, message):
"""
Sends a response to CloudFormation service, confirming whether the file upload works or not.
"""
bucket = event['ResourceProperties'].get('Target', {}).get('Bucket')
key = event['ResourceProperties'].get('Target', {}).get('Key')
body = json.dumps(
{
'Status': status,
'Reason': message,
'StackId': event['StackId'],
'RequestId': event['RequestId'],
'LogicalResourceId': event['LogicalResourceId'],
'PhysicalResourceId': f's3://{bucket}/{key}',
'Data': {
'Bucket': bucket,
'Key': key,
},
}
)
request = Request(event['ResponseURL'], data=body.encode('utf-8'), method='PUT')
request.add_header('Content-Type', '')
request.add_header('Content-Length', len(body))
opener = build_opener(HTTPHandler)
opener.open(request) | en | 0.846536 | Lambda entry point. CloudFormation Custom Resource that uploads a file to a S3 bucket. Custom Resource parameters: Target: # Target bucket where a object will be uploaded Bucket: Key: cloudformation/roles-template.zip ZipBody: # Wheather the file content should be zipped Body: # File content that will be uploaded Sends a response to CloudFormation service, confirming whether the file upload works or not. | 2.52858 | 3 |
src/abnfearley/grammar.py | HeptaSean/ABNFEarley | 0 | 6619756 | """Structure of ABNFEarley grammars.
The following classes are provided to define the structure of and
programmatically create Grammars:
Grammar -- whole grammar consists of rules with names as left-hand sides
and GrammarElements as right-hand side
GrammarElement is the abstract base class for all of these:
Alternation -- contains alternative GrammarElements
Concatenation -- contains concatenated GrammarElements
Repetition -- contains repeated GrammarElement with lower and upper
bound
LiteralString -- matches literal string to input
LiteralRange -- matches range of literal bytes to input
RuleCall -- call of grammar rule from within right-hand side
"""
import os
from abc import ABCMeta, abstractmethod
from typing import Optional, Union, Sequence, Mapping, Iterator
class Grammar(Mapping[str, 'GrammarElement']):
    """Whole grammar consisting of named rules.

    A grammar consists of a mapping from names to GrammarElement
    instances (which can be arbitrarily nested Alternation,
    Concatenation, Repetition, literal and RuleCall instances).
    """

    def __init__(self, name: str,
                 rules: Mapping[str, 'GrammarElement'],
                 imports: Optional[Sequence['Grammar']] = None) -> None:
        """Initialise with mapping from rule names to right-hand sides.

        Arguments:
        name -- name of the grammar to identify it to the user
        rules -- mapping from rule names to right-hand sides
        imports -- sequence of other grammars used in this one

        Note: While mutable mappings and sequences could be given as
        arguments, we assume that they are not mutated after
        initialisation.
        """
        self._name = name
        self._rules = rules
        if imports is not None:
            self._imports = imports
        else:
            self._imports = []
        # Tell each right-hand side which rule and grammar own it; for
        # top-level elements the grammar itself acts as the parent.
        for rule, rhs in self._rules.items():
            rhs.register(self, rule, self)

    @property
    def name(self) -> str:
        """Get name of grammar."""
        return self._name

    @property
    def rules(self) -> Mapping[str, 'GrammarElement']:
        """Get rules of grammar."""
        return self._rules

    @property
    def imports(self) -> Sequence['Grammar']:
        """Get imported grammars."""
        return self._imports

    def __getitem__(self, rule: str) -> 'GrammarElement':
        """Get rule from grammar or its imports.

        Own rules shadow imported ones; imports are searched in order.
        """
        if rule in self._rules:
            return self._rules[rule]
        for grammar in self._imports:
            if rule in grammar:
                return grammar[rule]
        raise KeyError("Rule '{}' not defined in grammar '{}'.".format(
            rule, self._name))

    def __iter__(self) -> Iterator[str]:
        """Iterate over rules and imported rules.

        NOTE(review): a rule name defined both here and in an import is
        yielded twice (no deduplication) -- confirm intended.
        """
        for rule in self._rules:
            yield rule
        for grammar in self._imports:
            for rule in grammar:
                yield rule

    def __len__(self) -> int:
        """Total number of rules in grammar and imports."""
        # Shadowed names are counted twice, consistent with __iter__.
        length = len(self._rules)
        for grammar in self._imports:
            length += len(grammar)
        return length

    def __eq__(self, other: object) -> bool:
        """Recursively check structural equality."""
        if not isinstance(other, Grammar):
            return False
        return (self.name == other.name and
                self.rules == other.rules and
                self.imports == other.imports)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation.

        `indent` is the column offset used when a parent's __repr__
        nests this one.
        """
        result = indent * ' '
        result += 'abnfearley.Grammar({!r}, '.format(self._name)
        result += 'collections.OrderedDict(['
        first = True
        for rule, rhs in self._rules.items():
            if first:
                first = False
            else:
                result += ','
            result += os.linesep
            result += indent * ' '
            result += ' ({!r},'.format(rule) + os.linesep
            result += rhs.__repr__(indent + 5)
            result += ')'
        result += ']), ['
        first = True
        for grammar in self._imports:
            if first:
                first = False
            else:
                result += ','
            result += os.linesep
            result += grammar.__repr__(indent + 4)
        result += '])'
        return result

    def __str__(self) -> str:
        """Get ABNF representation."""
        result = '; ===== Grammar {!s} ====='.format(self._name)
        if self._imports:
            result += os.linesep
            result += '; uses rules from '
            import_names = [g.name for g in self._imports]
            result += ', '.join(import_names)
        for rule, rhs in self._rules.items():
            result += os.linesep
            result += '{!s} = {!s}'.format(rule, rhs)
        return result
class GrammarElement(metaclass=ABCMeta):
    """Abstract base class for all kinds of grammar elements.

    Elements are created free-standing and later bound to a grammar via
    register(); parent/rule/grammar remain None/'' until then.
    """

    def __init__(self) -> None:
        """Trivially initialise GrammarElement."""
        self._parent: Union['GrammarElement', Grammar, None] = None
        self._rule = ''
        self._grammar: Union[Grammar, None] = None

    def _location(self,
                  parent: Union['GrammarElement', Grammar, None] = None,
                  rule: str = '',
                  grammar: Union[Grammar, None] = None) -> str:
        """Describe this element's position for error messages.

        Falls back to the registered parent/rule/grammar when no
        explicit arguments are given.
        """
        # hasattr guards: presumably defensive against subclasses that do
        # not call super().__init__() -- see the property accessors below.
        if parent is None and hasattr(self, '_parent'):
            parent = self._parent
        if not rule and hasattr(self, '_rule'):
            rule = self._rule
        if grammar is None and hasattr(self, '_grammar'):
            grammar = self._grammar
        if isinstance(parent, Grammar):
            return "rule '{}' in grammar '{}'".format(rule, parent.name)
        elif isinstance(grammar, Grammar):
            return "{} in rule '{}' in grammar '{}'".format(
                type(parent), rule, grammar.name)
        return "unknown location"

    def register(self, parent: Union['GrammarElement', Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
                  (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
                   (can be same as parent for direct right-hand sides)

        Raises:
        ValueError -- if the element was already registered elsewhere
            (one element instance must not appear in two places).

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        if self._parent is not None:
            raise ValueError(
                '{} registered at {} is already registered at {}.'.format(
                    type(self),
                    self._location(parent, rule, grammar),
                    self._location()))
        self._parent = parent
        self._rule = rule
        self._grammar = grammar

    @property
    def parent(self) -> Union['GrammarElement', Grammar, None]:
        """Get parent element (None if not registered yet)."""
        if not hasattr(self, '_parent'):
            return None
        return self._parent

    @property
    def rule(self) -> str:
        """Get name of rule containing element ('' if unregistered)."""
        if not hasattr(self, '_rule'):
            return ''
        return self._rule

    @property
    def grammar(self) -> Union[Grammar, None]:
        """Get grammar containing element (None if unregistered)."""
        if not hasattr(self, '_grammar'):
            return None
        return self._grammar

    @abstractmethod
    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        raise NotImplementedError

    @abstractmethod
    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        raise NotImplementedError

    @abstractmethod
    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        raise NotImplementedError
class Alternation(GrammarElement, Sequence[GrammarElement]):
    """Alternation between GrammarElement instances.

    At least one of the alternatives has to match the input for the
    Alternation to match.
    """

    def __init__(self, elements: Sequence[GrammarElement]) -> None:
        """Initialise with sequence of alternatives.

        Argument:
        elements -- the alternatives

        Note: While a mutable sequence could be given as argument, we
        assume that it is not mutated after initialisation.
        """
        super().__init__()
        self._elements = elements

    def __getitem__(self, key):  # type: ignore
        """Get alternative(s) at index or slice key."""
        return self._elements[key]  # type: ignore

    def __len__(self) -> int:
        """Get number of alternatives."""
        return len(self._elements)

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        # Recursively register all alternatives below this node.
        for alternative in self._elements:
            alternative.register(self, rule, grammar)

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, Alternation):
            return False
        return (len(self) == len(other) and
                all(mine == theirs for mine, theirs in zip(self, other)))

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        parts = [os.linesep + alternative.__repr__(indent + 4)
                 for alternative in self._elements]
        return (indent * ' ' + 'abnfearley.Alternation([' +
                ','.join(parts) + '])')

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        if not self._elements:
            return '()'
        if len(self._elements) == 1:
            # A single alternative needs no ' / ' separators.
            return self._elements[0].__str__(needs_parens)
        joined = ' / '.join(alternative.__str__(True)
                            for alternative in self._elements)
        return '(' + joined + ')' if needs_parens else joined
class Concatenation(GrammarElement, Sequence[GrammarElement]):
    """Concatenation of GrammarElements instances.

    All contained elements have to match the input in order for the
    Concatenation to match.
    """

    def __init__(self, elements: Sequence[GrammarElement]) -> None:
        """Initialise with sequence of concatenated elements.

        Argument:
        elements -- the concatenated elements

        Note: While a mutable sequence could be given as argument, we
        assume that it is not mutated after initialisation.
        """
        super().__init__()
        self._elements = elements

    def __getitem__(self, key):  # type: ignore
        """Get concatenated element(s) at index or slice key."""
        return self._elements[key]  # type: ignore

    def __len__(self) -> int:
        """Get length of concatenation."""
        return len(self._elements)

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        # Recursively register every member of the concatenation.
        for member in self._elements:
            member.register(self, rule, grammar)

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, Concatenation):
            return False
        return (len(self) == len(other) and
                all(mine == theirs for mine, theirs in zip(self, other)))

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        parts = [os.linesep + member.__repr__(indent + 4)
                 for member in self._elements]
        return (indent * ' ' + 'abnfearley.Concatenation([' +
                ','.join(parts) + '])')

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        if not self._elements:
            return '()'
        if len(self._elements) == 1:
            return self._elements[0].__str__(needs_parens)
        joined = ' '.join(member.__str__(True)
                          for member in self._elements)
        return '(' + joined + ')' if needs_parens else joined
class Repetition(GrammarElement):
    """Repetition of a GrammarElement instance.

    The contained element has to match at least lower times and at most
    upper times for the Repetition to match. If upper is None, it can
    match arbitrarily often.
    """

    def __init__(self, element: GrammarElement,
                 lower: int = 0, upper: Optional[int] = None) -> None:
        """Initialise with repeated element and optionally bounds.

        Arguments:
        element -- the repeated element
        lower -- lower bound (defaults to 0)
        upper -- upper bound (defaults to unbound, represented by None)
        """
        super().__init__()
        self._element = element
        self._lower = lower
        self._upper = upper

    @property
    def element(self) -> GrammarElement:
        """Get repeated element."""
        return self._element

    @property
    def lower(self) -> int:
        """Get lower bound."""
        return self._lower

    @property
    def upper(self) -> Optional[int]:
        """Get upper bound."""
        return self._upper

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        self._element.register(self, rule, grammar)

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        return (isinstance(other, Repetition) and
                self.element == other.element and
                self.lower == other.lower and
                self.upper == other.upper)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        bounds_pad = (indent + 4) * ' '
        return (indent * ' ' + 'abnfearley.Repetition(' + os.linesep +
                self._element.__repr__(indent + 4) +
                ',' + os.linesep + bounds_pad +
                repr(self._lower) + ', ' + repr(self._upper) + ')')

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        lower, upper = self._lower, self._upper
        if lower == upper:
            if lower == 1:
                # Exactly once: no repeat prefix needed.
                return self._element.__str__(needs_parens)
            if lower > 1:
                # Fixed count, e.g. '3element'.
                return str(lower) + self._element.__str__(True)
            return '()'
        if upper is None:
            # Unbounded above: '*element' or 'n*element'.
            if lower == 0:
                return '*' + self._element.__str__(True)
            return str(lower) + '*' + self._element.__str__(True)
        if upper == 1:
            # lower must be 0 here: optional element.
            return '[' + self._element.__str__(False) + ']'
        return (str(lower) + '*' + str(upper) +
                self._element.__str__(True))
class LiteralString(GrammarElement):
    """Terminal literal string of grammar.

    The string is matched verbatim against the input.
    """

    def __init__(self, string: bytes, case_sensitive: bool = True) -> None:
        """Initialise with string to accept.

        Arguments:
        string -- the string of bytes to match
        case_sensitive -- whether the string should match case-sensitive
        """
        super().__init__()
        self._string = string
        self._case_sensitive = case_sensitive

    @property
    def string(self) -> bytes:
        """Get matched string of bytes."""
        return self._string

    @property
    def case_sensitive(self) -> bool:
        """Get case-sensitivity."""
        return self._case_sensitive

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, LiteralString):
            return False
        return (self.string == other.string and
                self.case_sensitive == other.case_sensitive)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        result = indent * ' '
        result += 'abnfearley.LiteralString('
        result += repr(self._string)
        if not self._case_sensitive:
            result += ', False'
        result += ')'
        return result

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation.

        Printable bytes (other than the double quote) are grouped into
        quoted char-vals ('%s"..."' when case-sensitive); all other
        bytes are grouped into hex-vals ('%xHH.HH...').
        """
        result = ''
        components = 0
        in_char_val = False
        in_hex_val = False
        for byte in self._string:
            if byte >= 0x20 and byte <= 0x7E and byte != 0x22:
                # Printable, non-quote byte: open/continue a char-val.
                if in_hex_val:
                    result += ' '
                    in_hex_val = False
                if not in_char_val:
                    if self._case_sensitive:
                        result += '%s'
                    result += '"'
                    in_char_val = True
                    components += 1
                result += chr(byte)
            else:
                # Unprintable byte or '"': open/continue a hex-val.
                if in_char_val:
                    result += '" '
                    in_char_val = False
                if in_hex_val:
                    result += '.'
                else:
                    result += '%x'
                    in_hex_val = True
                    components += 1
                # Zero-pad to two hex digits: '{0:2X}' would space-pad
                # bytes below 0x10 (e.g. '%x A'), which is invalid ABNF.
                result += '{0:02X}'.format(byte)
        if in_char_val:
            result += '"'
            in_char_val = False
        if needs_parens and components != 1:
            result = '(' + result + ')'
        return result
class LiteralRange(GrammarElement):
    """Range of terminal literal bytes.

    The current byte of the string is matched against a range of bytes.
    """

    def __init__(self, first: int, last: int) -> None:
        """Initialise with first and last byte of range.

        Arguments:
        first -- code of the first byte of the matched range
        last -- code of the last byte of the matched range
        """
        super().__init__()
        self._first = first
        self._last = last

    @property
    def first(self) -> int:
        """Get code of first byte of matched range."""
        return self._first

    @property
    def last(self) -> int:
        """Get code of last byte of matched range."""
        return self._last

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, LiteralRange):
            return False
        return (self.first == other.first and
                self.last == other.last)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        result = indent * ' '
        result += 'abnfearley.LiteralRange('
        result += repr(self._first)
        result += ', '
        result += repr(self._last)
        result += ')'
        return result

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation.

        Emitted as a hex-val range '%xHH-HH'. Zero-padding is required:
        '{0:2X}' would space-pad values below 0x10 (e.g. '%x A-7F'),
        which is not valid ABNF.
        """
        return '%x{0:02X}-{1:02X}'.format(self._first, self._last)
class RuleCall(GrammarElement):
    """Call to rule of the Grammar.

    The called rule has to match the input for the RuleCall to match.
    """

    def __init__(self, call: str) -> None:
        """Initialise with name of called rule.

        Arguments:
        call -- the called rule
        """
        super().__init__()
        self._call = call

    @property
    def call(self) -> str:
        """Get called rule."""
        return self._call

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        # Validate the reference eagerly so dangling rule calls surface
        # at grammar-construction time, not during parsing.
        if self._call not in grammar:
            raise ValueError(
                "Called rule '{}' not defined in grammar {}.".format(
                    self._call, grammar.name))

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        return isinstance(other, RuleCall) and self.call == other.call

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        return indent * ' ' + 'abnfearley.RuleCall({!r})'.format(self._call)

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        return self._call
"""Structure of ABNFEarley grammars.
The following classes are provided to define the structure of and
programmatically create Grammars:
Grammar -- whole grammar consists of rules with names as left-hand sides
and GrammarElements as right-hand side
GrammarElement is the abstract base class for all of these:
Alternation -- contains alternative GrammarElements
Concatenation -- contains concatenated GrammarElements
Repetition -- contains repeated GrammarElement with lower and upper
bound
LiteralString -- matches literal string to input
LiteralRange -- matches range of literal bytes to input
RuleCall -- call of grammar rule from within right-hand side
"""
import os
from abc import ABCMeta, abstractmethod
from typing import Optional, Union, Sequence, Mapping, Iterator
class Grammar(Mapping[str, 'GrammarElement']):
    """Whole grammar consisting of named rules.

    A grammar consists of a mapping from names to GrammarElement
    instances (which can be arbitrarily nested Alternation,
    Concatenation, Repetition, literal and RuleCall instances).
    """

    def __init__(self, name: str,
                 rules: Mapping[str, 'GrammarElement'],
                 imports: Optional[Sequence['Grammar']] = None) -> None:
        """Initialise with mapping from rule names to right-hand sides.

        Arguments:
        name -- name of the grammar to identify it to the user
        rules -- mapping from rule names to right-hand sides
        imports -- sequence of other grammars used in this one

        Note: While mutable mappings and sequences could be given as
        arguments, we assume that they are not mutated after
        initialisation.
        """
        self._name = name
        self._rules = rules
        self._imports = imports if imports is not None else []
        # Wire every right-hand side back to this grammar.
        for rule, rhs in self._rules.items():
            rhs.register(self, rule, self)

    @property
    def name(self) -> str:
        """Get name of grammar."""
        return self._name

    @property
    def rules(self) -> Mapping[str, 'GrammarElement']:
        """Get rules of grammar."""
        return self._rules

    @property
    def imports(self) -> Sequence['Grammar']:
        """Get imported grammars."""
        return self._imports

    def __getitem__(self, rule: str) -> 'GrammarElement':
        """Get rule from grammar or its imports."""
        # Own rules shadow imported ones.
        if rule in self._rules:
            return self._rules[rule]
        for imported in self._imports:
            if rule in imported:
                return imported[rule]
        raise KeyError("Rule '{}' not defined in grammar '{}'.".format(
            rule, self._name))

    def __iter__(self) -> Iterator[str]:
        """Iterate over rules and imported rules."""
        yield from self._rules
        for imported in self._imports:
            yield from imported

    def __len__(self) -> int:
        """Total number of rules in grammar and imports."""
        return len(self._rules) + sum(len(g) for g in self._imports)

    def __eq__(self, other: object) -> bool:
        """Recursively check structural equality."""
        return (isinstance(other, Grammar) and
                self.name == other.name and
                self.rules == other.rules and
                self.imports == other.imports)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        pad = indent * ' '
        # Each rule renders as "(name, <rhs repr>)" on its own lines;
        # entries are separated by commas, every entry starts on a new line.
        rule_parts = [
            os.linesep + pad + ' ({!r},'.format(rule) + os.linesep +
            rhs.__repr__(indent + 5) + ')'
            for rule, rhs in self._rules.items()]
        import_parts = [os.linesep + grammar.__repr__(indent + 4)
                        for grammar in self._imports]
        return (pad +
                'abnfearley.Grammar({!r}, '.format(self._name) +
                'collections.OrderedDict([' +
                ','.join(rule_parts) +
                ']), [' +
                ','.join(import_parts) +
                '])')

    def __str__(self) -> str:
        """Get ABNF representation."""
        lines = ['; ===== Grammar {!s} ====='.format(self._name)]
        if self._imports:
            # Document imported grammars in an ABNF comment line.
            lines.append('; uses rules from ' +
                         ', '.join(g.name for g in self._imports))
        for rule, rhs in self._rules.items():
            lines.append('{!s} = {!s}'.format(rule, rhs))
        return os.linesep.join(lines)
class GrammarElement(metaclass=ABCMeta):
    """Abstract base class for all kinds of grammar elements."""

    def __init__(self) -> None:
        """Trivially initialise GrammarElement."""
        self._parent: Union['GrammarElement', Grammar, None] = None
        self._rule = ''
        self._grammar: Union[Grammar, None] = None

    def _location(self,
                  parent: Union['GrammarElement', Grammar, None] = None,
                  rule: str = '',
                  grammar: Union[Grammar, None] = None) -> str:
        """Describe where this element lives, for error messages."""
        # Arguments left unset fall back to the registered state.
        if parent is None:
            parent = getattr(self, '_parent', None)
        if not rule:
            rule = getattr(self, '_rule', '')
        if grammar is None:
            grammar = getattr(self, '_grammar', None)
        if isinstance(parent, Grammar):
            # Direct right-hand side of a rule.
            return "rule '{}' in grammar '{}'".format(rule, parent.name)
        if isinstance(grammar, Grammar):
            # Nested somewhere inside a rule's right-hand side.
            return "{} in rule '{}' in grammar '{}'".format(
                type(parent), rule, grammar.name)
        return "unknown location"

    def register(self, parent: Union['GrammarElement', Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        if self._parent is not None:
            # Each element instance may appear in exactly one place.
            raise ValueError(
                '{} registered at {} is already registered at {}.'.format(
                    type(self),
                    self._location(parent, rule, grammar),
                    self._location()))
        self._parent = parent
        self._rule = rule
        self._grammar = grammar

    @property
    def parent(self) -> Union['GrammarElement', Grammar, None]:
        """Get parent element."""
        return getattr(self, '_parent', None)

    @property
    def rule(self) -> str:
        """Get name of rule containing element."""
        return getattr(self, '_rule', '')

    @property
    def grammar(self) -> Union[Grammar, None]:
        """Get grammar containing element."""
        return getattr(self, '_grammar', None)

    @abstractmethod
    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        raise NotImplementedError

    @abstractmethod
    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        raise NotImplementedError

    @abstractmethod
    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        raise NotImplementedError
class Alternation(GrammarElement, Sequence[GrammarElement]):
    """Alternation between GrammarElement instances.

    At least one of the alternatives has to match the input for the
    Alternation to match.
    """

    def __init__(self, elements: Sequence[GrammarElement]) -> None:
        """Initialise with sequence of alternatives.

        Argument:
        elements -- the alternatives

        Note: While a mutable sequence could be given as argument, we
        assume that it is not mutated after initialisation.
        """
        super().__init__()
        self._elements = elements

    def __getitem__(self, key):  # type: ignore
        """Get alternative(s) at index or slice key."""
        return self._elements[key]  # type: ignore

    def __len__(self) -> int:
        """Get number of alternatives."""
        return len(self._elements)

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        # Recursively register all alternatives below this node.
        for alternative in self._elements:
            alternative.register(self, rule, grammar)

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, Alternation):
            return False
        return (len(self) == len(other) and
                all(mine == theirs for mine, theirs in zip(self, other)))

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        parts = [os.linesep + alternative.__repr__(indent + 4)
                 for alternative in self._elements]
        return (indent * ' ' + 'abnfearley.Alternation([' +
                ','.join(parts) + '])')

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        if not self._elements:
            return '()'
        if len(self._elements) == 1:
            # A single alternative needs no ' / ' separators.
            return self._elements[0].__str__(needs_parens)
        joined = ' / '.join(alternative.__str__(True)
                            for alternative in self._elements)
        return '(' + joined + ')' if needs_parens else joined
class Concatenation(GrammarElement, Sequence[GrammarElement]):
    """Concatenation of GrammarElements instances.

    All contained elements have to match the input in order for the
    Concatenation to match.
    """

    def __init__(self, elements: Sequence[GrammarElement]) -> None:
        """Initialise with sequence of concatenated elements.

        Argument:
        elements -- the concatenated elements

        Note: While a mutable sequence could be given as argument, we
        assume that it is not mutated after initialisation.
        """
        super().__init__()
        self._elements = elements

    def __getitem__(self, key):  # type: ignore
        """Get concatenated element(s) at index or slice key."""
        return self._elements[key]  # type: ignore

    def __len__(self) -> int:
        """Get length of concatenation."""
        return len(self._elements)

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        # Recursively register every member of the concatenation.
        for member in self._elements:
            member.register(self, rule, grammar)

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, Concatenation):
            return False
        return (len(self) == len(other) and
                all(mine == theirs for mine, theirs in zip(self, other)))

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        parts = [os.linesep + member.__repr__(indent + 4)
                 for member in self._elements]
        return (indent * ' ' + 'abnfearley.Concatenation([' +
                ','.join(parts) + '])')

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        if not self._elements:
            return '()'
        if len(self._elements) == 1:
            return self._elements[0].__str__(needs_parens)
        joined = ' '.join(member.__str__(True)
                          for member in self._elements)
        return '(' + joined + ')' if needs_parens else joined
class Repetition(GrammarElement):
    """Repetition of a GrammarElement instance.

    The contained element has to match at least lower times and at most
    upper times for the Repetition to match. If upper is None, it can
    match arbitrarily often.
    """

    def __init__(self, element: GrammarElement,
                 lower: int = 0, upper: Optional[int] = None) -> None:
        """Initialise with repeated element and optionally bounds.

        Arguments:
        element -- the repeated element
        lower -- lower bound (defaults to 0)
        upper -- upper bound (defaults to unbound, represented by None)
        """
        super().__init__()
        self._element = element
        self._lower = lower
        self._upper = upper

    @property
    def element(self) -> GrammarElement:
        """Get repeated element."""
        return self._element

    @property
    def lower(self) -> int:
        """Get lower bound."""
        return self._lower

    @property
    def upper(self) -> Optional[int]:
        """Get upper bound."""
        return self._upper

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        self._element.register(self, rule, grammar)

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        return (isinstance(other, Repetition) and
                self.element == other.element and
                self.lower == other.lower and
                self.upper == other.upper)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        bounds_pad = (indent + 4) * ' '
        return (indent * ' ' + 'abnfearley.Repetition(' + os.linesep +
                self._element.__repr__(indent + 4) +
                ',' + os.linesep + bounds_pad +
                repr(self._lower) + ', ' + repr(self._upper) + ')')

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        lower, upper = self._lower, self._upper
        if lower == upper:
            if lower == 1:
                # Exactly once: no repeat prefix needed.
                return self._element.__str__(needs_parens)
            if lower > 1:
                # Fixed count, e.g. '3element'.
                return str(lower) + self._element.__str__(True)
            return '()'
        if upper is None:
            # Unbounded above: '*element' or 'n*element'.
            if lower == 0:
                return '*' + self._element.__str__(True)
            return str(lower) + '*' + self._element.__str__(True)
        if upper == 1:
            # lower must be 0 here: optional element.
            return '[' + self._element.__str__(False) + ']'
        return (str(lower) + '*' + str(upper) +
                self._element.__str__(True))
class LiteralString(GrammarElement):
    """Terminal literal string of grammar.

    The string is matched verbatim against the input.
    """

    def __init__(self, string: bytes, case_sensitive: bool = True) -> None:
        """Initialise with string to accept.

        Arguments:
        string -- the string of bytes to match
        case_sensitive -- whether the string should match case-sensitive
        """
        super().__init__()
        self._string = string
        self._case_sensitive = case_sensitive

    @property
    def string(self) -> bytes:
        """Get matched string of bytes."""
        return self._string

    @property
    def case_sensitive(self) -> bool:
        """Get case-sensitivity."""
        return self._case_sensitive

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, LiteralString):
            return False
        return (self.string == other.string and
                self.case_sensitive == other.case_sensitive)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        result = indent * ' '
        result += 'abnfearley.LiteralString('
        result += repr(self._string)
        if not self._case_sensitive:
            result += ', False'
        result += ')'
        return result

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation.

        Printable bytes (other than the double quote) are grouped into
        quoted char-vals ('%s"..."' when case-sensitive); all other
        bytes are grouped into hex-vals ('%xHH.HH...').
        """
        result = ''
        components = 0
        in_char_val = False
        in_hex_val = False
        for byte in self._string:
            if byte >= 0x20 and byte <= 0x7E and byte != 0x22:
                # Printable, non-quote byte: open/continue a char-val.
                if in_hex_val:
                    result += ' '
                    in_hex_val = False
                if not in_char_val:
                    if self._case_sensitive:
                        result += '%s'
                    result += '"'
                    in_char_val = True
                    components += 1
                result += chr(byte)
            else:
                # Unprintable byte or '"': open/continue a hex-val.
                if in_char_val:
                    result += '" '
                    in_char_val = False
                if in_hex_val:
                    result += '.'
                else:
                    result += '%x'
                    in_hex_val = True
                    components += 1
                # Zero-pad to two hex digits: '{0:2X}' would space-pad
                # bytes below 0x10 (e.g. '%x A'), which is invalid ABNF.
                result += '{0:02X}'.format(byte)
        if in_char_val:
            result += '"'
            in_char_val = False
        if needs_parens and components != 1:
            result = '(' + result + ')'
        return result
class LiteralRange(GrammarElement):
    """Range of terminal literal bytes.

    The current byte of the string is matched against a range of bytes.
    """

    def __init__(self, first: int, last: int) -> None:
        """Initialise with first and last byte of range.

        Arguments:
        first -- code of the first byte of the matched range
        last -- code of the last byte of the matched range
        """
        super().__init__()
        self._first = first
        self._last = last

    @property
    def first(self) -> int:
        """Get code of first byte of matched range."""
        return self._first

    @property
    def last(self) -> int:
        """Get code of last byte of matched range."""
        return self._last

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        if not isinstance(other, LiteralRange):
            return False
        return (self.first == other.first and
                self.last == other.last)

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        result = indent * ' '
        result += 'abnfearley.LiteralRange('
        result += repr(self._first)
        result += ', '
        result += repr(self._last)
        result += ')'
        return result

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation.

        Emitted as a hex-val range '%xHH-HH'. Zero-padding is required:
        '{0:2X}' would space-pad values below 0x10 (e.g. '%x A-7F'),
        which is not valid ABNF.
        """
        return '%x{0:02X}-{1:02X}'.format(self._first, self._last)
class RuleCall(GrammarElement):
    """Call to rule of the Grammar.

    The called rule has to match the input for the RuleCall to match.
    """

    def __init__(self, call: str) -> None:
        """Initialise with name of called rule.

        Arguments:
        call -- the called rule
        """
        super().__init__()
        self._call = call

    @property
    def call(self) -> str:
        """Get called rule."""
        return self._call

    def register(self, parent: Union[GrammarElement, Grammar],
                 rule: str, grammar: Grammar) -> None:
        """Register GrammarElement at a parent.

        Arguments:
        parent -- direct parent containing this element
            (other GrammarElement or Grammar)
        rule -- name of rule containing this element
        grammar -- Grammar containing this element
            (can be same as parent for direct right-hand sides)

        Note: This method should only be called by Grammar.__init__ or
        the register method of other GrammarElement instances.
        """
        super().register(parent, rule, grammar)
        # Validate the reference eagerly so dangling rule calls surface
        # at grammar-construction time, not during parsing.
        if self._call not in grammar:
            raise ValueError(
                "Called rule '{}' not defined in grammar {}.".format(
                    self._call, grammar.name))

    def __eq__(self, other: object) -> bool:
        """Recursively check strict structural equality."""
        return isinstance(other, RuleCall) and self.call == other.call

    def __repr__(self, indent: int = 0) -> str:
        """Get evaluable representation."""
        return indent * ' ' + 'abnfearley.RuleCall({!r})'.format(self._call)

    def __str__(self, needs_parens: bool = False) -> str:
        """Get ABNF representation."""
        return self._call
| en | 0.784569 | Structure of ABNFEarley grammars. The following classes are provided to define the structure of and programmatically create Grammars: Grammar -- whole grammar consists of rules with names as left-hand sides and GrammarElements as right-hand side GrammarElement is the abstract base class for all of these: Alternation -- contains alternative GrammarElements Concatenation -- contains concatenated GrammarElements Repetition -- contains repeated GrammarElement with lower and upper bound LiteralString -- matches literal string to input LiteralRange -- matches range of literal bytes to input RuleCall -- call of grammar rule from within right-hand side Whole grammar consisting of named rules. A grammar consists of a mapping from names to GrammarElement instances (which can be arbitrarily nested Alternation, Concatenation, Repetition, literal and RuleCall instances). Initialise with mapping from rule names to right-hand sides. Arguments: name -- name of the grammar to identify it to the user rules -- mapping from rule names to right-hand sides imports -- sequence of other grammars used in this one Note: While mutable mappings and sequences could be given as arguments, we assume that they are not mutated after initialisation. Get name of grammar. Get rules of grammar. Get imported grammars. Get rule from grammar or its imports. Iterate over rules and imported rules. Total number of rules in grammar and imports. Recursively check structural equality. Get evaluable representation. Get ABNF representation. Abstract base class for all kinds of grammar elements. Trivially initialise GrammarElement. Register GrammarElement at a parent. 
Arguments: parent -- direct parent containing this element (other GrammarElement or Grammar) rule -- name of rule containing this element grammar -- Grammar containing this element (can be same as parent for direct right-hand sides) Note: This method should only be called by Grammar.__init__ or the register method of other GrammarElement instances. Get parent element. Get name of rule containing element. Get grammar containing element. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. Alternation between GrammarElement instances. At least one of the alternatives has to match the input for the Alternation to match. Initialise with sequence of alternatives. Argument: elements -- the alternatives Note: While a mutable sequence could be given as argument, we assume that it is not mutated after initialisation. # type: ignore Get alternative(s) at index or slice key. # type: ignore Get number of alternatives. Register GrammarElement at a parent. Arguments: parent -- direct parent containing this element (other GrammarElement or Grammar) rule -- name of rule containing this element grammar -- Grammar containing this element (can be same as parent for direct right-hand sides) Note: This method should only be called by Grammar.__init__ or the register method of other GrammarElement instances. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. Concatenation of GrammarElements instances. All contained elements have to match the input in order for the Concatenation to match. Initialise with sequence of concatenated elements. Argument: elements -- the concatenated elements Note: While a mutable sequence could be given as argument, we assume that it is not mutated after initialisation. # type: ignore Get concatenated element(s) at index or slice key. # type: ignore Get length of concatenation. Register GrammarElement at a parent. 
Arguments: parent -- direct parent containing this element (other GrammarElement or Grammar) rule -- name of rule containing this element grammar -- Grammar containing this element (can be same as parent for direct right-hand sides) Note: This method should only be called by Grammar.__init__ or the register method of other GrammarElement instances. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. Repetition of a GrammarElement instance. The contained element has to match at least lower times and at most upper times for the Repetition to match. If upper is None, it can match arbitrarily often. Initialise with repeated element and optionally bounds. Arguments: element -- the repeated element lower -- lower bound (defaults to 0) upper -- upper bound (defaults to unbound, represented by None) Get repeated element. Get lower bound. Get upper bound. Register GrammarElement at a parent. Arguments: parent -- direct parent containing this element (other GrammarElement or Grammar) rule -- name of rule containing this element grammar -- Grammar containing this element (can be same as parent for direct right-hand sides) Note: This method should only be called by Grammar.__init__ or the register method of other GrammarElement instances. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. # self.lower != self.upper and self.upper < * # self.lower == 0 Terminal literal string of grammar. The string is matched verbatim against the input. Initialise with string to accept. Arguments: string -- the string of bytes to match case_sensitive -- whether the string should match case-sensitive Get matched string of bytes. Get case-sensitivity. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. Range of terminal literal bytes. The current byte of the string is matched against a range of bytes. Initialise with first and last byte of range. 
Arguments: first -- code of the first byte of the matched range last -- codef of the last byte of the matched range Get code of first byte of matched range. Get code of last byte of matched range. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. Call to rule of the Grammar. The called rule has to match the input for the RuleCall to match. Initialise with name of called rule. Arguments: call -- the called rule Get called rule. Register GrammarElement at a parent. Arguments: parent -- direct parent containing this element (other GrammarElement or Grammar) rule -- name of rule containing this element grammar -- Grammar containing this element (can be same as parent for direct right-hand sides) Note: This method should only be called by Grammar.__init__ or the register method of other GrammarElement instances. Recursively check strict structural equality. Get evaluable representation. Get ABNF representation. | 3.538775 | 4 |
examples/cross-machine-mnist/client.py | SMILELab-FL/FedLab | 171 | 6619757 |
import argparse
import sys
import torch
from torch import nn
import torchvision
import torchvision.transforms as transforms
sys.path.append("../../")
from fedlab.core.client.manager import ClientPassiveManager
from fedlab.core.client.trainer import ClientSGDTrainer
from fedlab.core.network import DistNetwork
from fedlab.utils.logger import Logger
from fedlab.utils.dataset.sampler import RawPartitionSampler
parser = argparse.ArgumentParser(description="Distbelief training example")
parser.add_argument("--ip", type=str)
parser.add_argument("--port", type=str)
parser.add_argument("--world_size", type=int)
parser.add_argument("--rank", type=int)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--epoch", type=int, default=2)
parser.add_argument("--dataset", type=str)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--ethernet", type=str, default=None)
parser.add_argument("--cuda", type=bool, default=True)
args = parser.parse_args()
# get mnist dataset
root = "../../tests/data/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(
trainset,
sampler=RawPartitionSampler(trainset,
client_id=args.rank,
num_replicas=args.world_size - 1),
batch_size=args.batch_size,
drop_last=True,
num_workers=args.world_size)
# torch model
class MLP(nn.Module):
def __init__(self, input_size=784, output_size=10):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_size, 200)
self.fc2 = nn.Linear(200, 200)
self.fc3 = nn.Linear(200, output_size)
self.relu = nn.ReLU()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
model = MLP().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss()
network = DistNetwork(
address=(args.ip, args.port),
world_size=args.world_size,
rank=args.rank,
ethernet=args.ethernet,
)
LOGGER = Logger(log_name="client " + str(args.rank))
trainer = ClientSGDTrainer(
model,
trainloader,
epochs=args.epoch,
optimizer=optimizer,
criterion=criterion,
cuda=args.cuda,
logger=LOGGER,
)
manager_ = ClientPassiveManager(trainer=trainer,
network=network,
logger=LOGGER)
manager_.run()
|
import argparse
import sys
import torch
from torch import nn
import torchvision
import torchvision.transforms as transforms
sys.path.append("../../")
from fedlab.core.client.manager import ClientPassiveManager
from fedlab.core.client.trainer import ClientSGDTrainer
from fedlab.core.network import DistNetwork
from fedlab.utils.logger import Logger
from fedlab.utils.dataset.sampler import RawPartitionSampler
parser = argparse.ArgumentParser(description="Distbelief training example")
parser.add_argument("--ip", type=str)
parser.add_argument("--port", type=str)
parser.add_argument("--world_size", type=int)
parser.add_argument("--rank", type=int)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--epoch", type=int, default=2)
parser.add_argument("--dataset", type=str)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--ethernet", type=str, default=None)
parser.add_argument("--cuda", type=bool, default=True)
args = parser.parse_args()
# get mnist dataset
root = "../../tests/data/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(
trainset,
sampler=RawPartitionSampler(trainset,
client_id=args.rank,
num_replicas=args.world_size - 1),
batch_size=args.batch_size,
drop_last=True,
num_workers=args.world_size)
# torch model
class MLP(nn.Module):
def __init__(self, input_size=784, output_size=10):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_size, 200)
self.fc2 = nn.Linear(200, 200)
self.fc3 = nn.Linear(200, output_size)
self.relu = nn.ReLU()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
model = MLP().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss()
network = DistNetwork(
address=(args.ip, args.port),
world_size=args.world_size,
rank=args.rank,
ethernet=args.ethernet,
)
LOGGER = Logger(log_name="client " + str(args.rank))
trainer = ClientSGDTrainer(
model,
trainloader,
epochs=args.epoch,
optimizer=optimizer,
criterion=criterion,
cuda=args.cuda,
logger=LOGGER,
)
manager_ = ClientPassiveManager(trainer=trainer,
network=network,
logger=LOGGER)
manager_.run()
| en | 0.469362 | # get mnist dataset # torch model | 2.293517 | 2 |
lineage/query_context.py | yu-iskw/elementary | 282 | 6619758 | from datetime import datetime
from typing import Optional, Union
import dateutil.parser
from utils.time import format_milliseconds
class QueryContext(object):
def __init__(self, queried_database: Optional[str] = None, queried_schema: Optional[str] = None,
query_time: Optional[datetime] = None, query_volume: Optional[int] = None,
query_type: Optional[str] = None, user_name: Optional[str] = None,
role_name: Optional[str] = None, referenced_tables: [dict, list] = None,
destination_table: [dict, str] = None, duration: int = None,
query_id: str = None) -> None:
self.queried_database = queried_database
self.queried_schema = queried_schema
self.query_time = query_time
self.query_volume = query_volume
self.query_type = query_type
self.user_name = user_name
self.role_name = role_name
self.referenced_tables = referenced_tables if referenced_tables is not None else []
self.destination_table = destination_table
self.duration = duration
self.query_id = query_id
def to_dict(self) -> dict:
return {'queried_database': self.queried_database,
'queried_schema': self.queried_schema,
'query_time': self._query_time_to_str(self.query_time),
'query_volume': self.query_volume,
'query_type': self.query_type,
'user_name': self.user_name,
'role_name': self.role_name,
'referenced_tables': self.referenced_tables,
'destination_table': self.destination_table,
'duration': self.duration,
'query_id': self.query_id}
@staticmethod
def _query_time_to_str(query_time: Optional[datetime], fmt: str = None) -> Optional[str]:
if query_time is None:
return None
if fmt is None:
return query_time.isoformat()
return query_time.strftime(fmt)
@staticmethod
def _html_param_with_default(param: Union[str, int], default: Union[str, int] = 'Unknown') -> Union[str, int]:
return default if param is None else param
def to_html(self) -> str:
query_type = self._html_param_with_default(self.query_type)
user_name = self._html_param_with_default(self.user_name)
role_name = self._html_param_with_default(self.role_name)
query_time = self._query_time_to_str(self.query_time, fmt='%Y-%m-%d %H:%M:%S')
query_volume = self._html_param_with_default(self.query_volume, 0)
query_id = self._html_param_with_default(self.query_id)
volume_color = "DarkSlateGrey"
if query_volume == 0:
volume_color = "tomato"
is_view = 'view' in query_type.lower()
query_duration = 'Unknown'
if self.duration is not None:
query_duration = format_milliseconds(self.duration)
if is_view:
return f"""
<html>
<body>
<div style="font-family:arial;color:DarkSlateGrey;font-size:110%;">
<strong>
Last update</br>
</strong>
<div style="min-width:62px;display:inline-block">Type:</div> {query_type}</br>
<div style="min-width:62px;display:inline-block">Role:</div> {role_name}</br>
<div style="min-width:62px;display:inline-block">Time:</div> {query_time}</br>
</div>
</body>
</html>
"""
return f"""
<html>
<body>
<div style="font-family:arial;color:DarkSlateGrey;font-size:110%;">
<strong>
Last update</br>
</strong>
<div style="min-width:66px;display:inline-block">Type:</div> {query_type}</br>
<div style="min-width:66px;display:inline-block">User:</div> {user_name}</br>
<div style="min-width:66px;display:inline-block">Role:</div> {role_name}</br>
<div style="min-width:66px;display:inline-block">Time:</div> {query_time}</br>
<div style="min-width:66px;display:inline-block">Duration:</div> {query_duration}</br>
<div style="min-width:66px;display:inline-block;">Volume:</div> <a style="color:{volume_color}">{query_volume} rows</a></br>
<div style="min-width:66px;display:inline-block">Query ID:</div> {query_id}</br>
</div>
</body>
</html>
"""
@staticmethod
def from_dict(query_context_dict: dict) -> 'QueryContext':
if 'query_time' in query_context_dict and query_context_dict['query_time'] is not None:
query_context_dict['query_time'] = dateutil.parser.parse(query_context_dict['query_time'])
return QueryContext(**query_context_dict)
| from datetime import datetime
from typing import Optional, Union
import dateutil.parser
from utils.time import format_milliseconds
class QueryContext(object):
def __init__(self, queried_database: Optional[str] = None, queried_schema: Optional[str] = None,
query_time: Optional[datetime] = None, query_volume: Optional[int] = None,
query_type: Optional[str] = None, user_name: Optional[str] = None,
role_name: Optional[str] = None, referenced_tables: [dict, list] = None,
destination_table: [dict, str] = None, duration: int = None,
query_id: str = None) -> None:
self.queried_database = queried_database
self.queried_schema = queried_schema
self.query_time = query_time
self.query_volume = query_volume
self.query_type = query_type
self.user_name = user_name
self.role_name = role_name
self.referenced_tables = referenced_tables if referenced_tables is not None else []
self.destination_table = destination_table
self.duration = duration
self.query_id = query_id
def to_dict(self) -> dict:
return {'queried_database': self.queried_database,
'queried_schema': self.queried_schema,
'query_time': self._query_time_to_str(self.query_time),
'query_volume': self.query_volume,
'query_type': self.query_type,
'user_name': self.user_name,
'role_name': self.role_name,
'referenced_tables': self.referenced_tables,
'destination_table': self.destination_table,
'duration': self.duration,
'query_id': self.query_id}
@staticmethod
def _query_time_to_str(query_time: Optional[datetime], fmt: str = None) -> Optional[str]:
if query_time is None:
return None
if fmt is None:
return query_time.isoformat()
return query_time.strftime(fmt)
@staticmethod
def _html_param_with_default(param: Union[str, int], default: Union[str, int] = 'Unknown') -> Union[str, int]:
return default if param is None else param
def to_html(self) -> str:
query_type = self._html_param_with_default(self.query_type)
user_name = self._html_param_with_default(self.user_name)
role_name = self._html_param_with_default(self.role_name)
query_time = self._query_time_to_str(self.query_time, fmt='%Y-%m-%d %H:%M:%S')
query_volume = self._html_param_with_default(self.query_volume, 0)
query_id = self._html_param_with_default(self.query_id)
volume_color = "DarkSlateGrey"
if query_volume == 0:
volume_color = "tomato"
is_view = 'view' in query_type.lower()
query_duration = 'Unknown'
if self.duration is not None:
query_duration = format_milliseconds(self.duration)
if is_view:
return f"""
<html>
<body>
<div style="font-family:arial;color:DarkSlateGrey;font-size:110%;">
<strong>
Last update</br>
</strong>
<div style="min-width:62px;display:inline-block">Type:</div> {query_type}</br>
<div style="min-width:62px;display:inline-block">Role:</div> {role_name}</br>
<div style="min-width:62px;display:inline-block">Time:</div> {query_time}</br>
</div>
</body>
</html>
"""
return f"""
<html>
<body>
<div style="font-family:arial;color:DarkSlateGrey;font-size:110%;">
<strong>
Last update</br>
</strong>
<div style="min-width:66px;display:inline-block">Type:</div> {query_type}</br>
<div style="min-width:66px;display:inline-block">User:</div> {user_name}</br>
<div style="min-width:66px;display:inline-block">Role:</div> {role_name}</br>
<div style="min-width:66px;display:inline-block">Time:</div> {query_time}</br>
<div style="min-width:66px;display:inline-block">Duration:</div> {query_duration}</br>
<div style="min-width:66px;display:inline-block;">Volume:</div> <a style="color:{volume_color}">{query_volume} rows</a></br>
<div style="min-width:66px;display:inline-block">Query ID:</div> {query_id}</br>
</div>
</body>
</html>
"""
@staticmethod
def from_dict(query_context_dict: dict) -> 'QueryContext':
if 'query_time' in query_context_dict and query_context_dict['query_time'] is not None:
query_context_dict['query_time'] = dateutil.parser.parse(query_context_dict['query_time'])
return QueryContext(**query_context_dict)
| en | 0.128029 | <html> <body> <div style="font-family:arial;color:DarkSlateGrey;font-size:110%;"> <strong> Last update</br> </strong> <div style="min-width:62px;display:inline-block">Type:</div> {query_type}</br> <div style="min-width:62px;display:inline-block">Role:</div> {role_name}</br> <div style="min-width:62px;display:inline-block">Time:</div> {query_time}</br> </div> </body> </html> <html> <body> <div style="font-family:arial;color:DarkSlateGrey;font-size:110%;"> <strong> Last update</br> </strong> <div style="min-width:66px;display:inline-block">Type:</div> {query_type}</br> <div style="min-width:66px;display:inline-block">User:</div> {user_name}</br> <div style="min-width:66px;display:inline-block">Role:</div> {role_name}</br> <div style="min-width:66px;display:inline-block">Time:</div> {query_time}</br> <div style="min-width:66px;display:inline-block">Duration:</div> {query_duration}</br> <div style="min-width:66px;display:inline-block;">Volume:</div> <a style="color:{volume_color}">{query_volume} rows</a></br> <div style="min-width:66px;display:inline-block">Query ID:</div> {query_id}</br> </div> </body> </html> | 2.264649 | 2 |
cc/licenserdf/tools/support.py | projekt-opal/cc.licenserdf | 9 | 6619759 | """Support functions for license RDF tools."""
# Python2/3 Compatibility
from future import standard_library
standard_library.install_aliases()
# Standard library
from builtins import str
from distutils.version import StrictVersion
import os
# Third-party
from babel.messages import pofile
from rdflib import Literal, Namespace, RDF, URIRef
from rdflib.graph import Graph
import pkg_resources
# Local/library specific
from cc.i18n import mappers
from cc.i18n.util import locale_to_lower_lower
from cc.licenserdf import util
NS_DC = Namespace("http://purl.org/dc/elements/1.1/")
NS_DCQ = Namespace("http://purl.org/dc/terms/")
NS_RDF = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
NS_XSD = Namespace("http://www.w3.org/2001/XMLSchema-datatypes#")
NS_FOAF = Namespace("http://xmlns.com/foaf/0.1/")
NS_CC = Namespace("http://creativecommons.org/ns#")
NS_CC_JURISDICTION = Namespace("http://creativecommons.org/international/")
def graph():
"""Return an empty graph with common namespaces defined."""
store = Graph()
store.bind("cc", "http://creativecommons.org/ns#")
store.bind("dc", "http://purl.org/dc/elements/1.1/")
store.bind("dcq", "http://purl.org/dc/terms/")
store.bind("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
store.bind("foaf", "http://xmlns.com/foaf/0.1/")
return store
def load_graph(filename):
"""Load the specified filename; return a graph."""
store = graph()
store.load(filename)
return store
def save_graph(graph, filename):
"""Save the graph to the specified filename."""
output_file = open(filename,"w")
output_file.write(
graph.serialize(format="pretty-xml", max_depth=1)
)
output_file.close()
def gen_license_i18n_title(license_code, license_version, license_jurisdiction):
if license_code == 'devnations':
i18n_str = '${Developing Nations} License'
elif 'sampling' in license_code:
i18n_str = '${%s} %s' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
elif license_code in ('MIT', 'BSD'):
i18n_str = license_code
elif license_code == 'LGPL':
i18n_str = '${GNU Lesser General Public License}'
elif license_code == 'GPL':
i18n_str = '${GNU General Public License}'
elif license_code == 'publicdomain':
i18n_str = '${Public Domain}'
elif license_code == 'mark':
i18n_str = '${Public Domain Mark} %s' % (license_version)
elif license_code == 'cc0':
i18n_str = 'CC0 %s ${Universal}' % (
license_version)
else:
# 'standard' license
if license_jurisdiction:
i18n_str = '${%s} %s ${%s}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version,
mappers.COUNTRY_MAP[license_jurisdiction])
else:
if StrictVersion(license_version) >= StrictVersion('4.0'):
i18n_str = '${%s} %s ${International}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
elif StrictVersion(license_version) >= StrictVersion('3.0'):
i18n_str = '${%s} %s ${Unported}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
else:
i18n_str = '${%s} %s ${Generic}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
return i18n_str
def translate_graph(graph):
"""
Look for title assertions with x-i18n as the lang, use their object
as the msgid to find additionaly title translations
Args:
graph: rdflib processed graph for us to walk through
i18n_dir: directory of PO files. Default directory is that
which is supplied with this package.
"""
lang_dirs = os.listdir(
os.path.abspath(
pkg_resources.resource_filename('cc.i18n', 'po')))
for subject, predicate, obj in graph.triples((
None, None, None)):
if not hasattr(obj, 'language') or obj.language != 'x-i18n':
continue
else:
str_id = str(obj)
if not str_id:
return None
old_objects = {}
# remove any previous instane of this language's
# translations.
for s, p, old_obj in graph.triples((subject, predicate, None)):
if lang_dirs.count(old_obj.language):
old_objects[old_obj.language] = old_obj
for lang in sorted(lang_dirs):
rdf_lang = locale_to_lower_lower(lang)
if rdf_lang in old_objects:
graph.remove((subject, predicate, old_objects[rdf_lang]))
translated = util.inverse_translate(str_id, lang)
graph.add((subject, predicate, Literal(translated, lang=rdf_lang)))
| """Support functions for license RDF tools."""
# Python2/3 Compatibility
from future import standard_library
standard_library.install_aliases()
# Standard library
from builtins import str
from distutils.version import StrictVersion
import os
# Third-party
from babel.messages import pofile
from rdflib import Literal, Namespace, RDF, URIRef
from rdflib.graph import Graph
import pkg_resources
# Local/library specific
from cc.i18n import mappers
from cc.i18n.util import locale_to_lower_lower
from cc.licenserdf import util
NS_DC = Namespace("http://purl.org/dc/elements/1.1/")
NS_DCQ = Namespace("http://purl.org/dc/terms/")
NS_RDF = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
NS_XSD = Namespace("http://www.w3.org/2001/XMLSchema-datatypes#")
NS_FOAF = Namespace("http://xmlns.com/foaf/0.1/")
NS_CC = Namespace("http://creativecommons.org/ns#")
NS_CC_JURISDICTION = Namespace("http://creativecommons.org/international/")
def graph():
"""Return an empty graph with common namespaces defined."""
store = Graph()
store.bind("cc", "http://creativecommons.org/ns#")
store.bind("dc", "http://purl.org/dc/elements/1.1/")
store.bind("dcq", "http://purl.org/dc/terms/")
store.bind("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
store.bind("foaf", "http://xmlns.com/foaf/0.1/")
return store
def load_graph(filename):
"""Load the specified filename; return a graph."""
store = graph()
store.load(filename)
return store
def save_graph(graph, filename):
"""Save the graph to the specified filename."""
output_file = open(filename,"w")
output_file.write(
graph.serialize(format="pretty-xml", max_depth=1)
)
output_file.close()
def gen_license_i18n_title(license_code, license_version, license_jurisdiction):
if license_code == 'devnations':
i18n_str = '${Developing Nations} License'
elif 'sampling' in license_code:
i18n_str = '${%s} %s' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
elif license_code in ('MIT', 'BSD'):
i18n_str = license_code
elif license_code == 'LGPL':
i18n_str = '${GNU Lesser General Public License}'
elif license_code == 'GPL':
i18n_str = '${GNU General Public License}'
elif license_code == 'publicdomain':
i18n_str = '${Public Domain}'
elif license_code == 'mark':
i18n_str = '${Public Domain Mark} %s' % (license_version)
elif license_code == 'cc0':
i18n_str = 'CC0 %s ${Universal}' % (
license_version)
else:
# 'standard' license
if license_jurisdiction:
i18n_str = '${%s} %s ${%s}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version,
mappers.COUNTRY_MAP[license_jurisdiction])
else:
if StrictVersion(license_version) >= StrictVersion('4.0'):
i18n_str = '${%s} %s ${International}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
elif StrictVersion(license_version) >= StrictVersion('3.0'):
i18n_str = '${%s} %s ${Unported}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
else:
i18n_str = '${%s} %s ${Generic}' % (
mappers.LICENSE_NAME_MAP[license_code],
license_version)
return i18n_str
def translate_graph(graph):
"""
Look for title assertions with x-i18n as the lang, use their object
as the msgid to find additionaly title translations
Args:
graph: rdflib processed graph for us to walk through
i18n_dir: directory of PO files. Default directory is that
which is supplied with this package.
"""
lang_dirs = os.listdir(
os.path.abspath(
pkg_resources.resource_filename('cc.i18n', 'po')))
for subject, predicate, obj in graph.triples((
None, None, None)):
if not hasattr(obj, 'language') or obj.language != 'x-i18n':
continue
else:
str_id = str(obj)
if not str_id:
return None
old_objects = {}
# remove any previous instane of this language's
# translations.
for s, p, old_obj in graph.triples((subject, predicate, None)):
if lang_dirs.count(old_obj.language):
old_objects[old_obj.language] = old_obj
for lang in sorted(lang_dirs):
rdf_lang = locale_to_lower_lower(lang)
if rdf_lang in old_objects:
graph.remove((subject, predicate, old_objects[rdf_lang]))
translated = util.inverse_translate(str_id, lang)
graph.add((subject, predicate, Literal(translated, lang=rdf_lang)))
| en | 0.771706 | Support functions for license RDF tools. # Python2/3 Compatibility # Standard library # Third-party # Local/library specific #") #") #") Return an empty graph with common namespaces defined. #") #") Load the specified filename; return a graph. Save the graph to the specified filename. # 'standard' license Look for title assertions with x-i18n as the lang, use their object as the msgid to find additionaly title translations Args: graph: rdflib processed graph for us to walk through i18n_dir: directory of PO files. Default directory is that which is supplied with this package. # remove any previous instane of this language's # translations. | 2.0927 | 2 |
MyTools/ODE/RC_circuits_1.py | fovtran/PyGame_samples | 0 | 6619760 | import numpy as np
from numpy import arange, exp
import matplotlib.pyplot as plt
t=arange(0,10,0.01)
q=1-exp(-0.5*t)
i=0.5*exp(-0.5*t)
v=10*(1-exp(-0.5*t))
fig,ax = plt.subplots()
ax.plot(t, q, 'k--', label='charge')
ax.plot(t, i, 'k:', label='current')
ax.plot(t, v, 'k', label='voltage')
plt.xlabel('Time')
plt.ylabel('Current')
plt.title('R-C Circuit')
plt.show()
| import numpy as np
from numpy import arange, exp
import matplotlib.pyplot as plt
t=arange(0,10,0.01)
q=1-exp(-0.5*t)
i=0.5*exp(-0.5*t)
v=10*(1-exp(-0.5*t))
fig,ax = plt.subplots()
ax.plot(t, q, 'k--', label='charge')
ax.plot(t, i, 'k:', label='current')
ax.plot(t, v, 'k', label='voltage')
plt.xlabel('Time')
plt.ylabel('Current')
plt.title('R-C Circuit')
plt.show()
| none | 1 | 3.210167 | 3 | |
pre_system_svea/ctd_files.py | sharksmhi/pre_system_svea | 0 | 6619761 | <filename>pre_system_svea/ctd_files.py
from pathlib import Path
import os
import re
from abc import ABC, abstractmethod
class CtdFileType(ABC):
_pattern = ''
_example = ''
def __repr__(self):
return f'Pattern: {self._pattern}\nExample: {self._example}'
@property
def pattern(self):
return self._pattern
@property
def example(self):
return self._example
@abstractmethod
def year(self, file_path):
pass
@abstractmethod
def instrument(self, file_path):
pass
@abstractmethod
def ship(self, file_path):
pass
@abstractmethod
def serno(self, file_path):
pass
@abstractmethod
def cruise(self, file_path):
pass
class CtdFileTypeFormer(CtdFileType):
_pattern = '[^_]+_\d{4}_\d{8}_\d{4}_\d{2}_\d{2}_\d{4}'
_example = 'SBE09_1387_20210413_1113_77_10_0278'
def year(self, file_path):
return file_path.stem.split('_')[2][:4]
def instrument(self, file_path):
return file_path.stem.split('_')[0]
def ship(self, file_path):
return file_path.stem.split('_')[4] + file_path.stem.split('_')[5]
def serno(self, file_path):
return file_path.stem.split('_')[6]
def cruise(self, *args):
return None
class CtdFileTypeSvea(CtdFileType):
    """Current Svea naming convention.

    The ship field carries a two-letter country suffix (e.g. ``77SE``)
    and is followed by a two-digit cruise number.
    """

    _pattern = r'[^_]+_\d{4}_\d{8}_\d{4}_\d{2}[a-zA-Z]{2}_\d{2}_\d{4}'
    _example = 'SBE09_1387_20210413_1113_77SE_01_0278'

    def year(self, file_path):
        """Four-digit year taken from the date field."""
        parts = file_path.stem.split('_')
        return parts[2][:4]

    def instrument(self, file_path):
        """Instrument identifier (first field)."""
        parts = file_path.stem.split('_')
        return parts[0]

    def ship(self, file_path):
        """Ship code including country suffix."""
        parts = file_path.stem.split('_')
        return parts[4]

    def serno(self, file_path):
        """Series number (last field)."""
        parts = file_path.stem.split('_')
        return parts[6]

    def cruise(self, file_path):
        """Two-digit cruise number."""
        parts = file_path.stem.split('_')
        return parts[5]
class CtdFile:
    """A single CTD file classified against a set of file-name conventions.

    The first object in ``file_types`` whose ``pattern`` matches the whole
    file stem becomes this file's parser (``self.file_type``); if none
    matches, ``valid`` is False and ``file_type`` stays None.
    """

    def __init__(self, path, file_types=None):
        """
        :param path: path to the CTD file (str or Path)
        :param file_types: iterable of CtdFileType objects to try, in order
        """
        self.valid = True
        self.path = Path(path)
        self.name = self.path.name
        self.stem = self.path.stem
        self.suffix = self.path.suffix
        # Initialised to None so get() is safe even when nothing matches
        # (previously the attribute was missing and get() raised).
        self.file_type = None
        for file_type in (file_types or ()):
            # The pattern must cover the entire stem, not just a substring.
            if re.fullmatch(file_type.pattern, self.stem):
                self.file_type = file_type
                break
        else:
            self.valid = False

    def get(self, item):
        """Return metadata field ``item`` (e.g. 'year', 'ship') parsed from
        the file name, or None when unknown or the file matched no type."""
        if hasattr(self.file_type, item):
            return getattr(self.file_type, item)(self.path)
        return None

    def is_matching(self, **kwargs):
        """Return True when every given key/value pair agrees with this file.

        Keys whose parsed value is falsy (None, empty string) are ignored,
        so criteria that a file type does not encode never exclude it.
        """
        for key, value in kwargs.items():
            item = self.get(key)
            if not item:
                continue
            if item != value:
                return False
        return True
class CtdFiles:
    """Index CTD files under a root directory and query them by metadata.

    Files are matched against the registered CtdFileType patterns and can
    then be filtered on the attributes those patterns expose (year,
    instrument, ship, serno, cruise).
    """

    def __init__(self, root_directory, use_stem=False, suffix=None):
        """Store scan options and validate the root directory.

        :param root_directory: directory scanned by check_directory()
        :param use_stem: key self.files on the stem instead of the full name
        :param suffix: if given, only files with this suffix are indexed
        :raises NotADirectoryError: if root_directory does not exist
        """
        self.root_directory = Path(root_directory)
        if not self.root_directory.exists():
            raise NotADirectoryError(self.root_directory)
        self._file_types = set()
        self.files = {}
        self.use_stem = use_stem
        self.suffix = suffix

    def check_directory(self):
        """Rescan the root directory and rebuild the self.files index."""
        self.files = {}
        for root, dirs, files in os.walk(self.root_directory, topdown=False):
            for name in files:
                ctd_file = CtdFile(Path(root, name),
                                   file_types=self._file_types)
                if not ctd_file.valid:
                    continue
                if self.suffix and ctd_file.suffix != self.suffix:
                    continue
                key = ctd_file.stem if self.use_stem else name
                self.files[key] = ctd_file

    def add_file_type(self, file_type_object):
        """Register a CtdFileType instance used to recognize files."""
        self._file_types.add(file_type_object)

    def get_files_matching(self, as_list=False, **kwargs):
        """Return indexed files whose attributes match **kwargs.

        :param as_list: return a list of CtdFile objects instead of a dict
        :return: dict keyed like self.files (sorted by key), or a list
        """
        matching_series = {}
        for name in sorted(self.files):
            obj = self.files[name]
            if obj.is_matching(**kwargs):
                matching_series[name] = obj
        if as_list:
            return list(matching_series.values())
        return matching_series

    def get_latest_serno(self, **kwargs):
        """Return the highest serno among files matching **kwargs.

        Sernos are zero-padded strings, so lexicographic order is
        numeric order. Returns None when nothing matches.
        """
        matching_files = self.get_files_matching(**kwargs)
        serno_list = sorted({obj.get('serno')
                             for obj in matching_files.values()})
        if not serno_list:
            return None
        return serno_list[-1]

    def get_latest_series(self, path=False, **kwargs):
        """Return the single file with the highest serno matching **kwargs.

        :param path: return the file's Path instead of the CtdFile object
        :return: CtdFile (or Path), or None if nothing matches
        :raises ValueError: if more than one file shares the highest serno
        """
        kwargs['serno'] = self.get_latest_serno(**kwargs)
        matching_files = self.get_files_matching(**kwargs)
        if not matching_files:
            return None
        if len(matching_files) > 1:
            # Typo fixed in the message ("mathing" -> "matching").
            raise ValueError('More than one matching file')
        obj = matching_files[list(matching_files.keys())[0]]
        if path:
            return obj.path
        return obj

    def get_next_serno(self, **kwargs):
        """Return the next free serno, zero-padded ('0001' if none exist)."""
        latest_serno = self.get_latest_serno(**kwargs)
        if not latest_serno:
            return '0001'
        return str(int(latest_serno) + 1).zfill(4)

    def series_exists(self, return_file_name=False, **kwargs):
        """Return True (or the first matching file name) if a match exists."""
        matching = self.get_files_matching(**kwargs)
        if matching:
            if return_file_name:
                return list(matching)[0]
            return True
        return False

    def get_number_of_series(self):
        """Return the number of indexed files."""
        return len(self.get_files_matching())
def get_ctd_files_object(directory, use_stem=False, suffix=None):
    """Build a CtdFiles index for *directory* using the 'former' convention.

    Registers CtdFileTypeFormer, scans the directory once, and returns
    the populated CtdFiles object.
    """
    ctd_files = CtdFiles(directory, use_stem=use_stem, suffix=suffix)
    ctd_files.add_file_type(CtdFileTypeFormer())
    ctd_files.check_directory()
    return ctd_files
if __name__ == '__main__':
    # Ad-hoc manual test: index the raw .hex files for 2021 from a
    # developer-machine path (Windows-specific, not portable).
    c = get_ctd_files_object(r'C:\mw\temp_ctd_pre_system_data_root\data\2021\raw', suffix='.hex')
| <filename>pre_system_svea/ctd_files.py
from pathlib import Path
import os
import re
from abc import ABC, abstractmethod
class CtdFileType(ABC):
_pattern = ''
_example = ''
def __repr__(self):
return f'Pattern: {self._pattern}\nExample: {self._example}'
@property
def pattern(self):
return self._pattern
@property
def example(self):
return self._example
@abstractmethod
def year(self, file_path):
pass
@abstractmethod
def instrument(self, file_path):
pass
@abstractmethod
def ship(self, file_path):
pass
@abstractmethod
def serno(self, file_path):
pass
@abstractmethod
def cruise(self, file_path):
pass
class CtdFileTypeFormer(CtdFileType):
_pattern = '[^_]+_\d{4}_\d{8}_\d{4}_\d{2}_\d{2}_\d{4}'
_example = 'SBE09_1387_20210413_1113_77_10_0278'
def year(self, file_path):
return file_path.stem.split('_')[2][:4]
def instrument(self, file_path):
return file_path.stem.split('_')[0]
def ship(self, file_path):
return file_path.stem.split('_')[4] + file_path.stem.split('_')[5]
def serno(self, file_path):
return file_path.stem.split('_')[6]
def cruise(self, *args):
return None
class CtdFileTypeSvea(CtdFileType):
_pattern = '[^_]+_\d{4}_\d{8}_\d{4}_\d{2}[a-zA-Z]{2}_\d{2}_\d{4}'
_example = 'SBE09_1387_20210413_1113_77SE_01_0278'
def year(self, file_path):
return file_path.stem.split('_')[2][:4]
def instrument(self, file_path):
return file_path.stem.split('_')[0]
def ship(self, file_path):
return file_path.stem.split('_')[4]
def serno(self, file_path):
return file_path.stem.split('_')[6]
def cruise(self, file_path):
return file_path.stem.split('_')[5]
class CtdFile:
def __init__(self, path, file_types=None):
self.valid = True
self.path = Path(path)
self.name = self.path.name
self.stem = self.path.stem
self.suffix = self.path.suffix
for file_type in file_types:
match = re.findall(file_type.pattern, self.stem)
# print('match', match)
# print(match[0])
# print(self.stem)
if match and match[0] == self.stem:
# print('OK')
self.file_type = file_type
break
else:
self.valid = False
def get(self, item):
if hasattr(self.file_type, item):
return getattr(self.file_type, item)(self.path)
return None
def is_matching(self, **kwargs):
for key, value in kwargs.items():
item = self.get(key)
# print('TRY MATCHING:', key, value, item)
if not item:
continue
if item == None:
continue
if item != value:
# print('NOT MATCHING:', key, value, item)
return False
return True
class CtdFiles:
def __init__(self, root_directory, use_stem=False, suffix=None):
self.root_directory = Path(root_directory)
if not self.root_directory.exists():
raise NotADirectoryError(self.root_directory)
self._file_types = set()
self.files = {}
self.use_stem = use_stem
self.suffix = suffix
def check_directory(self):
self.files = {}
# print('ROOT directory in CtdFiles', self.root_directory)
for root, dirs, files in os.walk(self.root_directory, topdown=False):
for name in files:
path = Path(root, name)
path = CtdFile(path, file_types=self._file_types)
# print(path.valid)
if not path.valid:
continue
if self.suffix and path.suffix != self.suffix:
continue
key = name
if self.use_stem:
key = path.stem
self.files[key] = path
def add_file_type(self, file_type_object):
self._file_types.add(file_type_object)
def get_files_matching(self, as_list=False, **kwargs):
matching_series = {}
# print('self.files', self.files)
for name in sorted(self.files):
obj = self.files[name]
if obj.is_matching(**kwargs):
matching_series[name] = obj
if as_list:
return list(matching_series.values())
return matching_series
def get_latest_serno(self, **kwargs):
"""
Returns the highest serno found in files. Check for matching criteria in kwargs first.
:param serno:
:return:
"""
# print('get_latest_serno kwargs: ', kwargs)
matching_files = self.get_files_matching(**kwargs)
serno_list = sorted(set([obj.get('serno') for name, obj in matching_files.items()]))
if not serno_list:
return None
return serno_list[-1]
def get_latest_series(self, path=False, **kwargs):
serno = self.get_latest_serno(**kwargs)
kwargs['serno'] = serno
# print('ctd_files.get_latest_series kwargs', kwargs)
matching_files = self.get_files_matching(**kwargs)
if not matching_files:
return None
if len(matching_files) > 1:
raise ValueError('More than one mathing file')
obj = matching_files[list(matching_files.keys())[0]]
if path:
return obj.path
return obj
def get_next_serno(self, **kwargs):
latest_serno = self.get_latest_serno(**kwargs)
if not latest_serno:
return '0001'
next_serno = str(int(latest_serno)+1).zfill(4)
return next_serno
def series_exists(self, return_file_name=False, **kwargs):
matching = self.get_files_matching(**kwargs)
if matching:
if return_file_name:
return list(matching)[0]
return True
return False
def get_number_of_series(self):
return len(self.get_files_matching())
def get_ctd_files_object(directory, use_stem=False, suffix=None):
obj = CtdFiles(directory, use_stem=use_stem, suffix=suffix)
obj.add_file_type(CtdFileTypeFormer())
obj.check_directory()
return obj
if __name__ == '__main__':
c = get_ctd_files_object(r'C:\mw\temp_ctd_pre_system_data_root\data\2021\raw', suffix='.hex')
| en | 0.309753 | # print('match', match) # print(match[0]) # print(self.stem) # print('OK') # print('TRY MATCHING:', key, value, item) # print('NOT MATCHING:', key, value, item) # print('ROOT directory in CtdFiles', self.root_directory) # print(path.valid) # print('self.files', self.files) Returns the highest serno found in files. Check for matching criteria in kwargs first. :param serno: :return: # print('get_latest_serno kwargs: ', kwargs) # print('ctd_files.get_latest_series kwargs', kwargs) | 2.614374 | 3 |
datawinners/accountmanagement/post_registration_events.py | ICT4H/dcs-web | 1 | 6619762 | <reponame>ICT4H/dcs-web
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from datawinners.accountmanagement.models import NGOUserProfile
def ngo_user_created(sender, user, request, **kwargs):
    """Create and save an NGOUserProfile for *user* from **kwargs.

    Signal-style handler (sender/user/request signature); reads the
    'organization_id', 'title', 'mobile_phone' and 'reporter_id' keys.
    """
    profile = NGOUserProfile()
    profile.user = user
    profile.org_id = kwargs['organization_id']
    profile.title = kwargs['title']
    profile.mobile_phone = kwargs['mobile_phone']
    profile.reporter_id = kwargs['reporter_id']
    profile.save()
| # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from datawinners.accountmanagement.models import NGOUserProfile
def ngo_user_created(sender, user, request, **kwargs):
data = NGOUserProfile()
data.org_id = kwargs['organization_id']
data.title = kwargs['title']
data.mobile_phone = kwargs['mobile_phone']
data.reporter_id = kwargs['reporter_id']
data.user = user
data.save() | fr | 0.370952 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 | 1.721792 | 2 |
sort.py | belikor/lbrytools | 9 | 6619763 | #!/usr/bin/env python3
# --------------------------------------------------------------------------- #
# The MIT License (MIT) #
# #
# Copyright (c) 2021 <NAME> <<EMAIL>> #
# #
# Permission is hereby granted, free of charge, to any person obtaining #
# a copy of this software and associated documentation files #
# (the "Software"), to deal in the Software without restriction, including #
# without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit #
# persons to whom the Software is furnished to do so, subject to the #
# following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# --------------------------------------------------------------------------- #
"""Functions to help with sorting downloaded claims from the LBRY network."""
import requests
import lbrytools.funcs as funcs
import lbrytools.search as srch
import lbrytools.search_utils as sutils
import lbrytools.search_ch as srch_ch
def sort_items(channel=None, reverse=False,
               server="http://localhost:5279"):
    """Return the locally downloaded claims sorted by release time.

    If `channel` is given, only the downloaded claims from that channel
    are listed; otherwise all downloaded claims are listed.

    Parameters
    ----------
    channel: str, optional
        Channel name, full or partial (`'@MyChannel#5'`, `'MyChannel'`).
        A leading `'@'` is added when missing.
    reverse: bool, optional
        If `False` (default) older claims come first; if `True` newer
        claims come first.
    server: str, optional
        Address of the running `lbrynet` daemon.

    Returns
    -------
    list of dict
        One dictionary per downloaded claim, as produced by the
        `lbrynet file list` JSON-RPC call, ordered by
        `'release_time'`. Claims lacking `'release_time'` get it
        filled in from `'timestamp'`.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    page_size = 99000

    if channel and not isinstance(channel, str):
        print("Channel must be a string. Set to 'None'.")
        print(f"channel={channel}")
        channel = None

    if channel and not channel.startswith("@"):
        channel = "@" + channel

    # Display the equivalent command-line invocation.
    cmd = ["lbrynet", "file", "list", f"--page_size={page_size}"]
    if channel:
        cmd.append(f"--channel_name='{channel}'")

    print("List: " + " ".join(cmd))
    print(80 * "-")

    payload = {"method": "file_list",
               "params": {"page_size": page_size}}

    if channel:
        payload["params"]["channel_name"] = channel
        # A bug (lbryio/lbry-sdk #3316) prevents `lbrynet file list`
        # from finding the channel unless it has been resolved first.
        if not srch_ch.resolve_channel(channel=channel, server=server):
            return False

    response = requests.post(server, json=payload).json()
    if "error" in response:
        print(">>> No 'result' in the JSON-RPC server output")
        return False

    claims = response["result"]["items"]
    total = len(claims)

    if total < 1:
        if channel:
            print("No items found; at least one item must be downloaded; "
                  f"check that the name is correct, channel={channel}")
        else:
            print("No items found; at least one item must be downloaded.")
        return False

    print(f"Number of items: {total}")

    # Older claims may lack 'release_time'; fall back to 'timestamp'.
    prepared = []
    for num, claim in enumerate(claims, start=1):
        meta = claim["metadata"]
        if "release_time" not in meta:
            print(f"{num}/{total}, {claim['claim_name']}, using 'timestamp'")
            meta["release_time"] = claim["timestamp"]
        prepared.append(claim)

    # Ascending by release time by default; descending when reverse=True.
    return sorted(prepared,
                  key=lambda c: int(c["metadata"]["release_time"]),
                  reverse=reverse)
def sort_invalid(channel=None, reverse=False,
                 server="http://localhost:5279"):
    """Return downloaded claims that can no longer be resolved online.

    A claim becomes 'invalid' when its author removes it from the
    network after it was downloaded; the local blobs remain but the
    claim can no longer be resolved nor re-downloaded.

    Parameters
    ----------
    channel: str, optional
        Restrict the search to claims from this channel (full or
        partial name, e.g. `'@MyChannel#5'`).
    reverse: bool, optional
        If `False` (default) older claims come first; if `True` newer
        claims come first.
    server: str, optional
        Address of the running `lbrynet` daemon.

    Returns
    -------
    list of dict
        The subset of `sort_items` results that cannot be resolved
        online anymore, ordered by `'release_time'`.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    claims = sort_items(channel=channel, reverse=reverse,
                        server=server)
    if not claims:
        return False

    total = len(claims)
    invalid = []

    for num, claim in enumerate(claims, start=1):
        # A claim that cannot be found online anymore is invalid.
        resolved = srch.search_item(cid=claim["claim_id"], offline=False,
                                    print_error=False,
                                    server=server)
        if resolved:
            continue
        if not invalid:
            print()
        print(f"Claim {num:4d}/{total:4d}, "
              f"{claim['claim_id']}, {claim['channel_name']}, "
              f"{claim['claim_name']}")
        invalid.append(claim)

    n_invalid = len(invalid)
    if n_invalid > 0:
        print(f"Invalid items found: {n_invalid} "
              "(possibly deleted from the network)")
    else:
        print(f"Invalid items found: {n_invalid}")

    return invalid
def sort_items_size(channel=None, reverse=False, invalid=False,
                    server="http://localhost:5279"):
    """Return downloaded claims together with their total size and length.

    Parameters
    ----------
    channel: str, optional
        Restrict the search to claims from this channel (full or
        partial name, e.g. `'@MyChannel#5'`).
    reverse: bool, optional
        If `False` (default) older claims come first; if `True` newer
        claims come first.
    invalid: bool, optional
        If `True`, only consider claims that are no longer resolvable
        online (see `sort_invalid`); otherwise all downloaded claims.
    server: str, optional
        Address of the running `lbrynet` daemon.

    Returns
    -------
    dict
        With keys:
        - 'claims': the sorted list of claim dictionaries
        - 'size': total size in bytes
        - 'duration': total duration in seconds (audio/video streams)
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if invalid:
        claims = sort_invalid(channel=channel, reverse=reverse,
                              server=server)
    else:
        claims = sort_items(channel=channel, reverse=reverse,
                            server=server)
    if not claims:
        return False

    print()
    totals = sutils.downloadable_size(claims, local=True)
    total_size = totals["size"]
    total_duration = totals["duration"]
    n_claims = len(claims)

    size_gib = total_size / (1024 ** 3)
    days = total_duration / 3600 / 24
    hr, rem = divmod(total_duration, 3600)
    mi, sec = divmod(rem, 60)

    print(40 * "-")
    print(f"Total unique claims: {n_claims}")
    print(f"Total download size: {size_gib:.4f} GiB")
    print(f"Total duration: {hr} h {mi} min {sec} s, or {days:.4f} days")

    return {"claims": claims,
            "size": total_size,
            "duration": total_duration}
| #!/usr/bin/env python3
# --------------------------------------------------------------------------- #
# The MIT License (MIT) #
# #
# Copyright (c) 2021 <NAME> <<EMAIL>> #
# #
# Permission is hereby granted, free of charge, to any person obtaining #
# a copy of this software and associated documentation files #
# (the "Software"), to deal in the Software without restriction, including #
# without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit #
# persons to whom the Software is furnished to do so, subject to the #
# following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# --------------------------------------------------------------------------- #
"""Functions to help with sorting downloaded claims from the LBRY network."""
import requests
import lbrytools.funcs as funcs
import lbrytools.search as srch
import lbrytools.search_utils as sutils
import lbrytools.search_ch as srch_ch
def sort_items(channel=None, reverse=False,
server="http://localhost:5279"):
"""Return a list of claims that were downloaded, sorted by time.
If `channel` is provided it will list the downloaded claims
by this channel only.
Otherwise it will list all claims.
Parameters
----------
channel: str, optional
It defaults to `None`.
A channel's name, full or partial:
`'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`
If a simplified name is used, and there are various channels
with the same name, the one with the highest LBC bid will be selected.
Enter the full name to choose the right one.
reverse: bool, optional
It defaults to `False`, in which case older items come first
in the output list.
If it is `True` newer claims are at the beginning of the list.
server: str, optional
It defaults to `'http://localhost:5279'`.
This is the address of the `lbrynet` daemon, which should be running
in your computer before using any `lbrynet` command.
Normally, there is no need to change this parameter from its default
value.
Returns
-------
list of dict
A list of dictionaries that represent the claims that were previously
downloaded fully or partially.
Each dictionary is filled with information from the standard output
of the `lbrynet file list` command.
The dictionaries are ordered by `'release_time'`, with older claims
appearing first.
Certain claims don't have `'release_time'` so for them we add
this key, and use the value of `'timestamp'` for it.
False
If there is a problem it will return `False`.
"""
if not funcs.server_exists(server=server):
return False
page_size = 99000
cmd = ["lbrynet",
"file",
"list",
"--page_size=" + str(page_size)]
if channel and not isinstance(channel, str):
print("Channel must be a string. Set to 'None'.")
print(f"channel={channel}")
channel = None
if channel:
if not channel.startswith("@"):
channel = "@" + channel
cmd.append("--channel_name=" + "'" + channel + "'")
print("List: " + " ".join(cmd))
print(80 * "-")
msg = {"method": cmd[1] + "_" + cmd[2],
"params": {"page_size": page_size}}
if channel:
msg["params"]["channel_name"] = channel
# A bug (lbryio/lbry-sdk #3316) prevents the `lbrynet file list`
# command from finding the channel, therefore the channel must be
# resolved with `lbrynet resolve` before it becomes known by other
# functions.
ch = srch_ch.resolve_channel(channel=channel, server=server)
if not ch:
return False
output = requests.post(server, json=msg).json()
if "error" in output:
print(">>> No 'result' in the JSON-RPC server output")
return False
items = output["result"]["items"]
n_items = len(items)
if n_items < 1:
if channel:
print("No items found; at least one item must be downloaded; "
f"check that the name is correct, channel={channel}")
else:
print("No items found; at least one item must be downloaded.")
return False
print(f"Number of items: {n_items}")
new_items = []
# Older claims may not have 'release_time'; we use the 'timestamp' instead
for it, item in enumerate(items, start=1):
if "release_time" not in item["metadata"]:
print(f"{it}/{n_items}, {item['claim_name']}, using 'timestamp'")
item["metadata"]["release_time"] = item["timestamp"]
new_items.append(item)
# Sort by using the original 'release_time'; older items first
sorted_items = sorted(new_items,
key=lambda v: int(v["metadata"]["release_time"]),
reverse=reverse)
return sorted_items
def sort_invalid(channel=None, reverse=False,
server="http://localhost:5279"):
"""Return a list of invalid claims that were previously downloaded.
Certain claims that were downloaded in the past may be invalid now because
they were removed by their authors from the network after
they were initially downloaded. This can be confirmed by looking up
the claim ID in the blockchain explorer, and finding the 'unspent'
transaction.
Parameters
----------
channel: str, optional
It defaults to `None`.
A channel's name, full or partial:
`'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`
If a simplified name is used, and there are various channels
with the same name, the one with the highest LBC bid will be selected.
Enter the full name to choose the right one.
reverse: bool, optional
It defaults to `False`, in which case older items come first
in the output list.
If it is `True` newer claims are at the beginning of the list.
server: str, optional
It defaults to `'http://localhost:5279'`.
This is the address of the `lbrynet` daemon, which should be running
in your computer before using any `lbrynet` command.
Normally, there is no need to change this parameter from its default
value.
Returns
-------
list of dict
A list of dictionaries that represent 'invalid claims'
that were previously downloaded fully or partially.
Each dictionary is filled with information from the standard output
of the `lbrynet file list` command, but filtered in such a way
that it only includes claims which are no longer searchable online
by `lbrynet resolve` or `lbrynet claim search`.
The dictionaries are ordered by `'release_time'`, with older claims
appearing first.
Certain claims don't have `'release_time'` so for them we add
this key, and use the value of `'timestamp'` for it.
False
If there is a problem it will return `False`.
"""
if not funcs.server_exists(server=server):
return False
items = sort_items(channel=channel, reverse=reverse,
server=server)
if not items:
return False
n_items = len(items)
invalid_items = []
for it, item in enumerate(items, start=1):
online_item = srch.search_item(cid=item["claim_id"], offline=False,
print_error=False,
server=server)
if not online_item:
if len(invalid_items) == 0:
print()
claim_id = item["claim_id"]
claim_name = item["claim_name"]
channel = item["channel_name"]
print(f"Claim {it:4d}/{n_items:4d}, "
f"{claim_id}, {channel}, {claim_name}")
invalid_items.append(item)
n_invalid = len(invalid_items)
if n_invalid > 0:
print(f"Invalid items found: {n_invalid} "
"(possibly deleted from the network)")
else:
print(f"Invalid items found: {n_invalid}")
return invalid_items
def sort_items_size(channel=None, reverse=False, invalid=False,
server="http://localhost:5279"):
"""Return a list of claims that were downloaded, their size and length.
Parameters
----------
channel: str, optional
It defaults to `None`.
A channel's name, full or partial:
`'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`
If a simplified name is used, and there are various channels
with the same name, the one with the highest LBC bid will be selected.
Enter the full name to choose the right one.
reverse: bool, optional
It defaults to `False`, in which case older items come first
in the output list.
If it is `True` newer claims are at the beginning of the list.
invalid: bool, optional
It defaults to `False`, in which case it will return all items
previously downloaded.
If it is `True` it will only return those claims that were removed
by their authors from the network after they were initially downloaded.
These can no longer be resolved online, nor can they be re-downloaded.
The blobs belonging to these claims can be considered orphaned
and can be removed to save hard disk space.
server: str, optional
It defaults to `'http://localhost:5279'`.
This is the address of the `lbrynet` daemon, which should be running
in your computer before using any `lbrynet` command.
Normally, there is no need to change this parameter from its default
value.
Returns
-------
dict
A dictionary with three keys:
- 'claims': a list of dictionaries where every dictionary represents
a claim returned by `file_list`.
The list is ordered in ascending order by default (old claims first),
and in descending order (newer claims first) if `reverse=True`.
Certain claims don't have `'release_time'` so for them we add
this key, and use the value of `'timestamp'` for it.
- 'size': total size of the claims in bytes.
It can be divided by 1024 to obtain kibibytes, by another 1024
to obtain mebibytes, and by another 1024 to obtain gibibytes.
- 'duration': total duration of the claims in seconds.
It will count only stream types which have a duration
such as audio and video.
The duration can be divided by 3600 to obtain hours,
then by 24 to obtain days.
False
If there is a problem it will return `False`.
"""
if not funcs.server_exists(server=server):
return False
if invalid:
claims = sort_invalid(channel=channel, reverse=reverse,
server=server)
else:
claims = sort_items(channel=channel, reverse=reverse,
server=server)
if not claims:
return False
print()
output = sutils.downloadable_size(claims, local=True)
total_size = output["size"]
total_duration = output["duration"]
n_claims = len(claims)
GB = total_size / (1024**3) # to GiB
hrs = total_duration / 3600
days = hrs / 24
hr = total_duration // 3600
mi = (total_duration % 3600) // 60
sec = (total_duration % 3600) % 60
print(40 * "-")
print(f"Total unique claims: {n_claims}")
print(f"Total download size: {GB:.4f} GiB")
print(f"Total duration: {hr} h {mi} min {sec} s, or {days:.4f} days")
return {"claims": claims,
"size": total_size,
"duration": total_duration}
| en | 0.87706 | #!/usr/bin/env python3 # --------------------------------------------------------------------------- # # The MIT License (MIT) # # # # Copyright (c) 2021 <NAME> <<EMAIL>> # # # # Permission is hereby granted, free of charge, to any person obtaining # # a copy of this software and associated documentation files # # (the "Software"), to deal in the Software without restriction, including # # without limitation the rights to use, copy, modify, merge, publish, # # distribute, sublicense, and/or sell copies of the Software, and to permit # # persons to whom the Software is furnished to do so, subject to the # # following conditions: # # # # The above copyright notice and this permission notice shall be included # # in all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # --------------------------------------------------------------------------- # Functions to help with sorting downloaded claims from the LBRY network. Return a list of claims that were downloaded, sorted by time. If `channel` is provided it will list the downloaded claims by this channel only. Otherwise it will list all claims. Parameters ---------- channel: str, optional It defaults to `None`. A channel's name, full or partial: `'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'` If a simplified name is used, and there are various channels with the same name, the one with the highest LBC bid will be selected. Enter the full name to choose the right one. 
reverse: bool, optional It defaults to `False`, in which case older items come first in the output list. If it is `True` newer claims are at the beginning of the list. server: str, optional It defaults to `'http://localhost:5279'`. This is the address of the `lbrynet` daemon, which should be running in your computer before using any `lbrynet` command. Normally, there is no need to change this parameter from its default value. Returns ------- list of dict A list of dictionaries that represent the claims that were previously downloaded fully or partially. Each dictionary is filled with information from the standard output of the `lbrynet file list` command. The dictionaries are ordered by `'release_time'`, with older claims appearing first. Certain claims don't have `'release_time'` so for them we add this key, and use the value of `'timestamp'` for it. False If there is a problem it will return `False`. # A bug (lbryio/lbry-sdk #3316) prevents the `lbrynet file list` # command from finding the channel, therefore the channel must be # resolved with `lbrynet resolve` before it becomes known by other # functions. # Older claims may not have 'release_time'; we use the 'timestamp' instead # Sort by using the original 'release_time'; older items first Return a list of invalid claims that were previously downloaded. Certain claims that were downloaded in the past may be invalid now because they were removed by their authors from the network after they were initially downloaded. This can be confirmed by looking up the claim ID in the blockchain explorer, and finding the 'unspent' transaction. Parameters ---------- channel: str, optional It defaults to `None`. A channel's name, full or partial: `'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'` If a simplified name is used, and there are various channels with the same name, the one with the highest LBC bid will be selected. Enter the full name to choose the right one. 
reverse: bool, optional It defaults to `False`, in which case older items come first in the output list. If it is `True` newer claims are at the beginning of the list. server: str, optional It defaults to `'http://localhost:5279'`. This is the address of the `lbrynet` daemon, which should be running in your computer before using any `lbrynet` command. Normally, there is no need to change this parameter from its default value. Returns ------- list of dict A list of dictionaries that represent 'invalid claims' that were previously downloaded fully or partially. Each dictionary is filled with information from the standard output of the `lbrynet file list` command, but filtered in such a way that it only includes claims which are no longer searchable online by `lbrynet resolve` or `lbrynet claim search`. The dictionaries are ordered by `'release_time'`, with older claims appearing first. Certain claims don't have `'release_time'` so for them we add this key, and use the value of `'timestamp'` for it. False If there is a problem it will return `False`. Return a list of claims that were downloaded, their size and length. Parameters ---------- channel: str, optional It defaults to `None`. A channel's name, full or partial: `'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'` If a simplified name is used, and there are various channels with the same name, the one with the highest LBC bid will be selected. Enter the full name to choose the right one. reverse: bool, optional It defaults to `False`, in which case older items come first in the output list. If it is `True` newer claims are at the beginning of the list. invalid: bool, optional It defaults to `False`, in which case it will return all items previously downloaded. If it is `True` it will only return those claims that were removed by their authors from the network after they were initially downloaded. These can no longer be resolved online, nor can they be re-downloaded. 
The blobs belonging to these claims can be considered orphaned and can be removed to save hard disk space. server: str, optional It defaults to `'http://localhost:5279'`. This is the address of the `lbrynet` daemon, which should be running in your computer before using any `lbrynet` command. Normally, there is no need to change this parameter from its default value. Returns ------- dict A dictionary with three keys: - 'claims': a list of dictionaries where every dictionary represents a claim returned by `file_list`. The list is ordered in ascending order by default (old claims first), and in descending order (newer claims first) if `reverse=True`. Certain claims don't have `'release_time'` so for them we add this key, and use the value of `'timestamp'` for it. - 'size': total size of the claims in bytes. It can be divided by 1024 to obtain kibibytes, by another 1024 to obtain mebibytes, and by another 1024 to obtain gibibytes. - 'duration': total duration of the claims in seconds. It will count only stream types which have a duration such as audio and video. The duration can be divided by 3600 to obtain hours, then by 24 to obtain days. False If there is a problem it will return `False`. # to GiB | 1.404568 | 1 |
notifier/grabbers/grw.py | thejeshpr/notifier | 0 | 6619764 | <filename>notifier/grabbers/grw.py
import os
from notifier.grabbers.base import Base, Internet
class Grw(object):
    """Grabber that syncs feed posts into photo/video download tasks.

    NOTE(review): the URL comment below references a Groww mutual-fund
    search endpoint, but the payload parsed here (posts with NSFW flags,
    up/down votes, image700/image460sv renditions) looks like a
    9GAG-style feed -- confirm what ``obj.sync_type.base_url`` actually
    points at.
    """

    @staticmethod
    def sync(obj: Base, *args, **kwargs):
        """Fetch the JSON feed at ``obj.sync_type.base_url`` and enqueue
        one task per post: photo tasks for ``Photo`` posts, video tasks
        for ``Animated`` posts; other post types are ignored.

        Args:
            obj: Grabber context exposing ``sync_type.base_url`` and the
                ``add_photo_task`` / ``add_video_task`` queue helpers.
        """
        # https://groww.in/slr/v1/search/derived/scheme?available_for_investment=true&doc_type=scheme&page=0&plan_type=Direct&q=&size=16&sort_by=3
        # sort_by 1: Rating High to low
        # sort_by 2: Rating Low to high
        # sort_by 3: Rating popularity
        response = Internet.post_phjs(url=obj.sync_type.base_url, return_json=True)['content']
        for post in response.get("data").get("posts"):
            # Build the per-post payload under its own name: the original
            # code rebound `data` (the whole response) inside the loop,
            # shadowing the variable the loop was built from.
            payload = {
                "caption": "{}\n{}".format(post.get("title"), post.get("url")),
                "title": post.get("title"),
                "nsfw": post.get("nsfw"),
                "post_url": post.get("url"),
                "content_type": post.get("type"),
                "up_vote": post.get("upVoteCount"),
                "down_vote": post.get("downVoteCount"),
                "description": post.get("description"),
                "comments_count": post.get("commentsCount")
            }
            # Dispatch on post type to pick the right media rendition.
            if post["type"] == "Photo":
                payload["url"] = post.get("images").get("image700").get("url")
                obj.add_photo_task(
                    unique_key=post.get("id"),
                    name=post['title'],
                    url=post.get("url"),
                    data=payload
                )
            elif post["type"] == "Animated":
                payload["url"] = post.get("images").get("image460sv").get("url")
                obj.add_video_task(
                    unique_key=post.get("id"),
                    name=post['title'],
                    url=post.get("url"),
                    data=payload
                )
| <filename>notifier/grabbers/grw.py
import os
from notifier.grabbers.base import Base, Internet
class Grw(object):
@staticmethod
def sync(obj: Base, *args, **kwargs):
# https://groww.in/slr/v1/search/derived/scheme?available_for_investment=true&doc_type=scheme&page=0&plan_type=Direct&q=&size=16&sort_by=3
# sort_by 1: Rating High to low
# sort_by 2: Rating Low to high
# sort_by 3: Rating popularity
data = Internet.post_phjs(url=obj.sync_type.base_url, return_json=True)['content']
for post in data.get("data").get("posts"):
data = {
"caption": "{}\n{}".format(post.get("title"), post.get("url")),
"title": post.get("title"),
"nsfw": post.get("nsfw"),
"post_url": post.get("url"),
"content_type": post.get("type"),
"up_vote": post.get("upVoteCount"),
"down_vote": post.get("downVoteCount"),
"description": post.get("description"),
"comments_count": post.get("commentsCount")
}
# check post type
if post["type"] == "Photo":
data["url"] = post.get("images").get("image700").get("url")
obj.add_photo_task(
unique_key=post.get("id"),
name=post['title'],
url=post.get("url"),
data=data
)
elif post["type"] == "Animated":
data["url"] = post.get("images").get("image460sv").get("url")
obj.add_video_task(
unique_key=post.get("id"),
name=post['title'],
url=post.get("url"),
data=data
)
| en | 0.668985 | # https://groww.in/slr/v1/search/derived/scheme?available_for_investment=true&doc_type=scheme&page=0&plan_type=Direct&q=&size=16&sort_by=3 # sort_by 1: Rating High to low # sort_by 2: Rating Low to high # sort_by 3: Rating popularity # check post type | 2.151109 | 2 |
data_aggregator/migrations/0005_auto_20210219_1804.py | uw-it-aca/canvas-analytics | 0 | 6619765 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# Generated by Django 3.1.6 on 2021-02-19 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 3.1.6): drops User.short_name
    # and widens the Canvas identifier columns -- presumably because Canvas
    # IDs can exceed the 32-bit integer range (TODO confirm).
    dependencies = [
        ('data_aggregator', '0004_auto_20210218_0102'),
    ]
    operations = [
        # Remove the short_name column from User.
        migrations.RemoveField(
            model_name='user',
            name='short_name',
        ),
        # Store Canvas account/course/user IDs as 64-bit integers.
        migrations.AlterField(
            model_name='course',
            name='canvas_account_id',
            field=models.BigIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='course',
            name='canvas_course_id',
            field=models.BigIntegerField(),
        ),
        migrations.AlterField(
            model_name='user',
            name='canvas_user_id',
            field=models.BigIntegerField(unique=True),
        ),
    ]
]
| # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# Generated by Django 3.1.6 on 2021-02-19 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_aggregator', '0004_auto_20210218_0102'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='short_name',
),
migrations.AlterField(
model_name='course',
name='canvas_account_id',
field=models.BigIntegerField(null=True),
),
migrations.AlterField(
model_name='course',
name='canvas_course_id',
field=models.BigIntegerField(),
),
migrations.AlterField(
model_name='user',
name='canvas_user_id',
field=models.BigIntegerField(unique=True),
),
]
| en | 0.533563 | # Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 # Generated by Django 3.1.6 on 2021-02-19 18:04 | 1.539131 | 2 |
ok/ggv.py | hanswenzel/opticks | 11 | 6619766 | #!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
For grabbing ggv arguments for checking
"""
import numpy as np
from env.numerics.npy.prism import Prism, Box
import argparse
def parse_args():
    """Parse the command-line flags ggv passes to this check script.

    Returns:
        argparse.Namespace: boolean flags (--save/--test/--torch), the
        tag string, the torch/test config strings, and animtimemax.
    """
    # The redundant function-local `import argparse` was removed; the
    # module already imports argparse at the top of the file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--save", action="store_true", default=False)
    parser.add_argument("--test", action="store_true", default=False)
    parser.add_argument("--torch", action="store_true", default=False)
    parser.add_argument("--tag", default="")
    parser.add_argument("--testconfig", default="")
    parser.add_argument("--torchconfig", default="")
    # NOTE(review): stays an int (100) when defaulted but becomes a str
    # when passed on the command line (no type=), same as before.
    parser.add_argument("--animtimemax", default=100)
    args = parser.parse_args()
    return args
kv_ = lambda s:map(lambda _:_.split("="),s.split("_"))
class Torch(object):
    """Torch (light source) configuration parsed from a ggv config string.

    The config is an underscore-delimited list of ``key=value`` pairs
    (split by the module-level ``kv_`` helper). ``source`` and ``target``
    are comma-separated vectors; ``direction`` is ``target - source``.
    """
    def __init__(self, config):
        # Materialize the pairs so they can be iterated again in __str__
        # (kv_ may hand back a one-shot iterator on Python 3).
        self.config = list(kv_(config))
        self.source = None
        self.target = None
        for k, v in self.config:
            # np.fromstring with a text separator is deprecated; parse the
            # comma-separated vector explicitly instead.
            if k == "source":
                self.source = np.array(v.split(","), dtype=float)
            elif k == "target":
                self.target = np.array(v.split(","), dtype=float)
        # Raises TypeError if either endpoint is missing (stays None),
        # matching the original behavior.
        self.direction = self.target - self.source
    def __repr__(self):
        return "\n".join([
            "source %25s " % self.source,
            "target %25s " % self.target,
            "direction %25s " % self.direction
        ])
    def __str__(self):
        return "\n".join(["%20s : %s " % (k, v) for k, v in self.config])
class Test(object):
    """Test-geometry configuration parsed from a ggv config string.

    Collects parallel ``shape``/``boundary``/``parameters`` entries and
    instantiates one Box or Prism per triple, in order, into ``shapes``.
    """
    def __init__(self, config):
        # Materialize so repeated iteration (__init__ here, __repr__
        # later) works even if kv_ returns a one-shot iterator (py3 map).
        self.config = list(kv_(config))
        shapes = []
        boundaries = []
        parameters = []
        for k, v in self.config:
            if k == "shape":
                shapes.append(v)
            elif k == "boundary":
                boundaries.append(v)
            elif k == "parameters":
                parameters.append(v)
        # Each shape entry must come with a boundary and parameters.
        assert len(shapes) == len(boundaries) == len(parameters)
        # Dispatch table replaces the if/elif chain; unknown shape names
        # still fail with AssertionError, as before.
        ctor = {"box": Box, "prism": Prism}
        self.shapes = []
        for name, params, boundary in zip(shapes, parameters, boundaries):
            assert name in ctor, "unknown shape: %s" % name
            self.shapes.append(ctor[name](params, boundary))
    def __str__(self):
        return "\n".join(map(str, self.shapes))
    def __repr__(self):
        return "\n".join(["%20s : %s " % (k, v) for k, v in self.config])
if __name__ == '__main__':
    # NOTE: Python 2 script (bare print statements below); run with a
    # Python 2 interpreter.
    #print "\n".join(sys.argv)
    args = parse_args()
    # Parse the torch (light source) and test-geometry config strings
    # that ggv passes on the command line.
    torch = Torch(args.torchconfig)
    test = Test(args.testconfig)
    # The last configured shape is the one inspected below.
    sh = test.shapes[-1]
    print "torch:\n", torch
    print repr(torch)
    print "test:\n", test
    print "sh:\n", sh
| #!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
For grabbing ggv arguments for checking
"""
import numpy as np
from env.numerics.npy.prism import Prism, Box
import argparse
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--save", action="store_true", default=False )
parser.add_argument("--test", action="store_true", default=False )
parser.add_argument("--torch", action="store_true", default=False )
parser.add_argument("--tag", default="" )
parser.add_argument("--testconfig", default="" )
parser.add_argument("--torchconfig", default="" )
parser.add_argument("--animtimemax", default=100 )
args = parser.parse_args()
return args
kv_ = lambda s:map(lambda _:_.split("="),s.split("_"))
class Torch(object):
def __init__(self, config):
self.config = kv_(config)
self.source = None
self.target = None
for k,v in self.config:
if k == "source":
self.source = np.fromstring(v, sep=",")
elif k == "target":
self.target = np.fromstring(v, sep=",")
else:
pass
pass
pass
self.direction = self.target - self.source
def __repr__(self):
return "\n".join([
"source %25s " % self.source,
"target %25s " % self.target,
"direction %25s " % self.direction
])
def __str__(self):
return "\n".join(["%20s : %s " % (k,v) for k,v in self.config])
class Test(object):
def __init__(self, config):
self.config = kv_(config)
shapes = []
boundaries = []
parameters = []
for k,v in self.config:
if k == "shape":
shapes.append(v)
elif k == "boundary":
boundaries.append(v)
elif k == "parameters":
parameters.append(v)
else:
pass
assert len(shapes) == len(boundaries) == len(parameters)
self.shapes = []
for i in range(len(shapes)):
shape = None
if shapes[i] == "box":
shape = Box(parameters[i], boundaries[i])
elif shapes[i] == "prism":
shape = Prism(parameters[i], boundaries[i])
else:
assert 0
pass
self.shapes.append(shape)
def __str__(self):
return "\n".join(map(str, self.shapes))
def __repr__(self):
return "\n".join(["%20s : %s " % (k,v) for k,v in self.config])
if __name__ == '__main__':
#print "\n".join(sys.argv)
args = parse_args()
torch = Torch(args.torchconfig)
test = Test(args.testconfig)
sh = test.shapes[-1]
print "torch:\n", torch
print repr(torch)
print "test:\n", test
print "sh:\n", sh
| en | 0.7973 | #!/usr/bin/env python # # Copyright (c) 2019 Opticks Team. All Rights Reserved. # # This file is part of Opticks # (see https://bitbucket.org/simoncblyth/opticks). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # For grabbing ggv arguments for checking #print "\n".join(sys.argv) | 1.85237 | 2 |
sp_astar.py | Architecton/route-optimization-using-machine-learning | 0 | 6619767 | import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
# NOTE: g-score is the path cost.
# NOTE: f-score is the path cost + heuristic.
def heuristic(network, node, goal, dist_func):
    """Estimate the remaining cost from ``node`` to ``goal``.

    ``network`` is accepted only for signature compatibility and is not
    consulted; the estimate is simply ``dist_func(node, goal)``.

    Args:
        network (object): Networkx representation of the network (unused)
        node (int): Node for which to compute the heuristic
        goal (int): Goal node
        dist_func (function): Distance function over node pairs

    Returns:
        (float): Estimated distance from ``node`` to ``goal``.
    """
    return dist_func(node, goal)
def reconstruct_path(current, came_from):
    """Rebuild the start-to-``current`` path from the predecessor map.

    Args:
        current (int): Last node in the discovered path
        came_from (dict): Maps each node on the path to its predecessor

    Returns:
        (tuple): the path as a list of nodes, and the same path as a
        list of (u, v) edge tuples.
    """
    # Walk predecessors back to the start, then reverse once -- avoids
    # the O(n^2) cost of the original repeated list.insert(0, ...).
    path = [current]
    while current in came_from:
        current = came_from[current]
        path.append(current)
    path.reverse()
    # Consecutive node pairs form the edge-list representation.
    edgelist = list(zip(path[:-1], path[1:]))
    return path, edgelist
def a_star(network, start, goal, dist_func):
    """
    Perform A* search algorithm to find path from starting node to goal node.

    Args:
        network (object): Networkx representation of the network
        start (int): Starting node for the search
        goal (int): Goal node for the search
        dist_func (function): Distance function mapping two nodes to the
        distance between them.

    Returns:
        (tuple) : list representing the found path, edgelist representation of the found path,
        list of edge lists that can be used for animating the process.
        Returns ([], [], []) when the goal is unreachable.

    Notes:
        - No closed set is kept: a node popped from the open set can be
          re-added later if a cheaper path to it is found.
        - The open set is scanned linearly for the minimum f-score
          (O(|open|) per iteration) instead of using a priority queue.
        - Assumes non-negative edge costs; an inadmissible heuristic may
          return a suboptimal path (standard A* caveat).
    """
    # Initialize list for storing edge lists (for animation).
    edgelists = []
    # Partially apply heuristic with network and goal node.
    h = lambda node: heuristic(network, node, goal, dist_func)
    # Initialize array of node IDs.
    node_list = np.array(list(network.nodes()))
    # Initialize set of unvisited nodes
    # with starting node.
    open_set = {start}
    # Initialize dictionary mapping nodes to nodes immediately
    # preceding them on the cheapest path.
    came_from = dict()
    # Initialize dictionary mapping nodes to cost of cheapest path from start
    # to the node currently known (g-score).
    g_score = dict.fromkeys(node_list, np.inf)
    g_score[start] = 0.0
    # Initialize dictionary mapping nodes to the current best guess as to
    # how short a path from start to finish can be if it goes through n
    # (f-score = g-score + heuristic).
    f_score = dict.fromkeys(node_list, np.inf)
    f_score[start] = h(start)
    # While set of open nodes is not empty.
    while len(open_set) > 0:
        # Set node in open set with lowest f-score as current node and remove
        # from set of open nodes (linear scan; ties broken arbitrarily).
        current = min([(el, f_score[el]) for el in open_set], key=lambda x: x[1])[0]
        open_set.remove(current)
        # Reconstruct path from current node and append to list of edge lists
        # (snapshot of the current best path, kept only for animation).
        _, edgelist = reconstruct_path(current, came_from)
        edgelists.append(edgelist)
        # Check if goal.
        if current == goal:
            path, edgelist = reconstruct_path(current, came_from)
            return path, edgelist, edgelists
        else:
            # Go over neighbors of current node.
            for neighbor in network.neighbors(current):
                # Compute tentative g-score and check if better than g-score of node.
                g_score_found = g_score[current] + dist_func(current, neighbor)
                if g_score_found < g_score[neighbor]:
                    # If g-score better, set new g-score and set predecessor to current.
                    g_score[neighbor] = g_score_found
                    came_from[neighbor] = current
                    # Compute f-score of neighbor (cost path + heuristic).
                    f_score[neighbor] = g_score[neighbor] + h(neighbor)
                    # If neighbor not yet explored, add to open set.
                    if neighbor not in open_set:
                        open_set.add(neighbor)
    # If goal node not found, return signal values.
    return [], [], []
if __name__ == '__main__':
    ### PARSE ARGUMENTS ###
    # NOTE(review): the description text says TSP / particle-swarm, but this
    # script runs A* shortest-path search -- the string looks copy-pasted
    # from a sibling script.
    parser = argparse.ArgumentParser(description='Approximate solution to TSP using particle swarm optimization.')
    parser.add_argument('--new-network', action='store_true',
                        help='create new network and select random nodes as start and goal nodes.')
    parser.add_argument('--num-nodes', type=int, default=150, help='Number of nodes to use')
    parser.add_argument('--dist-func', type=str, default='geodesic', choices=['geodesic', 'learned'],
                        help='Distance function to use')
    parser.add_argument('--prediction-model', type=str, default='gboosting', choices=['gboosting', 'rf'],
                        help='Prediction model to use for learned distance function')
    parser.add_argument('--n-nearest', type=int, default=3,
                        help='Number of nearest nodes with which to connect a node when constructing the network.')
    args = parser.parse_args()
    #######################
    # Parse problem network.
    if args.new_network:
        network = nx.read_gpickle('./data/grid_data/grid_network.gpickle')
    else:
        network = nx.read_gpickle('./data/grid_data/grid_network_sp.gpickle')
    # Get distance function. The `type` attribute is set on the returned
    # callable itself so downstream code can tell which metric it uses.
    dist_func = get_dist_func(network, which=args.dist_func, prediction_model=args.prediction_model)
    if args.dist_func == 'learned':
        dist_func.type = 'learned'
    else:
        dist_func.type = 'geodesic'
    if args.new_network:
        # If creating a new network.
        # Number of nodes to remove from network.
        to_remove = network.number_of_nodes() - args.num_nodes
        # Remove randomly sampled nodes to get specified number of nodes.
        network.remove_nodes_from(random.sample(list(network.nodes), to_remove))
        # Connect each node with specified number of its nearest unconnected neighbors.
        for node in network.nodes():
            # Get list of nodes with which the node can connect.
            free_nodes = [n for n in network.nodes() if n != node and not network.has_edge(n, node)]
            # Sort free nodes by distance and choose nodes with which to connect.
            closest_nodes = sorted([(n, dist_func(node, n)) for n in free_nodes], key=lambda x: x[1])
            connect_to = list(map(lambda x: x[0], closest_nodes[:args.n_nearest]))
            # Connect to specified number of closest neighbors.
            for idx in range(len(connect_to)):
                network.add_edge(node, connect_to[idx])
        # Get connected components in network.
        connected_components = list(map(list, nx.connected_components(network)))
        # Connect connected components (one bridge edge between successive
        # components) so every node is reachable from every other.
        for idx in range(len(connected_components)-1):
            n1 = random.choice(connected_components[idx])
            n2 = random.choice(connected_components[idx+1])
            network.add_edge(n1, n2)
        # Set start and end nodes: random start, farthest node as goal.
        START_NODE = random.choice(list(network.nodes()))
        dists = sorted([(n, dist_func(START_NODE, n)) for n in network.nodes()], key=lambda x: x[1])
        GOAL_NODE = dists[-1][0]
    else:
        # If using a pre-set network.
        # Set pre-set nodes as start and end nodes.
        START_NODE = 450
        GOAL_NODE = 4
    # Get solution using A* search.
    path, edgelist, edgelists = a_star(network, START_NODE, GOAL_NODE, dist_func)
    if len(path) == 0 and len(edgelist) == 0 and len(edgelists) == 0:
        print("Goal node unreachable from starting node!")
    else:
        # Save list of edge lists and network for animation.
        np.save('./results/edgelists/edgelist_sp_astar.npy', list(map(np.vstack, filter(lambda x: len(x) > 0, edgelists))))
        nx.write_gpickle(network, './results/networks/network_sp_astar.gpickle')
| import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
# NOTE: g-score is the path cost.
# NOTE: f-score is the path cost + heuristic.
def heuristic(network, node, goal, dist_func):
"""
Heuristic function for estimating distance from specified node to goal node.
Args:
network (object): Networkx representation of the network
node (int): Node for which to compute the heuristic
goal (int): Goal node
dist_func (function): Function used to compute distance between two nodes.
Returns:
(float): Computed heuristic
"""
# Compute distance from node to goal.
return dist_func(node, goal)
def reconstruct_path(current, came_from):
"""
Reconstruct path using last node and dictionary that maps each node on path
to its predecessor.
Args:
current (int): Last node in discovered path
came_from (dict): Dictionary mapping nodes on path to their predecessors
Retuns:
(tuple): Path in the form of a list and the same path encoded in an edge list
"""
# Initialize path and add last found node.
path = [current]
# Reconstruct.
while current in came_from:
current = came_from[current]
path.insert(0, current)
# Construct edgelist.
edgelist = [(path[idx], path[idx+1]) for idx in range(len(path)-1)]
# Return path and edge list.
return path, edgelist
def a_star(network, start, goal, dist_func):
"""
Perform A* search algorithm to find path from starting node to goal node.
Args:
network (object): Networkx representation of the network
start (int): Starting node for the search
goal (int): Goal node for the search
dist_func (function): Distance function mapping two nodes to the
distance between them.
Returns:
(tuple) : list representing the found path, edgelist representation of the found path,
list of edge lists that can be used for animating the process.
"""
# Initialize list for storing edge lists (for animation).
edgelists = []
# Partially apply heuristic with network and goal node.
h = lambda node: heuristic(network, node, goal, dist_func)
# Initialize array of node IDs.
node_list = np.array(list(network.nodes()))
# Initialize set of unvisited nodes
# with starting node.
open_set = {start}
# Initialize dictionary mapping nodes to nodes immediately
# preceding them on the cheapest path.
came_from = dict()
# Initialize dictionary mapping nodes to cost of cheapest path from start
# to the node currently known.
g_score = dict.fromkeys(node_list, np.inf)
g_score[start] = 0.0
# Initialize dictionary mapping nodes to the current best guess as to
# how short a path from start to finish can be if it goes through n.
f_score = dict.fromkeys(node_list, np.inf)
f_score[start] = h(start)
# While set of open nodes is not empty.
while len(open_set) > 0:
# Set node in open set with lowest f-score as current node and remove
# from set of open nodes.
current = min([(el, f_score[el]) for el in open_set], key=lambda x: x[1])[0]
open_set.remove(current)
# Reconstruct path from current node and append to list of edge lists.
_, edgelist = reconstruct_path(current, came_from)
edgelists.append(edgelist)
# Check if goal.
if current == goal:
path, edgelist = reconstruct_path(current, came_from)
return path, edgelist, edgelists
else:
# Go over neighbors of current node.
for neighbor in network.neighbors(current):
# Compute tentative g-score and check if better than g-score of node.
g_score_found = g_score[current] + dist_func(current, neighbor)
if g_score_found < g_score[neighbor]:
# If g-score better, set new g-score and set predecessor to current.
g_score[neighbor] = g_score_found
came_from[neighbor] = current
# Compute f-score of neighbor (cost path + heuristic).
f_score[neighbor] = g_score[neighbor] + h(neighbor)
# If neighbor not yet explored, add to open set.
if neighbor not in open_set:
open_set.add(neighbor)
# If goal node not found, return signal values.
return [], [], []
if __name__ == '__main__':
### PARSE ARGUMENTS ###
parser = argparse.ArgumentParser(description='Approximate solution to TSP using particle swarm optimization.')
parser.add_argument('--new-network', action='store_true',
help='create new network and select random nodes as start and goal nodes.')
parser.add_argument('--num-nodes', type=int, default=150, help='Number of nodes to use')
parser.add_argument('--dist-func', type=str, default='geodesic', choices=['geodesic', 'learned'],
help='Distance function to use')
parser.add_argument('--prediction-model', type=str, default='gboosting', choices=['gboosting', 'rf'],
help='Prediction model to use for learned distance function')
parser.add_argument('--n-nearest', type=int, default=3,
help='Number of nearest nodes with which to connect a node when constructing the network.')
args = parser.parse_args()
#######################
# Parse problem network.
if args.new_network:
network = nx.read_gpickle('./data/grid_data/grid_network.gpickle')
else:
network = nx.read_gpickle('./data/grid_data/grid_network_sp.gpickle')
# Get distance function.
dist_func = get_dist_func(network, which=args.dist_func, prediction_model=args.prediction_model)
if args.dist_func == 'learned':
dist_func.type = 'learned'
else:
dist_func.type = 'geodesic'
if args.new_network:
# If creating a new network.
# Number of nodes to remove from network.
to_remove = network.number_of_nodes() - args.num_nodes
# Remove randomly sampled nodes to get specified number of nodes.
network.remove_nodes_from(random.sample(list(network.nodes), to_remove))
# Connect each node with specified number of its nearest unconnected neighbors.
for node in network.nodes():
# Get list of nodes with which the node can connect.
free_nodes = [n for n in network.nodes() if n != node and not network.has_edge(n, node)]
# Sort free nodes by distance can choose nodes with which to connect.
closest_nodes = sorted([(n, dist_func(node, n)) for n in free_nodes], key=lambda x: x[1])
connect_to = list(map(lambda x: x[0], closest_nodes[:args.n_nearest]))
# Connect to specified number of closest neighbors.
for idx in range(len(connect_to)):
network.add_edge(node, connect_to[idx])
# Get connected components in network.
connected_components = list(map(list, nx.connected_components(network)))
# Connect connected components.
for idx in range(len(connected_components)-1):
n1 = random.choice(connected_components[idx])
n2 = random.choice(connected_components[idx+1])
network.add_edge(n1, n2)
# Set start and end nodes.
START_NODE = random.choice(list(network.nodes()))
dists = sorted([(n, dist_func(START_NODE, n)) for n in network.nodes()], key=lambda x: x[1])
GOAL_NODE = dists[-1][0]
else:
# If using a pre-set network.
# Set pre-set nodes as start and end nodes.
START_NODE = 450
GOAL_NODE = 4
# Get solution using A* search.
path, edgelist, edgelists = a_star(network, START_NODE, GOAL_NODE, dist_func)
if len(path) == 0 and len(edgelist) == 0 and len(edgelists) == 0:
print("Goal node unreachable from starting node!")
else:
# Save list of edge lists and network for animation.
np.save('./results/edgelists/edgelist_sp_astar.npy', list(map(np.vstack, filter(lambda x: len(x) > 0, edgelists))))
nx.write_gpickle(network, './results/networks/network_sp_astar.gpickle')
| en | 0.839952 | # NOTE: g-score is the path cost. # NOTE: f-score is the path cost + heuristic. Heuristic function for estimating distance from specified node to goal node. Args: network (object): Networkx representation of the network node (int): Node for which to compute the heuristic goal (int): Goal node dist_func (function): Function used to compute distance between two nodes. Returns: (float): Computed heuristic # Compute distance from node to goal. Reconstruct path using last node and dictionary that maps each node on path to its predecessor. Args: current (int): Last node in discovered path came_from (dict): Dictionary mapping nodes on path to their predecessors Retuns: (tuple): Path in the form of a list and the same path encoded in an edge list # Initialize path and add last found node. # Reconstruct. # Construct edgelist. # Return path and edge list. Perform A* search algorithm to find path from starting node to goal node. Args: network (object): Networkx representation of the network start (int): Starting node for the search goal (int): Goal node for the search dist_func (function): Distance function mapping two nodes to the distance between them. Returns: (tuple) : list representing the found path, edgelist representation of the found path, list of edge lists that can be used for animating the process. # Initialize list for storing edge lists (for animation). # Partially apply heuristic with network and goal node. # Initialize array of node IDs. # Initialize set of unvisited nodes # with starting node. # Initialize dictionary mapping nodes to nodes immediately # preceding them on the cheapest path. # Initialize dictionary mapping nodes to cost of cheapest path from start # to the node currently known. # Initialize dictionary mapping nodes to the current best guess as to # how short a path from start to finish can be if it goes through n. # While set of open nodes is not empty. 
# Set node in open set with lowest f-score as current node and remove # from set of open nodes. # Reconstruct path from current node and append to list of edge lists. # Check if goal. # Go over neighbors of current node. # Compute tentative g-score and check if better than g-score of node. # If g-score better, set new g-score and set predecessor to current. # Compute f-score of neighbor (cost path + heuristic). # If neighbor not yet explored, add to open set. # If goal node not found, return signal values. ### PARSE ARGUMENTS ### ####################### # Parse problem network. # Get distance function. # If creating a new network. # Number of nodes to remove from network. # Remove randomly sampled nodes to get specified number of nodes. # Connect each node with specified number of its nearest unconnected neighbors. # Get list of nodes with which the node can connect. # Sort free nodes by distance can choose nodes with which to connect. # Connect to specified number of closest neighbors. # Get connected components in network. # Connect connected components. # Set start and end nodes. # If using a pre-set network. # Set pre-set nodes as start and end nodes. # Get solution using A* search. # Save list of edge lists and network for animation. | 3.3365 | 3 |
python/requ.py | mmz211/workspace | 0 | 6619768 | <reponame>mmz211/workspace
# -*- coding:UTF-8 -*-
import requests
if __name__ == '__main__':
target = 'http://gitbook.cn/'
req = requests.get(url=target)
print(req.text)
| # -*- coding:UTF-8 -*-
import requests
if __name__ == '__main__':
target = 'http://gitbook.cn/'
req = requests.get(url=target)
print(req.text) | en | 0.273539 | # -*- coding:UTF-8 -*- | 2.133934 | 2 |
MUNDO1/Ex012_Prod_Desc.py | KayDeVC/Python-CeV | 0 | 6619769 | print('-->PROMOÇÃO E PRAMOCINHA<--')
valor = float(input('O valor do produto sem desconto é: R$'))
noval = valor*0.95
print('Com o desconto o valor se torna R${:.2f}!'.format(noval))
| print('-->PROMOÇÃO E PRAMOCINHA<--')
valor = float(input('O valor do produto sem desconto é: R$'))
noval = valor*0.95
print('Com o desconto o valor se torna R${:.2f}!'.format(noval))
| none | 1 | 3.957537 | 4 | |
face__training.py | ASP1527/Secure-Password-Manager | 0 | 6619770 | <reponame>ASP1527/Secure-Password-Manager<gh_stars>0
import cv2
import numpy as np
from PIL import Image
import os
def train():
#path for face image database
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("face.xml") #xml file to recognise the faces
# function to get the images and label data
def getImagesAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') #grayscale
img_numpy = np.array(PIL_img,'uint8') #creates an array of the images
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy) #detect the face
for (x,y,w,h) in faces: #add the faces and ids into the arrays
faceSamples.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return faceSamples,ids
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces,ids = getImagesAndLabels(path) #does the above function for each face and id in the dataset folder
recognizer.train(faces, np.array(ids))
#save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml') #write the yml file with the faces
#print the numer of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
f = open("captured.txt", 'w') #write into captured to show that the faces are captured and trained
f.write("True")
f.close() | import cv2
import numpy as np
from PIL import Image
import os
def train():
#path for face image database
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("face.xml") #xml file to recognise the faces
# function to get the images and label data
def getImagesAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') #grayscale
img_numpy = np.array(PIL_img,'uint8') #creates an array of the images
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy) #detect the face
for (x,y,w,h) in faces: #add the faces and ids into the arrays
faceSamples.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return faceSamples,ids
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces,ids = getImagesAndLabels(path) #does the above function for each face and id in the dataset folder
recognizer.train(faces, np.array(ids))
#save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml') #write the yml file with the faces
#print the numer of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
f = open("captured.txt", 'w') #write into captured to show that the faces are captured and trained
f.write("True")
f.close() | en | 0.858615 | #path for face image database #xml file to recognise the faces # function to get the images and label data #grayscale #creates an array of the images #detect the face #add the faces and ids into the arrays #does the above function for each face and id in the dataset folder #save the model into trainer/trainer.yml #write the yml file with the faces #print the numer of faces trained and end program #write into captured to show that the faces are captured and trained | 3.197043 | 3 |
netbox/users/migrations/0010_update_jsonfield.py | esljaz/netbox | 2 | 6619771 | <reponame>esljaz/netbox
# Generated by Django 3.1b1 on 2020-07-16 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0009_replicate_permissions'),
]
operations = [
migrations.AlterField(
model_name='objectpermission',
name='constraints',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='userconfig',
name='data',
field=models.JSONField(default=dict),
),
]
| # Generated by Django 3.1b1 on 2020-07-16 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0009_replicate_permissions'),
]
operations = [
migrations.AlterField(
model_name='objectpermission',
name='constraints',
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='userconfig',
name='data',
field=models.JSONField(default=dict),
),
] | en | 0.770659 | # Generated by Django 3.1b1 on 2020-07-16 16:01 | 1.586507 | 2 |
calc/__main__.py | vanadium23/interpreters.py | 0 | 6619772 | <filename>calc/__main__.py
from interpreter import Interpreter
from lexer import Lexer
while True:
try:
text = input('calc> ')
except EOFError:
break
if not text:
continue
lexer = Lexer(text)
interpreter = Interpreter(lexer)
result = interpreter.expr()
print(result)
| <filename>calc/__main__.py
from interpreter import Interpreter
from lexer import Lexer
while True:
try:
text = input('calc> ')
except EOFError:
break
if not text:
continue
lexer = Lexer(text)
interpreter = Interpreter(lexer)
result = interpreter.expr()
print(result)
| none | 1 | 2.822511 | 3 | |
main.py | dunzhu678/pubgithubpythondemo | 0 | 6619773 | <reponame>dunzhu678/pubgithubpythondemo
#!flask/bin/python
# -- coding: utf-8 --
__author__ = 'cloudtogo'
from flask import Flask
from flask import request
import json
import hashlib
import time
app = Flask(__name__)
user_id = 1
USER = {}
token_user = {}
@app.route('/')
def index():
return "hello world"
@app.route('/user/registry', methods=['POST'])
def registry():
global USER
global user_id
global token_user
try:
body = request.get_json()
account = body.get('account')
password = body.get('password')
if account and password:
if len(password) < 6 and len(account)<3:
result = {
"code": "error",
"message": "Your password or account is too short or does not meet other minimum requirements"
}
return json.dumps(result)
if USER:
if USER.get(account):
result = {
"code": "error",
"message": "The account has been registered"
}
return json.dumps(result)
else:
m = hashlib.md5()
p = str(time.time()) + account + password
a = p.encode(encoding='utf-8')
m.update(a)
token = m.hexdigest()
ac = {
"account": account,
"user_id": user_id,
"password": password
}
USER[account] = ac
token_user[token] = ac
data = {
"code": 0,
"data": {
"token": token,
"user_id": user_id
},
"message": "Your registration was successful"
}
user_id = user_id + 1
return json.dumps(data)
else:
m = hashlib.md5()
p = str(time.time()) + account + password
a = p.encode(encoding='utf-8')
m.update(a)
token = m.hexdigest()
ac = {
"account": account,
"token": token,
"user_id": user_id,
"password": password
}
USER[account] = ac
token_user[token] = ac
data = {
"code": 0,
"data": {
"token": token,
"user_id": user_id
},
"message": "Your registration was successful"
}
user_id = user_id + 1
return json.dumps(data)
except Exception as e:
err = {
"code": "error",
"message": "Argument error"
}
return json.dumps(err)
@app.route('/user/login', methods=['POST'])
def login():
global USER
global token_user
try:
body = request.get_json(silent=True)
print(body)
account = body.get("account")
password = body.get("password")
ac = USER.get(account)
if ac:
pwd = ac.get('password')
if pwd == password:
m = hashlib.md5()
p = str(time.time()) + account + password
a = p.encode(encoding='utf-8')
m.update(a)
token = m.hexdigest()
token_user[token] = ac
user_id = ac.get('user_id')
data = {
"code": 0,
"data": {
"token": token,
"user_id": user_id
},
"message": "success"
}
return json.dumps(data)
data = {
"code": "error1",
"message": "Account or password error"
}
return json.dumps(data)
except Exception as e:
err = {
"code": "error2",
"message": "Argument error"
}
return json.dumps(err)
# 添加/完善/查询个人信息
@app.route('/user', methods=['PUT', 'GET'])
def user():
global token_user
token = request.headers.get("token")
method = request.method
# 获取用户信息
if method.upper() == 'GET':
result = user_get(request)
return result
try:
body = request.get_json(silent=True)
name = body.get('name')
age = body.get('age')
phone = body.get('phone')
"""
{
"account": account,
"token": token,
"user_id": user_id,
"password": password
}
"""
us = token_user.get(token)
if us:
us['age'] = age
us['name'] = name
us['phone'] = phone
token_user[token] = us
data = {
"code": 0,
"message": "success"
}
return json.dumps(data)
else:
err = {
"code": "0",
"data": {}
}
return json.dumps(err)
except:
err = {
"code": "error",
"message": "Argument Error"
}
return json.dumps(err)
# @app.route('/user', methods=['GET'])
def user_get(re):
global token_user
token = re.headers.get("token")
try:
re = token_user.get(token)
if re:
return re
else:
data={
"code": 0,
"data":{}
}
except:
data = {
"code": "error",
"message": "Miss authorization information"
}
return json.dumps(data)
@app.route('/sum', methods=['POST'])
def sum():
try:
body = request.get_json()
int_value1 = int(body.get('number1'))
int_value2 = int(body.get('number2'))
result=int_value1+int_value2
data = {
"code": 0,
"data": {
"result": result
},
"message": "Your calculation was successful"
}
return json.dumps(data)
except Exception as e:
err = {
"code": "error",
"message": "Argument error"
}
return json.dumps(err)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
| #!flask/bin/python
# -- coding: utf-8 --
__author__ = 'cloudtogo'
from flask import Flask
from flask import request
import json
import hashlib
import time
app = Flask(__name__)
user_id = 1
USER = {}
token_user = {}
@app.route('/')
def index():
return "hello world"
@app.route('/user/registry', methods=['POST'])
def registry():
global USER
global user_id
global token_user
try:
body = request.get_json()
account = body.get('account')
password = body.get('password')
if account and password:
if len(password) < 6 and len(account)<3:
result = {
"code": "error",
"message": "Your password or account is too short or does not meet other minimum requirements"
}
return json.dumps(result)
if USER:
if USER.get(account):
result = {
"code": "error",
"message": "The account has been registered"
}
return json.dumps(result)
else:
m = hashlib.md5()
p = str(time.time()) + account + password
a = p.encode(encoding='utf-8')
m.update(a)
token = m.hexdigest()
ac = {
"account": account,
"user_id": user_id,
"password": password
}
USER[account] = ac
token_user[token] = ac
data = {
"code": 0,
"data": {
"token": token,
"user_id": user_id
},
"message": "Your registration was successful"
}
user_id = user_id + 1
return json.dumps(data)
else:
m = hashlib.md5()
p = str(time.time()) + account + password
a = p.encode(encoding='utf-8')
m.update(a)
token = m.hexdigest()
ac = {
"account": account,
"token": token,
"user_id": user_id,
"password": password
}
USER[account] = ac
token_user[token] = ac
data = {
"code": 0,
"data": {
"token": token,
"user_id": user_id
},
"message": "Your registration was successful"
}
user_id = user_id + 1
return json.dumps(data)
except Exception as e:
err = {
"code": "error",
"message": "Argument error"
}
return json.dumps(err)
@app.route('/user/login', methods=['POST'])
def login():
global USER
global token_user
try:
body = request.get_json(silent=True)
print(body)
account = body.get("account")
password = body.get("password")
ac = USER.get(account)
if ac:
pwd = ac.get('password')
if pwd == password:
m = hashlib.md5()
p = str(time.time()) + account + password
a = p.encode(encoding='utf-8')
m.update(a)
token = m.hexdigest()
token_user[token] = ac
user_id = ac.get('user_id')
data = {
"code": 0,
"data": {
"token": token,
"user_id": user_id
},
"message": "success"
}
return json.dumps(data)
data = {
"code": "error1",
"message": "Account or password error"
}
return json.dumps(data)
except Exception as e:
err = {
"code": "error2",
"message": "Argument error"
}
return json.dumps(err)
# 添加/完善/查询个人信息
@app.route('/user', methods=['PUT', 'GET'])
def user():
global token_user
token = request.headers.get("token")
method = request.method
# 获取用户信息
if method.upper() == 'GET':
result = user_get(request)
return result
try:
body = request.get_json(silent=True)
name = body.get('name')
age = body.get('age')
phone = body.get('phone')
"""
{
"account": account,
"token": token,
"user_id": user_id,
"password": password
}
"""
us = token_user.get(token)
if us:
us['age'] = age
us['name'] = name
us['phone'] = phone
token_user[token] = us
data = {
"code": 0,
"message": "success"
}
return json.dumps(data)
else:
err = {
"code": "0",
"data": {}
}
return json.dumps(err)
except:
err = {
"code": "error",
"message": "Argument Error"
}
return json.dumps(err)
# @app.route('/user', methods=['GET'])
def user_get(re):
global token_user
token = re.headers.get("token")
try:
re = token_user.get(token)
if re:
return re
else:
data={
"code": 0,
"data":{}
}
except:
data = {
"code": "error",
"message": "Miss authorization information"
}
return json.dumps(data)
@app.route('/sum', methods=['POST'])
def sum():
try:
body = request.get_json()
int_value1 = int(body.get('number1'))
int_value2 = int(body.get('number2'))
result=int_value1+int_value2
data = {
"code": 0,
"data": {
"result": result
},
"message": "Your calculation was successful"
}
return json.dumps(data)
except Exception as e:
err = {
"code": "error",
"message": "Argument error"
}
return json.dumps(err)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080) | en | 0.196361 | #!flask/bin/python # -- coding: utf-8 -- # 添加/完善/查询个人信息 # 获取用户信息 { "account": account, "token": token, "user_id": user_id, "password": password } # @app.route('/user', methods=['GET']) | 2.70812 | 3 |
batch-utils/TestConfigs.py | mbirkett-liv/FindTrajectory | 0 | 6619774 | <filename>batch-utils/TestConfigs.py
"""Examine XML config files for duplicate CSD refcodes.
"""
import os
import sys
from xml.etree import cElementTree as xmlTree # XML library
def XmlAttr(xnNode,sAttrName,bRequired):
"""Read XML attribute value.
[xnNode]: XML node, containing attribute value.
[sAttrName]: string, name of attribute to read.
[bRequired]: bool, attribute is required; Exception if attribute missing.
<retval>: string, attribute value.
<except>: Exception, if attribute required and missing.
"""
sAttrVal=xnNode.get(sAttrName);
if(sAttrVal==None):
if(bRequired): raise Exception("Required attribute '"+sAttrName+"' missing");
else: sAttrVal=sAttrVal.strip()
return sAttrVal
def ReadMOFsFromConfigFile(dtMOFs,sConfigFilePath):
"""Read the active refcodes from the given config file.
[dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF.
[sConfigFilePath]: string, config file name path.
"""
xnTrajectoryFinder= xmlTree.parse(sConfigFilePath).getroot();
for xnDataset in xnTrajectoryFinder.findall(".//dataset"):
if XmlAttr(xnDataset,"active",True)!="1": continue
for xnHostCrystal in xnDataset.findall(".//hostCrystal"):
if XmlAttr(xnHostCrystal,"active",True)=="1":
sRefcode= XmlAttr(xnHostCrystal,"id",True)
if sRefcode in dtMOFs:
dtMOFs[sRefcode].append(sConfigFilePath)
else:
dtMOFs[sRefcode]= [sConfigFilePath]
def ReadConfigMOFs(dtMOFs,sDirPath):
"""Read refcodes from MOFs below given directory.
[dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF.
[sDirPath]: string, directory to scan for MOFs.
"""
for sEntryName in os.listdir(sDirPath):
sEntryPath= sDirPath+sEntryName
if os.path.isdir(sEntryPath):
sSubDirPath= sEntryPath+"/"
for sConfigFileName in os.listdir(sSubDirPath):
sConfigFilePath= sSubDirPath+sConfigFileName
ReadMOFsFromConfigFile(dtMOFs,sConfigFilePath)
else:
ReadMOFsFromConfigFile(dtMOFs,sEntryPath)
def ReportDuplicateRefcodes(dtMOFs):
"""Report cases of duplicate Refcodes.
[dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF.
"""
iDuplicate=0
for sRefcode,lsConfigFiles in dtMOFs.items():
if len(lsConfigFiles)>1:
iDuplicate+=1
if len(lsConfigFiles)>2:
print("Mad {sRefcode}")
#sPlace1= "SAT" if lsConfigFiles[0].find("configs-satya")>-1 else "SDR"
#sPlace2= "SAT" if lsConfigFiles[1].find("configs-satya")>-1 else "SDR"
sPlace1= lsConfigFiles[0][:lsConfigFiles[0].rfind("/")]
sPlace2= lsConfigFiles[1][:lsConfigFiles[1].rfind("/")]
print(f"Duplicate {sRefcode} {sPlace1}, {sPlace2}")
print(f"{len(dtMOFs)} MOFs, with {iDuplicate} duplicates.")
def Main():
"""Entry point.
"""
if len(sys.argv)==1:
print("Syntax: python TestConfigs.py <dir1> [<dir2> ....]\n"
" where <dir1> is a directory which contains config files.")
sys.exit(1)
# examine config files
dtMOFs= {}
for sConfigDir in sys.argv[1:]:
if sConfigDir[-1]!="/": sConfigDir+="/"
ReadConfigMOFs(dtMOFs,sConfigDir)
ReportDuplicateRefcodes(dtMOFs)
if __name__=="__main__": Main()
| <filename>batch-utils/TestConfigs.py
"""Examine XML config files for duplicate CSD refcodes.
"""
import os
import sys
from xml.etree import cElementTree as xmlTree # XML library
def XmlAttr(xnNode,sAttrName,bRequired):
"""Read XML attribute value.
[xnNode]: XML node, containing attribute value.
[sAttrName]: string, name of attribute to read.
[bRequired]: bool, attribute is required; Exception if attribute missing.
<retval>: string, attribute value.
<except>: Exception, if attribute required and missing.
"""
sAttrVal=xnNode.get(sAttrName);
if(sAttrVal==None):
if(bRequired): raise Exception("Required attribute '"+sAttrName+"' missing");
else: sAttrVal=sAttrVal.strip()
return sAttrVal
def ReadMOFsFromConfigFile(dtMOFs,sConfigFilePath):
"""Read the active refcodes from the given config file.
[dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF.
[sConfigFilePath]: string, config file name path.
"""
xnTrajectoryFinder= xmlTree.parse(sConfigFilePath).getroot();
for xnDataset in xnTrajectoryFinder.findall(".//dataset"):
if XmlAttr(xnDataset,"active",True)!="1": continue
for xnHostCrystal in xnDataset.findall(".//hostCrystal"):
if XmlAttr(xnHostCrystal,"active",True)=="1":
sRefcode= XmlAttr(xnHostCrystal,"id",True)
if sRefcode in dtMOFs:
dtMOFs[sRefcode].append(sConfigFilePath)
else:
dtMOFs[sRefcode]= [sConfigFilePath]
def ReadConfigMOFs(dtMOFs,sDirPath):
"""Read refcodes from MOFs below given directory.
[dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF.
[sDirPath]: string, directory to scan for MOFs.
"""
for sEntryName in os.listdir(sDirPath):
sEntryPath= sDirPath+sEntryName
if os.path.isdir(sEntryPath):
sSubDirPath= sEntryPath+"/"
for sConfigFileName in os.listdir(sSubDirPath):
sConfigFilePath= sSubDirPath+sConfigFileName
ReadMOFsFromConfigFile(dtMOFs,sConfigFilePath)
else:
ReadMOFsFromConfigFile(dtMOFs,sEntryPath)
def ReportDuplicateRefcodes(dtMOFs):
"""Report cases of duplicate Refcodes.
[dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF.
"""
iDuplicate=0
for sRefcode,lsConfigFiles in dtMOFs.items():
if len(lsConfigFiles)>1:
iDuplicate+=1
if len(lsConfigFiles)>2:
print("Mad {sRefcode}")
#sPlace1= "SAT" if lsConfigFiles[0].find("configs-satya")>-1 else "SDR"
#sPlace2= "SAT" if lsConfigFiles[1].find("configs-satya")>-1 else "SDR"
sPlace1= lsConfigFiles[0][:lsConfigFiles[0].rfind("/")]
sPlace2= lsConfigFiles[1][:lsConfigFiles[1].rfind("/")]
print(f"Duplicate {sRefcode} {sPlace1}, {sPlace2}")
print(f"{len(dtMOFs)} MOFs, with {iDuplicate} duplicates.")
def Main():
"""Entry point.
"""
if len(sys.argv)==1:
print("Syntax: python TestConfigs.py <dir1> [<dir2> ....]\n"
" where <dir1> is a directory which contains config files.")
sys.exit(1)
# examine config files
dtMOFs= {}
for sConfigDir in sys.argv[1:]:
if sConfigDir[-1]!="/": sConfigDir+="/"
ReadConfigMOFs(dtMOFs,sConfigDir)
ReportDuplicateRefcodes(dtMOFs)
if __name__=="__main__": Main()
| en | 0.546792 | Examine XML config files for duplicate CSD refcodes. # XML library Read XML attribute value. [xnNode]: XML node, containing attribute value. [sAttrName]: string, name of attribute to read. [bRequired]: bool, attribute is required; Exception if attribute missing. <retval>: string, attribute value. <except>: Exception, if attribute required and missing. Read the active refcodes from the given config file. [dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF. [sConfigFilePath]: string, config file name path. Read refcodes from MOFs below given directory. [dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF. [sDirPath]: string, directory to scan for MOFs. Report cases of duplicate Refcodes. [dtMOFs]: dict, key: string, refcode; value: list of config files featuring this MOF. #sPlace1= "SAT" if lsConfigFiles[0].find("configs-satya")>-1 else "SDR" #sPlace2= "SAT" if lsConfigFiles[1].find("configs-satya")>-1 else "SDR" Entry point. # examine config files | 2.514481 | 3 |
lambda-code/lambda_function.py | dsmdavid/tableau-style-validator | 2 | 6619775 | import os
import json
from helpers import init_env
from download_workbook import download_workbook
from validate_styles import validate_styles
def lambda_handler(event, context):
try:
print('LAMBDA HANDLER event: ', event)
init_env(event)
# Get workbook
tableau_workbook = download_workbook()
# Get style guide
with open(os.getenv('STYLE_GUIDE_PATH')) as sg:
style_guide = json.load(sg)
# Test workbook against style guide
validate_styles(style_guide, tableau_workbook)
except Exception as e:
print(e)
raise e
if __name__ == '__main__':
if os.getenv('AWS_EXECUTION_ENV') is None:
lambda_handler({}, {})
else:
print('i am a little teapot.')
| import os
import json
from helpers import init_env
from download_workbook import download_workbook
from validate_styles import validate_styles
def lambda_handler(event, context):
try:
print('LAMBDA HANDLER event: ', event)
init_env(event)
# Get workbook
tableau_workbook = download_workbook()
# Get style guide
with open(os.getenv('STYLE_GUIDE_PATH')) as sg:
style_guide = json.load(sg)
# Test workbook against style guide
validate_styles(style_guide, tableau_workbook)
except Exception as e:
print(e)
raise e
if __name__ == '__main__':
if os.getenv('AWS_EXECUTION_ENV') is None:
lambda_handler({}, {})
else:
print('i am a little teapot.')
| en | 0.725097 | # Get workbook # Get style guide # Test workbook against style guide | 2.232648 | 2 |
src/controllers/chat.py | ChillerDragon-backup/TeeworldsEconMod | 5 | 6619776 | <gh_stars>1-10
#!/usr/bin/env python3
"""Chat message related module"""
import sys
import datetime
import re
from base.rcon import say, send_discord
import base.settings
import base.generic
import sql_stats
import version
class ChatController:
"""Handles chat messages"""
def __init__(self):
self.settings = base.settings.Settings()
self.players_controller = None
self.achievements_controller = None
self.CHAT_NONE=0
self.CHAT_ALL=1
self.CHAT_TEAM=2
self.CHAT_WHISPER=3
def init(self, players_controller, achievements_controller):
"""Init controllers"""
self.players_controller = players_controller
self.achievements_controller = achievements_controller
def is_ban_reason_in_str(self, string):
"""Search banned keywords in given string"""
words = self.settings.get("chat_filter")
if not words:
return False
for word in words:
if string.find(word) != -1:
return True
return False
# get by id if no argument given
# get by name if name is argument
# return player object and identifier (id/name)
def get_rank_player(self, msg, rank_cmd):
"""Parse command and return player object"""
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return None, None
msg_normal = msg
msg = msg.lower()
id_str = self.get_chat_id(msg_normal)
rankname_start = -1
if msg.find(rank_cmd + " ") != -1:
cmd_end = msg.rfind(rank_cmd)
rankname_start = msg.find(rank_cmd + " ", cmd_end) + len(rank_cmd + " ")
rankname_end = len(msg) - 1 # cut off newline
rankname = msg_normal[rankname_start:rankname_end]
if not rankname or rankname == "" or rankname_start == -1:
return self.players_controller.get_player_by_id(id_str), id_str
argplayer = self.players_controller.get_player_by_name(rankname)
if not argplayer:
# try to find id prefix in argument name
pattern = r'(\d{1,2}):(.*)'
if self.settings.get("tw_version") == "ddnet":
# F-DDrace 128 slots
pattern = r'(\d{1,3}):(.*)'
match = re.match(pattern, rankname)
if match:
r_id = match.group(1)
r_name = match.group(2)
r_player = self.players_controller.get_player_by_id(r_id)
if r_player and r_player.name == r_name:
argplayer = r_player
return argplayer, rankname
def get_rank_name(self, msg, rank_cmd):
"""Parse message and return playername"""
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return None
msg_normal = msg
msg = msg.lower()
name_start = base.generic.cfind(msg, ":", 3) + 1
name_end = msg.find(rank_cmd, name_start)
name_end = msg.rfind(": ", name_end)
name = msg_normal[name_start:name_end]
rankname_start = -1
if msg.find(rank_cmd + " ") != -1:
rankname_start = msg.find(rank_cmd + " ", name_end) + len(rank_cmd + " ")
rankname_end = len(msg) - 1 # cut off newline
rankname = msg_normal[rankname_start:rankname_end]
if not rankname or rankname == "" or rankname_start == -1:
return name
return rankname
def get_chat_id(self, msg):
"""Get clientID of the sender"""
# TODO: move constants to better place
# TODO: refactor this code
# TODO: support versions higher than 0.7.5 (care "0.7.10" < "0.7.5" is true in python)
# in 0.7.5 id position was swapped
# https://github.com/teeworlds/teeworlds/commit/5090c39d94bad0b6dda8caaef271133c46c00ee0#diff-a2df712cfb938eda9a173f36c865c2cc
id_str = None # python scoping ?!
if self.settings.get("tw_version") == "0.7.5":
mode_start = msg.find(" ") + 1
mode_end = base.generic.cfind(msg, ":", 2)
mode_str = msg[mode_start:mode_end]
msg = msg[mode_end:-1]
if int(mode_str) == self.CHAT_TEAM:
id_start = base.generic.cfind(msg, ":", 2) + 1
id_end = base.generic.cfind(msg, ":", 3)
id_str = msg[id_start:id_end]
else:
id_start = msg.find(":") + 1
id_end = base.generic.cfind(msg, ":", 2)
id_str = msg[id_start:id_end]
else:
id_start = msg.find(" ") + 1
id_end = base.generic.cfind(msg, ":", 2)
id_str = msg[id_start:id_end]
return id_str
def get_spam_player(self, msg):
"""Recive the player name of a message"""
id_str = self.get_chat_id(msg)
return self.players_controller.get_player_by_id(id_str)
def spam_protection(self, msg):
"""Takes a message and mutes the author if it is spam"""
player = self.get_spam_player(msg)
if not player:
if self.settings.get("hotplug") == 1:
return False
say("[ERROR] spam_protection() failed! please contact an admin")
sys.exit(1)
now = datetime.datetime.now()
diff = now - player.last_chat
player.last_chat = now
#say("chat diff seconds: " + str(diff.seconds) + " last_chat: " + str(player.last_chat))
seconds = diff.seconds
if seconds < 15:
player.mute_score += 1
if player.mute_score > 5:
if not player.is_muted:
player.is_muted = True
say("'" + str(player.name) + "' is banned from the command system (spam)")
if seconds > 120:
player.is_muted = False
player.mute_score = 0
if player.is_muted:
return True
return False
def is_muted(self, msg):
"""Take a message and return of the message author is muted or not"""
player = self.get_spam_player(msg)
if not player:
if self.settings.get("hotplug") == 1:
return False
say("[WARNING] is_muted() failed! please contact an admin")
return False
if player.is_muted:
return True
return False
def handle_chat_message(self, msg):
"""
Main method of this module
takes a message and parses it for chat commands
"""
if self.is_muted(msg):
return
prefix = self.settings.get("chat_command_prefix")
is_cmd = True
msg_normal = msg
msg = msg.lower()
# the first possible occurence of a chat command (to filter chat command names)
chat_cmd_start = base.generic.cfind(msg, ":", 4)
cmd = msg[chat_cmd_start:-1] # cut newline at end
if cmd.endswith(": " + prefix + "help") or \
cmd.endswith(": " + prefix + "info") or \
cmd.endswith(": " + prefix + "cmdlist"):
say("==== Teeworlds Econ Mod (TEM) ====")
say("developed by ChillerDragon version: " + str(version.VERSION))
say("https://github.com/ChillaVanilla/TeeworldsEconMod")
say("'" + prefix + "help' to show this help")
say("'" + prefix + "stats' to show round stats")
say("'" + prefix + "achievements' to show achievements")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top5' for all time stats commands")
say("'" + prefix + "rank' for all rank commands")
elif cmd.endswith(": " + prefix + "top5"):
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_kills' to see top5 killers of all time")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_flags' to see top5 flag cap times of all time")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_caps' to see top5 flag amount of all time")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_sprees' to see top5 killing sprees of all time")
else:
say("not supported in file stats mode")
#elif cmd.endswith(": " + prefix + "stats_all"):
#player.print_stats_all(True)
elif cmd.find(": " + prefix + "stats") != -1:
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return
player, name = self.get_rank_player(msg_normal, ": " + prefix + "stats")
if not player:
say("[stats] player '" + str(name) + "' is not online.")
return
player.show_stats_round()
#player.print_stats_all()
elif cmd.endswith(": " + prefix + "top_caps"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_flag_caps()
else:
say("not supported in file stats mode")
elif cmd.endswith(": " + prefix + "top_flags"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_times()
else:
say("not supported in file stats mode")
elif cmd.endswith(": " + prefix + "top_kills"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_killers()
else:
say("not supported in file stats mode")
elif cmd.endswith(": " + prefix + "top_sprees"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_spree()
else:
say("not supported in file stats mode")
elif cmd.find("" + prefix + "rank_kills") != - 1:
sql_stats.rank_kills(self.get_rank_name(msg_normal, ": " + prefix + "rank_kills"))
elif msg.find("" + prefix + "rank_flags") != - 1:
sql_stats.rank_flag_time(self.get_rank_name(msg_normal, ": " + prefix + "rank_flags"))
elif msg.find("" + prefix + "rank_caps") != - 1:
sql_stats.rank_flag_caps(self.get_rank_name(msg_normal, ": " + prefix + "rank_caps"))
elif cmd.find("" + prefix + "rank_sprees") != - 1:
sql_stats.rank_spree(self.get_rank_name(msg_normal, ": " + prefix + "rank_sprees"))
elif cmd.find("" + prefix + "rank_all") != - 1:
name = self.get_rank_name(msg_normal, ": " + prefix + "rank_all")
if not name:
return
say("=== '" + str(name) + "'s stats ===")
sql_stats.rank_kills(str(name))
sql_stats.rank_flag_time(str(name))
sql_stats.rank_flag_caps(str(name))
sql_stats.rank_spree(str(name))
elif cmd.find("" + prefix + "rank") != - 1:
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return
say("'" + prefix + "rank_kills' to show global kills rank")
say("'" + prefix + "rank_sprees' to show global spree rank")
say("'" + prefix + "rank_flags' to show global flag time rank")
say("'" + prefix + "rank_caps' to show global flag capture rank")
elif cmd.find("" + prefix + "achievements") != - 1:
name = self.get_rank_name(msg_normal, ": " + prefix + "achievements")
self.achievements_controller.show_achievements(name)
elif cmd.endswith(": " + prefix + "test"):
player, name = self.get_rank_player(msg_normal, ": " + prefix + "test")
if not player:
if self.settings.get("hotplug") == 1:
return
say("error")
sys.exit(1)
say("got player: " + str(name))
# say("current spree: " + str(p.killingspree))
# handle this like a chat command (so it has spam prot)
elif self.is_ban_reason_in_str(cmd):
self.admin_contact_msg()
# players containing : will be cutted in discord message but this is fine for now
name = self.get_rank_name(msg_normal, ": ")
if self.settings.get("filter_discord") == 1:
send_discord(
"chat trigger " + \
str(self.settings.get("mod_discord")) + \
"!\n" + str(msg)
)
else:
is_cmd = False
if is_cmd:
self.spam_protection(msg_normal)
def admin_contact_msg(self):
"""Display the admin contact message in chat"""
if str(self.settings.get("admin_contact")) == "":
return
say(
"[INFO] Contact the admin " + \
str(self.settings.get("admin_contact")) + \
" to report players."
)
| #!/usr/bin/env python3
"""Chat message related module"""
import sys
import datetime
import re
from base.rcon import say, send_discord
import base.settings
import base.generic
import sql_stats
import version
class ChatController:
"""Handles chat messages"""
def __init__(self):
self.settings = base.settings.Settings()
self.players_controller = None
self.achievements_controller = None
self.CHAT_NONE=0
self.CHAT_ALL=1
self.CHAT_TEAM=2
self.CHAT_WHISPER=3
def init(self, players_controller, achievements_controller):
"""Init controllers"""
self.players_controller = players_controller
self.achievements_controller = achievements_controller
def is_ban_reason_in_str(self, string):
"""Search banned keywords in given string"""
words = self.settings.get("chat_filter")
if not words:
return False
for word in words:
if string.find(word) != -1:
return True
return False
# get by id if no argument given
# get by name if name is argument
# return player object and identifier (id/name)
def get_rank_player(self, msg, rank_cmd):
"""Parse command and return player object"""
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return None, None
msg_normal = msg
msg = msg.lower()
id_str = self.get_chat_id(msg_normal)
rankname_start = -1
if msg.find(rank_cmd + " ") != -1:
cmd_end = msg.rfind(rank_cmd)
rankname_start = msg.find(rank_cmd + " ", cmd_end) + len(rank_cmd + " ")
rankname_end = len(msg) - 1 # cut off newline
rankname = msg_normal[rankname_start:rankname_end]
if not rankname or rankname == "" or rankname_start == -1:
return self.players_controller.get_player_by_id(id_str), id_str
argplayer = self.players_controller.get_player_by_name(rankname)
if not argplayer:
# try to find id prefix in argument name
pattern = r'(\d{1,2}):(.*)'
if self.settings.get("tw_version") == "ddnet":
# F-DDrace 128 slots
pattern = r'(\d{1,3}):(.*)'
match = re.match(pattern, rankname)
if match:
r_id = match.group(1)
r_name = match.group(2)
r_player = self.players_controller.get_player_by_id(r_id)
if r_player and r_player.name == r_name:
argplayer = r_player
return argplayer, rankname
def get_rank_name(self, msg, rank_cmd):
"""Parse message and return playername"""
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return None
msg_normal = msg
msg = msg.lower()
name_start = base.generic.cfind(msg, ":", 3) + 1
name_end = msg.find(rank_cmd, name_start)
name_end = msg.rfind(": ", name_end)
name = msg_normal[name_start:name_end]
rankname_start = -1
if msg.find(rank_cmd + " ") != -1:
rankname_start = msg.find(rank_cmd + " ", name_end) + len(rank_cmd + " ")
rankname_end = len(msg) - 1 # cut off newline
rankname = msg_normal[rankname_start:rankname_end]
if not rankname or rankname == "" or rankname_start == -1:
return name
return rankname
def get_chat_id(self, msg):
"""Get clientID of the sender"""
# TODO: move constants to better place
# TODO: refactor this code
# TODO: support versions higher than 0.7.5 (care "0.7.10" < "0.7.5" is true in python)
# in 0.7.5 id position was swapped
# https://github.com/teeworlds/teeworlds/commit/5090c39d94bad0b6dda8caaef271133c46c00ee0#diff-a2df712cfb938eda9a173f36c865c2cc
id_str = None # python scoping ?!
if self.settings.get("tw_version") == "0.7.5":
mode_start = msg.find(" ") + 1
mode_end = base.generic.cfind(msg, ":", 2)
mode_str = msg[mode_start:mode_end]
msg = msg[mode_end:-1]
if int(mode_str) == self.CHAT_TEAM:
id_start = base.generic.cfind(msg, ":", 2) + 1
id_end = base.generic.cfind(msg, ":", 3)
id_str = msg[id_start:id_end]
else:
id_start = msg.find(":") + 1
id_end = base.generic.cfind(msg, ":", 2)
id_str = msg[id_start:id_end]
else:
id_start = msg.find(" ") + 1
id_end = base.generic.cfind(msg, ":", 2)
id_str = msg[id_start:id_end]
return id_str
def get_spam_player(self, msg):
"""Recive the player name of a message"""
id_str = self.get_chat_id(msg)
return self.players_controller.get_player_by_id(id_str)
def spam_protection(self, msg):
"""Takes a message and mutes the author if it is spam"""
player = self.get_spam_player(msg)
if not player:
if self.settings.get("hotplug") == 1:
return False
say("[ERROR] spam_protection() failed! please contact an admin")
sys.exit(1)
now = datetime.datetime.now()
diff = now - player.last_chat
player.last_chat = now
#say("chat diff seconds: " + str(diff.seconds) + " last_chat: " + str(player.last_chat))
seconds = diff.seconds
if seconds < 15:
player.mute_score += 1
if player.mute_score > 5:
if not player.is_muted:
player.is_muted = True
say("'" + str(player.name) + "' is banned from the command system (spam)")
if seconds > 120:
player.is_muted = False
player.mute_score = 0
if player.is_muted:
return True
return False
def is_muted(self, msg):
"""Take a message and return of the message author is muted or not"""
player = self.get_spam_player(msg)
if not player:
if self.settings.get("hotplug") == 1:
return False
say("[WARNING] is_muted() failed! please contact an admin")
return False
if player.is_muted:
return True
return False
def handle_chat_message(self, msg):
"""
Main method of this module
takes a message and parses it for chat commands
"""
if self.is_muted(msg):
return
prefix = self.settings.get("chat_command_prefix")
is_cmd = True
msg_normal = msg
msg = msg.lower()
# the first possible occurence of a chat command (to filter chat command names)
chat_cmd_start = base.generic.cfind(msg, ":", 4)
cmd = msg[chat_cmd_start:-1] # cut newline at end
if cmd.endswith(": " + prefix + "help") or \
cmd.endswith(": " + prefix + "info") or \
cmd.endswith(": " + prefix + "cmdlist"):
say("==== Teeworlds Econ Mod (TEM) ====")
say("developed by ChillerDragon version: " + str(version.VERSION))
say("https://github.com/ChillaVanilla/TeeworldsEconMod")
say("'" + prefix + "help' to show this help")
say("'" + prefix + "stats' to show round stats")
say("'" + prefix + "achievements' to show achievements")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top5' for all time stats commands")
say("'" + prefix + "rank' for all rank commands")
elif cmd.endswith(": " + prefix + "top5"):
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_kills' to see top5 killers of all time")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_flags' to see top5 flag cap times of all time")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_caps' to see top5 flag amount of all time")
if self.settings.get("stats_mode") == "sql":
say("'" + prefix + "top_sprees' to see top5 killing sprees of all time")
else:
say("not supported in file stats mode")
#elif cmd.endswith(": " + prefix + "stats_all"):
#player.print_stats_all(True)
elif cmd.find(": " + prefix + "stats") != -1:
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return
player, name = self.get_rank_player(msg_normal, ": " + prefix + "stats")
if not player:
say("[stats] player '" + str(name) + "' is not online.")
return
player.show_stats_round()
#player.print_stats_all()
elif cmd.endswith(": " + prefix + "top_caps"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_flag_caps()
else:
say("not supported in file stats mode")
elif cmd.endswith(": " + prefix + "top_flags"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_times()
else:
say("not supported in file stats mode")
elif cmd.endswith(": " + prefix + "top_kills"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_killers()
else:
say("not supported in file stats mode")
elif cmd.endswith(": " + prefix + "top_sprees"):
if self.settings.get("stats_mode") == "sql":
sql_stats.best_spree()
else:
say("not supported in file stats mode")
elif cmd.find("" + prefix + "rank_kills") != - 1:
sql_stats.rank_kills(self.get_rank_name(msg_normal, ": " + prefix + "rank_kills"))
elif msg.find("" + prefix + "rank_flags") != - 1:
sql_stats.rank_flag_time(self.get_rank_name(msg_normal, ": " + prefix + "rank_flags"))
elif msg.find("" + prefix + "rank_caps") != - 1:
sql_stats.rank_flag_caps(self.get_rank_name(msg_normal, ": " + prefix + "rank_caps"))
elif cmd.find("" + prefix + "rank_sprees") != - 1:
sql_stats.rank_spree(self.get_rank_name(msg_normal, ": " + prefix + "rank_sprees"))
elif cmd.find("" + prefix + "rank_all") != - 1:
name = self.get_rank_name(msg_normal, ": " + prefix + "rank_all")
if not name:
return
say("=== '" + str(name) + "'s stats ===")
sql_stats.rank_kills(str(name))
sql_stats.rank_flag_time(str(name))
sql_stats.rank_flag_caps(str(name))
sql_stats.rank_spree(str(name))
elif cmd.find("" + prefix + "rank") != - 1:
if self.settings.get("stats_mode") != "sql":
say("not supported in file stats mode")
return
say("'" + prefix + "rank_kills' to show global kills rank")
say("'" + prefix + "rank_sprees' to show global spree rank")
say("'" + prefix + "rank_flags' to show global flag time rank")
say("'" + prefix + "rank_caps' to show global flag capture rank")
elif cmd.find("" + prefix + "achievements") != - 1:
name = self.get_rank_name(msg_normal, ": " + prefix + "achievements")
self.achievements_controller.show_achievements(name)
elif cmd.endswith(": " + prefix + "test"):
player, name = self.get_rank_player(msg_normal, ": " + prefix + "test")
if not player:
if self.settings.get("hotplug") == 1:
return
say("error")
sys.exit(1)
say("got player: " + str(name))
# say("current spree: " + str(p.killingspree))
# handle this like a chat command (so it has spam prot)
elif self.is_ban_reason_in_str(cmd):
self.admin_contact_msg()
# players containing : will be cutted in discord message but this is fine for now
name = self.get_rank_name(msg_normal, ": ")
if self.settings.get("filter_discord") == 1:
send_discord(
"chat trigger " + \
str(self.settings.get("mod_discord")) + \
"!\n" + str(msg)
)
else:
is_cmd = False
if is_cmd:
self.spam_protection(msg_normal)
def admin_contact_msg(self):
"""Display the admin contact message in chat"""
if str(self.settings.get("admin_contact")) == "":
return
say(
"[INFO] Contact the admin " + \
str(self.settings.get("admin_contact")) + \
" to report players."
) | en | 0.684651 | #!/usr/bin/env python3 Chat message related module Handles chat messages Init controllers Search banned keywords in given string # get by id if no argument given # get by name if name is argument # return player object and identifier (id/name) Parse command and return player object # cut off newline # try to find id prefix in argument name # F-DDrace 128 slots Parse message and return playername # cut off newline Get clientID of the sender # TODO: move constants to better place # TODO: refactor this code # TODO: support versions higher than 0.7.5 (care "0.7.10" < "0.7.5" is true in python) # in 0.7.5 id position was swapped # https://github.com/teeworlds/teeworlds/commit/5090c39d94bad0b6dda8caaef271133c46c00ee0#diff-a2df712cfb938eda9a173f36c865c2cc # python scoping ?! Recive the player name of a message Takes a message and mutes the author if it is spam #say("chat diff seconds: " + str(diff.seconds) + " last_chat: " + str(player.last_chat)) Take a message and return of the message author is muted or not Main method of this module takes a message and parses it for chat commands # the first possible occurence of a chat command (to filter chat command names) # cut newline at end #elif cmd.endswith(": " + prefix + "stats_all"): #player.print_stats_all(True) #player.print_stats_all() # say("current spree: " + str(p.killingspree)) # handle this like a chat command (so it has spam prot) # players containing : will be cutted in discord message but this is fine for now Display the admin contact message in chat | 2.719488 | 3 |
Learn/idp_evaluate.py | ApocalyVec/mGesf | 18 | 6619777 | # temporal probability
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from keras.engine.saving import load_model
from sklearn.preprocessing import OneHotEncoder
from Learn.data_in import idp_preprocess_legacy, resolve_points_per_sample
from utils.data_utils import prepare_x, moving_average
########################################################################################################################
# idp_data_dir = ['../data/idp-ABCDE-rpt10', '../data/idp-ABCDE-rpt2']
# num_repeats = [10, 2]
# classes = ['A', 'B', 'C', 'D', 'E']
########################################################################################################################
# idp_data_dir = ['../data/idp-FGHIJ-rpt10']
# num_repeats = [10]
# classes = ['F', 'G', 'H', 'I', 'L']
########################################################################################################################
# idp_data_dir = ['../data/idp-KLMNO-rpt10']
# num_repeats = [10]
# sample_classes = [['K', 'L', 'M', 'N', 'O']]
# classes = ['K', 'L', 'M', 'N', 'O']
########################################################################################################################
# idp_data_dir = ['../data/idp-PQRST-rpt10']
# num_repeats = [10]
# sample_classes = [['P', 'Q', 'R', 'S', 'T']]
# classes = ['P', 'Q', 'R', 'S', 'T']
########################################################################################################################
# idp_data_dir = ['../data/idp-UVWXY-rpt10']
# num_repeats = [10]
# sample_classes = [['U', 'V', 'W', 'X', 'Y']]
# classes = ['U', 'V', 'W', 'X', 'Y']
########################################################################################################################
# idp_data_dir = ['../data/idp-ZSpcBspcEnt-rpt10']
# num_repeats = [10]
# sample_classes = [['Z', 'Spc', 'Bspc', 'Ent']]
# classes = ['Z', 'Spc', 'Bspc', 'Ent']
########################################################################################################################
# idp_data_dir = ['/Users/Leo/Documents/data/idp_29/data/idp-ABCDE-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-ABCDE-rpt2',
# '/Users/Leo/Documents/data/idp_29/data/idp-FGHIJ-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-KLMNO-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-PQRST-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-UVWXY-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-ZSpcBspcEnt-rpt10']
idp_data_dir = ['D:\PycharmProjects\mGesf\data/idp-ABCDE-rpt10',
'D:\PycharmProjects\mGesf\data/idp-ABCDE-rpt2',
'D:\PycharmProjects\mGesf\data/idp-FGHIJ-rpt10',
'D:\PycharmProjects\mGesf\data/idp-KLMNO-rpt10',
'D:\PycharmProjects\mGesf\data/idp-PQRST-rpt10',
'D:\PycharmProjects\mGesf\data/idp-UVWXY-rpt10',
'D:\PycharmProjects\mGesf\data/idp-ZSpcBspcEnt-rpt10']
num_repeats = [10, 2, 10, 10, 10, 10, 10]
sample_classes = [['A', 'B', 'C', 'D', 'E'],
['A', 'B', 'C', 'D', 'E'], # some of the ABCDE data are repeated twice
['F', 'G', 'H', 'I', 'J'],
['K', 'L', 'M', 'N', 'O'],
['P', 'Q', 'R', 'S', 'T'],
['U', 'V', 'W', 'X', 'Y'],
['Z', 'Spc', 'Bspc', 'Ent']]
classes = ['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y',
'Z', 'Spc', 'Bspc', 'Ent']
assert len(idp_data_dir) == len(num_repeats) == len(sample_classes) # check the consistency of zip variables
assert set(classes) == set([item for sublist in sample_classes for item in sublist]) # check categorical consistency
########################################################################################################################
interval_duration = 4.0 # how long does one writing take
period = 33 # ms
# classes = set([item for sublist in sample_classes for item in sublist]) # reduce to categorical classes
ls_dicts = \
[idp_preprocess_legacy(dr, interval_duration, classes=cs, num_repeat=nr, period=period)
for dr, nr, cs in zip(idp_data_dir, num_repeats, sample_classes)]
points_per_sample = int(resolve_points_per_sample(period, interval_duration))
# create input features
Y = []
X_mmw_rD = []
X_mmw_rA = []
# add to x and y
for lsd in ls_dicts:
for key, value in lsd.items():
X_mmw_rD += [d for d in value['mmw']['range_doppler']]
X_mmw_rA += [a for a in value['mmw']['range_azi']]
Y += [key for i in range(value['mmw']['range_doppler'].shape[0])]
pass
X_mmw_rD = np.asarray(X_mmw_rD)
X_mmw_rA = np.asarray(X_mmw_rA)
Y = np.asarray(Y)
encoder = OneHotEncoder(categories='auto')
Y = encoder.fit_transform(np.expand_dims(Y, axis=1)).toarray()
idp_model = load_model('E:\mgesf_backup\models\idp\idp_29_2020-05-04_03-24-10.425555.h5')
# make a contiuous temporal sequence A, B, C, D, E
# TODO have this followed by a void character
# key_indices = [0, 160, 320, 480, 640] # A, B, C, D, E
sequence = np.reshape(np.array(['H', 'E', 'L', 'L', 'O', 'Spc', 'W', 'O', 'R', 'L', 'D', 'Ent']), newshape=(-1, 1))
valid_indices = np.argmax(encoder.transform(sequence).toarray(), axis=1)
index_class_dict = dict([(index, clss[0]) for index, clss in zip(valid_indices, sequence)])
H_index = np.where(np.all(Y == encoder.transform([['H']]).toarray(), axis=1))[0][0]
E_index = np.where(np.all(Y == encoder.transform([['E']]).toarray(), axis=1))[0][0]
L_index = np.where(np.all(Y == encoder.transform([['L']]).toarray(), axis=1))[0][0]
O_index = np.where(np.all(Y == encoder.transform([['O']]).toarray(), axis=1))[0][0]
Spc_index = np.where(np.all(Y == encoder.transform([['Spc']]).toarray(), axis=1))[0][0]
W_index = np.where(np.all(Y == encoder.transform([['W']]).toarray(), axis=1))[0][0]
R_index = np.where(np.all(Y == encoder.transform([['R']]).toarray(), axis=1))[0][0]
D_index = np.where(np.all(Y == encoder.transform([['D']]).toarray(), axis=1))[0][0]
Ent_index = np.where(np.all(Y == encoder.transform([['Ent']]).toarray(), axis=1))[0][0]
key_indices = np.array([H_index, H_index + 1, E_index, L_index, L_index + 1, O_index,
Spc_index, W_index + 1, O_index + 1, R_index, L_index + 2, D_index, Ent_index, Ent_index + 1]) # H, E, L, L, O, spc, W, O, R, L, D
ys = np.array([Y[ki] for ki in key_indices])
print('Working with sequence: ' + str(encoder.inverse_transform(ys)))
rA_seq = np.array([X_mmw_rA[i] for i in key_indices])
rA_seq = np.reshape(rA_seq,
newshape=[-1] + list(rA_seq.shape[2:])) # flatten the sample dimension to create temporal sequence
rD_seq = np.array([X_mmw_rD[i] for i in key_indices])
rD_seq = np.reshape(rD_seq,
newshape=[-1] + list(rD_seq.shape[2:])) # flatten the sample dimension to create temporal sequence
# sample from the temporal sequence
rA_samples = prepare_x(rA_seq, window_size=121, stride=1)
rD_samples = prepare_x(rD_seq, window_size=121, stride=1)
y_pred = idp_model.predict([rD_samples, rA_samples], batch_size=32)
y_pred = y_pred[60:len(y_pred) - 60]
# plottings
matplotlib.rcParams.update({'font.size': 14})
plt.figure(figsize=(20, 6))
is_plotted_others = False
for i, col in enumerate(np.transpose(y_pred)):
if i in valid_indices:
plt.plot(moving_average(col, n=16), label='Predicted gesture: ' + index_class_dict[i], linewidth=3)
else:
plt.plot(moving_average(col, n=16), c='gray', label='Gestures for other chars') if not is_plotted_others else plt.plot(col, c='gray')
is_plotted_others = True
# plot char separation lines
for i in range(1, len(key_indices) - 2):
plt.axvline(x=121 * i, c='0.3', linewidth=5)
# debouncer_frame_threshold = 30
# debouncer_prob_threshold = 0.9
# debouncer = [0] * len(classes)
# for i, frame_pred in enumerate(y_pred):
# break_indices = np.argwhere(frame_pred > debouncer_prob_threshold)
# for bi in break_indices:
# bi = bi[0]
# debouncer[bi] = debouncer[bi] + 1
# if debouncer[bi] > debouncer_frame_threshold:
# plt.plot([i], [0.9], 'bo')
# plt.text(i, 0.95, index_class_dict[bi] + 'Detected ', fontsize=12, c='blue')
# debouncer = [0] * len(classes)
# plt.legend(loc=4)
plt.xlabel('Frames (30 frames per second)')
plt.ylabel('Probability of class prediction')
# plt.title('Temporal Probability cross a Continuous Sequence of "A, B, C, D, E"')
plt.title('Temporal Probability cross a Continuous Sequence of "H, E, L, L, O, Space, W, O, R, L, D"')
plt.title('Temporal Probability cross a Continuous Sequence of "H, E, L, L, O, Space, W, O, R, L, D", with Debouncer Detection')
plt.show()
| # temporal probability
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from keras.engine.saving import load_model
from sklearn.preprocessing import OneHotEncoder
from Learn.data_in import idp_preprocess_legacy, resolve_points_per_sample
from utils.data_utils import prepare_x, moving_average
########################################################################################################################
# idp_data_dir = ['../data/idp-ABCDE-rpt10', '../data/idp-ABCDE-rpt2']
# num_repeats = [10, 2]
# classes = ['A', 'B', 'C', 'D', 'E']
########################################################################################################################
# idp_data_dir = ['../data/idp-FGHIJ-rpt10']
# num_repeats = [10]
# classes = ['F', 'G', 'H', 'I', 'L']
########################################################################################################################
# idp_data_dir = ['../data/idp-KLMNO-rpt10']
# num_repeats = [10]
# sample_classes = [['K', 'L', 'M', 'N', 'O']]
# classes = ['K', 'L', 'M', 'N', 'O']
########################################################################################################################
# idp_data_dir = ['../data/idp-PQRST-rpt10']
# num_repeats = [10]
# sample_classes = [['P', 'Q', 'R', 'S', 'T']]
# classes = ['P', 'Q', 'R', 'S', 'T']
########################################################################################################################
# idp_data_dir = ['../data/idp-UVWXY-rpt10']
# num_repeats = [10]
# sample_classes = [['U', 'V', 'W', 'X', 'Y']]
# classes = ['U', 'V', 'W', 'X', 'Y']
########################################################################################################################
# idp_data_dir = ['../data/idp-ZSpcBspcEnt-rpt10']
# num_repeats = [10]
# sample_classes = [['Z', 'Spc', 'Bspc', 'Ent']]
# classes = ['Z', 'Spc', 'Bspc', 'Ent']
########################################################################################################################
# idp_data_dir = ['/Users/Leo/Documents/data/idp_29/data/idp-ABCDE-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-ABCDE-rpt2',
# '/Users/Leo/Documents/data/idp_29/data/idp-FGHIJ-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-KLMNO-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-PQRST-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-UVWXY-rpt10',
# '/Users/Leo/Documents/data/idp_29/data/idp-ZSpcBspcEnt-rpt10']
idp_data_dir = ['D:\PycharmProjects\mGesf\data/idp-ABCDE-rpt10',
'D:\PycharmProjects\mGesf\data/idp-ABCDE-rpt2',
'D:\PycharmProjects\mGesf\data/idp-FGHIJ-rpt10',
'D:\PycharmProjects\mGesf\data/idp-KLMNO-rpt10',
'D:\PycharmProjects\mGesf\data/idp-PQRST-rpt10',
'D:\PycharmProjects\mGesf\data/idp-UVWXY-rpt10',
'D:\PycharmProjects\mGesf\data/idp-ZSpcBspcEnt-rpt10']
num_repeats = [10, 2, 10, 10, 10, 10, 10]
sample_classes = [['A', 'B', 'C', 'D', 'E'],
['A', 'B', 'C', 'D', 'E'], # some of the ABCDE data are repeated twice
['F', 'G', 'H', 'I', 'J'],
['K', 'L', 'M', 'N', 'O'],
['P', 'Q', 'R', 'S', 'T'],
['U', 'V', 'W', 'X', 'Y'],
['Z', 'Spc', 'Bspc', 'Ent']]
classes = ['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y',
'Z', 'Spc', 'Bspc', 'Ent']
assert len(idp_data_dir) == len(num_repeats) == len(sample_classes) # check the consistency of zip variables
assert set(classes) == set([item for sublist in sample_classes for item in sublist]) # check categorical consistency
########################################################################################################################
interval_duration = 4.0 # how long does one writing take
period = 33 # ms
# classes = set([item for sublist in sample_classes for item in sublist]) # reduce to categorical classes
ls_dicts = \
[idp_preprocess_legacy(dr, interval_duration, classes=cs, num_repeat=nr, period=period)
for dr, nr, cs in zip(idp_data_dir, num_repeats, sample_classes)]
points_per_sample = int(resolve_points_per_sample(period, interval_duration))
# create input features
Y = []
X_mmw_rD = []
X_mmw_rA = []
# add to x and y
for lsd in ls_dicts:
for key, value in lsd.items():
X_mmw_rD += [d for d in value['mmw']['range_doppler']]
X_mmw_rA += [a for a in value['mmw']['range_azi']]
Y += [key for i in range(value['mmw']['range_doppler'].shape[0])]
pass
X_mmw_rD = np.asarray(X_mmw_rD)
X_mmw_rA = np.asarray(X_mmw_rA)
Y = np.asarray(Y)
encoder = OneHotEncoder(categories='auto')
Y = encoder.fit_transform(np.expand_dims(Y, axis=1)).toarray()
idp_model = load_model('E:\mgesf_backup\models\idp\idp_29_2020-05-04_03-24-10.425555.h5')
# make a contiuous temporal sequence A, B, C, D, E
# TODO have this followed by a void character
# key_indices = [0, 160, 320, 480, 640] # A, B, C, D, E
sequence = np.reshape(np.array(['H', 'E', 'L', 'L', 'O', 'Spc', 'W', 'O', 'R', 'L', 'D', 'Ent']), newshape=(-1, 1))
valid_indices = np.argmax(encoder.transform(sequence).toarray(), axis=1)
index_class_dict = dict([(index, clss[0]) for index, clss in zip(valid_indices, sequence)])
H_index = np.where(np.all(Y == encoder.transform([['H']]).toarray(), axis=1))[0][0]
E_index = np.where(np.all(Y == encoder.transform([['E']]).toarray(), axis=1))[0][0]
L_index = np.where(np.all(Y == encoder.transform([['L']]).toarray(), axis=1))[0][0]
O_index = np.where(np.all(Y == encoder.transform([['O']]).toarray(), axis=1))[0][0]
Spc_index = np.where(np.all(Y == encoder.transform([['Spc']]).toarray(), axis=1))[0][0]
W_index = np.where(np.all(Y == encoder.transform([['W']]).toarray(), axis=1))[0][0]
R_index = np.where(np.all(Y == encoder.transform([['R']]).toarray(), axis=1))[0][0]
D_index = np.where(np.all(Y == encoder.transform([['D']]).toarray(), axis=1))[0][0]
Ent_index = np.where(np.all(Y == encoder.transform([['Ent']]).toarray(), axis=1))[0][0]
key_indices = np.array([H_index, H_index + 1, E_index, L_index, L_index + 1, O_index,
Spc_index, W_index + 1, O_index + 1, R_index, L_index + 2, D_index, Ent_index, Ent_index + 1]) # H, E, L, L, O, spc, W, O, R, L, D
ys = np.array([Y[ki] for ki in key_indices])
print('Working with sequence: ' + str(encoder.inverse_transform(ys)))
rA_seq = np.array([X_mmw_rA[i] for i in key_indices])
rA_seq = np.reshape(rA_seq,
newshape=[-1] + list(rA_seq.shape[2:])) # flatten the sample dimension to create temporal sequence
rD_seq = np.array([X_mmw_rD[i] for i in key_indices])
rD_seq = np.reshape(rD_seq,
newshape=[-1] + list(rD_seq.shape[2:])) # flatten the sample dimension to create temporal sequence
# sample from the temporal sequence
rA_samples = prepare_x(rA_seq, window_size=121, stride=1)
rD_samples = prepare_x(rD_seq, window_size=121, stride=1)
y_pred = idp_model.predict([rD_samples, rA_samples], batch_size=32)
y_pred = y_pred[60:len(y_pred) - 60]
# plottings
matplotlib.rcParams.update({'font.size': 14})
plt.figure(figsize=(20, 6))
is_plotted_others = False
for i, col in enumerate(np.transpose(y_pred)):
if i in valid_indices:
plt.plot(moving_average(col, n=16), label='Predicted gesture: ' + index_class_dict[i], linewidth=3)
else:
plt.plot(moving_average(col, n=16), c='gray', label='Gestures for other chars') if not is_plotted_others else plt.plot(col, c='gray')
is_plotted_others = True
# plot char separation lines
for i in range(1, len(key_indices) - 2):
plt.axvline(x=121 * i, c='0.3', linewidth=5)
# debouncer_frame_threshold = 30
# debouncer_prob_threshold = 0.9
# debouncer = [0] * len(classes)
# for i, frame_pred in enumerate(y_pred):
# break_indices = np.argwhere(frame_pred > debouncer_prob_threshold)
# for bi in break_indices:
# bi = bi[0]
# debouncer[bi] = debouncer[bi] + 1
# if debouncer[bi] > debouncer_frame_threshold:
# plt.plot([i], [0.9], 'bo')
# plt.text(i, 0.95, index_class_dict[bi] + 'Detected ', fontsize=12, c='blue')
# debouncer = [0] * len(classes)
# plt.legend(loc=4)
plt.xlabel('Frames (30 frames per second)')
plt.ylabel('Probability of class prediction')
# plt.title('Temporal Probability cross a Continuous Sequence of "A, B, C, D, E"')
plt.title('Temporal Probability cross a Continuous Sequence of "H, E, L, L, O, Space, W, O, R, L, D"')
plt.title('Temporal Probability cross a Continuous Sequence of "H, E, L, L, O, Space, W, O, R, L, D", with Debouncer Detection')
plt.show()
| en | 0.311716 | # temporal probability ######################################################################################################################## # idp_data_dir = ['../data/idp-ABCDE-rpt10', '../data/idp-ABCDE-rpt2'] # num_repeats = [10, 2] # classes = ['A', 'B', 'C', 'D', 'E'] ######################################################################################################################## # idp_data_dir = ['../data/idp-FGHIJ-rpt10'] # num_repeats = [10] # classes = ['F', 'G', 'H', 'I', 'L'] ######################################################################################################################## # idp_data_dir = ['../data/idp-KLMNO-rpt10'] # num_repeats = [10] # sample_classes = [['K', 'L', 'M', 'N', 'O']] # classes = ['K', 'L', 'M', 'N', 'O'] ######################################################################################################################## # idp_data_dir = ['../data/idp-PQRST-rpt10'] # num_repeats = [10] # sample_classes = [['P', 'Q', 'R', 'S', 'T']] # classes = ['P', 'Q', 'R', 'S', 'T'] ######################################################################################################################## # idp_data_dir = ['../data/idp-UVWXY-rpt10'] # num_repeats = [10] # sample_classes = [['U', 'V', 'W', 'X', 'Y']] # classes = ['U', 'V', 'W', 'X', 'Y'] ######################################################################################################################## # idp_data_dir = ['../data/idp-ZSpcBspcEnt-rpt10'] # num_repeats = [10] # sample_classes = [['Z', 'Spc', 'Bspc', 'Ent']] # classes = ['Z', 'Spc', 'Bspc', 'Ent'] ######################################################################################################################## # idp_data_dir = ['/Users/Leo/Documents/data/idp_29/data/idp-ABCDE-rpt10', # '/Users/Leo/Documents/data/idp_29/data/idp-ABCDE-rpt2', # '/Users/Leo/Documents/data/idp_29/data/idp-FGHIJ-rpt10', # '/Users/Leo/Documents/data/idp_29/data/idp-KLMNO-rpt10', # 
'/Users/Leo/Documents/data/idp_29/data/idp-PQRST-rpt10', # '/Users/Leo/Documents/data/idp_29/data/idp-UVWXY-rpt10', # '/Users/Leo/Documents/data/idp_29/data/idp-ZSpcBspcEnt-rpt10'] # some of the ABCDE data are repeated twice # check the consistency of zip variables # check categorical consistency ######################################################################################################################## # how long does one writing take # ms # classes = set([item for sublist in sample_classes for item in sublist]) # reduce to categorical classes # create input features # add to x and y # make a contiuous temporal sequence A, B, C, D, E # TODO have this followed by a void character # key_indices = [0, 160, 320, 480, 640] # A, B, C, D, E # H, E, L, L, O, spc, W, O, R, L, D # flatten the sample dimension to create temporal sequence # flatten the sample dimension to create temporal sequence # sample from the temporal sequence # plottings # plot char separation lines # debouncer_frame_threshold = 30 # debouncer_prob_threshold = 0.9 # debouncer = [0] * len(classes) # for i, frame_pred in enumerate(y_pred): # break_indices = np.argwhere(frame_pred > debouncer_prob_threshold) # for bi in break_indices: # bi = bi[0] # debouncer[bi] = debouncer[bi] + 1 # if debouncer[bi] > debouncer_frame_threshold: # plt.plot([i], [0.9], 'bo') # plt.text(i, 0.95, index_class_dict[bi] + 'Detected ', fontsize=12, c='blue') # debouncer = [0] * len(classes) # plt.legend(loc=4) # plt.title('Temporal Probability cross a Continuous Sequence of "A, B, C, D, E"') | 1.68712 | 2 |
SiameseNetworks/predict.py | kleofas97/MasterThesis | 0 | 6619778 | <filename>SiameseNetworks/predict.py
import numpy as np
import cv2
import os
from tensorflow.keras.models import load_model
from sklearn.decomposition import PCA
import matplotlib
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def get_intensity_value(value, min_val, max_val):
if np.isnan(value):
value = 0
if np.isnan(max_val):
max_val = 0.0001
if np.isnan(min_val):
min_val = 0
nor_val = 255 * ((value - min_val) / (max_val - min_val))
if np.isnan(nor_val):
nor_val = np.nan_to_num(nor_val)
else:
nor_val = int(nor_val)
return nor_val
def pca(path_to_model,test_folder):
outersize = 150
trimsize = 65
innersize = outersize - 2 * trimsize
model = load_model(path_to_model)
# pages = 'complex_test'
predict_folder = os.path.join(test_folder,"output",str(trimsize))
test_folder = os.path.join(test_folder,"input")
output_layer = 2
model = model.layers[output_layer]
os.makedirs(predict_folder, exist_ok=True)
os.makedirs(os.path.join(predict_folder, 'cv2_vis1'), exist_ok=True)
os.makedirs(os.path.join(predict_folder, 'cv2_vis2'), exist_ok=True)
for imgp in os.listdir(test_folder):
print(imgp)
page = cv2.imread('{}/{}'.format(test_folder, imgp), 0)
rows, cols = page.shape
x = rows // innersize
y = cols // innersize
prows = (x + 1) * innersize + 2 * trimsize
pcols = (y + 1) * innersize + 2 * trimsize
ppage = np.zeros([prows, pcols])
ppage[trimsize:rows + trimsize, trimsize:cols + trimsize] = page[:, :]
predicted_patch = model.predict(np.zeros((1, outersize, outersize, 1)))
predicted_img = np.zeros((x + 1, y + 1, predicted_patch.shape[1]), np.float32)
for i in range(0, x + 1):
for j in range(0, y + 1):
patch = ppage[i * innersize:i * innersize + outersize,
j * innersize:j * innersize + outersize]
patch = np.expand_dims(patch, axis=0)
patch = np.expand_dims(patch, axis=3)
predicted_patch = model.predict(patch)[0]
predicted_img[i, j, :] = predicted_patch
pca = PCA(n_components=predicted_img.shape[2])
features = predicted_img.reshape(-1, predicted_img.shape[2])
pca_t_features = pca.fit_transform(features)
pca_t_features = pca_t_features[:, :3]
rgb = [[get_intensity_value(pca_t_features[i, 0], pca_t_features[:, 0].min(),
pca_t_features[:, 0].max()),
get_intensity_value(pca_t_features[i, 1], pca_t_features[:, 1].min(),
pca_t_features[:, 1].max()),
get_intensity_value(pca_t_features[i, 2], pca_t_features[:, 2].min(),
pca_t_features[:, 2].max())]
for i in range(pca_t_features.shape[0])]
rgb = np.asarray(rgb, dtype=np.uint8).reshape((*predicted_img.shape[:2], 3))
rgb_rows, rgb_cols, _ = rgb.shape
result = np.zeros([rows, cols, 3])
for i in range(rgb_rows):
for j in range(rgb_cols):
pixel_value = rgb[i, j, :]
result[i * innersize:i * innersize + innersize,
j * innersize:j * innersize + innersize, :] = pixel_value
# big_rgb=cv2.resize(rgb,(page.shape[1]-pad_h,page.shape[0]-pad_w))
# org_rgb=np.zeros([page.shape[0],page.shape[1],3])
# org_rgb[:-pad_w,:-pad_h]=big_rgb
cv2.imwrite('{}/{}'.format(os.path.join(predict_folder, 'cv2_vis1'), imgp), rgb)
cv2.imwrite('{}/{}'.format(os.path.join(predict_folder, 'cv2_vis2'), imgp), result)
| <filename>SiameseNetworks/predict.py
import numpy as np
import cv2
import os
from tensorflow.keras.models import load_model
from sklearn.decomposition import PCA
import matplotlib
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def get_intensity_value(value, min_val, max_val):
if np.isnan(value):
value = 0
if np.isnan(max_val):
max_val = 0.0001
if np.isnan(min_val):
min_val = 0
nor_val = 255 * ((value - min_val) / (max_val - min_val))
if np.isnan(nor_val):
nor_val = np.nan_to_num(nor_val)
else:
nor_val = int(nor_val)
return nor_val
def pca(path_to_model,test_folder):
outersize = 150
trimsize = 65
innersize = outersize - 2 * trimsize
model = load_model(path_to_model)
# pages = 'complex_test'
predict_folder = os.path.join(test_folder,"output",str(trimsize))
test_folder = os.path.join(test_folder,"input")
output_layer = 2
model = model.layers[output_layer]
os.makedirs(predict_folder, exist_ok=True)
os.makedirs(os.path.join(predict_folder, 'cv2_vis1'), exist_ok=True)
os.makedirs(os.path.join(predict_folder, 'cv2_vis2'), exist_ok=True)
for imgp in os.listdir(test_folder):
print(imgp)
page = cv2.imread('{}/{}'.format(test_folder, imgp), 0)
rows, cols = page.shape
x = rows // innersize
y = cols // innersize
prows = (x + 1) * innersize + 2 * trimsize
pcols = (y + 1) * innersize + 2 * trimsize
ppage = np.zeros([prows, pcols])
ppage[trimsize:rows + trimsize, trimsize:cols + trimsize] = page[:, :]
predicted_patch = model.predict(np.zeros((1, outersize, outersize, 1)))
predicted_img = np.zeros((x + 1, y + 1, predicted_patch.shape[1]), np.float32)
for i in range(0, x + 1):
for j in range(0, y + 1):
patch = ppage[i * innersize:i * innersize + outersize,
j * innersize:j * innersize + outersize]
patch = np.expand_dims(patch, axis=0)
patch = np.expand_dims(patch, axis=3)
predicted_patch = model.predict(patch)[0]
predicted_img[i, j, :] = predicted_patch
pca = PCA(n_components=predicted_img.shape[2])
features = predicted_img.reshape(-1, predicted_img.shape[2])
pca_t_features = pca.fit_transform(features)
pca_t_features = pca_t_features[:, :3]
rgb = [[get_intensity_value(pca_t_features[i, 0], pca_t_features[:, 0].min(),
pca_t_features[:, 0].max()),
get_intensity_value(pca_t_features[i, 1], pca_t_features[:, 1].min(),
pca_t_features[:, 1].max()),
get_intensity_value(pca_t_features[i, 2], pca_t_features[:, 2].min(),
pca_t_features[:, 2].max())]
for i in range(pca_t_features.shape[0])]
rgb = np.asarray(rgb, dtype=np.uint8).reshape((*predicted_img.shape[:2], 3))
rgb_rows, rgb_cols, _ = rgb.shape
result = np.zeros([rows, cols, 3])
for i in range(rgb_rows):
for j in range(rgb_cols):
pixel_value = rgb[i, j, :]
result[i * innersize:i * innersize + innersize,
j * innersize:j * innersize + innersize, :] = pixel_value
# big_rgb=cv2.resize(rgb,(page.shape[1]-pad_h,page.shape[0]-pad_w))
# org_rgb=np.zeros([page.shape[0],page.shape[1],3])
# org_rgb[:-pad_w,:-pad_h]=big_rgb
cv2.imwrite('{}/{}'.format(os.path.join(predict_folder, 'cv2_vis1'), imgp), rgb)
cv2.imwrite('{}/{}'.format(os.path.join(predict_folder, 'cv2_vis2'), imgp), result)
| en | 0.152671 | # pages = 'complex_test' # big_rgb=cv2.resize(rgb,(page.shape[1]-pad_h,page.shape[0]-pad_w)) # org_rgb=np.zeros([page.shape[0],page.shape[1],3]) # org_rgb[:-pad_w,:-pad_h]=big_rgb | 2.519608 | 3 |
keystoneauth_oidc_refreshtoken/plugin.py | quinoescobar/keystoneauth-oidc-refreshtoken | 2 | 6619779 | <reponame>quinoescobar/keystoneauth-oidc-refreshtoken<filename>keystoneauth_oidc_refreshtoken/plugin.py<gh_stars>1-10
# coding=utf-8
# Copyright 2017 <NAME>
# File: plugin.py
# Description:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import _utils as utils
from keystoneauth1 import access
from keystoneauth1.exceptions import oidc as exceptions
from keystoneauth1.identity.v3 import oidc
from positional import positional
# from keystoneauth_oidc_refreshtoken import exceptions
_logger = utils.get_logger(__name__)
class OidcRefreshToken(oidc._OidcBase):
"""Access Token Procurement Through Refresh Token Implementation."""
grant_type = "refresh_token"
@positional(4)
def __init__(self, auth_url, identity_provider, protocol,
client_id, client_secret,
access_token_endpoint=None,
discovery_endpoint=None,
access_token_type='access_token',
refresh_token=None,
**kwargs):
"""The OpenID Refresh Token plugin, It expects the following.
:param auth_url: URL of the Identity Service
:type auth_url: string
:param identity_provider: Name of the Identity Provider the client
will authenticate against
:type identity_provider: string
:param protocol: Protocol name as configured at keystone
:type protocol: string
:param client_id: OAuth 2.0 Client ID
:type client_id: string
:param client_secret: OAuth 2.0 Client Secret
:type client_secret: string
:param access_token_endpoint: OpenID Connect Provider Token Endpoint,
for example:
https://localhost:8020/oidc/OP/token
Note that if a discovery document is
provided this value will override
the discovered one.
:type access_token_endpoint: string
:param refresh_token: OpenID Connect Refresh Token
:type refresh_token: string
"""
super(OidcRefreshToken, self).__init__(
auth_url=auth_url,
identity_provider=identity_provider,
protocol=protocol,
client_id=client_id,
client_secret=client_secret,
access_token_endpoint=access_token_endpoint,
discovery_endpoint=discovery_endpoint,
access_token_type=access_token_type,
**kwargs)
self.refresh_token = refresh_token
def get_payload(self, session):
"""Get an authorization grant for "refresh_token" grant type.
:param session: a session object to send out HTTP requests.
:type session: keystoneauth1.session.Session
:returns: A dictionary containing the payload to be exchanged
:rtype: dict
"""
payload = {'refresh_token': self.refresh_token,
'grant_type': self.grant_type}
return payload
def get_unscoped_auth_ref(self, session):
"""Authenticate with OpenID Connect and get back the access token.
Exchange the refresh token to get a new access token issued by the
authentication server.
:param session: a session object to send out HTTP requests.
:type session: keystoneclient.session.Session
:returns: a token data representation
:rtype: :py:class:`keystoneauth1.access.AccessInfoV3`
"""
discovery = self._get_discovery_document(session)
grant_types = discovery.get("grant_types_supported")
if (grant_types and
self.grant_type is not None and
self.grant_type not in grant_types):
raise exceptions.OidcPluginNotSupported()
payload = self.get_payload(session)
access_token = self._get_access_token(session, payload)
response = self._get_keystone_token(session, access_token)
return access.create(resp=response)
| # coding=utf-8
# Copyright 2017 <NAME>
# File: plugin.py
# Description:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import _utils as utils
from keystoneauth1 import access
from keystoneauth1.exceptions import oidc as exceptions
from keystoneauth1.identity.v3 import oidc
from positional import positional
# from keystoneauth_oidc_refreshtoken import exceptions
_logger = utils.get_logger(__name__)
class OidcRefreshToken(oidc._OidcBase):
"""Access Token Procurement Through Refresh Token Implementation."""
grant_type = "refresh_token"
@positional(4)
def __init__(self, auth_url, identity_provider, protocol,
client_id, client_secret,
access_token_endpoint=None,
discovery_endpoint=None,
access_token_type='access_token',
refresh_token=None,
**kwargs):
"""The OpenID Refresh Token plugin, It expects the following.
:param auth_url: URL of the Identity Service
:type auth_url: string
:param identity_provider: Name of the Identity Provider the client
will authenticate against
:type identity_provider: string
:param protocol: Protocol name as configured at keystone
:type protocol: string
:param client_id: OAuth 2.0 Client ID
:type client_id: string
:param client_secret: OAuth 2.0 Client Secret
:type client_secret: string
:param access_token_endpoint: OpenID Connect Provider Token Endpoint,
for example:
https://localhost:8020/oidc/OP/token
Note that if a discovery document is
provided this value will override
the discovered one.
:type access_token_endpoint: string
:param refresh_token: OpenID Connect Refresh Token
:type refresh_token: string
"""
super(OidcRefreshToken, self).__init__(
auth_url=auth_url,
identity_provider=identity_provider,
protocol=protocol,
client_id=client_id,
client_secret=client_secret,
access_token_endpoint=access_token_endpoint,
discovery_endpoint=discovery_endpoint,
access_token_type=access_token_type,
**kwargs)
self.refresh_token = refresh_token
def get_payload(self, session):
"""Get an authorization grant for "refresh_token" grant type.
:param session: a session object to send out HTTP requests.
:type session: keystoneauth1.session.Session
:returns: A dictionary containing the payload to be exchanged
:rtype: dict
"""
payload = {'refresh_token': self.refresh_token,
'grant_type': self.grant_type}
return payload
def get_unscoped_auth_ref(self, session):
"""Authenticate with OpenID Connect and get back the access token.
Exchange the refresh token to get a new access token issued by the
authentication server.
:param session: a session object to send out HTTP requests.
:type session: keystoneclient.session.Session
:returns: a token data representation
:rtype: :py:class:`keystoneauth1.access.AccessInfoV3`
"""
discovery = self._get_discovery_document(session)
grant_types = discovery.get("grant_types_supported")
if (grant_types and
self.grant_type is not None and
self.grant_type not in grant_types):
raise exceptions.OidcPluginNotSupported()
payload = self.get_payload(session)
access_token = self._get_access_token(session, payload)
response = self._get_keystone_token(session, access_token)
return access.create(resp=response) | en | 0.687782 | # coding=utf-8 # Copyright 2017 <NAME> # File: plugin.py # Description: # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from keystoneauth_oidc_refreshtoken import exceptions Access Token Procurement Through Refresh Token Implementation. The OpenID Refresh Token plugin, It expects the following.
:param auth_url: URL of the Identity Service
:type auth_url: string
:param identity_provider: Name of the Identity Provider the client
will authenticate against
:type identity_provider: string
:param protocol: Protocol name as configured at keystone
:type protocol: string
:param client_id: OAuth 2.0 Client ID
:type client_id: string
:param client_secret: OAuth 2.0 Client Secret
:type client_secret: string
:param access_token_endpoint: OpenID Connect Provider Token Endpoint,
for example:
https://localhost:8020/oidc/OP/token
Note that if a discovery document is
provided this value will override
the discovered one.
:type access_token_endpoint: string
:param refresh_token: OpenID Connect Refresh Token
:type refresh_token: string Get an authorization grant for "refresh_token" grant type.
:param session: a session object to send out HTTP requests.
:type session: keystoneauth1.session.Session
:returns: A dictionary containing the payload to be exchanged
:rtype: dict Authenticate with OpenID Connect and get back the access token.
Exchange the refresh token to get a new access token issued by the
authentication server.
:param session: a session object to send out HTTP requests.
:type session: keystoneclient.session.Session
:returns: a token data representation
:rtype: :py:class:`keystoneauth1.access.AccessInfoV3` | 2.223877 | 2 |
orchestration/api/services.py | wisererik/service_catalog | 0 | 6619780 | from flask import jsonify
from flask import Blueprint
# from flask import request
service = Blueprint("service", __name__)
@service.route("/v1/orchestration/services/<string:id>", methods=['GET'])
def get_service(id):
return jsonify(id=id, name="Hello World!"), 200
| from flask import jsonify
from flask import Blueprint
# from flask import request
service = Blueprint("service", __name__)
@service.route("/v1/orchestration/services/<string:id>", methods=['GET'])
def get_service(id):
return jsonify(id=id, name="Hello World!"), 200
| en | 0.693768 | # from flask import request | 2.476632 | 2 |
bfillings/tests/test_sortmerna_v2.py | gregcaporaso/burrito-fillings | 0 | 6619781 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Unit tests for the SortMeRNA version 2.0 Application controller
===============================================================
"""
from unittest import TestCase, main
import re
from os import close
from os.path import abspath, exists, join, dirname
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from skbio.util import remove_files
from skbio.parse.sequences import parse_fasta
from bfillings.sortmerna_v2 import (build_database_sortmerna,
sortmerna_ref_cluster,
sortmerna_map)
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, biocore development team
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
# Test class and cases
class SortmernaV2Tests(TestCase):
""" Tests for SortMeRNA version 2.0 functionality """
def setUp(self):
self.output_dir = mkdtemp()
self.reference_seq_fp = reference_seqs_fp
self.read_seqs_fp = read_seqs_fp
# create temporary file with reference sequences defined
# in reference_seqs_fp
f, self.file_reference_seq_fp = mkstemp(prefix='temp_references_',
suffix='.fasta')
close(f)
# write _reference_ sequences to tmp file
with open(self.file_reference_seq_fp, 'w') as tmp:
tmp.write(self.reference_seq_fp)
tmp.close()
# create temporary file with read sequences defined in read_seqs_fp
f, self.file_read_seqs_fp = mkstemp(prefix='temp_reads_',
suffix='.fasta')
close(f)
# write _read_ sequences to tmp file
with open(self.file_read_seqs_fp, 'w') as tmp:
tmp.write(self.read_seqs_fp)
tmp.close()
# list of files to remove
self.files_to_remove = [self.file_reference_seq_fp,
self.file_read_seqs_fp]
def tearDown(self):
remove_files(self.files_to_remove)
rmtree(self.output_dir)
def test_indexdb_default_param(self):
""" Test indexing a database using SortMeRNA
"""
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
expected_db_files = set(sortmerna_db + ext
for ext in ['.bursttrie_0.dat', '.kmer_0.dat',
'.pos_0.dat', '.stats'])
# Make sure all db_files exist
for fp in expected_db_files:
self.assertTrue(exists(fp))
# Add files to be remove
self.files_to_remove.extend(db_files_to_remove)
def test_empty_fasta_path(self):
""" Indexdb should fail with an empty fasta path
"""
self.assertRaises(ValueError,
build_database_sortmerna,
fasta_path=None,
max_pos=250,
output_dir=self.output_dir)
def test_empty_inputs(self):
""" (1) Indexdb should set output_dir to the same directory
as where the input FASTA file is located;
(2) SortMeRNA should fail if an empty result path is
passed;
(3) SortMeRNA should fail if an empty seq path is passed
"""
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=None)
self.files_to_remove.extend(db_files_to_remove)
fasta_dir = dirname(abspath(self.file_reference_seq_fp))
out_dir = dirname(sortmerna_db)
self.assertEqual(fasta_dir, out_dir)
self.assertRaises(ValueError,
sortmerna_ref_cluster,
seq_path=self.file_read_seqs_fp,
sortmerna_db=sortmerna_db,
refseqs_fp=self.file_reference_seq_fp,
result_path=None)
self.assertRaises(ValueError,
sortmerna_ref_cluster,
seq_path=None,
sortmerna_db=sortmerna_db,
refseqs_fp=self.file_reference_seq_fp,
result_path=join(self.output_dir,
"sortmerna_otus.txt"))
def test_tabular_output(self):
""" SortMeRNA should output a BLAST tabular output
"""
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA
clusters, failures, smr_files_to_remove = sortmerna_ref_cluster(
seq_path=self.file_read_seqs_fp,
sortmerna_db=sortmerna_db,
refseqs_fp=self.file_reference_seq_fp,
result_path=join(self.output_dir, "sortmerna_otus.txt"),
tabular=True)
self.assertTrue(exists(join(self.output_dir,
"sortmerna_otus.blast")))
def test_empty_result_path(self):
""" SortMeRNA should fail with an empty indexed database
"""
self.assertRaises(ValueError,
sortmerna_ref_cluster,
seq_path=self.file_read_seqs_fp,
sortmerna_db=None,
refseqs_fp=self.file_reference_seq_fp,
result_path=join(self.output_dir,
"sortmerna_otus.txt")
)
def test_sortmerna_default_param(self):
""" SortMeRNA version 2.0 reference OTU picking works with default settings
"""
# rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA
cluster_map, failures, smr_files_to_remove = sortmerna_ref_cluster(
seq_path=self.file_read_seqs_fp,
sortmerna_db=sortmerna_db,
refseqs_fp=self.file_reference_seq_fp,
result_path=join(self.output_dir, "sortmerna_otus.txt"))
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_otus_otus.txt',
'sortmerna_otus.log',
'sortmerna_otus_denovo.fasta',
'sortmerna_otus.fasta']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
# Files created sortmerna to be deleted (StdErr and StdOut were already
# removed in sortmerna_ref_cluster)
self.files_to_remove.extend(output_files)
# Random reads that should not appear in any output file
random_reads = ['simulated_random_reads.fa.000000000',
'simulated_random_reads.fa.000000001',
'simulated_random_reads.fa.000000002',
'simulated_random_reads.fa.000000003',
'simulated_random_reads.fa.000000004',
'simulated_random_reads.fa.000000005',
'simulated_random_reads.fa.000000006',
'simulated_random_reads.fa.000000007',
'simulated_random_reads.fa.000000008',
'simulated_random_reads.fa.000000009']
# Reads passing E-value threshold and with similarity/coverage >=97%
otu_reads = ['HMPMockV1.2.Staggered2.673827_47',
'HMPMockV1.2.Staggered2.673827_115',
'HMPMockV1.2.Staggered2.673827_122',
'HMPMockV1.2.Staggered2.673827_161',
'HMPMockV1.2.Staggered2.673827_180',
'HMPMockV1.2.Staggered2.673827_203',
'HMPMockV1.2.Staggered2.673827_207',
'HMPMockV1.2.Staggered2.673827_215',
'HMPMockV1.2.Staggered2.673827_218',
'HMPMockV1.2.Staggered2.673827_220']
# Reads passing E-value threshold and with similarity/coverage <97%
denovo_reads = ['HMPMockV1.2.Staggered2.673827_0',
'HMPMockV1.2.Staggered2.673827_1',
'HMPMockV1.2.Staggered2.673827_2',
'HMPMockV1.2.Staggered2.673827_3',
'HMPMockV1.2.Staggered2.673827_4',
'HMPMockV1.2.Staggered2.673827_5',
'HMPMockV1.2.Staggered2.673827_6',
'HMPMockV1.2.Staggered2.673827_7',
'HMPMockV1.2.Staggered2.673827_8',
'HMPMockV1.2.Staggered2.673827_9']
# Check correct number of OTU clusters in file
otu_clusters = ['295053']
f_aligned = open(output_files[3], "U")
f_otumap = open(output_files[0], "U")
f_denovo = open(output_files[2], "U")
# Verify the aligned FASTA file
for label, seq in parse_fasta(f_aligned):
id = label.split()[0]
# Read is not random
self.assertNotIn(id, random_reads)
# Read is either in otu_reads or denovo_reads
self.assertIn(id, otu_reads+denovo_reads)
f_aligned.close()
# Verify the de novo reads FASTA file
for label, seq in parse_fasta(f_denovo):
id = label.split()[0]
# Read is not random
self.assertNotIn(id, random_reads)
# Read is not an OTU read
self.assertNotIn(id, otu_reads)
# Read is a de novo read
self.assertIn(id, denovo_reads)
f_denovo.close()
# Check the OTU map
for line in f_otumap:
otu_entry = line.split()
# Cluster ID is correct
self.assertIn(otu_entry[0], otu_clusters)
# Each read in the cluster must exclusively be an OTU read
for read in otu_entry[1:]:
self.assertNotIn(read, random_reads)
self.assertNotIn(read, denovo_reads)
self.assertIn(read, otu_reads)
f_otumap.close()
# Check returned list of lists of clusters
expected_cluster = ['HMPMockV1.2.Staggered2.673827_47',
'HMPMockV1.2.Staggered2.673827_115',
'HMPMockV1.2.Staggered2.673827_122',
'HMPMockV1.2.Staggered2.673827_161',
'HMPMockV1.2.Staggered2.673827_180',
'HMPMockV1.2.Staggered2.673827_203',
'HMPMockV1.2.Staggered2.673827_207',
'HMPMockV1.2.Staggered2.673827_215',
'HMPMockV1.2.Staggered2.673827_218',
'HMPMockV1.2.Staggered2.673827_220']
# Should only have 1 cluster
self.assertEqual(1, len(cluster_map))
for actual_cluster in cluster_map.itervalues():
actual_cluster.sort()
expected_cluster.sort()
self.assertEqual(actual_cluster, expected_cluster)
# Check log file number of clusters and failures corresponds to
# the results in the output files
f_log = open(output_files[1], "U")
num_clusters = 0
num_failures = 0
for line in f_log:
if line.startswith(" Total OTUs"):
num_clusters = (re.split(' = ', line)[1]).strip()
elif line.startswith(" Total reads for de novo clustering"):
num_failures = (re.split(' = ', line)[1]).strip()
f_log.close()
self.assertEqual(int(num_clusters), len(otu_clusters))
self.assertEqual(int(num_failures), len(denovo_reads))
def test_sortmerna_map_default(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
using default parameters
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.blast',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
blast_alignments_fp = app_result['BlastAlignments'].name
# Check there are 30 alignments (1 per read)
with open(blast_alignments_fp, 'U') as blast_actual:
entries = (line.strip().split('\t') for line in blast_actual)
actual_alignments = {r[0]: r[1:] for r in entries}
self.assertEqual(30, len(actual_alignments))
# Check this alignment exists
self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
in actual_alignments)
self.assertEqual("97.3", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][1])
self.assertEqual("100", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][12])
# Check alignment for random read is NULL
self.assertTrue("simulated_random_reads.fa.000000000"
in actual_alignments)
self.assertEqual("*", actual_alignments[
"simulated_random_reads.fa.000000000"][0])
def test_sortmerna_map_sam_alignments(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
outputting Blast and SAM alignments
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
output_sam=True)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.blast',
'sortmerna_map.sam',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
sam_alignments_fp = app_result['SAMAlignments'].name
# Check there are 30 alignments in the SAM output (1 per read)
with open(sam_alignments_fp, 'U') as sam_actual:
entries = (line.strip().split('\t') for line in sam_actual)
actual_alignments = {r[0]: r[1:] for r in entries}
# 30 alignments expected + 2 lines for @HD and @PG fields
self.assertEqual(32, len(actual_alignments))
# Check this alignment exists
self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
in actual_alignments)
self.assertEqual("295053", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][1])
self.assertEqual("AS:i:418", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][10])
# Check alignment for random read is NULL
self.assertTrue("simulated_random_reads.fa.000000000"
in actual_alignments)
self.assertEqual("*", actual_alignments[
"simulated_random_reads.fa.000000000"][1])
def test_sortmerna_map_sam_alignments_with_tags(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
outputting SAM alignments with @SQ tags
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
output_sam=True,
sam_SQ_tags=True,
blast_format=None)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.sam',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
sam_alignments_fp = app_result['SAMAlignments'].name
# Check there are 30 alignments in the SAM output (1 per read)
with open(sam_alignments_fp, 'U') as sam_actual:
actual_entries = [line.strip().split('\t') for line in sam_actual]
# 30 alignments expected + 2 lines for @HD and @PG fields + 5 lines
# for the @SQ tags
self.assertEqual(37, len(actual_entries))
# Check all expected @SQ tags have been included
SQ_array = [['@SQ', 'SN:42684', 'LN:1501'],
['@SQ', 'SN:342684', 'LN:1486'],
['@SQ', 'SN:426848', 'LN:1486'],
['@SQ', 'SN:295053', 'LN:1389'],
['@SQ', 'SN:879972', 'LN:1371']]
for entry in SQ_array:
self.assertTrue(entry in actual_entries)
def test_sortmerna_map_blast_no_null_alignments(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
using Blast with --print_all_reads option set to False
(no NULL alignments output)
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
print_all_reads=False)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.blast',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
blast_alignments_fp = app_result['BlastAlignments'].name
# Check there are 20 alignments (1 per read)
with open(blast_alignments_fp, 'U') as blast_actual:
entries = (line.strip().split('\t') for line in blast_actual)
actual_alignments = {r[0]: r[1:] for r in entries}
self.assertEqual(20, len(actual_alignments))
# Check this alignment exists
self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
in actual_alignments)
self.assertEqual("97.3", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][1])
self.assertEqual("100", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][12])
# Check alignment for random read does not exist
self.assertFalse("simulated_random_reads.fa.000000000"
in actual_alignments)
    def test_sortmerna_map_num_alignments(self):
        """ SortMeRNA version 2.0 for mapping sequences onto a reference
            outputting first INT num_alignments passing the E-value threshold
            (rather than first INT best alignments)
        """
        # Rebuild the index
        sortmerna_db, db_files_to_remove = build_database_sortmerna(
            abspath(self.file_reference_seq_fp),
            max_pos=250,
            output_dir=self.output_dir)
        # Files created by indexdb_rna to be deleted
        self.files_to_remove.extend(db_files_to_remove)
        # Run SortMeRNA mapper, reporting the first alignment passing the
        # E-value threshold per read (num_alignments=1)
        app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
                                   output_dir=self.output_dir,
                                   refseqs_fp=self.file_reference_seq_fp,
                                   sortmerna_db=sortmerna_db,
                                   num_alignments=1)
        # Check all sortmerna output files exist
        output_files = [join(self.output_dir, ext)
                        for ext in ['sortmerna_map.blast',
                                    'sortmerna_map.log']]
        # Check output files exist
        for fp in output_files:
            self.assertTrue(exists(fp))
        blast_alignments_fp = app_result['BlastAlignments'].name
        # Check there are 30 alignments (1 per read); unaligned reads are
        # still reported, with a NULL ("*") entry
        with open(blast_alignments_fp, 'U') as blast_actual:
            # map read id -> remaining tabular fields
            entries = (line.strip().split('\t') for line in blast_actual)
            actual_alignments = {r[0]: r[1:] for r in entries}
            self.assertEqual(30, len(actual_alignments))
            # Check this alignment exists
            self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
                            in actual_alignments)
            # field 1 presumably holds % identity, field 12 % query coverage
            # (Blast tabular) -- TODO confirm against SortMeRNA docs
            self.assertEqual("97.3", actual_alignments[
                "HMPMockV1.2.Staggered2.673827_47"][1])
            self.assertEqual("100", actual_alignments[
                "HMPMockV1.2.Staggered2.673827_47"][12])
            # Check alignment for random read is NULL
            self.assertTrue("simulated_random_reads.fa.000000000"
                            in actual_alignments)
            self.assertEqual("*", actual_alignments[
                "simulated_random_reads.fa.000000000"][0])
def test_blast_or_sam(self):
""" SortMeRNA should fail with output_sam and blast_format both
set to False
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
self.assertRaises(ValueError,
sortmerna_map,
seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
output_sam=False,
blast_format=None)
def test_best_or_num_alignments(self):
""" SortMeRNA should fail with "best" and "num_alignments" both
set to True
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
self.assertRaises(ValueError,
sortmerna_map,
seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
best=1,
num_alignments=1)
# Reference sequence database
reference_seqs_fp = """>426848
AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAATACATGCAAGTCGAGGGGCAGCACTGGTAGCAATAC
CTGGTGGCGACCGGCGGACGGGTGCGTAACACGTATGCAACCTACCCTGTACAGGGGGATAGCCCGAGGAAATTCGGATT
AATACCCCATACGATAAGAATCGGCATCGATTTTTATTGAAAGCTCCGGCGGTACAGGATGGGCATGCGCCCCATTAGCT
AGTTGGTGAGGTAACGGCTCACCAAGGCTACGATGGGTAGGGGGCCTGAGAGGGTGATCCCCCACACTGGAACTGAGACA
CGGTCCAGACTCCTACGGGAGGCAGCAGTAAGGAATATTGGTCAATGGGCGCAAGCCTGAACCAGCCATGCCGCGTGCAG
GAAGACTGCCATTATGGTTGTAAACTGCTTTTATATGGGAAGAAACCTCCGGACGTGTCCGGAGCTGACGGTACCATGTG
AATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATCCAAGCGTTATCCGGATTTATTGGGTTTAAA
GGGTGCGTAGGCGGCGTGTTAAGTCAGAGGTGAAATTCGGCAGCTCAACTGTCAAATTGCCTTTGATACTGGCACACTTG
AATGCGATTGAGGTAGGCGGAATGTGACATGTAGCGGTGAAATGCTTAGACATGTGACAGAACACCGATTGCGAAGGCAG
CTTACCAAGTCGTTATTGACGCTGAGGCACGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA
AACGATGATAACTCGACGTTAGCGATACACTGTTAGCGTCCAAGCGAAAGCGTTAAGTTATCCACCTGGGAAGTACGATC
GCAAGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGATACGCGAGGA
ACCTTACCAGGGCTTAAATGGGGAACGACCTTCTGGGAAACCAGAATTTCTTTTAGACGGTCCTCAAGGTGCTGCATGGT
TGTCGTCAGCTCGTGCCGTGAGGTGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTACTGTTAGTTGCCAGCGGATAAT
GCCGGGGACTCTAGCGGAACTGCCTGTGCAAACAGAGAGGAAGGTGGGGATGACGTCAAATCATCACGGCCCTTACGTCC
TGGGCTACACACGTGCTACAATGGCCGGTACAGAGGGCAGCCACTTCGTGAGAAGGAGCGAATCCTTAAAGCCGGTCTCA
GTTCGGATTGTAGTCTGCAACTCGACTACATGAAGCTGGAATCGCTAGTAATCGCGTATCAGCCATGACGCGGTGAATAC
GTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAATTGGGAGTACCTAAAGTCGGTAACCGCAAGGAGCCGCCT
AAGGTAATACCAGTGACTGGGGCTAAGTCGTAACAAGGTAGCCGTA
>42684
AGAGTTTGATCCTGGCTCAGATTGAACGCTGGCGGCATGCTTTACACATGCAAGTCGGACGGCAGCACAGAGGAGCTTGC
TTCTTGGGTGGCGAGTGGCGAACGGGTGAGTGACGCATCGGAACGTACCGAGTAATGGGGGATAACTGTCCGAAAGGACA
GCTAATACCGCATACGCCCTGAGGGGGAAAGCGGGGGATCTTAGGACCTCGCGTTATTCGAGCGGCCGATGTCTGATTAG
CTGGTTGGCGGGGTAAAGGCCCACCAAGGCGACGATCAGTAGCGGGTCTGAGAGGATGATCCGCCACACTGGGACTGAGA
CACGGCCCAGACTCCTACGGGAGGCAGCAGTGGGGAATTTTGGACAATGGGCGCAAGCCTGATCCAGCCATGCCGCGTGT
CTGAAGAAGGCCTTCGGGTTGTAAAGGACTTTTGTCAGGGAAGAAAAGGAACGTGTTAATACCATGTTCTGATGACGGTA
CCTGAAGAATAAGCACCGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGGTGCGAGCGTTAATCGGAATTACTGG
GCGTAAAGCGGGCGCAGACGGTTACTTAAGCGGGATGTGAAATCCCCGGGCTCAACCCGGGAACTGCGTTCCGAACTGGG
TGGCTAGAGTGTGTCAGAGGGGGGTAGAATTCCACGTGTAGCAGTGAAATGCGTAGAGATGTGGAGGAATACCGATGGCG
AAGGCAGCCCCCTGGGATAACACTGACGTTCATGCCCGAAAGCGTGGGTAGCAAACAGGGTTAGATACCCTGGTAGTCCA
CGCCCTAAACGATGTCGATTAGCTGTTGGGGCACTTGATGCCTTAGTAGCGTAGCTAACGCGTGAAATCGACCGCCTGGG
GAGTACGGTCGCAAGATTAAAACTCAAAGGAATTGACGGGGACCCGCACAAGCGGTGGATGATGTGGATTAATTCGATGC
AACGCGAAGAACCTTACCTGGTCTTGACATGTACGGAATCTTCCAGAGACGGAAGGGTGCCTTCGGGAGCCGTAACACAG
GTGCTGCATGGCTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCATTAGTTG
CCATCACTTGGTTGGGCACTCTAATGAGACTGCCGGTGACAAACCGGAGGAAGGTGGGGATGACGTCAAGTCCTCATGGC
CCTTATGACCAGGGCTTCACACGTCATACAATGGTCGGTACAGAGGGTAGCCAAGCCGCGAGGCGGAGCCAATCCCAGAA
AACCGATCGTAGTCCGGATTGCACTCTGCAACTCGAGTGCATGAAGTCGGAATCGCTAGTAATCGCAGGTCAGCATACTG
CGGTGAATACGTTCCCGGGTCTTGTACACACCGCCCGTCACACCATGGGAGTGGGGGATACCAGAAGCAGGTAGGCTAAC
CGCAAGGAGGCCGCTTGCCACGGTATGCTTCATGACTGGGGTGAAGTCGTAACAAGGTAAC
>342684
AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAACACATGCAAGTCGAGGGGCATCGCGGGTAGCAATAC
CTGGCGGCGACCGGCGGAAGGGTGCGTAACGCGTGAGCGACATACCCGTGACAGGGGGATAACAGATGGAAACGTCTCCT
AATACCCCATAAGATCATATATCGCATGGTATGTGATTGAAAGGTGAGAACCGGTCACGGATTGGCTCGCGTCCCATCAG
GTAGACGGCGGGGCAGCGGCCCGCCGTGCCGACGACGGGTAGGGGCTCTGAGAGGAGTGACCCCCACAATGGAACTGAGA
CACGGTCCATACTCCTACGGGAGGCAGCAGTGAGGAATATTGGTCAATGGGCGGAAGCCTGAACCAGCCATGCCGCGTGC
GGGAGGACGGCCCTATGGGTTGTAAACCGCTTTTGAGTGAGAGCAATAAGGTTCACGTGTGGACCGATGAGAGTATCATT
CGAATAAGCATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATGCGAGCGTTATCCGGATTCATTGGGTTTA
AAGGGTGCGTAGGCGGACATGTAAGTCCGAGGTGAAAGACCGGGGCCCAACCCCGGGGTTGCCTCGGATACTGTGTGTCT
GGAGTGGACGTGCCGCCGGGGGAATGAGTGGTGTAGCGGTGAAATGCATAGATGTCACTCAGAACACCGATTGCGAAGGC
ACCTGGCGAATGTCTTACTGACGCTGAGGCACGAAAGCGTGGGGATCGAACAGGATTAGATACCCTGGTAGTCCACGCAG
TAAACGATGATGGCTGTCCGTTCGCTCCGATAGGAGTGAGTAGACAAGCGAAAGCGCTAAGCCATCCACCTGGGGAGTAC
GGCCGCAAGGCTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGAGGAACATGTGGTTTAATTCGATGATACGCG
AGGAACCTTACCCGGGCTCGAACGGCAGGTGAACGATGCAGAGATGCAAAGGCCCTTCGGGGCGTCTGTCGAGGTGCTGC
ATGGTTGTCGTCAGCTCGTGCCGTGAGGTGTCGGCTCAAGTGCCATAACGAGCGCAACCCTTGCCTGCAGTTGCCATCGG
GTAAAGCCGGGGACTCTGCAGGGACTGCCACCGCAAGGTGAGAGGAGGGGGGGGATGACGTCAAATCAGCACGGCCCTTA
CGTCCGGGGCGACACACGTGTTACAATGGCGGCCACAGCGGGAAGCCACCCAGTGATGGGGCGCGGATCCCAAAAAAGCC
GCCTCAGTTCGGATCGGAGTCTGCAACCCGACTCCGTGAAGCTGGATTCGCTAGTAATCGCGCATCAGCCATGGCGCGGT
GAATACGTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAGTCGTGGGCGCCTGAAGGCCGTGACCGCGAGGAG
CGGCCTAGGGCGAACGCGGTGACTGGGGCTAAGTCGTAACAAGGTA
>295053
AGAGTTTGATCCTGGCTCAGGACGAACGCTGGCGGCGTGCCTAACACATGCAAGTCGAACGGAGATGCTCCTTCGGGAGT
ATCTTAGTGGCGAACGGGTGAGTAACGCGTGAGCAACCTGACCTTCACAGGGGGATAACCGCTGGAAACAGCAGCTAATA
CCGCATAACGTCGCAAGACCAAAGAGGGGGACCTTCGGGCCTCTTGCCATCGGATGTGCCCAGATGGGATTAGCTTGTTG
GTGGGGTAACGGCTCACCAAGGCGACGATCCCTAGCTGGTCTGAGAGGATGACCAGCCACACTGGAACTGAGACACGGTC
CAGACTCCTACGGGAGGCAGCAGTGGGGAATATTGCACAATGGGCGCAAGCCTGATGCAGCCATGCCGCGTGTATGAAGA
AGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAG
AAGAAGCACCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAA
GCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTG
AGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTGAAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGG
CCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA
AACGATGTCGACTTGGAGGTTGTGCCCTTGAGGCGTGGCTTCCGGAGCTAACGCGTTAAGTCGACCGCCTGGGGAGTACG
GCCGCAAGGTTAAAACTCAAATGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGCAACGCGA
AGAACCTTACCTGGTCTTGACATCCACAGAACTTTCCAGAGATGGATTGGTGCCTTCGGGAACTGTGAGACAGGTGCTGC
ATGGCTGTCGTCAGCTCGTGTTGTGAAATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCCTTTGTTGCCAGCGG
TCCGGCCGGGAACTCAAAGGAGACTGCCAGTGATAAACTGGAGGAAGGTGGGGATGACGTCAAGTCATCATGGCCCTTAC
GACCAGGGCTACACACGTGCTACAATGGCGCATACAAAGAGAAGCGACCTCGCGAGAGCAAGCGGACCTCATAAAGTGCG
TCGTAGTCCGGATTGGAGTCTGCAACTCGACTCCATGAAGTCGGAATCGCTAGTAATCGTGGATCAGAATGCCACGGTGA
ATACGTTCCCGGGCCTTGCACACACCGCC
>879972
GACGAACGCTGGCGGCGTGCCTAATACATGCAAGTCGAACGAGATTGACCGGTGCTTGCACTGGTCAATCTAGTGGCGAA
CGGGTGAGTAACACGTGGGTAACCTGCCCATCAGAGGGGGATAACATTCGGAAACGGATGCTAAAACCGCATAGGTCTTC
GAACCGCATGGTTTGAAGAGGAAAAGAGGCGCAAGCTTCTGCTGATGGATGGACCCGCGGTGTATTAGCTAGTTGGTGGG
GTAACGGCTCACCAAGGCGACGATACATAGCCGACCTGAGAGGGTGATCGGCCACACTGGGACTGAGACACGGCCCAGAC
TCCTACGGGAGGCAGCAGTAGGGAATCTTCGGCAATGGACGGAAGTCTGACCGAGCAACGCCGCGTGAGTGAAGAAGGTT
TTCGGATCGTAAAGCTCTGTTGTAAGAGAAGAACGAGTGTGAGAGTGGAAAGTTCACACTGTGACGGTATCTTACCAGAA
AGGGACGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGTCCCGAGCGTTGTCCGGATTTATTGGGCGTAAAGCGA
GCGCAGGCGGTTAGATAAGTCTGAAGTTAAAGGCTGTGGCTTAACCATAGTACGCTTTGGAAACTGTTTAACTTGAGTGC
AAGAGGGGAGAGTGGAATTCCATGTGTAGCGGTGAAATGCGTAGATATATGGAGGAACACCGGTGGCGAAAGCGGCTCTC
TGGCTTGTAACTGACGCTGAGGCTCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGA
TGAGTGCTAGGTGTTAGACCCTTTCCGGGGTTTAGTGCCGCAGCTAACGCATTAAGCACTCCGCCTGGGGAGTACGACCG
CAGGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGAAGCAACGCGAAGAA
CCTTACCAGGTCTTGACATCCCTCTGACCGCTCTAGAGATAGAGCTTTCCTTCGGGACAGAGGTGACAGGTGGTGCATGG
TTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTATTGTTAGTTGCCATCATTCAG
TTGGGCACTCTAGCGAGACTGCCGGTAATAAACCGGAGGAAGGTGGGGATGACGTCAAATCATCATGCCCCTTATGACCT
GGGCTACACACGTGCTACAATGGCTGGTACAACGAGTCGCAAGCCGGTGACGGCAAGCTAATCTCTTAAAGCCAGTCTCA
GTTCGGATTGTAGGCTGCAACTCGCCTACATGAAGTCGGAATCGCTAGTAATCGCGGATCAGCACGCCGCGGTGAATACG
TTCCCGGGCCT
"""
# Reads to search against the database
# - 10 rRNA reads: amplicon reads were taken from Qiime study 1685
# - 10 random reads: simulated using mason with the following command:
# mason illumina -N 10 -snN -o simulated_random_reads.fa -n
# 150 random.fasta
# - 10 rRNA reads with id < 97: amplicon reads were taken from
# Qiime study 1685
read_seqs_fp = """>HMPMockV1.2.Staggered2.673827_47 M141:79:749142:1:1101:16169:1589
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCAAGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATTTGATACTGGCAAGCTTGAGTCTCGTAGAGGAGGGTAGAATTCCAGGTGTAGCGGGG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCATGGACGAAGACTGACGCT
>HMPMockV1.2.Staggered2.673827_115 M141:79:749142:1:1101:14141:1729
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CCGGCTCAACCTTGGAACTGCATCTGATACGGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCTCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_122 M141:79:749142:1:1101:16032:1739
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GTGATCAAACA
>HMPMockV1.2.Staggered2.673827_161 M141:79:749142:1:1101:17917:1787
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_180 M141:79:749142:1:1101:16014:1819
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGTGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
>HMPMockV1.2.Staggered2.673827_203 M141:79:749142:1:1101:17274:1859
TACGGAGGTTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CCGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGATCAAACA
>HMPMockV1.2.Staggered2.673827_207 M141:79:749142:1:1101:17460:1866
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_215 M141:79:749142:1:1101:18390:1876
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACG
>HMPMockV1.2.Staggered2.673827_218 M141:79:749142:1:1101:18249:1879
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTTCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCACACA
>HMPMockV1.2.Staggered2.673827_220 M141:79:749142:1:1101:15057:1880
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTC
>simulated_random_reads.fa.000000000
AGCCGGGTGTCTACGGTCAGGTGTGTTCTGACTACGTAGTTTGACAGCACGTGTCCTTTCCCCTTCCCAAGGTAACGAAT
TGTCGTTATCAACGTTTCGATCCGTAATTTCACGGAACGACATAAAGGCATCAATACTATCGCCAACAGA
>simulated_random_reads.fa.000000001
GTGGACGTCGTGGCGGCGTACTAACTTCCTACAGGCATATCCGGAATAACATTCTGCCGCTTGTCGACATAAGCTGTTCC
CTACATAGACGACGACGGTTGAAGGGTGTATGTATTCTTTGGGTACGGCTCCTCTGGGCGCATGGTAGCA
>simulated_random_reads.fa.000000002
CATTCTTTATAGGCCTACAACACTAATCATCGTTAAGCATAAGGGGAGGAGTGTGCGTGGCATCAAGTCCTGGTTCTTCG
CCTAGTACCACACCGTCTCACACGCAGCCGCCGACGACCAGTGAGGGCGCGTGGGACACCCATTCGGTCC
>simulated_random_reads.fa.000000003
TCGCCTTGGTACAAACAGTCGCGGCACGCTGTATGGAGGACCATAGAGGCACAGGCTGAGGACAGGGGCATGGAAGGTTC
AATCGCCCCCCACAGCTTTAGGTAGGAAGTACTGTTCTAGTGCCAATTTGATTTTAACGGCAGTTACTCG
>simulated_random_reads.fa.000000004
CATATTCTAATATCCTACTTCTGATACCCGATTATACACGACACCACCCCAGGACTGTCGTCACATCCTTATCTGGATAA
ACATCCGGTTCCGTTTGGCCGTGCTCCGCAAGTGATGCGTCTGTGGAATGTACGTGGAGCGTTGACAGTT
>simulated_random_reads.fa.000000005
CCGGATTAGGCATGTTTATAGTACAACGGATTCGCAAAAAGGTCAGGGTAACAATTTTGAAATGCTTTCATACTGCGGTC
TAAATGGACCACCCTTTAGGTGCAGCCAACTATAGTTGGTCGATTCTCTGAACACGTACCGAAGGCAATT
>simulated_random_reads.fa.000000006
AACCCATCGGAATAATCTACTGCTTCGTATGGAACGGTCCTACATTTAAATAAACGTGTCCAGTGCCACCCGATACCTCT
CGTCAATCAGGGGCTCTCCCTGAATCAGCAGTAAACAAACCCAGTACACTGTCGAACACTACTGAGACCG
>simulated_random_reads.fa.000000007
CCGAAGGCAAGTCTGTCGTAGAATGGTTTTTGTCGTTGTAACAACCCCGCTCTAGACCCTGAAAACCATAAAGTCAAGCC
CAACTAATATTAGAGGCATTCTGGCTACTCCCGCTCACCGCAATCTTCACATACTGTGATACCCTCAGCC
>simulated_random_reads.fa.000000008
ATATCCGTTAAACCCCGGATTTGACAATTCATCATCAACGCTACTAACGGCTTTCTCAATTTGGGGCTGTGGCCTATCCG
CATACGGCTACCTGCGCAAGAAGAGAGTACTGTTAGATGTCACGCTGCACTTGCGAAGACCGGTGGGCGT
>simulated_random_reads.fa.000000009
AGCGATGAGTACACAAGATGAGTGAAGGGATTAAACTTCAAACCTTGAAGTGTTACCCGATTTCCTACCATTGGGGATTC
GTTAATGCTTCGAATGGATCTATATCCGGTGTTTAGCTGACTGTTAAAATACTCTCGTTGTACGAAAGTA
>HMPMockV1.2.Staggered2.673827_0 M141:79:749142:1:1101:17530:1438
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGCAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACCTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
>HMPMockV1.2.Staggered2.673827_1 M141:79:749142:1:1101:17007:1451
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTTACGCTG
>HMPMockV1.2.Staggered2.673827_2 M141:79:749142:1:1101:16695:1471
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
GGGA
>HMPMockV1.2.Staggered2.673827_3 M141:79:749142:1:1101:17203:1479
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGTAGAGATATGGAGGAACACCAGTGGCGAAGGCGACGTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
G
>HMPMockV1.2.Staggered2.673827_4 M141:79:749142:1:1101:14557:1490
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGGCTGTAACTGACGCTGATGTGCGCAAGCGTG
GTGATCAAACA
>HMPMockV1.2.Staggered2.673827_5 M141:79:749142:1:1101:16104:1491
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGC
>HMPMockV1.2.Staggered2.673827_6 M141:79:749142:1:1101:16372:1491
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACAACAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGTAAG
>HMPMockV1.2.Staggered2.673827_7 M141:79:749142:1:1101:17334:1499
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGT
>HMPMockV1.2.Staggered2.673827_8 M141:79:749142:1:1101:17273:1504
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCACAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGA
>HMPMockV1.2.Staggered2.673827_9 M141:79:749142:1:1101:16835:1505
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
ACATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
GGGAT
"""
# Run the unittest test runner when this module is executed as a script
if __name__ == '__main__':
    main()
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Unit tests for the SortMeRNA version 2.0 Application controller
===============================================================
"""
from unittest import TestCase, main
import re
from os import close
from os.path import abspath, exists, join, dirname
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from skbio.util import remove_files
from skbio.parse.sequences import parse_fasta
from bfillings.sortmerna_v2 import (build_database_sortmerna,
sortmerna_ref_cluster,
sortmerna_map)
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, biocore development team
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
# Test class and cases
class SortmernaV2Tests(TestCase):
""" Tests for SortMeRNA version 2.0 functionality """
def setUp(self):
self.output_dir = mkdtemp()
self.reference_seq_fp = reference_seqs_fp
self.read_seqs_fp = read_seqs_fp
# create temporary file with reference sequences defined
# in reference_seqs_fp
f, self.file_reference_seq_fp = mkstemp(prefix='temp_references_',
suffix='.fasta')
close(f)
# write _reference_ sequences to tmp file
with open(self.file_reference_seq_fp, 'w') as tmp:
tmp.write(self.reference_seq_fp)
tmp.close()
# create temporary file with read sequences defined in read_seqs_fp
f, self.file_read_seqs_fp = mkstemp(prefix='temp_reads_',
suffix='.fasta')
close(f)
# write _read_ sequences to tmp file
with open(self.file_read_seqs_fp, 'w') as tmp:
tmp.write(self.read_seqs_fp)
tmp.close()
# list of files to remove
self.files_to_remove = [self.file_reference_seq_fp,
self.file_read_seqs_fp]
    def tearDown(self):
        """ Delete the temporary FASTA/index files and the output directory.
        """
        remove_files(self.files_to_remove)
        rmtree(self.output_dir)
def test_indexdb_default_param(self):
""" Test indexing a database using SortMeRNA
"""
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
expected_db_files = set(sortmerna_db + ext
for ext in ['.bursttrie_0.dat', '.kmer_0.dat',
'.pos_0.dat', '.stats'])
# Make sure all db_files exist
for fp in expected_db_files:
self.assertTrue(exists(fp))
# Add files to be remove
self.files_to_remove.extend(db_files_to_remove)
def test_empty_fasta_path(self):
""" Indexdb should fail with an empty fasta path
"""
self.assertRaises(ValueError,
build_database_sortmerna,
fasta_path=None,
max_pos=250,
output_dir=self.output_dir)
    def test_empty_inputs(self):
        """ (1) Indexdb should set output_dir to the same directory
            as where the input FASTA file is located;
            (2) SortMeRNA should fail if an empty result path is
            passed;
            (3) SortMeRNA should fail if an empty seq path is passed
        """
        sortmerna_db, db_files_to_remove = build_database_sortmerna(
            abspath(self.file_reference_seq_fp),
            max_pos=250,
            output_dir=None)
        self.files_to_remove.extend(db_files_to_remove)
        # (1) with output_dir=None the index is written next to the FASTA
        fasta_dir = dirname(abspath(self.file_reference_seq_fp))
        out_dir = dirname(sortmerna_db)
        self.assertEqual(fasta_dir, out_dir)
        # (2) a missing result_path must raise
        self.assertRaises(ValueError,
                          sortmerna_ref_cluster,
                          seq_path=self.file_read_seqs_fp,
                          sortmerna_db=sortmerna_db,
                          refseqs_fp=self.file_reference_seq_fp,
                          result_path=None)
        # (3) a missing seq_path must raise
        self.assertRaises(ValueError,
                          sortmerna_ref_cluster,
                          seq_path=None,
                          sortmerna_db=sortmerna_db,
                          refseqs_fp=self.file_reference_seq_fp,
                          result_path=join(self.output_dir,
                                           "sortmerna_otus.txt"))
    def test_tabular_output(self):
        """ SortMeRNA should output a BLAST tabular output
        """
        sortmerna_db, db_files_to_remove = build_database_sortmerna(
            abspath(self.file_reference_seq_fp),
            max_pos=250,
            output_dir=self.output_dir)
        self.files_to_remove.extend(db_files_to_remove)
        # Run SortMeRNA with tabular=True to request Blast-like output
        clusters, failures, smr_files_to_remove = sortmerna_ref_cluster(
            seq_path=self.file_read_seqs_fp,
            sortmerna_db=sortmerna_db,
            refseqs_fp=self.file_reference_seq_fp,
            result_path=join(self.output_dir, "sortmerna_otus.txt"),
            tabular=True)
        # the .blast file is only produced when tabular output is requested
        self.assertTrue(exists(join(self.output_dir,
                                    "sortmerna_otus.blast")))
def test_empty_result_path(self):
""" SortMeRNA should fail with an empty indexed database
"""
self.assertRaises(ValueError,
sortmerna_ref_cluster,
seq_path=self.file_read_seqs_fp,
sortmerna_db=None,
refseqs_fp=self.file_reference_seq_fp,
result_path=join(self.output_dir,
"sortmerna_otus.txt")
)
def test_sortmerna_default_param(self):
""" SortMeRNA version 2.0 reference OTU picking works with default settings
"""
# rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA
cluster_map, failures, smr_files_to_remove = sortmerna_ref_cluster(
seq_path=self.file_read_seqs_fp,
sortmerna_db=sortmerna_db,
refseqs_fp=self.file_reference_seq_fp,
result_path=join(self.output_dir, "sortmerna_otus.txt"))
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_otus_otus.txt',
'sortmerna_otus.log',
'sortmerna_otus_denovo.fasta',
'sortmerna_otus.fasta']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
# Files created sortmerna to be deleted (StdErr and StdOut were already
# removed in sortmerna_ref_cluster)
self.files_to_remove.extend(output_files)
# Random reads that should not appear in any output file
random_reads = ['simulated_random_reads.fa.000000000',
'simulated_random_reads.fa.000000001',
'simulated_random_reads.fa.000000002',
'simulated_random_reads.fa.000000003',
'simulated_random_reads.fa.000000004',
'simulated_random_reads.fa.000000005',
'simulated_random_reads.fa.000000006',
'simulated_random_reads.fa.000000007',
'simulated_random_reads.fa.000000008',
'simulated_random_reads.fa.000000009']
# Reads passing E-value threshold and with similarity/coverage >=97%
otu_reads = ['HMPMockV1.2.Staggered2.673827_47',
'HMPMockV1.2.Staggered2.673827_115',
'HMPMockV1.2.Staggered2.673827_122',
'HMPMockV1.2.Staggered2.673827_161',
'HMPMockV1.2.Staggered2.673827_180',
'HMPMockV1.2.Staggered2.673827_203',
'HMPMockV1.2.Staggered2.673827_207',
'HMPMockV1.2.Staggered2.673827_215',
'HMPMockV1.2.Staggered2.673827_218',
'HMPMockV1.2.Staggered2.673827_220']
# Reads passing E-value threshold and with similarity/coverage <97%
denovo_reads = ['HMPMockV1.2.Staggered2.673827_0',
'HMPMockV1.2.Staggered2.673827_1',
'HMPMockV1.2.Staggered2.673827_2',
'HMPMockV1.2.Staggered2.673827_3',
'HMPMockV1.2.Staggered2.673827_4',
'HMPMockV1.2.Staggered2.673827_5',
'HMPMockV1.2.Staggered2.673827_6',
'HMPMockV1.2.Staggered2.673827_7',
'HMPMockV1.2.Staggered2.673827_8',
'HMPMockV1.2.Staggered2.673827_9']
# Check correct number of OTU clusters in file
otu_clusters = ['295053']
f_aligned = open(output_files[3], "U")
f_otumap = open(output_files[0], "U")
f_denovo = open(output_files[2], "U")
# Verify the aligned FASTA file
for label, seq in parse_fasta(f_aligned):
id = label.split()[0]
# Read is not random
self.assertNotIn(id, random_reads)
# Read is either in otu_reads or denovo_reads
self.assertIn(id, otu_reads+denovo_reads)
f_aligned.close()
# Verify the de novo reads FASTA file
for label, seq in parse_fasta(f_denovo):
id = label.split()[0]
# Read is not random
self.assertNotIn(id, random_reads)
# Read is not an OTU read
self.assertNotIn(id, otu_reads)
# Read is a de novo read
self.assertIn(id, denovo_reads)
f_denovo.close()
# Check the OTU map
for line in f_otumap:
otu_entry = line.split()
# Cluster ID is correct
self.assertIn(otu_entry[0], otu_clusters)
# Each read in the cluster must exclusively be an OTU read
for read in otu_entry[1:]:
self.assertNotIn(read, random_reads)
self.assertNotIn(read, denovo_reads)
self.assertIn(read, otu_reads)
f_otumap.close()
# Check returned list of lists of clusters
expected_cluster = ['HMPMockV1.2.Staggered2.673827_47',
'HMPMockV1.2.Staggered2.673827_115',
'HMPMockV1.2.Staggered2.673827_122',
'HMPMockV1.2.Staggered2.673827_161',
'HMPMockV1.2.Staggered2.673827_180',
'HMPMockV1.2.Staggered2.673827_203',
'HMPMockV1.2.Staggered2.673827_207',
'HMPMockV1.2.Staggered2.673827_215',
'HMPMockV1.2.Staggered2.673827_218',
'HMPMockV1.2.Staggered2.673827_220']
# Should only have 1 cluster
self.assertEqual(1, len(cluster_map))
for actual_cluster in cluster_map.itervalues():
actual_cluster.sort()
expected_cluster.sort()
self.assertEqual(actual_cluster, expected_cluster)
# Check log file number of clusters and failures corresponds to
# the results in the output files
f_log = open(output_files[1], "U")
num_clusters = 0
num_failures = 0
for line in f_log:
if line.startswith(" Total OTUs"):
num_clusters = (re.split(' = ', line)[1]).strip()
elif line.startswith(" Total reads for de novo clustering"):
num_failures = (re.split(' = ', line)[1]).strip()
f_log.close()
self.assertEqual(int(num_clusters), len(otu_clusters))
self.assertEqual(int(num_failures), len(denovo_reads))
    def test_sortmerna_map_default(self):
        """ SortMeRNA version 2.0 for mapping sequences onto a reference
            using default parameters
        """
        # Rebuild the index
        sortmerna_db, db_files_to_remove = build_database_sortmerna(
            abspath(self.file_reference_seq_fp),
            max_pos=250,
            output_dir=self.output_dir)
        # Files created by indexdb_rna to be deleted
        self.files_to_remove.extend(db_files_to_remove)
        # Run SortMeRNA mapper
        app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
                                   output_dir=self.output_dir,
                                   refseqs_fp=self.file_reference_seq_fp,
                                   sortmerna_db=sortmerna_db)
        # Check all sortmerna output files exist
        output_files = [join(self.output_dir, ext)
                        for ext in ['sortmerna_map.blast',
                                    'sortmerna_map.log']]
        # Check output files exist
        for fp in output_files:
            self.assertTrue(exists(fp))
        blast_alignments_fp = app_result['BlastAlignments'].name
        # Check there are 30 alignments (1 per read); by default unaligned
        # reads still get a NULL ("*") entry, as the assertions below show
        with open(blast_alignments_fp, 'U') as blast_actual:
            # map read id -> remaining tabular fields
            entries = (line.strip().split('\t') for line in blast_actual)
            actual_alignments = {r[0]: r[1:] for r in entries}
            self.assertEqual(30, len(actual_alignments))
            # Check this alignment exists
            self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
                            in actual_alignments)
            # field 1 presumably holds % identity, field 12 % query coverage
            # (Blast tabular) -- TODO confirm against SortMeRNA docs
            self.assertEqual("97.3", actual_alignments[
                "HMPMockV1.2.Staggered2.673827_47"][1])
            self.assertEqual("100", actual_alignments[
                "HMPMockV1.2.Staggered2.673827_47"][12])
            # Check alignment for random read is NULL
            self.assertTrue("simulated_random_reads.fa.000000000"
                            in actual_alignments)
            self.assertEqual("*", actual_alignments[
                "simulated_random_reads.fa.000000000"][0])
    def test_sortmerna_map_sam_alignments(self):
        """ SortMeRNA version 2.0 for mapping sequences onto a reference
            outputting Blast and SAM alignments
        """
        # Rebuild the index
        sortmerna_db, db_files_to_remove = build_database_sortmerna(
            abspath(self.file_reference_seq_fp),
            max_pos=250,
            output_dir=self.output_dir)
        # Files created by indexdb_rna to be deleted
        self.files_to_remove.extend(db_files_to_remove)
        # Run SortMeRNA mapper with SAM output in addition to the default
        # Blast output
        app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
                                   output_dir=self.output_dir,
                                   refseqs_fp=self.file_reference_seq_fp,
                                   sortmerna_db=sortmerna_db,
                                   output_sam=True)
        # Check all sortmerna output files exist
        output_files = [join(self.output_dir, ext)
                        for ext in ['sortmerna_map.blast',
                                    'sortmerna_map.sam',
                                    'sortmerna_map.log']]
        # Check output files exist
        for fp in output_files:
            self.assertTrue(exists(fp))
        sam_alignments_fp = app_result['SAMAlignments'].name
        # Check there are 30 alignments in the SAM output (1 per read)
        with open(sam_alignments_fp, 'U') as sam_actual:
            # keyed on the first column, so the @HD and @PG header lines
            # count as entries too
            entries = (line.strip().split('\t') for line in sam_actual)
            actual_alignments = {r[0]: r[1:] for r in entries}
            # 30 alignments expected + 2 lines for @HD and @PG fields
            self.assertEqual(32, len(actual_alignments))
            # Check this alignment exists
            self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
                            in actual_alignments)
            # index 1 is the SAM RNAME (reference) column
            self.assertEqual("295053", actual_alignments[
                "HMPMockV1.2.Staggered2.673827_47"][1])
            # AS:i is the SAM alignment-score optional tag
            self.assertEqual("AS:i:418", actual_alignments[
                "HMPMockV1.2.Staggered2.673827_47"][10])
            # Check alignment for random read is NULL
            self.assertTrue("simulated_random_reads.fa.000000000"
                            in actual_alignments)
            self.assertEqual("*", actual_alignments[
                "simulated_random_reads.fa.000000000"][1])
def test_sortmerna_map_sam_alignments_with_tags(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
outputting SAM alignments with @SQ tags
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
output_sam=True,
sam_SQ_tags=True,
blast_format=None)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.sam',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
sam_alignments_fp = app_result['SAMAlignments'].name
# Check there are 30 alignments in the SAM output (1 per read)
with open(sam_alignments_fp, 'U') as sam_actual:
actual_entries = [line.strip().split('\t') for line in sam_actual]
# 30 alignments expected + 2 lines for @HD and @PG fields + 5 lines
# for the @SQ tags
self.assertEqual(37, len(actual_entries))
# Check all expected @SQ tags have been included
SQ_array = [['@SQ', 'SN:42684', 'LN:1501'],
['@SQ', 'SN:342684', 'LN:1486'],
['@SQ', 'SN:426848', 'LN:1486'],
['@SQ', 'SN:295053', 'LN:1389'],
['@SQ', 'SN:879972', 'LN:1371']]
for entry in SQ_array:
self.assertTrue(entry in actual_entries)
def test_sortmerna_map_blast_no_null_alignments(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
using Blast with --print_all_reads option set to False
(no NULL alignments output)
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
print_all_reads=False)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.blast',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
blast_alignments_fp = app_result['BlastAlignments'].name
# Check there are 20 alignments (1 per read)
with open(blast_alignments_fp, 'U') as blast_actual:
entries = (line.strip().split('\t') for line in blast_actual)
actual_alignments = {r[0]: r[1:] for r in entries}
self.assertEqual(20, len(actual_alignments))
# Check this alignment exists
self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
in actual_alignments)
self.assertEqual("97.3", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][1])
self.assertEqual("100", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][12])
# Check alignment for random read does not exist
self.assertFalse("simulated_random_reads.fa.000000000"
in actual_alignments)
def test_sortmerna_map_num_alignments(self):
""" SortMeRNA version 2.0 for mapping sequences onto a reference
outputting first INT num_alignments passing the E-value threshold
(rather than first INT best alignments)
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
# Run SortMeRNA mapper
app_result = sortmerna_map(seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
num_alignments=1)
# Check all sortmerna output files exist
output_files = [join(self.output_dir, ext)
for ext in ['sortmerna_map.blast',
'sortmerna_map.log']]
# Check output files exist
for fp in output_files:
self.assertTrue(exists(fp))
blast_alignments_fp = app_result['BlastAlignments'].name
# Check there are 30 alignments (1 per read)
with open(blast_alignments_fp, 'U') as blast_actual:
entries = (line.strip().split('\t') for line in blast_actual)
actual_alignments = {r[0]: r[1:] for r in entries}
self.assertEqual(30, len(actual_alignments))
# Check this alignment exists
self.assertTrue("HMPMockV1.2.Staggered2.673827_47"
in actual_alignments)
self.assertEqual("97.3", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][1])
self.assertEqual("100", actual_alignments[
"HMPMockV1.2.Staggered2.673827_47"][12])
# Check alignment for random read is NULL
self.assertTrue("simulated_random_reads.fa.000000000"
in actual_alignments)
self.assertEqual("*", actual_alignments[
"simulated_random_reads.fa.000000000"][0])
def test_blast_or_sam(self):
""" SortMeRNA should fail with output_sam and blast_format both
set to False
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
self.assertRaises(ValueError,
sortmerna_map,
seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
output_sam=False,
blast_format=None)
def test_best_or_num_alignments(self):
""" SortMeRNA should fail with "best" and "num_alignments" both
set to True
"""
# Rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
self.assertRaises(ValueError,
sortmerna_map,
seq_path=self.file_read_seqs_fp,
output_dir=self.output_dir,
refseqs_fp=self.file_reference_seq_fp,
sortmerna_db=sortmerna_db,
best=1,
num_alignments=1)
# Reference sequence database: five 16S rRNA reference sequences in FASTA
# format, written to a temporary file in setUp and indexed by indexdb_rna.
# The sequence IDs (426848, 42684, 342684, 295053, 879972) are the ones
# asserted against in the mapping tests above.
reference_seqs_fp = """>426848
AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAATACATGCAAGTCGAGGGGCAGCACTGGTAGCAATAC
CTGGTGGCGACCGGCGGACGGGTGCGTAACACGTATGCAACCTACCCTGTACAGGGGGATAGCCCGAGGAAATTCGGATT
AATACCCCATACGATAAGAATCGGCATCGATTTTTATTGAAAGCTCCGGCGGTACAGGATGGGCATGCGCCCCATTAGCT
AGTTGGTGAGGTAACGGCTCACCAAGGCTACGATGGGTAGGGGGCCTGAGAGGGTGATCCCCCACACTGGAACTGAGACA
CGGTCCAGACTCCTACGGGAGGCAGCAGTAAGGAATATTGGTCAATGGGCGCAAGCCTGAACCAGCCATGCCGCGTGCAG
GAAGACTGCCATTATGGTTGTAAACTGCTTTTATATGGGAAGAAACCTCCGGACGTGTCCGGAGCTGACGGTACCATGTG
AATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATCCAAGCGTTATCCGGATTTATTGGGTTTAAA
GGGTGCGTAGGCGGCGTGTTAAGTCAGAGGTGAAATTCGGCAGCTCAACTGTCAAATTGCCTTTGATACTGGCACACTTG
AATGCGATTGAGGTAGGCGGAATGTGACATGTAGCGGTGAAATGCTTAGACATGTGACAGAACACCGATTGCGAAGGCAG
CTTACCAAGTCGTTATTGACGCTGAGGCACGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA
AACGATGATAACTCGACGTTAGCGATACACTGTTAGCGTCCAAGCGAAAGCGTTAAGTTATCCACCTGGGAAGTACGATC
GCAAGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGATACGCGAGGA
ACCTTACCAGGGCTTAAATGGGGAACGACCTTCTGGGAAACCAGAATTTCTTTTAGACGGTCCTCAAGGTGCTGCATGGT
TGTCGTCAGCTCGTGCCGTGAGGTGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTACTGTTAGTTGCCAGCGGATAAT
GCCGGGGACTCTAGCGGAACTGCCTGTGCAAACAGAGAGGAAGGTGGGGATGACGTCAAATCATCACGGCCCTTACGTCC
TGGGCTACACACGTGCTACAATGGCCGGTACAGAGGGCAGCCACTTCGTGAGAAGGAGCGAATCCTTAAAGCCGGTCTCA
GTTCGGATTGTAGTCTGCAACTCGACTACATGAAGCTGGAATCGCTAGTAATCGCGTATCAGCCATGACGCGGTGAATAC
GTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAATTGGGAGTACCTAAAGTCGGTAACCGCAAGGAGCCGCCT
AAGGTAATACCAGTGACTGGGGCTAAGTCGTAACAAGGTAGCCGTA
>42684
AGAGTTTGATCCTGGCTCAGATTGAACGCTGGCGGCATGCTTTACACATGCAAGTCGGACGGCAGCACAGAGGAGCTTGC
TTCTTGGGTGGCGAGTGGCGAACGGGTGAGTGACGCATCGGAACGTACCGAGTAATGGGGGATAACTGTCCGAAAGGACA
GCTAATACCGCATACGCCCTGAGGGGGAAAGCGGGGGATCTTAGGACCTCGCGTTATTCGAGCGGCCGATGTCTGATTAG
CTGGTTGGCGGGGTAAAGGCCCACCAAGGCGACGATCAGTAGCGGGTCTGAGAGGATGATCCGCCACACTGGGACTGAGA
CACGGCCCAGACTCCTACGGGAGGCAGCAGTGGGGAATTTTGGACAATGGGCGCAAGCCTGATCCAGCCATGCCGCGTGT
CTGAAGAAGGCCTTCGGGTTGTAAAGGACTTTTGTCAGGGAAGAAAAGGAACGTGTTAATACCATGTTCTGATGACGGTA
CCTGAAGAATAAGCACCGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGGTGCGAGCGTTAATCGGAATTACTGG
GCGTAAAGCGGGCGCAGACGGTTACTTAAGCGGGATGTGAAATCCCCGGGCTCAACCCGGGAACTGCGTTCCGAACTGGG
TGGCTAGAGTGTGTCAGAGGGGGGTAGAATTCCACGTGTAGCAGTGAAATGCGTAGAGATGTGGAGGAATACCGATGGCG
AAGGCAGCCCCCTGGGATAACACTGACGTTCATGCCCGAAAGCGTGGGTAGCAAACAGGGTTAGATACCCTGGTAGTCCA
CGCCCTAAACGATGTCGATTAGCTGTTGGGGCACTTGATGCCTTAGTAGCGTAGCTAACGCGTGAAATCGACCGCCTGGG
GAGTACGGTCGCAAGATTAAAACTCAAAGGAATTGACGGGGACCCGCACAAGCGGTGGATGATGTGGATTAATTCGATGC
AACGCGAAGAACCTTACCTGGTCTTGACATGTACGGAATCTTCCAGAGACGGAAGGGTGCCTTCGGGAGCCGTAACACAG
GTGCTGCATGGCTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCATTAGTTG
CCATCACTTGGTTGGGCACTCTAATGAGACTGCCGGTGACAAACCGGAGGAAGGTGGGGATGACGTCAAGTCCTCATGGC
CCTTATGACCAGGGCTTCACACGTCATACAATGGTCGGTACAGAGGGTAGCCAAGCCGCGAGGCGGAGCCAATCCCAGAA
AACCGATCGTAGTCCGGATTGCACTCTGCAACTCGAGTGCATGAAGTCGGAATCGCTAGTAATCGCAGGTCAGCATACTG
CGGTGAATACGTTCCCGGGTCTTGTACACACCGCCCGTCACACCATGGGAGTGGGGGATACCAGAAGCAGGTAGGCTAAC
CGCAAGGAGGCCGCTTGCCACGGTATGCTTCATGACTGGGGTGAAGTCGTAACAAGGTAAC
>342684
AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAACACATGCAAGTCGAGGGGCATCGCGGGTAGCAATAC
CTGGCGGCGACCGGCGGAAGGGTGCGTAACGCGTGAGCGACATACCCGTGACAGGGGGATAACAGATGGAAACGTCTCCT
AATACCCCATAAGATCATATATCGCATGGTATGTGATTGAAAGGTGAGAACCGGTCACGGATTGGCTCGCGTCCCATCAG
GTAGACGGCGGGGCAGCGGCCCGCCGTGCCGACGACGGGTAGGGGCTCTGAGAGGAGTGACCCCCACAATGGAACTGAGA
CACGGTCCATACTCCTACGGGAGGCAGCAGTGAGGAATATTGGTCAATGGGCGGAAGCCTGAACCAGCCATGCCGCGTGC
GGGAGGACGGCCCTATGGGTTGTAAACCGCTTTTGAGTGAGAGCAATAAGGTTCACGTGTGGACCGATGAGAGTATCATT
CGAATAAGCATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATGCGAGCGTTATCCGGATTCATTGGGTTTA
AAGGGTGCGTAGGCGGACATGTAAGTCCGAGGTGAAAGACCGGGGCCCAACCCCGGGGTTGCCTCGGATACTGTGTGTCT
GGAGTGGACGTGCCGCCGGGGGAATGAGTGGTGTAGCGGTGAAATGCATAGATGTCACTCAGAACACCGATTGCGAAGGC
ACCTGGCGAATGTCTTACTGACGCTGAGGCACGAAAGCGTGGGGATCGAACAGGATTAGATACCCTGGTAGTCCACGCAG
TAAACGATGATGGCTGTCCGTTCGCTCCGATAGGAGTGAGTAGACAAGCGAAAGCGCTAAGCCATCCACCTGGGGAGTAC
GGCCGCAAGGCTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGAGGAACATGTGGTTTAATTCGATGATACGCG
AGGAACCTTACCCGGGCTCGAACGGCAGGTGAACGATGCAGAGATGCAAAGGCCCTTCGGGGCGTCTGTCGAGGTGCTGC
ATGGTTGTCGTCAGCTCGTGCCGTGAGGTGTCGGCTCAAGTGCCATAACGAGCGCAACCCTTGCCTGCAGTTGCCATCGG
GTAAAGCCGGGGACTCTGCAGGGACTGCCACCGCAAGGTGAGAGGAGGGGGGGGATGACGTCAAATCAGCACGGCCCTTA
CGTCCGGGGCGACACACGTGTTACAATGGCGGCCACAGCGGGAAGCCACCCAGTGATGGGGCGCGGATCCCAAAAAAGCC
GCCTCAGTTCGGATCGGAGTCTGCAACCCGACTCCGTGAAGCTGGATTCGCTAGTAATCGCGCATCAGCCATGGCGCGGT
GAATACGTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAGTCGTGGGCGCCTGAAGGCCGTGACCGCGAGGAG
CGGCCTAGGGCGAACGCGGTGACTGGGGCTAAGTCGTAACAAGGTA
>295053
AGAGTTTGATCCTGGCTCAGGACGAACGCTGGCGGCGTGCCTAACACATGCAAGTCGAACGGAGATGCTCCTTCGGGAGT
ATCTTAGTGGCGAACGGGTGAGTAACGCGTGAGCAACCTGACCTTCACAGGGGGATAACCGCTGGAAACAGCAGCTAATA
CCGCATAACGTCGCAAGACCAAAGAGGGGGACCTTCGGGCCTCTTGCCATCGGATGTGCCCAGATGGGATTAGCTTGTTG
GTGGGGTAACGGCTCACCAAGGCGACGATCCCTAGCTGGTCTGAGAGGATGACCAGCCACACTGGAACTGAGACACGGTC
CAGACTCCTACGGGAGGCAGCAGTGGGGAATATTGCACAATGGGCGCAAGCCTGATGCAGCCATGCCGCGTGTATGAAGA
AGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAG
AAGAAGCACCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAA
GCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTG
AGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTGAAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGG
CCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA
AACGATGTCGACTTGGAGGTTGTGCCCTTGAGGCGTGGCTTCCGGAGCTAACGCGTTAAGTCGACCGCCTGGGGAGTACG
GCCGCAAGGTTAAAACTCAAATGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGCAACGCGA
AGAACCTTACCTGGTCTTGACATCCACAGAACTTTCCAGAGATGGATTGGTGCCTTCGGGAACTGTGAGACAGGTGCTGC
ATGGCTGTCGTCAGCTCGTGTTGTGAAATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCCTTTGTTGCCAGCGG
TCCGGCCGGGAACTCAAAGGAGACTGCCAGTGATAAACTGGAGGAAGGTGGGGATGACGTCAAGTCATCATGGCCCTTAC
GACCAGGGCTACACACGTGCTACAATGGCGCATACAAAGAGAAGCGACCTCGCGAGAGCAAGCGGACCTCATAAAGTGCG
TCGTAGTCCGGATTGGAGTCTGCAACTCGACTCCATGAAGTCGGAATCGCTAGTAATCGTGGATCAGAATGCCACGGTGA
ATACGTTCCCGGGCCTTGCACACACCGCC
>879972
GACGAACGCTGGCGGCGTGCCTAATACATGCAAGTCGAACGAGATTGACCGGTGCTTGCACTGGTCAATCTAGTGGCGAA
CGGGTGAGTAACACGTGGGTAACCTGCCCATCAGAGGGGGATAACATTCGGAAACGGATGCTAAAACCGCATAGGTCTTC
GAACCGCATGGTTTGAAGAGGAAAAGAGGCGCAAGCTTCTGCTGATGGATGGACCCGCGGTGTATTAGCTAGTTGGTGGG
GTAACGGCTCACCAAGGCGACGATACATAGCCGACCTGAGAGGGTGATCGGCCACACTGGGACTGAGACACGGCCCAGAC
TCCTACGGGAGGCAGCAGTAGGGAATCTTCGGCAATGGACGGAAGTCTGACCGAGCAACGCCGCGTGAGTGAAGAAGGTT
TTCGGATCGTAAAGCTCTGTTGTAAGAGAAGAACGAGTGTGAGAGTGGAAAGTTCACACTGTGACGGTATCTTACCAGAA
AGGGACGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGTCCCGAGCGTTGTCCGGATTTATTGGGCGTAAAGCGA
GCGCAGGCGGTTAGATAAGTCTGAAGTTAAAGGCTGTGGCTTAACCATAGTACGCTTTGGAAACTGTTTAACTTGAGTGC
AAGAGGGGAGAGTGGAATTCCATGTGTAGCGGTGAAATGCGTAGATATATGGAGGAACACCGGTGGCGAAAGCGGCTCTC
TGGCTTGTAACTGACGCTGAGGCTCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGA
TGAGTGCTAGGTGTTAGACCCTTTCCGGGGTTTAGTGCCGCAGCTAACGCATTAAGCACTCCGCCTGGGGAGTACGACCG
CAGGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGAAGCAACGCGAAGAA
CCTTACCAGGTCTTGACATCCCTCTGACCGCTCTAGAGATAGAGCTTTCCTTCGGGACAGAGGTGACAGGTGGTGCATGG
TTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTATTGTTAGTTGCCATCATTCAG
TTGGGCACTCTAGCGAGACTGCCGGTAATAAACCGGAGGAAGGTGGGGATGACGTCAAATCATCATGCCCCTTATGACCT
GGGCTACACACGTGCTACAATGGCTGGTACAACGAGTCGCAAGCCGGTGACGGCAAGCTAATCTCTTAAAGCCAGTCTCA
GTTCGGATTGTAGGCTGCAACTCGCCTACATGAAGTCGGAATCGCTAGTAATCGCGGATCAGCACGCCGCGGTGAATACG
TTCCCGGGCCT
"""
# Reads to search against the database (30 reads total):
# - 10 rRNA reads: amplicon reads were taken from Qiime study 1685
# - 10 random reads: simulated using mason with the following command:
#       mason illumina -N 10 -snN -o simulated_random_reads.fa -n
#       150 random.fasta
# - 10 rRNA reads with id < 97: amplicon reads were taken from
#   Qiime study 1685
read_seqs_fp = """>HMPMockV1.2.Staggered2.673827_47 M141:79:749142:1:1101:16169:1589
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCAAGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATTTGATACTGGCAAGCTTGAGTCTCGTAGAGGAGGGTAGAATTCCAGGTGTAGCGGGG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCATGGACGAAGACTGACGCT
>HMPMockV1.2.Staggered2.673827_115 M141:79:749142:1:1101:14141:1729
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CCGGCTCAACCTTGGAACTGCATCTGATACGGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCTCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_122 M141:79:749142:1:1101:16032:1739
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GTGATCAAACA
>HMPMockV1.2.Staggered2.673827_161 M141:79:749142:1:1101:17917:1787
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_180 M141:79:749142:1:1101:16014:1819
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGTGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
>HMPMockV1.2.Staggered2.673827_203 M141:79:749142:1:1101:17274:1859
TACGGAGGTTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CCGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGATCAAACA
>HMPMockV1.2.Staggered2.673827_207 M141:79:749142:1:1101:17460:1866
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_215 M141:79:749142:1:1101:18390:1876
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACG
>HMPMockV1.2.Staggered2.673827_218 M141:79:749142:1:1101:18249:1879
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTTCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCACACA
>HMPMockV1.2.Staggered2.673827_220 M141:79:749142:1:1101:15057:1880
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTC
>simulated_random_reads.fa.000000000
AGCCGGGTGTCTACGGTCAGGTGTGTTCTGACTACGTAGTTTGACAGCACGTGTCCTTTCCCCTTCCCAAGGTAACGAAT
TGTCGTTATCAACGTTTCGATCCGTAATTTCACGGAACGACATAAAGGCATCAATACTATCGCCAACAGA
>simulated_random_reads.fa.000000001
GTGGACGTCGTGGCGGCGTACTAACTTCCTACAGGCATATCCGGAATAACATTCTGCCGCTTGTCGACATAAGCTGTTCC
CTACATAGACGACGACGGTTGAAGGGTGTATGTATTCTTTGGGTACGGCTCCTCTGGGCGCATGGTAGCA
>simulated_random_reads.fa.000000002
CATTCTTTATAGGCCTACAACACTAATCATCGTTAAGCATAAGGGGAGGAGTGTGCGTGGCATCAAGTCCTGGTTCTTCG
CCTAGTACCACACCGTCTCACACGCAGCCGCCGACGACCAGTGAGGGCGCGTGGGACACCCATTCGGTCC
>simulated_random_reads.fa.000000003
TCGCCTTGGTACAAACAGTCGCGGCACGCTGTATGGAGGACCATAGAGGCACAGGCTGAGGACAGGGGCATGGAAGGTTC
AATCGCCCCCCACAGCTTTAGGTAGGAAGTACTGTTCTAGTGCCAATTTGATTTTAACGGCAGTTACTCG
>simulated_random_reads.fa.000000004
CATATTCTAATATCCTACTTCTGATACCCGATTATACACGACACCACCCCAGGACTGTCGTCACATCCTTATCTGGATAA
ACATCCGGTTCCGTTTGGCCGTGCTCCGCAAGTGATGCGTCTGTGGAATGTACGTGGAGCGTTGACAGTT
>simulated_random_reads.fa.000000005
CCGGATTAGGCATGTTTATAGTACAACGGATTCGCAAAAAGGTCAGGGTAACAATTTTGAAATGCTTTCATACTGCGGTC
TAAATGGACCACCCTTTAGGTGCAGCCAACTATAGTTGGTCGATTCTCTGAACACGTACCGAAGGCAATT
>simulated_random_reads.fa.000000006
AACCCATCGGAATAATCTACTGCTTCGTATGGAACGGTCCTACATTTAAATAAACGTGTCCAGTGCCACCCGATACCTCT
CGTCAATCAGGGGCTCTCCCTGAATCAGCAGTAAACAAACCCAGTACACTGTCGAACACTACTGAGACCG
>simulated_random_reads.fa.000000007
CCGAAGGCAAGTCTGTCGTAGAATGGTTTTTGTCGTTGTAACAACCCCGCTCTAGACCCTGAAAACCATAAAGTCAAGCC
CAACTAATATTAGAGGCATTCTGGCTACTCCCGCTCACCGCAATCTTCACATACTGTGATACCCTCAGCC
>simulated_random_reads.fa.000000008
ATATCCGTTAAACCCCGGATTTGACAATTCATCATCAACGCTACTAACGGCTTTCTCAATTTGGGGCTGTGGCCTATCCG
CATACGGCTACCTGCGCAAGAAGAGAGTACTGTTAGATGTCACGCTGCACTTGCGAAGACCGGTGGGCGT
>simulated_random_reads.fa.000000009
AGCGATGAGTACACAAGATGAGTGAAGGGATTAAACTTCAAACCTTGAAGTGTTACCCGATTTCCTACCATTGGGGATTC
GTTAATGCTTCGAATGGATCTATATCCGGTGTTTAGCTGACTGTTAAAATACTCTCGTTGTACGAAAGTA
>HMPMockV1.2.Staggered2.673827_0 M141:79:749142:1:1101:17530:1438
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGCAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACCTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
>HMPMockV1.2.Staggered2.673827_1 M141:79:749142:1:1101:17007:1451
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTTACGCTG
>HMPMockV1.2.Staggered2.673827_2 M141:79:749142:1:1101:16695:1471
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
GGGA
>HMPMockV1.2.Staggered2.673827_3 M141:79:749142:1:1101:17203:1479
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGTAGAGATATGGAGGAACACCAGTGGCGAAGGCGACGTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
G
>HMPMockV1.2.Staggered2.673827_4 M141:79:749142:1:1101:14557:1490
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGGCTGTAACTGACGCTGATGTGCGCAAGCGTG
GTGATCAAACA
>HMPMockV1.2.Staggered2.673827_5 M141:79:749142:1:1101:16104:1491
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGC
>HMPMockV1.2.Staggered2.673827_6 M141:79:749142:1:1101:16372:1491
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACAACAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGTAAG
>HMPMockV1.2.Staggered2.673827_7 M141:79:749142:1:1101:17334:1499
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGT
>HMPMockV1.2.Staggered2.673827_8 M141:79:749142:1:1101:17273:1504
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCACAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGA
>HMPMockV1.2.Staggered2.673827_9 M141:79:749142:1:1101:16835:1505
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
ACATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
GGGAT
"""
if __name__ == '__main__':
    # Run the module's unit tests when executed as a script
    main()
| en | 0.505963 | #!/usr/bin/env python #----------------------------------------------------------------------------- # Copyright (c) 2013--, biocore development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- Unit tests for the SortMeRNA version 2.0 Application controller =============================================================== # ---------------------------------------------------------------------------- # Copyright (c) 2014--, biocore development team # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- # Test class and cases Tests for SortMeRNA version 2.0 functionality # create temporary file with reference sequences defined # in reference_seqs_fp # write _reference_ sequences to tmp file # create temporary file with read sequences defined in read_seqs_fp # write _read_ sequences to tmp file # list of files to remove Test indexing a database using SortMeRNA # Make sure all db_files exist # Add files to be remove Indexdb should fail with an empty fasta path (1) Indexdb should set output_dir to the same directory as where the input FASTA file is located; (2) SortMeRNA should fail if an empty result path is passed; (3) SortMeRNA should fail if an empty seq path is passed SortMeRNA should output a BLAST tabular output # Run SortMeRNA SortMeRNA should fail with an empty indexed database SortMeRNA version 2.0 reference OTU picking works with default settings # rebuild the index # Files created by indexdb_rna to be deleted # Run SortMeRNA # Check all sortmerna output files exist # Check output files exist # Files created sortmerna to be deleted (StdErr and StdOut were already # removed in sortmerna_ref_cluster) # 
Random reads that should not appear in any output file # Reads passing E-value threshold and with similarity/coverage >=97% # Reads passing E-value threshold and with similarity/coverage <97% # Check correct number of OTU clusters in file # Verify the aligned FASTA file # Read is not random # Read is either in otu_reads or denovo_reads # Verify the de novo reads FASTA file # Read is not random # Read is not an OTU read # Read is a de novo read # Check the OTU map # Cluster ID is correct # Each read in the cluster must exclusively be an OTU read # Check returned list of lists of clusters # Should only have 1 cluster # Check log file number of clusters and failures corresponds to # the results in the output files SortMeRNA version 2.0 for mapping sequences onto a reference using default parameters # Rebuild the index # Files created by indexdb_rna to be deleted # Run SortMeRNA mapper # Check all sortmerna output files exist # Check output files exist # Check there are 30 alignments (1 per read) # Check this alignment exists # Check alignment for random read is NULL SortMeRNA version 2.0 for mapping sequences onto a reference outputting Blast and SAM alignments # Rebuild the index # Files created by indexdb_rna to be deleted # Run SortMeRNA mapper # Check all sortmerna output files exist # Check output files exist # Check there are 30 alignments in the SAM output (1 per read) # 30 alignments expected + 2 lines for @HD and @PG fields # Check this alignment exists # Check alignment for random read is NULL SortMeRNA version 2.0 for mapping sequences onto a reference outputting SAM alignments with @SQ tags # Rebuild the index # Files created by indexdb_rna to be deleted # Run SortMeRNA mapper # Check all sortmerna output files exist # Check output files exist # Check there are 30 alignments in the SAM output (1 per read) # 30 alignments expected + 2 lines for @HD and @PG fields + 5 lines # for the @SQ tags # Check all expected @SQ tags have been included SortMeRNA version 
2.0 for mapping sequences onto a reference using Blast with --print_all_reads option set to False (no NULL alignments output) # Rebuild the index # Files created by indexdb_rna to be deleted # Run SortMeRNA mapper # Check all sortmerna output files exist # Check output files exist # Check there are 20 alignments (1 per read) # Check this alignment exists # Check alignment for random read does not exist SortMeRNA version 2.0 for mapping sequences onto a reference outputting first INT num_alignments passing the E-value threshold (rather than first INT best alignments) # Rebuild the index # Files created by indexdb_rna to be deleted # Run SortMeRNA mapper # Check all sortmerna output files exist # Check output files exist # Check there are 30 alignments (1 per read) # Check this alignment exists # Check alignment for random read is NULL SortMeRNA should fail with output_sam and blast_format both set to False # Rebuild the index # Files created by indexdb_rna to be deleted SortMeRNA should fail with "best" and "num_alignments" both set to True # Rebuild the index # Files created by indexdb_rna to be deleted # Reference sequence database >426848 AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAATACATGCAAGTCGAGGGGCAGCACTGGTAGCAATAC CTGGTGGCGACCGGCGGACGGGTGCGTAACACGTATGCAACCTACCCTGTACAGGGGGATAGCCCGAGGAAATTCGGATT AATACCCCATACGATAAGAATCGGCATCGATTTTTATTGAAAGCTCCGGCGGTACAGGATGGGCATGCGCCCCATTAGCT AGTTGGTGAGGTAACGGCTCACCAAGGCTACGATGGGTAGGGGGCCTGAGAGGGTGATCCCCCACACTGGAACTGAGACA CGGTCCAGACTCCTACGGGAGGCAGCAGTAAGGAATATTGGTCAATGGGCGCAAGCCTGAACCAGCCATGCCGCGTGCAG GAAGACTGCCATTATGGTTGTAAACTGCTTTTATATGGGAAGAAACCTCCGGACGTGTCCGGAGCTGACGGTACCATGTG AATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATCCAAGCGTTATCCGGATTTATTGGGTTTAAA GGGTGCGTAGGCGGCGTGTTAAGTCAGAGGTGAAATTCGGCAGCTCAACTGTCAAATTGCCTTTGATACTGGCACACTTG AATGCGATTGAGGTAGGCGGAATGTGACATGTAGCGGTGAAATGCTTAGACATGTGACAGAACACCGATTGCGAAGGCAG CTTACCAAGTCGTTATTGACGCTGAGGCACGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA 
AACGATGATAACTCGACGTTAGCGATACACTGTTAGCGTCCAAGCGAAAGCGTTAAGTTATCCACCTGGGAAGTACGATC GCAAGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGATACGCGAGGA ACCTTACCAGGGCTTAAATGGGGAACGACCTTCTGGGAAACCAGAATTTCTTTTAGACGGTCCTCAAGGTGCTGCATGGT TGTCGTCAGCTCGTGCCGTGAGGTGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTACTGTTAGTTGCCAGCGGATAAT GCCGGGGACTCTAGCGGAACTGCCTGTGCAAACAGAGAGGAAGGTGGGGATGACGTCAAATCATCACGGCCCTTACGTCC TGGGCTACACACGTGCTACAATGGCCGGTACAGAGGGCAGCCACTTCGTGAGAAGGAGCGAATCCTTAAAGCCGGTCTCA GTTCGGATTGTAGTCTGCAACTCGACTACATGAAGCTGGAATCGCTAGTAATCGCGTATCAGCCATGACGCGGTGAATAC GTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAATTGGGAGTACCTAAAGTCGGTAACCGCAAGGAGCCGCCT AAGGTAATACCAGTGACTGGGGCTAAGTCGTAACAAGGTAGCCGTA >42684 AGAGTTTGATCCTGGCTCAGATTGAACGCTGGCGGCATGCTTTACACATGCAAGTCGGACGGCAGCACAGAGGAGCTTGC TTCTTGGGTGGCGAGTGGCGAACGGGTGAGTGACGCATCGGAACGTACCGAGTAATGGGGGATAACTGTCCGAAAGGACA GCTAATACCGCATACGCCCTGAGGGGGAAAGCGGGGGATCTTAGGACCTCGCGTTATTCGAGCGGCCGATGTCTGATTAG CTGGTTGGCGGGGTAAAGGCCCACCAAGGCGACGATCAGTAGCGGGTCTGAGAGGATGATCCGCCACACTGGGACTGAGA CACGGCCCAGACTCCTACGGGAGGCAGCAGTGGGGAATTTTGGACAATGGGCGCAAGCCTGATCCAGCCATGCCGCGTGT CTGAAGAAGGCCTTCGGGTTGTAAAGGACTTTTGTCAGGGAAGAAAAGGAACGTGTTAATACCATGTTCTGATGACGGTA CCTGAAGAATAAGCACCGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGGTGCGAGCGTTAATCGGAATTACTGG GCGTAAAGCGGGCGCAGACGGTTACTTAAGCGGGATGTGAAATCCCCGGGCTCAACCCGGGAACTGCGTTCCGAACTGGG TGGCTAGAGTGTGTCAGAGGGGGGTAGAATTCCACGTGTAGCAGTGAAATGCGTAGAGATGTGGAGGAATACCGATGGCG AAGGCAGCCCCCTGGGATAACACTGACGTTCATGCCCGAAAGCGTGGGTAGCAAACAGGGTTAGATACCCTGGTAGTCCA CGCCCTAAACGATGTCGATTAGCTGTTGGGGCACTTGATGCCTTAGTAGCGTAGCTAACGCGTGAAATCGACCGCCTGGG GAGTACGGTCGCAAGATTAAAACTCAAAGGAATTGACGGGGACCCGCACAAGCGGTGGATGATGTGGATTAATTCGATGC AACGCGAAGAACCTTACCTGGTCTTGACATGTACGGAATCTTCCAGAGACGGAAGGGTGCCTTCGGGAGCCGTAACACAG GTGCTGCATGGCTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCATTAGTTG CCATCACTTGGTTGGGCACTCTAATGAGACTGCCGGTGACAAACCGGAGGAAGGTGGGGATGACGTCAAGTCCTCATGGC CCTTATGACCAGGGCTTCACACGTCATACAATGGTCGGTACAGAGGGTAGCCAAGCCGCGAGGCGGAGCCAATCCCAGAA 
AACCGATCGTAGTCCGGATTGCACTCTGCAACTCGAGTGCATGAAGTCGGAATCGCTAGTAATCGCAGGTCAGCATACTG CGGTGAATACGTTCCCGGGTCTTGTACACACCGCCCGTCACACCATGGGAGTGGGGGATACCAGAAGCAGGTAGGCTAAC CGCAAGGAGGCCGCTTGCCACGGTATGCTTCATGACTGGGGTGAAGTCGTAACAAGGTAAC >342684 AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAACACATGCAAGTCGAGGGGCATCGCGGGTAGCAATAC CTGGCGGCGACCGGCGGAAGGGTGCGTAACGCGTGAGCGACATACCCGTGACAGGGGGATAACAGATGGAAACGTCTCCT AATACCCCATAAGATCATATATCGCATGGTATGTGATTGAAAGGTGAGAACCGGTCACGGATTGGCTCGCGTCCCATCAG GTAGACGGCGGGGCAGCGGCCCGCCGTGCCGACGACGGGTAGGGGCTCTGAGAGGAGTGACCCCCACAATGGAACTGAGA CACGGTCCATACTCCTACGGGAGGCAGCAGTGAGGAATATTGGTCAATGGGCGGAAGCCTGAACCAGCCATGCCGCGTGC GGGAGGACGGCCCTATGGGTTGTAAACCGCTTTTGAGTGAGAGCAATAAGGTTCACGTGTGGACCGATGAGAGTATCATT CGAATAAGCATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATGCGAGCGTTATCCGGATTCATTGGGTTTA AAGGGTGCGTAGGCGGACATGTAAGTCCGAGGTGAAAGACCGGGGCCCAACCCCGGGGTTGCCTCGGATACTGTGTGTCT GGAGTGGACGTGCCGCCGGGGGAATGAGTGGTGTAGCGGTGAAATGCATAGATGTCACTCAGAACACCGATTGCGAAGGC ACCTGGCGAATGTCTTACTGACGCTGAGGCACGAAAGCGTGGGGATCGAACAGGATTAGATACCCTGGTAGTCCACGCAG TAAACGATGATGGCTGTCCGTTCGCTCCGATAGGAGTGAGTAGACAAGCGAAAGCGCTAAGCCATCCACCTGGGGAGTAC GGCCGCAAGGCTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGAGGAACATGTGGTTTAATTCGATGATACGCG AGGAACCTTACCCGGGCTCGAACGGCAGGTGAACGATGCAGAGATGCAAAGGCCCTTCGGGGCGTCTGTCGAGGTGCTGC ATGGTTGTCGTCAGCTCGTGCCGTGAGGTGTCGGCTCAAGTGCCATAACGAGCGCAACCCTTGCCTGCAGTTGCCATCGG GTAAAGCCGGGGACTCTGCAGGGACTGCCACCGCAAGGTGAGAGGAGGGGGGGGATGACGTCAAATCAGCACGGCCCTTA CGTCCGGGGCGACACACGTGTTACAATGGCGGCCACAGCGGGAAGCCACCCAGTGATGGGGCGCGGATCCCAAAAAAGCC GCCTCAGTTCGGATCGGAGTCTGCAACCCGACTCCGTGAAGCTGGATTCGCTAGTAATCGCGCATCAGCCATGGCGCGGT GAATACGTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAGTCGTGGGCGCCTGAAGGCCGTGACCGCGAGGAG CGGCCTAGGGCGAACGCGGTGACTGGGGCTAAGTCGTAACAAGGTA >295053 AGAGTTTGATCCTGGCTCAGGACGAACGCTGGCGGCGTGCCTAACACATGCAAGTCGAACGGAGATGCTCCTTCGGGAGT ATCTTAGTGGCGAACGGGTGAGTAACGCGTGAGCAACCTGACCTTCACAGGGGGATAACCGCTGGAAACAGCAGCTAATA CCGCATAACGTCGCAAGACCAAAGAGGGGGACCTTCGGGCCTCTTGCCATCGGATGTGCCCAGATGGGATTAGCTTGTTG 
GTGGGGTAACGGCTCACCAAGGCGACGATCCCTAGCTGGTCTGAGAGGATGACCAGCCACACTGGAACTGAGACACGGTC CAGACTCCTACGGGAGGCAGCAGTGGGGAATATTGCACAATGGGCGCAAGCCTGATGCAGCCATGCCGCGTGTATGAAGA AGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAG AAGAAGCACCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAA GCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTG AGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTGAAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGG CCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA AACGATGTCGACTTGGAGGTTGTGCCCTTGAGGCGTGGCTTCCGGAGCTAACGCGTTAAGTCGACCGCCTGGGGAGTACG GCCGCAAGGTTAAAACTCAAATGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGCAACGCGA AGAACCTTACCTGGTCTTGACATCCACAGAACTTTCCAGAGATGGATTGGTGCCTTCGGGAACTGTGAGACAGGTGCTGC ATGGCTGTCGTCAGCTCGTGTTGTGAAATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCCTTTGTTGCCAGCGG TCCGGCCGGGAACTCAAAGGAGACTGCCAGTGATAAACTGGAGGAAGGTGGGGATGACGTCAAGTCATCATGGCCCTTAC GACCAGGGCTACACACGTGCTACAATGGCGCATACAAAGAGAAGCGACCTCGCGAGAGCAAGCGGACCTCATAAAGTGCG TCGTAGTCCGGATTGGAGTCTGCAACTCGACTCCATGAAGTCGGAATCGCTAGTAATCGTGGATCAGAATGCCACGGTGA ATACGTTCCCGGGCCTTGCACACACCGCC >879972 GACGAACGCTGGCGGCGTGCCTAATACATGCAAGTCGAACGAGATTGACCGGTGCTTGCACTGGTCAATCTAGTGGCGAA CGGGTGAGTAACACGTGGGTAACCTGCCCATCAGAGGGGGATAACATTCGGAAACGGATGCTAAAACCGCATAGGTCTTC GAACCGCATGGTTTGAAGAGGAAAAGAGGCGCAAGCTTCTGCTGATGGATGGACCCGCGGTGTATTAGCTAGTTGGTGGG GTAACGGCTCACCAAGGCGACGATACATAGCCGACCTGAGAGGGTGATCGGCCACACTGGGACTGAGACACGGCCCAGAC TCCTACGGGAGGCAGCAGTAGGGAATCTTCGGCAATGGACGGAAGTCTGACCGAGCAACGCCGCGTGAGTGAAGAAGGTT TTCGGATCGTAAAGCTCTGTTGTAAGAGAAGAACGAGTGTGAGAGTGGAAAGTTCACACTGTGACGGTATCTTACCAGAA AGGGACGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGTCCCGAGCGTTGTCCGGATTTATTGGGCGTAAAGCGA GCGCAGGCGGTTAGATAAGTCTGAAGTTAAAGGCTGTGGCTTAACCATAGTACGCTTTGGAAACTGTTTAACTTGAGTGC AAGAGGGGAGAGTGGAATTCCATGTGTAGCGGTGAAATGCGTAGATATATGGAGGAACACCGGTGGCGAAAGCGGCTCTC TGGCTTGTAACTGACGCTGAGGCTCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGA 
TGAGTGCTAGGTGTTAGACCCTTTCCGGGGTTTAGTGCCGCAGCTAACGCATTAAGCACTCCGCCTGGGGAGTACGACCG CAGGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGAAGCAACGCGAAGAA CCTTACCAGGTCTTGACATCCCTCTGACCGCTCTAGAGATAGAGCTTTCCTTCGGGACAGAGGTGACAGGTGGTGCATGG TTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTATTGTTAGTTGCCATCATTCAG TTGGGCACTCTAGCGAGACTGCCGGTAATAAACCGGAGGAAGGTGGGGATGACGTCAAATCATCATGCCCCTTATGACCT GGGCTACACACGTGCTACAATGGCTGGTACAACGAGTCGCAAGCCGGTGACGGCAAGCTAATCTCTTAAAGCCAGTCTCA GTTCGGATTGTAGGCTGCAACTCGCCTACATGAAGTCGGAATCGCTAGTAATCGCGGATCAGCACGCCGCGGTGAATACG TTCCCGGGCCT # Reads to search against the database # - 10 rRNA reads: amplicon reads were taken from Qiime study 1685 # - 10 random reads: simulated using mason with the following command: # mason illumina -N 10 -snN -o simulated_random_reads.fa -n # 150 random.fasta # - 10 rRNA reads with id < 97: amplicon reads were taken from # Qiime study 1685 >HMPMockV1.2.Staggered2.673827_47 M141:79:749142:1:1101:16169:1589 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCAAGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTGCATTTGATACTGGCAAGCTTGAGTCTCGTAGAGGAGGGTAGAATTCCAGGTGTAGCGGGG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCATGGACGAAGACTGACGCT >HMPMockV1.2.Staggered2.673827_115 M141:79:749142:1:1101:14141:1729 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CCGGCTCAACCTTGGAACTGCATCTGATACGGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCTCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG GGGAGCAAACA >HMPMockV1.2.Staggered2.673827_122 M141:79:749142:1:1101:16032:1739 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG GTGATCAAACA >HMPMockV1.2.Staggered2.673827_161 M141:79:749142:1:1101:17917:1787 
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG GGGAGCAAACA >HMPMockV1.2.Staggered2.673827_180 M141:79:749142:1:1101:16014:1819 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGTGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG >HMPMockV1.2.Staggered2.673827_203 M141:79:749142:1:1101:17274:1859 TACGGAGGTTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CCGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG GGGATCAAACA >HMPMockV1.2.Staggered2.673827_207 M141:79:749142:1:1101:17460:1866 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG GGGAGCAAACA >HMPMockV1.2.Staggered2.673827_215 M141:79:749142:1:1101:18390:1876 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACG >HMPMockV1.2.Staggered2.673827_218 M141:79:749142:1:1101:18249:1879 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC CGGGCTCAACCTGGGAACTTCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG GGGAGCACACA >HMPMockV1.2.Staggered2.673827_220 M141:79:749142:1:1101:15057:1880 TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC 
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTC >simulated_random_reads.fa.000000000 AGCCGGGTGTCTACGGTCAGGTGTGTTCTGACTACGTAGTTTGACAGCACGTGTCCTTTCCCCTTCCCAAGGTAACGAAT TGTCGTTATCAACGTTTCGATCCGTAATTTCACGGAACGACATAAAGGCATCAATACTATCGCCAACAGA >simulated_random_reads.fa.000000001 GTGGACGTCGTGGCGGCGTACTAACTTCCTACAGGCATATCCGGAATAACATTCTGCCGCTTGTCGACATAAGCTGTTCC CTACATAGACGACGACGGTTGAAGGGTGTATGTATTCTTTGGGTACGGCTCCTCTGGGCGCATGGTAGCA >simulated_random_reads.fa.000000002 CATTCTTTATAGGCCTACAACACTAATCATCGTTAAGCATAAGGGGAGGAGTGTGCGTGGCATCAAGTCCTGGTTCTTCG CCTAGTACCACACCGTCTCACACGCAGCCGCCGACGACCAGTGAGGGCGCGTGGGACACCCATTCGGTCC >simulated_random_reads.fa.000000003 TCGCCTTGGTACAAACAGTCGCGGCACGCTGTATGGAGGACCATAGAGGCACAGGCTGAGGACAGGGGCATGGAAGGTTC AATCGCCCCCCACAGCTTTAGGTAGGAAGTACTGTTCTAGTGCCAATTTGATTTTAACGGCAGTTACTCG >simulated_random_reads.fa.000000004 CATATTCTAATATCCTACTTCTGATACCCGATTATACACGACACCACCCCAGGACTGTCGTCACATCCTTATCTGGATAA ACATCCGGTTCCGTTTGGCCGTGCTCCGCAAGTGATGCGTCTGTGGAATGTACGTGGAGCGTTGACAGTT >simulated_random_reads.fa.000000005 CCGGATTAGGCATGTTTATAGTACAACGGATTCGCAAAAAGGTCAGGGTAACAATTTTGAAATGCTTTCATACTGCGGTC TAAATGGACCACCCTTTAGGTGCAGCCAACTATAGTTGGTCGATTCTCTGAACACGTACCGAAGGCAATT >simulated_random_reads.fa.000000006 AACCCATCGGAATAATCTACTGCTTCGTATGGAACGGTCCTACATTTAAATAAACGTGTCCAGTGCCACCCGATACCTCT CGTCAATCAGGGGCTCTCCCTGAATCAGCAGTAAACAAACCCAGTACACTGTCGAACACTACTGAGACCG >simulated_random_reads.fa.000000007 CCGAAGGCAAGTCTGTCGTAGAATGGTTTTTGTCGTTGTAACAACCCCGCTCTAGACCCTGAAAACCATAAAGTCAAGCC CAACTAATATTAGAGGCATTCTGGCTACTCCCGCTCACCGCAATCTTCACATACTGTGATACCCTCAGCC >simulated_random_reads.fa.000000008 ATATCCGTTAAACCCCGGATTTGACAATTCATCATCAACGCTACTAACGGCTTTCTCAATTTGGGGCTGTGGCCTATCCG CATACGGCTACCTGCGCAAGAAGAGAGTACTGTTAGATGTCACGCTGCACTTGCGAAGACCGGTGGGCGT >simulated_random_reads.fa.000000009 AGCGATGAGTACACAAGATGAGTGAAGGGATTAAACTTCAAACCTTGAAGTGTTACCCGATTTCCTACCATTGGGGATTC 
GTTAATGCTTCGAATGGATCTATATCCGGTGTTTAGCTGACTGTTAAAATACTCTCGTTGTACGAAAGTA >HMPMockV1.2.Staggered2.673827_0 M141:79:749142:1:1101:17530:1438 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGCAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACCTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG >HMPMockV1.2.Staggered2.673827_1 M141:79:749142:1:1101:17007:1451 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTTACGCTG >HMPMockV1.2.Staggered2.673827_2 M141:79:749142:1:1101:16695:1471 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG GGGA >HMPMockV1.2.Staggered2.673827_3 M141:79:749142:1:1101:17203:1479 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGTAGAGATATGGAGGAACACCAGTGGCGAAGGCGACGTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG G >HMPMockV1.2.Staggered2.673827_4 M141:79:749142:1:1101:14557:1490 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGGCTGTAACTGACGCTGATGTGCGCAAGCGTG GTGATCAAACA >HMPMockV1.2.Staggered2.673827_5 M141:79:749142:1:1101:16104:1491 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGC >HMPMockV1.2.Staggered2.673827_6 M141:79:749142:1:1101:16372:1491 
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACAACAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGTAAG >HMPMockV1.2.Staggered2.673827_7 M141:79:749142:1:1101:17334:1499 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGT >HMPMockV1.2.Staggered2.673827_8 M141:79:749142:1:1101:17273:1504 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG AAATGCACAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGA >HMPMockV1.2.Staggered2.673827_9 M141:79:749142:1:1101:16835:1505 TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG ACATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG GGGAT | 1.380361 | 1 |
setup.py | pgdr/benchmcmc | 1 | 6619782 | <reponame>pgdr/benchmcmc<filename>setup.py
import os
import setuptools
# Maintainer identity (name/email placeholders left by the dataset anonymizer).
__pgdr = "<NAME> <<EMAIL>>"
# Canonical source repository URL; reused verbatim as the project home page.
__source = "https://github.com/pgdr/benchmcmc"
__webpage = __source
# One-line summary; used as setup() description and as the README fallback text.
__description = "Use MCMC to do benchmark analysis"
def _src(x):
root = os.path.dirname(__file__)
return os.path.abspath(os.path.join(root, x))
def _read_file(fname, op):
with open(_src(fname), "r") as fin:
return op(fin.readlines())
def readme():
    """Return the full README.md text, falling back to the short description on any error."""
    try:
        return _read_file("README.md", "".join)
    except Exception:
        return __description
# Register the package with setuptools: metadata, long description, runtime
# dependencies, and the "benchmcmc" console-script entry point.
setuptools.setup(
    name="benchmcmc",
    version="0.0.7",
    packages=["benchmcmc"],
    description=__description,
    # Shown on PyPI; readme() falls back to the short description when
    # README.md cannot be read.
    long_description=readme(),
    long_description_content_type="text/markdown",
    author="<NAME>",
    author_email="<EMAIL>",
    maintainer=__pgdr,
    url=__webpage,
    project_urls={
        "Bug Tracker": "{}/issues".format(__source),
        "Documentation": "{}/blob/master/README.md".format(__source),
        "Source Code": __source,
    },
    license="MIT",
    keywords="mcmc, bayesian methods, statistics, benchmark analysis, disaster modeling, unix, command line tool",
    # Third-party runtime dependencies pulled in on install.
    install_requires=["matplotlib", "pymc3"],
    # `benchmcmc` on the command line dispatches to benchmcmc.main().
    entry_points={"console_scripts": ["benchmcmc=benchmcmc:main"]},
)
| import os
import setuptools
__pgdr = "<NAME> <<EMAIL>>"
__source = "https://github.com/pgdr/benchmcmc"
__webpage = __source
__description = "Use MCMC to do benchmark analysis"
def _src(x):
root = os.path.dirname(__file__)
return os.path.abspath(os.path.join(root, x))
def _read_file(fname, op):
with open(_src(fname), "r") as fin:
return op(fin.readlines())
def readme():
try:
return _read_file("README.md", lambda lines: "".join(lines))
except Exception:
return __description
setuptools.setup(
name="benchmcmc",
version="0.0.7",
packages=["benchmcmc"],
description=__description,
long_description=readme(),
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
maintainer=__pgdr,
url=__webpage,
project_urls={
"Bug Tracker": "{}/issues".format(__source),
"Documentation": "{}/blob/master/README.md".format(__source),
"Source Code": __source,
},
license="MIT",
keywords="mcmc, bayesian methods, statistics, benchmark analysis, disaster modeling, unix, command line tool",
install_requires=["matplotlib", "pymc3"],
entry_points={"console_scripts": ["benchmcmc=benchmcmc:main"]},
) | none | 1 | 1.688513 | 2 | |
python/easy/calculate-money-in-leetcode-bank.py | anidever/leetcode | 0 | 6619783 | # question can be found on leetcode.com/problems/calculate-money-in-leetcode-bank/
class Solution:
    def totalMoney(self, n: int) -> int:
        """Total money saved after *n* days in the LeetCode bank.

        Deposits are $1 on the first Monday, +$1 each following day, and
        every Monday starts $1 above the previous Monday's deposit.
        Computed in closed form instead of looping over the weeks.
        """
        full_weeks, extra_days = divmod(n, 7)
        # Complete weeks: week w (0-based) contributes 7 * (4 + w) dollars.
        total = 28 * full_weeks + 7 * full_weeks * (full_weeks - 1) // 2
        # Trailing partial week: deposits run from full_weeks + 1 up to
        # full_weeks + extra_days.
        total += extra_days * (extra_days + 1) // 2 + full_weeks * extra_days
        return total
| # question can be found on leetcode.com/problems/calculate-money-in-leetcode-bank/
class Solution:
def totalMoney(self, n: int) -> int:
weeks = n // 7
days = n % 7
saved = 0
if weeks > 0:
for i in range(weeks):
saved += 7 * (4 + i)
if days > 0:
saved += sum(range(weeks + 1, weeks + days + 1))
return saved
| en | 0.762938 | # question can be found on leetcode.com/problems/calculate-money-in-leetcode-bank/ | 3.468142 | 3 |
# JSON Schema (draft-04) describing the expected command-line input: a
# non-empty array of sheet specifications.  Assembled from named sub-schemas
# for readability; the resulting dict is identical to the inline literal form.
# NOTE(review): "$id" is a draft-06+ keyword (draft-04 uses "id"); kept as-is
# since validators treat it as an annotation -- confirm before changing.

def _typed(type_name):
    """Schema fragment constraining a value to a single JSON type."""
    return {"type": type_name}


_STYLE_REF = "#/definitions/Style"

_STYLE_DEFINITION = {
    "$id": "#style",
    "title": "style",
    "type": "object",
    "properties": {
        "bg_color": _typed("string"),
        "bold": _typed("boolean"),
        "font": _typed("string"),
        "font_size": _typed("number"),
        "font_color": _typed("string"),
        "number_format": _typed("string"),
        "protection": _typed("boolean"),
        "underline": _typed("string"),
        "border_type": _typed("string"),
        "horizontal_alignment": _typed("string"),
        "vertical_alignment": _typed("string"),
        "wrap_text": _typed("boolean"),
        "shrink_to_fit": _typed("boolean"),
        "fill_pattern_type": _typed("string"),
        "indent": _typed("number"),
    },
    "additionalProperties": False,
}

_CELL_DEFINITION = {
    "$id": "#cell",
    "title": "cell",
    "type": "object",
    "properties": {
        "value": {},  # empty schema: any JSON value is accepted
        "style": {"$ref": _STYLE_REF},
    },
    "required": ["value"],
    "additionalProperties": False,
}

_COLUMN_DEFINITION = {
    "$id": "#column",
    "title": "column",
    "type": "object",
    "properties": {
        "col_name": _typed("string"),
        "style": {"$ref": _STYLE_REF},
        "width": _typed("number"),
        "cells": {
            "type": "array",
            "items": {"$ref": "#/definitions/Cell"},
        },
    },
    "required": ["col_name", "cells"],
}

_SHEET_DEFINITION = {
    "$id": "#sheet",
    "title": "sheet",
    "type": "object",
    "properties": {
        "sheet_name": _typed("string"),
        "columns": {
            "type": "array",
            "items": {"$ref": "#/definitions/Column"},
            "minItems": 1,
        },
        "row_heights": _typed("object"),
        "extra_features": _typed("object"),
        "default_styles": {
            "type": "object",
            "properties": {
                "headers": {"$ref": _STYLE_REF},
                "cells": {"$ref": _STYLE_REF},
            },
            "additionalProperties": False,
        },
    },
    "required": ["sheet_name", "columns"],
}

commandline_json_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "sheets",
    "definitions": {
        "Sheet": _SHEET_DEFINITION,
        "Column": _COLUMN_DEFINITION,
        "Cell": _CELL_DEFINITION,
        "Style": _STYLE_DEFINITION,
    },
    "type": "array",
    "items": {"$ref": "#/definitions/Sheet"},
    "minItems": 1,
}
| commandline_json_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "sheets",
"definitions": {
"Sheet": {
"$id": "#sheet",
"title": "sheet",
"type": "object",
"properties": {
"sheet_name": {
"type": "string"
},
"columns": {
"type": "array",
"items": {
"$ref": "#/definitions/Column"
},
"minItems": 1
},
"row_heights": {
"type": "object"
},
"extra_features": {
"type": "object"
},
"default_styles": {
"type": "object",
"properties": {
"headers": {
"$ref": "#/definitions/Style"
},
"cells": {
"$ref": "#/definitions/Style"
}
},
"additionalProperties": False
}
},
"required": [
"sheet_name",
"columns"
]
},
"Column": {
"$id": "#column",
"title": "column",
"type": "object",
"properties": {
"col_name": {
"type": "string"
},
"style": {
"$ref": "#/definitions/Style"
},
"width": {
"type": "number"
},
"cells": {
"type": "array",
"items": {
"$ref": "#/definitions/Cell"
}
}
},
"required": [
"col_name",
"cells"
]
},
"Cell": {
"$id": "#cell",
"title": "cell",
"type": "object",
"properties": {
"value": {},
"style": {
"$ref": "#/definitions/Style"
}
},
"required": [
"value"
],
"additionalProperties": False
},
"Style": {
"$id": "#style",
"title": "style",
"type": "object",
"properties": {
"bg_color": {
"type": "string"
},
"bold": {
"type": "boolean"
},
"font": {
"type": "string"
},
"font_size": {
"type": "number"
},
"font_color": {
"type": "string"
},
"number_format": {
"type": "string"
},
"protection": {
"type": "boolean"
},
"underline": {
"type": "string"
},
"border_type": {
"type": "string"
},
"horizontal_alignment": {
"type": "string"
},
"vertical_alignment": {
"type": "string"
},
"wrap_text": {
"type": "boolean"
},
"shrink_to_fit": {
"type": "boolean"
},
"fill_pattern_type": {
"type": "string"
},
"indent": {
"type": "number"
}
},
"additionalProperties": False
}
},
"type": "array",
"items": {
"$ref": "#/definitions/Sheet"
},
"minItems": 1
}
| none | 1 | 1.593762 | 2 | |
tests/gitdelver_test.py | nicolasriquet/gitdelver | 1 | 6619785 | <filename>tests/gitdelver_test.py
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the unit tests for the "gitdelver" module.
"""
import pytest, gitdelver
from typing import Callable, Dict
from utilities import AnalysisMode
from pathlib import Path
@pytest.fixture
def gitdelver_config_params_fixture() -> Dict[str, str]:
    """
    This test fixture builds a fully-populated, valid config_params dictionary.
    """
    home = str(Path.home())
    satd_markers = ["//todo", "#todo", "//fixme", "#fixme", "//tofix", "#tofix",
                    "//hack", "#hack", "//workaround", "#workaround"]
    bugfix_markers = ["fix", "solve", "bug", "defect", "problem"]
    return {
        "repo_path": home,
        "csv_output_folder_path": home,
        "keep_unsupported_files": False,
        "analysis_mode": AnalysisMode.COMMITS_FILES,
        "nb_processes": 4,
        "nb_commits_before_checkpoint": 50,
        "verbose": True,
        "SATD_keywords": satd_markers,
        "bugfix_keywords": bugfix_markers,
    }
def test_check_config_params_valid(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params accepts a complete, valid configuration
    without raising SystemExit.
    """
    gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_missing_repo_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when repo_path is missing.
    """
    # BUG FIX: the previous code assigned the *return value* of pop() (the
    # removed entry, i.e. a string or None) to config_params, so the function
    # under test was called with that value instead of a dict missing the key.
    config_params = gitdelver_config_params_fixture
    config_params.pop("repo_path", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_csv_output_folder_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when csv_output_folder_path is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("csv_output_folder_path", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_keep_unsupported_files(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when keep_unsupported_files is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("keep_unsupported_files", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_analysis_mode(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when analysis_mode is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("analysis_mode", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_nb_processes(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when nb_processes is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("nb_processes", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_nb_commits_before_checkpoint(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when nb_commits_before_checkpoint is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("nb_commits_before_checkpoint", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_verbose(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when verbose is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("verbose", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_SATD_keywords(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when SATD_keywords is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("SATD_keywords", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_missing_bugfix_keywords(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    This unit test checks that _check_config_params raises a SystemExit exception when bugfix_keywords is missing.
    """
    # Same pop() fix as in test_check_config_params_missing_repo_path.
    config_params = gitdelver_config_params_fixture
    config_params.pop("bugfix_keywords", None)
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(config_params)
def test_check_config_params_empty_repo_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when repo_path is empty.
    """
    params = gitdelver_config_params_fixture
    params["repo_path"] = ""
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_invalid_repo_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when repo_path does not exist.
    """
    params = gitdelver_config_params_fixture
    params["repo_path"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_empty_csv_output_folder_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when csv_output_folder_path is empty.
    """
    params = gitdelver_config_params_fixture
    params["csv_output_folder_path"] = ""
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_invalid_csv_output_folder_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when csv_output_folder_path does not exist.
    """
    params = gitdelver_config_params_fixture
    params["csv_output_folder_path"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_keep_unsupported_files_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when keep_unsupported_files
    is not a boolean.
    """
    params = gitdelver_config_params_fixture
    params["keep_unsupported_files"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_analysis_mode_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when analysis_mode is not
    an AnalysisMode value.
    """
    params = gitdelver_config_params_fixture
    params["analysis_mode"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_nb_processes_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when nb_processes is not
    an integer.
    """
    params = gitdelver_config_params_fixture
    params["nb_processes"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_nb_commits_before_checkpoint_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when
    nb_commits_before_checkpoint is not an integer.
    """
    params = gitdelver_config_params_fixture
    params["nb_commits_before_checkpoint"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_verbose_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when verbose is not a boolean.
    """
    params = gitdelver_config_params_fixture
    params["verbose"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_SATD_keywords_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when SATD_keywords is not
    a list of strings.
    """
    params = gitdelver_config_params_fixture
    params["SATD_keywords"] = [1, 2]
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
def test_check_config_params_bugfix_keywords_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verifies that _check_config_params exits (SystemExit) when bugfix_keywords is not
    a list of strings.
    """
    params = gitdelver_config_params_fixture
    params["bugfix_keywords"] = [1, 2]
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(params)
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the unit tests for the "gitdelver" module.
"""
import pytest, gitdelver
from typing import Callable, Dict
from utilities import AnalysisMode
from pathlib import Path
@pytest.fixture
def gitdelver_config_params_fixture() -> Dict[str, str]:
"""
This test fixture initializes the config_params dictionary.
"""
config_params = {
"repo_path": str(Path.home()),
"csv_output_folder_path": str(Path.home()),
"keep_unsupported_files": False,
"analysis_mode": AnalysisMode.COMMITS_FILES,
"nb_processes": 4,
"nb_commits_before_checkpoint": 50,
"verbose": True,
"SATD_keywords": ["//todo", "#todo", "//fixme", "#fixme", "//tofix", "#tofix",
"//hack", "#hack", "//workaround", "#workaround"],
"bugfix_keywords": ["fix", "solve", "bug", "defect", "problem"]
}
return config_params
def test_check_config_params_valid(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises no SystemExit exception when all parameters are present and valid.
"""
config_params = gitdelver_config_params_fixture
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_repo_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when repo_path is missing.
"""
config_params = gitdelver_config_params_fixture.pop("repo_path", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_csv_output_folder_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when csv_output_folder_path is missing.
"""
config_params = gitdelver_config_params_fixture.pop("csv_output_folder_path", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_keep_unsupported_files(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when keep_unsupported_files is missing.
"""
config_params = gitdelver_config_params_fixture.pop("keep_unsupported_files", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_analysis_mode(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when analysis_mode is missing.
"""
config_params = gitdelver_config_params_fixture.pop("analysis_mode", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_nb_processes(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when nb_processes is missing.
"""
config_params = gitdelver_config_params_fixture.pop("nb_processes", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_nb_commits_before_checkpoint(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when nb_commits_before_checkpoint is missing.
"""
config_params = gitdelver_config_params_fixture.pop("nb_commits_before_checkpoint", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_verbose(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when verbose is missing.
"""
config_params = gitdelver_config_params_fixture.pop("verbose", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_SATD_keywords(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when SATD_keywords is missing.
"""
config_params = gitdelver_config_params_fixture.pop("SATD_keywords", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_missing_bugfix_keywords(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when bugfix_keywords is missing.
"""
config_params = gitdelver_config_params_fixture.pop("bugfix_keywords", None)
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_empty_repo_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when repo_path is empty.
"""
config_params = gitdelver_config_params_fixture
config_params["repo_path"] = ""
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_invalid_repo_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
"""
This unit test checks that _check_config_params raises a SystemExit exception when repo_path is invalid.
"""
config_params = gitdelver_config_params_fixture
config_params["repo_path"] = "test"
with pytest.raises(SystemExit):
gitdelver._check_config_params(config_params)
def test_check_config_params_empty_csv_output_folder_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when csv_output_folder_path is empty.
    """
    gitdelver_config_params_fixture["csv_output_folder_path"] = ""
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_invalid_csv_output_folder_path(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when csv_output_folder_path is not a valid path.
    """
    gitdelver_config_params_fixture["csv_output_folder_path"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_keep_unsupported_files_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when keep_unsupported_files has the wrong type.
    """
    gitdelver_config_params_fixture["keep_unsupported_files"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_analysis_mode_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when analysis_mode has the wrong type.
    """
    gitdelver_config_params_fixture["analysis_mode"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_nb_processes_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when nb_processes has the wrong type.
    """
    gitdelver_config_params_fixture["nb_processes"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_nb_commits_before_checkpoint_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when nb_commits_before_checkpoint has the wrong type.
    """
    gitdelver_config_params_fixture["nb_commits_before_checkpoint"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_verbose_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when verbose has the wrong type.
    """
    gitdelver_config_params_fixture["verbose"] = "test"
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_SATD_keywords_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when SATD_keywords has the wrong type.
    """
    gitdelver_config_params_fixture["SATD_keywords"] = [1, 2]
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
def test_check_config_params_bugfix_keywords_wrong_type(gitdelver_config_params_fixture: Callable[[None], Dict[str, str]]):
    """
    Verify that _check_config_params raises SystemExit when bugfix_keywords has the wrong type.
    """
    gitdelver_config_params_fixture["bugfix_keywords"] = [1, 2]
    with pytest.raises(SystemExit):
        gitdelver._check_config_params(gitdelver_config_params_fixture)
This unit test checks that _check_config_params raises a SystemExit exception when repo_path is invalid. This unit test checks that _check_config_params raises a SystemExit exception when csv_output_folder_path is empty. This unit test checks that _check_config_params raises a SystemExit exception when csv_output_folder_path is invalid. This unit test checks that _check_config_params raises a SystemExit exception when keep_unsupported_files is of the wrong type. This unit test checks that _check_config_params raises a SystemExit exception when analysis_mode is of the wrong type. This unit test checks that _check_config_params raises a SystemExit exception when nb_processes is of the wrong type. This unit test checks that _check_config_params raises a SystemExit exception when nb_commits_before_checkpoint is of the wrong type. This unit test checks that _check_config_params raises a SystemExit exception when verbose is of the wrong type. This unit test checks that _check_config_params raises a SystemExit exception when SATD_keywords is of the wrong type. This unit test checks that _check_config_params raises a SystemExit exception when bugfix_keywords is of the wrong type. | 2.063551 | 2 |
aiida_optimize/engines/_result_mapping.py | greschd/aiida_optimize | 0 | 6619786 | # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: <NAME> <<EMAIL>>
"""
Defines the datastructures used by optimization engines to keep track of results.
"""
from __future__ import annotations
import typing as ty
__all__ = ['Result', 'ResultMapping']
class Result:
    """
    Container pairing an engine-generated input with the output obtained by
    evaluating it (``output`` stays ``None`` until the evaluation finishes).
    """
    def __init__(self, input_: ty.Any, output: ty.Any = None) -> None:
        self.input, self.output = input_, output
class ResultMapping:
    """
    Bookkeeping container mapping integer evaluation keys to their
    :class:`Result` (input / output pair).
    """
    def __init__(self) -> None:
        self._results: ty.Dict[int, Result] = {}

    @property
    def state(self) -> ty.Dict[int, Result]:
        """
        Uniquely defines the state of the object; an identical copy can be
        reconstructed from it with :meth:`from_state`.
        """
        return self._results

    @classmethod
    def from_state(cls, state: ty.Optional[ty.Dict[int, Result]]) -> ResultMapping:
        """
        Build a :class:`ResultMapping` from a previously captured ``state``.
        """
        mapping = cls()
        if state is not None:
            mapping._results = state  # pylint: disable=protected-access
        return mapping

    def add_inputs(self, inputs_list: ty.List[ty.Any]) -> ty.Dict[int, Result]:
        """
        Register a list of inputs under freshly generated keys and return a
        dict mapping each new key to its input.
        """
        new_keys = []
        for input_value in inputs_list:
            # Make sure all nodes contained in the input are stored.
            for node in input_value.values():
                if not node.is_stored:
                    node.store()
            key = self._get_new_key()
            new_keys.append(key)
            self._results[key] = Result(input_=input_value)
        return {key: self._results[key].input for key in new_keys}

    def _get_new_key(self) -> int:
        # Keys are monotonically increasing integers starting at 0.
        if self._results:
            return max(self._results) + 1
        return 0

    def add_outputs(self, outputs: ty.Dict[int, ty.Any]) -> None:
        for key, value in outputs.items():
            self._results[key].output = value

    def __getattr__(self, key: str) -> ty.Any:
        # Delegate unknown attribute access to the underlying dict.
        return getattr(self._results, key)

    def __getitem__(self, key: int) -> Result:
        return self._results[key]

    def __len__(self) -> int:
        return len(self._results)
| # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: <NAME> <<EMAIL>>
"""
Defines the datastructures used by optimization engines to keep track of results.
"""
from __future__ import annotations
import typing as ty
__all__ = ['Result', 'ResultMapping']
class Result:
    """
    Container pairing an engine-generated input with the output obtained by
    evaluating it (``output`` stays ``None`` until the evaluation finishes).
    """
    def __init__(self, input_: ty.Any, output: ty.Any = None) -> None:
        self.input, self.output = input_, output
class ResultMapping:
    """
    Bookkeeping container mapping integer evaluation keys to their
    :class:`Result` (input / output pair).
    """
    def __init__(self) -> None:
        self._results: ty.Dict[int, Result] = {}

    @property
    def state(self) -> ty.Dict[int, Result]:
        """
        Uniquely defines the state of the object; an identical copy can be
        reconstructed from it with :meth:`from_state`.
        """
        return self._results

    @classmethod
    def from_state(cls, state: ty.Optional[ty.Dict[int, Result]]) -> ResultMapping:
        """
        Build a :class:`ResultMapping` from a previously captured ``state``.
        """
        mapping = cls()
        if state is not None:
            mapping._results = state  # pylint: disable=protected-access
        return mapping

    def add_inputs(self, inputs_list: ty.List[ty.Any]) -> ty.Dict[int, Result]:
        """
        Register a list of inputs under freshly generated keys and return a
        dict mapping each new key to its input.
        """
        new_keys = []
        for input_value in inputs_list:
            # Make sure all nodes contained in the input are stored.
            for node in input_value.values():
                if not node.is_stored:
                    node.store()
            key = self._get_new_key()
            new_keys.append(key)
            self._results[key] = Result(input_=input_value)
        return {key: self._results[key].input for key in new_keys}

    def _get_new_key(self) -> int:
        # Keys are monotonically increasing integers starting at 0.
        if self._results:
            return max(self._results) + 1
        return 0

    def add_outputs(self, outputs: ty.Dict[int, ty.Any]) -> None:
        for key, value in outputs.items():
            self._results[key].output = value

    def __getattr__(self, key: str) -> ty.Any:
        # Delegate unknown attribute access to the underlying dict.
        return getattr(self._results, key)

    def __getitem__(self, key: int) -> Result:
        return self._results[key]

    def __len__(self) -> int:
        return len(self._results)
| en | 0.765843 | # -*- coding: utf-8 -*- # © 2017-2019, ETH Zurich, Institut für Theoretische Physik # Author: <NAME> <<EMAIL>> Defines the datastructures used by optimization engines to keep track of results. Data object for storing the input created by the optimization engine, and the output from the evaluation process corresponding to that input. Maps the keys used to identify evaluations to their inputs / outputs. Uniquely defines the state of the object. This can be used to create an identical copy. Create a :class:`ResultMapping` instance from a state. # pylint: disable=protected-access Adds a list of inputs to the mapping, generating new keys. Returns a dict mapping the keys to the inputs. | 3.24422 | 3 |
opensdraw/library/partsString.py | HazenBabcock/openldraw | 9 | 6619787 | #!/usr/bin/env python
"""
.. module:: partsString
:synopsis: Python functions to add parts using return delimited strings.
.. moduleauthor:: <NAME>
"""
import numpy
# Define the basestring type for Python 3.
try:
basestring
except NameError:
basestring = str
import opensdraw.lcad_language.geometry as geometry
import opensdraw.lcad_language.interpreter as interpreter
import opensdraw.lcad_language.parts as parts
import opensdraw.lcad_language.lcadExceptions as lcadExceptions
lcad_functions = {}
def addParts(model, parts_string):
    """
    Parse *parts_string* line by line and add the parts it describes to *model*.
    """
    lines = parts_string.splitlines()
    group = model.curGroup()
    matrix = group.matrix()
    step_offset = interpreter.getStepOffset(model)

    # Brick spacing: standard bricks use a height of 24 LDU, technic 20 LDU.
    bw, bh = 20.0, 24.0
    if "technic" in lines[0]:
        bh = 20.0
        lines = lines[1:]

    for raw_line in lines:
        stripped = raw_line.strip()
        # Skip blank lines and ";" comment lines.
        if not stripped or stripped.startswith(";"):
            continue
        fields = stripped.split(" ")
        if len(fields) in (8, 9):
            # Position / orientation, converted from brick units to LDU.
            pos_ori = list(map(float, fields[0:6]))
            pos_ori[0] *= bw
            pos_ori[1] *= bw
            pos_ori[2] *= bh

            # Part transform relative to the current group.
            curm = numpy.dot(matrix, geometry.listToMatrix(pos_ori))

            # Colors may be numeric LDraw codes or color names.
            try:
                color = int(fields[7])
            except ValueError:
                color = fields[7]

            # Optional 9th field is a per-part step offset.
            if len(fields) == 8:
                group.addPart(parts.Part(curm, fields[6], color, step_offset), False)
            else:
                group.addPart(parts.Part(curm, fields[6], color, step_offset + int(fields[8])), False)
        elif len(fields) > 1:
            # Warn about non-blank lines that don't look like part specs.
            print(stripped, "has an unexpected number of elements", len(fields))
class PartsFile(interpreter.LCadFunction):
    """
    **parts-file** - Specify parts in a text file.

    This lets you load parts from a text file that is formatted in the
    same fashion as for the *parts-string()* function.

    :param file: A string containing the name of the file to load.

    Usage::

        (parts-file "parts_file.txt") ; Load parts from parts_file.txt
    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "parts-file")
        self.setSignature([[basestring]])

    def call(self, model, filename):
        # Read the whole file, then hand the text to the shared parser.
        with open(filename) as file_fp:
            contents = file_fp.read()
        addParts(model, contents)

lcad_functions["parts-file"] = PartsFile()
class PartsString(interpreter.LCadFunction):
    """
    **parts-string** - Specify parts using a return delimited string.

    This lets you specify parts using a return delimited string, instead
    of having to use a function like *tb()* or *sb()* from *locate.lcad*.
    When you have lots of parts with a relatively simple geometry this might
    be faster and easier. If the first line of the string contains the word
    "technic" then technic brick spacing will be used instead of standard
    brick spacing. Any line that contains 8 or 9 elements is assumed to
    specify a part as *(x, y, z, x rotation, y rotation, z rotation,
    part, color, {optional} step)*. Other lines are ignored, including
    all lines that start with ";".

    :param string: The string of part locations and types.

    Usage::

        ; 3 2x1 bricks using standard brick units.
        (parts-string "0 0 0 -90 0 0 3004 Red
        0 0 1 -90 0 0 3004 Green
        0 0 2 -90 0 0 3004 Blue")

        ; 3 technic beam 2 using standard technic units.
        (parts-string "technic
        0 2 0 90 0 0 43857 4
        0 2 1 90 0 0 43857 2
        0 2 2 90 0 0 43857 1")
    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "parts-string")
        self.setSignature([[basestring]])

    def call(self, model, string):
        # The string already has the parts-file format; parse it directly.
        addParts(model, string)

lcad_functions["parts-string"] = PartsString()
| #!/usr/bin/env python
"""
.. module:: partsString
:synopsis: Python functions to add parts using return delimited strings.
.. moduleauthor:: <NAME>
"""
import numpy
# Define the basestring type for Python 3.
try:
basestring
except NameError:
basestring = str
import opensdraw.lcad_language.geometry as geometry
import opensdraw.lcad_language.interpreter as interpreter
import opensdraw.lcad_language.parts as parts
import opensdraw.lcad_language.lcadExceptions as lcadExceptions
lcad_functions = {}
def addParts(model, parts_string):
    """
    Parse *parts_string* line by line and add the parts it describes to *model*.
    """
    lines = parts_string.splitlines()
    group = model.curGroup()
    matrix = group.matrix()
    step_offset = interpreter.getStepOffset(model)

    # Brick spacing: standard bricks use a height of 24 LDU, technic 20 LDU.
    bw, bh = 20.0, 24.0
    if "technic" in lines[0]:
        bh = 20.0
        lines = lines[1:]

    for raw_line in lines:
        stripped = raw_line.strip()
        # Skip blank lines and ";" comment lines.
        if not stripped or stripped.startswith(";"):
            continue
        fields = stripped.split(" ")
        if len(fields) in (8, 9):
            # Position / orientation, converted from brick units to LDU.
            pos_ori = list(map(float, fields[0:6]))
            pos_ori[0] *= bw
            pos_ori[1] *= bw
            pos_ori[2] *= bh

            # Part transform relative to the current group.
            curm = numpy.dot(matrix, geometry.listToMatrix(pos_ori))

            # Colors may be numeric LDraw codes or color names.
            try:
                color = int(fields[7])
            except ValueError:
                color = fields[7]

            # Optional 9th field is a per-part step offset.
            if len(fields) == 8:
                group.addPart(parts.Part(curm, fields[6], color, step_offset), False)
            else:
                group.addPart(parts.Part(curm, fields[6], color, step_offset + int(fields[8])), False)
        elif len(fields) > 1:
            # Warn about non-blank lines that don't look like part specs.
            print(stripped, "has an unexpected number of elements", len(fields))
class PartsFile(interpreter.LCadFunction):
    """
    **parts-file** - Specify parts in a text file.

    This lets you load parts from a text file that is formatted in the
    same fashion as for the *parts-string()* function.

    :param file: A string containing the name of the file to load.

    Usage::

        (parts-file "parts_file.txt") ; Load parts from parts_file.txt
    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "parts-file")
        self.setSignature([[basestring]])

    def call(self, model, filename):
        # Read the whole file, then hand the text to the shared parser.
        with open(filename) as file_fp:
            contents = file_fp.read()
        addParts(model, contents)

lcad_functions["parts-file"] = PartsFile()
class PartsString(interpreter.LCadFunction):
    """
    **parts-string** - Specify parts using a return delimited string.

    This lets you specify parts using a return delimited string, instead
    of having to use a function like *tb()* or *sb()* from *locate.lcad*.
    When you have lots of parts with a relatively simple geometry this might
    be faster and easier. If the first line of the string contains the word
    "technic" then technic brick spacing will be used instead of standard
    brick spacing. Any line that contains 8 or 9 elements is assumed to
    specify a part as *(x, y, z, x rotation, y rotation, z rotation,
    part, color, {optional} step)*. Other lines are ignored, including
    all lines that start with ";".

    :param string: The string of part locations and types.

    Usage::

        ; 3 2x1 bricks using standard brick units.
        (parts-string "0 0 0 -90 0 0 3004 Red
        0 0 1 -90 0 0 3004 Green
        0 0 2 -90 0 0 3004 Blue")

        ; 3 technic beam 2 using standard technic units.
        (parts-string "technic
        0 2 0 90 0 0 43857 4
        0 2 1 90 0 0 43857 2
        0 2 2 90 0 0 43857 1")
    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "parts-string")
        self.setSignature([[basestring]])

    def call(self, model, string):
        # The string already has the parts-file format; parse it directly.
        addParts(model, string)

lcad_functions["parts-string"] = PartsString()
| en | 0.682862 | #!/usr/bin/env python .. module:: partsString :synopsis: Python functions to add parts using return delimited strings. .. moduleauthor:: <NAME> # Define the basestring type for Python 3. This does the actual parsing of the parts from the string. # Configure brick spacing. # Process lines. # Check that this is not a comment line. # Get position and orientation and adjust for brick spacing. # Calculate part matrix. # Color # Add the part to the model. # Warning for non blank lines. **parts-file** - Specify parts in a text file. This lets you load parts from a text file that is formatted in the same fashion as for the *parts-string()* function. :param file: A string containing the name of the file to load. Usage:: (parts-file "parts_file.txt") ; Load parts from parts_file.txt **parts-string** - Specify parts using a return delimited string. This lets you specify parts using a return delimited string, instead of having to use a function like *tb()* or *sb()* from *locate.lcad*. When you have lots of parts with a relatively simple geometry this might be faster and easier. If the first line of the string contains the word "technic" then technic brick spacing will be used instead of standard brick spacing. Any line that contains 8 or 9 elements is assumed to specify a part as *(x, y, z, x rotation, y rotation, z rotation, part, color, {optional} step)*. Other lines are ignored, including all lines that start with ";". :param string: The string of part locations and types. Usage:: ; 3 2x1 bricks using standard brick units. (parts-string "0 0 0 -90 0 0 3004 Red 0 0 1 -90 0 0 3004 Green 0 0 2 -90 0 0 3004 Blue") ; 3 technic beam 2 using standard technic units. (parts-string "technic 0 2 0 90 0 0 43857 4 0 2 1 90 0 0 43857 2 0 2 2 90 0 0 43857 1") | 3.320761 | 3 |
cogs/subtitles.py | junia7759/musicbot | 1 | 6619788 | import asyncio
import re
import discord
from discord.ext import commands
from . import check_voice_connection
URL_REGEX = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
)
class SubtitleCallback:
    """Renders streaming subtitle updates into a single Discord message.

    The message is edited in place while it is still the newest message in
    the channel; otherwise the stale message is deleted and a fresh one is
    sent so the subtitle stays at the bottom of the channel.
    """

    def __init__(self, channel: discord.TextChannel):
        self.loop = asyncio.get_event_loop()
        # Last subtitle message this callback sent; None before the first send.
        self._message: discord.Message = None
        self.channel: discord.TextChannel = channel

    async def callback(self, subtitle: dict) -> None:
        # Annotation corrected from ``str``: the body indexes ``subtitle``
        # with "previous"/"current"/"next", so it is a mapping.
        # NOTE(review): channel.last_message can be None when the message
        # cache is empty, which would raise AttributeError here -- confirm.
        if not self._message or self.channel.last_message.id != self._message.id:
            if self._message:
                # Delete the stale message without blocking this callback.
                self.loop.create_task(self._message.delete())
            self._message = await self.channel.send(
                f'{subtitle.get("previous", "")}\n> {subtitle["current"]}\n{subtitle.get("next") or ""}'
            )
        else:
            await self._message.edit(
                content=f'{subtitle.get("previous", "")}\n> {subtitle["current"]}\n{subtitle.get("next") or ""}'
            )
class Subtitles(commands.Cog):
    """Cog exposing the subtitle/lyrics command of the music bot."""

    def __init__(self, Bot) -> None:
        self.Bot = Bot

    @commands.command(name="subtitles", aliases=["subtitle", "lyrics"])
    @commands.check(check_voice_connection)
    async def subtitles(self, ctx, value: str = None) -> None:
        """List available subtitle languages or start streaming one.

        ``value`` may be a language code from the current track, a direct
        subtitle URL, or omitted to list the available languages.
        """
        VC = self.Bot.Audio.getVC(ctx.guild.id)
        State: dict = await VC.getState()
        # Language codes offered by the currently playing track.
        usableSubtitles: list = State.get("current", {}).get("subtitles", {}).keys()
        if not value:
            # No argument: reply with the list of available languages.
            return await ctx.send(
                f"> 사용 가능한 자막: {' '.join(map(lambda x: f'`{x}`', usableSubtitles))}"
            )
        urlMatch = URL_REGEX.match(value)
        if urlMatch:
            # A URL was passed: fetch subtitles from it instead of by language.
            url, value = value, None
        else:
            url = None
        if value and value not in usableSubtitles:
            return await ctx.send(
                f"> ❎ `{value}` 자막을 찾을 수 없어요.\n> \n> 사용 가능한 자막: {' '.join(map(lambda x: f'`{x}`', usableSubtitles))}"
            )
        # Start streaming; updates are rendered by SubtitleCallback.
        # NOTE(review): the returned Data is unused -- confirm it is needed.
        Data = await VC.getSubtitle(
            lang=value, url=url, callback=SubtitleCallback(ctx.channel).callback
        )
        await ctx.send(f"> ➡️ {f'`{value}` ' if value else ''}자막을 출력할게요!")

def setup(Bot):
    # discord.py extension entry point: register the cog with the bot.
    Bot.add_cog(Subtitles(Bot))
| import asyncio
import re
import discord
from discord.ext import commands
from . import check_voice_connection
URL_REGEX = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
)
class SubtitleCallback:
    """Renders streaming subtitle updates into a single Discord message.

    The message is edited in place while it is still the newest message in
    the channel; otherwise the stale message is deleted and a fresh one is
    sent so the subtitle stays at the bottom of the channel.
    """

    def __init__(self, channel: discord.TextChannel):
        self.loop = asyncio.get_event_loop()
        # Last subtitle message this callback sent; None before the first send.
        self._message: discord.Message = None
        self.channel: discord.TextChannel = channel

    async def callback(self, subtitle: dict) -> None:
        # Annotation corrected from ``str``: the body indexes ``subtitle``
        # with "previous"/"current"/"next", so it is a mapping.
        # NOTE(review): channel.last_message can be None when the message
        # cache is empty, which would raise AttributeError here -- confirm.
        if not self._message or self.channel.last_message.id != self._message.id:
            if self._message:
                # Delete the stale message without blocking this callback.
                self.loop.create_task(self._message.delete())
            self._message = await self.channel.send(
                f'{subtitle.get("previous", "")}\n> {subtitle["current"]}\n{subtitle.get("next") or ""}'
            )
        else:
            await self._message.edit(
                content=f'{subtitle.get("previous", "")}\n> {subtitle["current"]}\n{subtitle.get("next") or ""}'
            )
class Subtitles(commands.Cog):
    """Cog exposing the subtitle/lyrics command of the music bot."""

    def __init__(self, Bot) -> None:
        self.Bot = Bot

    @commands.command(name="subtitles", aliases=["subtitle", "lyrics"])
    @commands.check(check_voice_connection)
    async def subtitles(self, ctx, value: str = None) -> None:
        """List available subtitle languages or start streaming one.

        ``value`` may be a language code from the current track, a direct
        subtitle URL, or omitted to list the available languages.
        """
        VC = self.Bot.Audio.getVC(ctx.guild.id)
        State: dict = await VC.getState()
        # Language codes offered by the currently playing track.
        usableSubtitles: list = State.get("current", {}).get("subtitles", {}).keys()
        if not value:
            # No argument: reply with the list of available languages.
            return await ctx.send(
                f"> 사용 가능한 자막: {' '.join(map(lambda x: f'`{x}`', usableSubtitles))}"
            )
        urlMatch = URL_REGEX.match(value)
        if urlMatch:
            # A URL was passed: fetch subtitles from it instead of by language.
            url, value = value, None
        else:
            url = None
        if value and value not in usableSubtitles:
            return await ctx.send(
                f"> ❎ `{value}` 자막을 찾을 수 없어요.\n> \n> 사용 가능한 자막: {' '.join(map(lambda x: f'`{x}`', usableSubtitles))}"
            )
        # Start streaming; updates are rendered by SubtitleCallback.
        # NOTE(review): the returned Data is unused -- confirm it is needed.
        Data = await VC.getSubtitle(
            lang=value, url=url, callback=SubtitleCallback(ctx.channel).callback
        )
        await ctx.send(f"> ➡️ {f'`{value}` ' if value else ''}자막을 출력할게요!")

def setup(Bot):
    # discord.py extension entry point: register the cog with the bot.
    Bot.add_cog(Subtitles(Bot))
| zh | 0.25616 | #=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)" | 2.475037 | 2 |
Exercicio41a50/ex044.py | ItamarHavenstein/Python | 0 | 6619789 | import locale
valor = int(input('Preço das compras: R$'))
print('FORMAS DE PAGAMENTO')
print('[ 1 ] à vista dinheiro/cheque \n'
'[ 2 ] à vista cartão \n'
'[ 3 ] 2x no cartão \n'
'[ 4 ] 3x ou mais no cartão')
opcao = int(input('Qual a opção? '))
locale.setlocale(locale.LC_MONETARY, 'pt_BR.UTF-8')
if opcao == 1:
pagamento = valor - (valor * 0.10)
print('Suas compras de {}, ficaram em {} com o desconto de 10%'
''.format(locale.currency(valor), locale.currency(pagamento)))
elif opcao == 2:
pagamento = valor - (valor * 0.05)
print('Suas compras de {:.2f}, ficaram em {:.2f} com o desconto de 5%'.format(valor, pagamento))
elif opcao == 3:
pagamento = valor / 2
print('Suas compras de {}, ficaram no valor de {} em 2 vezes sem acréscimo'.format(valor, pagamento))
elif opcao == 4:
parcelas = int(input('Em quantas parcelas você deseja: '))
pagamento = (valor + (valor * 0.20)) / parcelas
print('Suas compras de {}, ficaram no valor de {} em {} vezes com juros \n'
'Valor final da sua compra será de {}'.format(valor, pagamento, parcelas, (pagamento * parcelas)))
else:
print('Opção inválida, tente novamente')
| import locale
valor = int(input('Preço das compras: R$'))
print('FORMAS DE PAGAMENTO')
print('[ 1 ] à vista dinheiro/cheque \n'
'[ 2 ] à vista cartão \n'
'[ 3 ] 2x no cartão \n'
'[ 4 ] 3x ou mais no cartão')
opcao = int(input('Qual a opção? '))
locale.setlocale(locale.LC_MONETARY, 'pt_BR.UTF-8')
if opcao == 1:
pagamento = valor - (valor * 0.10)
print('Suas compras de {}, ficaram em {} com o desconto de 10%'
''.format(locale.currency(valor), locale.currency(pagamento)))
elif opcao == 2:
pagamento = valor - (valor * 0.05)
print('Suas compras de {:.2f}, ficaram em {:.2f} com o desconto de 5%'.format(valor, pagamento))
elif opcao == 3:
pagamento = valor / 2
print('Suas compras de {}, ficaram no valor de {} em 2 vezes sem acréscimo'.format(valor, pagamento))
elif opcao == 4:
parcelas = int(input('Em quantas parcelas você deseja: '))
pagamento = (valor + (valor * 0.20)) / parcelas
print('Suas compras de {}, ficaram no valor de {} em {} vezes com juros \n'
'Valor final da sua compra será de {}'.format(valor, pagamento, parcelas, (pagamento * parcelas)))
else:
print('Opção inválida, tente novamente')
| none | 1 | 3.94324 | 4 | |
tests/utils.py | assigdev/site_pinger | 0 | 6619790 | import functools
def cases(case_list):
    """Decorator factory: run the wrapped test once per case in *case_list*.

    Each case is appended as the final positional argument.  On an assertion
    failure the offending case is printed before the error is re-raised, which
    makes parametrised failures easy to locate.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args):
            for current_case in case_list:
                try:
                    func(*(args + (current_case,)))
                except AssertionError:
                    # Identify which case failed before propagating.
                    print("Error in case: %s" % (current_case,))
                    raise
            return
        return wrapper
    return decorator
| import functools
def cases(case_list):
def decorator(func):
@functools.wraps(func)
def wrapper(*args):
for case in case_list:
new_args = args + (case,)
try:
func(*new_args)
except AssertionError:
print("Error in case: %s" % (case,))
raise
return
return wrapper
return decorator
| none | 1 | 3.058181 | 3 | |
ds_linked_list_ordered.py | bowen0701/python-algorithms-data-structures | 8 | 6619791 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Node(object):
    """Singly linked list node holding ``data`` and a ``next`` pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedListOrdered(object):
    """Ordered (ascending) singly linked list.

    Operations include the following:
      - is_empty()
      - size()
      - show()
      - add(data)
      - delete_with_data(data)
      - pop(pos)
      - search(item)
      - index(item)
    """

    def __init__(self):
        self.head = None

    def is_empty(self):
        """Return True when the list has no nodes.

        Time complexity: O(1).
        Space complexity: O(1).
        """
        return self.head is None

    def size(self):
        """Return the number of nodes in the list.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        counter = 0
        current = self.head
        while current:
            counter += 1
            current = current.next
        return counter

    def show(self):
        """Print all stored values as a Python list.

        Time complexity: O(n).
        Space complexity: O(n).
        """
        a_list = []
        current = self.head
        while current:
            a_list.append(current.data)
            current = current.next
        print(a_list)

    def add(self, data):
        """Insert ``data`` keeping ascending order.

        Duplicates are inserted after existing equal values.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        new_node = Node(data)
        # Empty list or new smallest value: the new node becomes the head.
        if not self.head or self.head.data > data:
            new_node.next = self.head
            self.head = new_node
            return None
        # Advance to the last node whose value is <= data.
        current = self.head
        while current.next and current.next.data <= data:
            current = current.next
        new_node.next = current.next
        current.next = new_node

    def delete_with_data(self, data):
        """Remove the first node holding ``data``, if present.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        if not self.head:
            return None
        # Removing the head only requires advancing the head pointer.
        if self.head.data == data:
            self.head = self.head.next
            return None
        current = self.head
        while current.next:
            if current.next.data == data:
                # Unlink the matching node.
                current.next = current.next.next
                return None
            current = current.next

    def pop(self, pos=None):
        """Remove and return the item at index ``pos`` (last item if None).

        Bug fix: the original tested ``if not pos``, which treats ``pop(0)``
        as "pop the last item" because 0 is falsy; ``pos`` is now compared
        to None explicitly.

        Time complexity: O(pos).
        Space complexity: O(1).
        """
        if not self.head:
            return None
        if pos is None:
            pos = self.size() - 1
        current = self.head
        previous = None
        counter = 0
        while counter < pos and current.next:
            previous = current
            current = current.next
            counter += 1
        if not previous:
            self.head = current.next
        else:
            previous.next = current.next
        return current.data

    def search(self, data):
        """Return True if ``data`` is in the list.

        Bug fix: the original loop required ``current.next`` to be truthy,
        so the last node was never compared and searching for the largest
        element always returned False.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        current = self.head
        while current:
            if current.data == data:
                return True
            if current.data > data:
                # The list is ordered: no later node can match.
                return False
            current = current.next
        return False

    def index(self, data):
        """Return the index of ``data``, or None when it is absent.

        Bug fix: same last-node issue as search() -- the final node was
        never examined by the original loop.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        counter = 0
        current = self.head
        while current:
            if current.data == data:
                return counter
            if current.data > data:
                return None
            current = current.next
            counter += 1
        return None
def main():
    """Demonstrate the LinkedListOrdered operations on a small example."""
    a_list = LinkedListOrdered()
    for value in (31, 77, 17, 93, 26, 54):
        a_list.add(value)
    print('Is empty: {}'.format(a_list.is_empty()))
    print('Size: {}'.format(a_list.size()))
    print('Delete non-existed 100')
    a_list.delete_with_data(100)
    a_list.show()
    print('Delete 77')
    a_list.delete_with_data(77)
    a_list.show()
    print('Pop pos 3:')
    a_list.pop(3)
    a_list.show()
    print('Search non-existed 100: {}'.format(a_list.search(100)))
    print('Search 31: {}'.format(a_list.search(31)))
    print('Index non-existed 100: {}'.format(a_list.index(100)))
    print('Index 31: {}'.format(a_list.index(31)))


if __name__ == '__main__':
    main()
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Node(object):
    """Building block for the linked list: stores ``data`` and a link."""

    def __init__(self, data):
        self.data = data
        self.next = None  # successor node; None marks the tail
class LinkedListOrdered(object):
    """Ordered (ascending) singly linked list.

    Operations include the following:
    - is_empty()
    - size()
    - show()
    - add(data)
    - delete_with_data(data)
    - pop(pos)
    - search(item)
    - index(item)

    Bug fixes relative to the original:
    - ``pop(0)`` now pops the head (``if not pos`` treated an explicit
      ``pos=0`` as "pop the last item").
    - ``search``/``index`` now examine the last node (the original
      loops were guarded on ``current.next`` and skipped it).
    """

    def __init__(self):
        # Head of the chain; None when the list is empty.
        self.head = None

    def is_empty(self):
        """Check list is empty or not.

        Time complexity: O(1).
        Space complexity: O(1).
        """
        return self.head is None

    def size(self):
        """Obtain list size.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        counter = 0
        current = self.head
        while current:
            counter += 1
            current = current.next
        return counter

    def show(self):
        """Print the list as a Python list of the stored values.

        Time complexity: O(n).
        Space complexity: O(n).
        """
        a_list = []
        current = self.head
        while current:
            a_list.append(current.data)
            current = current.next
        print(a_list)

    def add(self, data):
        """Insert ``data`` keeping ascending order.

        Equal values are inserted after existing duplicates, matching
        the original behaviour.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        new_node = Node(data)
        # Empty list or new smallest value: new node becomes the head.
        if not self.head or self.head.data > data:
            new_node.next = self.head
            self.head = new_node
            return None
        # Advance to the last node whose value is <= data.
        current = self.head
        while current.next and current.next.data <= data:
            current = current.next
        new_node.next = current.next
        current.next = new_node

    def delete_with_data(self, data):
        """Delete the first occurrence of ``data``, if present.

        Silently does nothing when ``data`` is not in the list.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        if not self.head:
            return None
        if self.head.data == data:
            self.head = self.head.next
            return None
        current = self.head
        while current.next:
            if current.next.data == data:
                # Unlink the matching node.
                current.next = current.next.next
                return None
            current = current.next

    def pop(self, pos=None):
        """Pop and return the item at position ``pos``.

        When ``pos`` is None the last item is popped.  Returns None on
        an empty list.  Positions past the end pop the last item (the
        walk stops at the tail).

        Time complexity: O(pos).
        Space complexity: O(1).
        """
        if not self.head:
            return None
        # BUG FIX: the original used ``if not pos``, which treated an
        # explicit ``pos=0`` the same as "pop the last item".
        if pos is None:
            pos = self.size() - 1
        current = self.head
        previous = None
        counter = 0
        while counter < pos and current.next:
            previous = current
            current = current.next
            counter += 1
        if previous is None:
            self.head = current.next
        else:
            previous.next = current.next
        return current.data

    def search(self, data):
        """Return True if ``data`` is present in the list.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        current = self.head
        # Sorted ascending: stop once values exceed ``data``.
        while current is not None and current.data <= data:
            if current.data == data:
                return True
            current = current.next
        return False

    def index(self, data):
        """Return the zero-based position of ``data``, or None if absent.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        current = self.head
        position = 0
        # Sorted ascending: stop once values exceed ``data``.
        while current is not None and current.data <= data:
            if current.data == data:
                return position
            current = current.next
            position += 1
        return None
def main():
    """Small demo driver exercising the ordered linked list."""
    ordered = LinkedListOrdered()
    # Insert in scrambled order; the list keeps itself sorted.
    for value in (31, 77, 17, 93, 26, 54):
        ordered.add(value)
    print('Is empty: {}'.format(ordered.is_empty()))
    print('Size: {}'.format(ordered.size()))
    print('Delete non-existed 100')
    ordered.delete_with_data(100)
    ordered.show()
    print('Delete 77')
    ordered.delete_with_data(77)
    ordered.show()
    print('Pop pos 3:')
    ordered.pop(3)
    ordered.show()
    print('Search non-existed 100: {}'.format(ordered.search(100)))
    print('Search 31: {}'.format(ordered.search(31)))
    print('Index non-existed 100: {}'.format(ordered.index(100)))
    print('Index 31: {}'.format(ordered.index(31)))


if __name__ == '__main__':
    main()
Algorithm/BOJ/DP/14925목장건설하기.py | Nyapy/FMTG | 0 | 6619792 | import sys
sys.stdin = open('14925.txt')
M,N = map(int , input().split()) #세로가 M
mocjang = [list(1 for _ in range(N+1))]+[[1]+list(map(int, input().split())) for _ in range(M)]
square = [[0 for _ in range(N+1)] for _ in range(M+1)]
ans = 0
for i in range(1,M+1):
for j in range(1,N+1):
if mocjang[i][j] == 1 or mocjang[i][j] == 2:
pass
else:
square[i][j] = min(square[i-1][j-1],square[i][j-1],square[i-1][j]) +1
if square[i][j] > ans:
ans = square[i][j]
print(ans) | import sys
sys.stdin = open('14925.txt')
M,N = map(int , input().split()) #세로가 M
mocjang = [list(1 for _ in range(N+1))]+[[1]+list(map(int, input().split())) for _ in range(M)]
square = [[0 for _ in range(N+1)] for _ in range(M+1)]
ans = 0
for i in range(1,M+1):
for j in range(1,N+1):
if mocjang[i][j] == 1 or mocjang[i][j] == 2:
pass
else:
square[i][j] = min(square[i-1][j-1],square[i][j-1],square[i-1][j]) +1
if square[i][j] > ans:
ans = square[i][j]
print(ans) | none | 1 | 3.028478 | 3 | |
examples/buckets/sources/check_bucket.py | DmitryBogomolov/aws-cloudformation-sample | 0 | 6619793 | import os
import boto3
s3 = boto3.client('s3')
def handler(event, context):
return s3.put_object(
ACL='public-read',
Bucket=os.getenv('BUCKET'),
ContentType='text/plain',
Key=event['key'],
Body=event['body'].encode()
)
| import os
import boto3
s3 = boto3.client('s3')
def handler(event, context):
return s3.put_object(
ACL='public-read',
Bucket=os.getenv('BUCKET'),
ContentType='text/plain',
Key=event['key'],
Body=event['body'].encode()
)
| none | 1 | 2.033022 | 2 | |
restio/graph.py | eduardostarling/restio | 3 | 6619794 | from __future__ import annotations
import asyncio
from collections import deque
from typing import (
AsyncGenerator,
Callable,
Dict,
Generic,
List,
Optional,
Set,
Tuple,
TypeVar,
cast,
)
# As a design decision, the classes in this file are bound to BaseModel
# to facilitate the navigation across different objects by utilizing
# the already built-in functionality get_children() and to allow monitoring
# of model states. If future implementation depends on drawing dependency
# graphs, then this file should be modified to decouple from BaseModel
# implementation. The node objects in this case will have to be Hashable.
from restio.model import BaseModel
Model_co = TypeVar("Model_co", bound=BaseModel, covariant=True)
class Node(Generic[Model_co]):
"""
Represents a Node in a Tree of a DependencyGraph.
Each Node instance stores a BaseModel object that represents a model in the
dependency Tree. `parents` and `children` store references to the nodes immediately
above and below them in the Tree.
"""
node_object: Model_co
parents: Set[Node[Model_co]]
children: Set[Node[Model_co]]
def __init__(
self,
node_object: Model_co,
parents: Optional[Set[Node[Model_co]]] = None,
children: Optional[Set[Node[Model_co]]] = None,
):
self.node_object = node_object
self.parents = parents if parents else set()
self.children = children if children else set()
def get_children(
self, recursive: bool = False, children: Optional[Set[Node[Model_co]]] = None
) -> Set[Node[Model_co]]:
"""
Returns the child nodes of the current Node.
:param recursive: Indicates whether all child nodes should be returned.
Defaults to False.
:param children: Contains the nodes that have already been inspected and should
be ignored. Used for recursion only.
:return: Returns all children nodes, including children of children, if
`recursive` is True. The operation stops when all leaves are reached.
Returns only the first degree children if False.
"""
return self._get_nodes("children", recursive=recursive, nodes=children)
def get_parents(
self, recursive: bool = False, parents: Optional[Set[Node[Model_co]]] = None
) -> Set[Node[Model_co]]:
"""
Returns the parent nodes of the current Node.
:param recursive: Indicates whether all parent nodes should be returned.
Defaults to False.
:param parents: Contains the nodes that have already been inspected and should
be ignored. Used for recursion only.
:return: Returns all parent nodes, including parents of parents, if `recursive`
is True. The operation stops when roots are reached. Returns only the
first degree parents if False.
"""
return self._get_nodes("parents", recursive=recursive, nodes=parents)
def _get_nodes(
self,
nodes_attribute: str,
recursive: bool = False,
nodes: Optional[Set[Node[Model_co]]] = None,
) -> Set[Node]:
dependent_nodes = getattr(self, nodes_attribute, [])
if not recursive:
return dependent_nodes.copy()
if not nodes:
nodes = set()
for node in dependent_nodes:
if node not in nodes:
nodes.add(node)
nodes = nodes.union(node._get_nodes(nodes_attribute, recursive, nodes))
return nodes
def __hash__(self):
return self.node_object.__hash__()
def __eq__(self, other):
if not isinstance(other, Node):
return False
if not self.node_object or not other.node_object:
return False
return self.node_object.__hash__() == other.node_object.__hash__()
GetRelativesCallable = Callable[..., Set[Node[Model_co]]]
NavigationDirection = Tuple[GetRelativesCallable, GetRelativesCallable]
class NavigationType:
"""
Indicates how the Tree navigation should be done.
- ROOTS_TO_LEAVES: Starts at the roots of the tree and moves towards the leaves.
- LEAVES_TO_ROOTS: Starts at the leaves of the tree and moves towards the roots.
"""
ROOTS_TO_LEAVES: NavigationDirection = (
Node.get_parents,
Node.get_children,
)
LEAVES_TO_ROOTS: NavigationDirection = (
Node.get_children,
Node.get_parents,
)
class Tree(Generic[Model_co]):
"""
Represents a Tree in a DependencyGraph.
Each Tree stores a set of Nodes that have at least one degree of relationship with
each other. The Tree and can be traversed with callback based tasks using the
method `process`.
"""
nodes: Set[Node[Model_co]]
_canceled: bool
_processing: bool
def __init__(self, nodes: Set[Node[Model_co]]):
self.nodes = nodes
self._canceled = False
self._processing = False
async def navigate(
self,
nodes: Set[Node[Model_co]],
direction: NavigationDirection,
processed_nodes: asyncio.Queue[Node[Model_co]],
) -> AsyncGenerator[Node, bool]:
"""
Traverses the dependency Tree based on already processed nodes. The caller
should maintain the queue `processed_nodes` with the nodes that have been
processed on the past iteration. This generator will yield a Node instance when
possible, otherwise it will hang until extra nodes processed.
The Tree traversal order will depend on the `direction` specified (either from
roots to leaves or leaves to roots) and the order in which nodes are processed
by the caller.
:param nodes: The set of Node instances to be processed.
:param direction: The direction of navigation, either from LEAVES_TO_ROOTS
(navigates upwards in the tree) or ROOTS_TO_LEAVES (navigates
downwards in the tree).
:param processed_nodes: The queue containing the nodes that have just been
processed by the caller.
:raises TypeError: If the direction is invalid.
:yield: The next node in the navigation.
"""
if direction == NavigationType.LEAVES_TO_ROOTS:
entrypoint = self.get_leafs()
elif direction == NavigationType.ROOTS_TO_LEAVES:
entrypoint = self.get_roots()
else:
raise TypeError("The provided argument `direction` is invalid.")
from_direction, to_direction = direction
next_nodes = deque(n for n in entrypoint if n in nodes)
processed: Optional[Node] = None
while nodes or next_nodes:
if next_nodes:
yield next_nodes.pop()
continue
processed = await processed_nodes.get()
if isinstance(processed, Node) and processed not in nodes:
return
nodes.remove(processed)
nodes_to_direction = to_direction(processed) if processed else set()
for node_to in nodes_to_direction:
nodes_from_direction = from_direction(node_to)
if not nodes_from_direction.intersection(nodes):
next_nodes.appendleft(node_to)
def get_roots(self) -> Set[Node[Model_co]]:
"""
Returns all roots of the Tree.
:return: Set containing Node instances.
"""
return self._get_tree_roots(self.nodes)
def get_leafs(self) -> Set[Node[Model_co]]:
"""
Returns all leaves of the Tree.
:return: Set containing Node instances.
"""
return self._get_tree_leafs(self.nodes)
def get_nodes(self) -> Set[Node[Model_co]]:
"""
Returns a copy of all nodes in the Tree.
:return: Set with Node instances.
"""
return self.nodes.copy()
def cancel(self):
"""
Cancels the processing when `process` is active. Currently running tasks will
be finalized normally, and new tasks will not be scheduled.
"""
if self._processing:
self._canceled = True
@staticmethod
def _get_tree_roots(tree_nodes: Set[Node[Model_co]]) -> Set[Node[Model_co]]:
return set(filter(lambda x: not x.parents, tree_nodes))
@staticmethod
def _get_tree_leafs(tree_nodes: Set[Node[Model_co]]) -> Set[Node[Model_co]]:
return set(filter(lambda x: not x.children, tree_nodes))
class DependencyGraph(Generic[Model_co]):
"""
Represents dependency graph made of a combination of Tree instances.
The DependencyGraph stores a list of Tree instances that contain all the Nodes in
the graph. This module is also responsible to instantiate all Trees and Nodes given
a set of objects of type BaseModel.
"""
trees: List[Tree[Model_co]] = []
def __init__(self, trees: List[Tree[Model_co]]):
self.trees = trees
@classmethod
def generate_from_objects(cls, objects: Set[Model_co]) -> DependencyGraph:
"""
Generates a DependencyGraph instance based on the set of BaseModel instances
`objects`.
:param objects: The set of BaseModel instances.
:return: The DependencyGraph instance.
"""
nodes: Set[Node] = cls._get_connected_nodes(objects)
return cls.generate_from_nodes(nodes)
@classmethod
def generate_from_nodes(cls, nodes: Set[Node[Model_co]]) -> DependencyGraph:
"""
Generates a DependencyGraph instance based on the set of Node instances `nodes`.
:param nodes: The set of Node instances
:return: The DependencyGraph instance.
"""
roots: Set[Node] = Tree._get_tree_roots(nodes)
roots_children: Dict[Node, Set[Node]] = {
root: root.get_children(True) for root in roots
}
trees: List[Tree] = []
# generates trees with intersections
while roots:
root = roots.pop()
intersecting_items = {root}.union(roots_children[root])
intersecting_roots = {root}
for next_root in roots:
next_root_children = {next_root}.union(roots_children[next_root])
if intersecting_items.intersection(next_root_children):
intersecting_items = intersecting_items.union(next_root_children)
intersecting_roots.add(next_root)
trees.append(Tree(intersecting_items))
roots = roots - intersecting_roots
return cls(trees)
@staticmethod
def _get_connected_nodes(objects: Set[Model_co]) -> Set[Node]:
nodes: Dict[str, Node] = {}
# creates nodes
for node_object in objects:
add_node = Node(node_object)
nodes[str(node_object.__hash__())] = add_node
# connects nodes
for node in nodes.values():
for child in node.node_object.get_children(recursive=False):
child_node = nodes.get(str(child.__hash__()), None)
if not child_node:
continue
node.children.add(child_node)
child_node.parents.add(node)
# check for circular dependency
for node in nodes.values():
all_children = node.get_children(recursive=True)
if all_children.intersection(node.get_parents(recursive=False)):
raise RuntimeError("Circular dependency detected")
return cast(Set[Node], nodes.values())
| from __future__ import annotations
import asyncio
from collections import deque
from typing import (
AsyncGenerator,
Callable,
Dict,
Generic,
List,
Optional,
Set,
Tuple,
TypeVar,
cast,
)
# As a design decision, the classes in this file are bound to BaseModel
# to facilitate the navigation across different objects by utilizing
# the already built-in functionality get_children() and to allow monitoring
# of model states. If future implementation depends on drawing dependency
# graphs, then this file should be modified to decouple from BaseModel
# implementation. The node objects in this case will have to be Hashable.
from restio.model import BaseModel
Model_co = TypeVar("Model_co", bound=BaseModel, covariant=True)
class Node(Generic[Model_co]):
"""
Represents a Node in a Tree of a DependencyGraph.
Each Node instance stores a BaseModel object that represents a model in the
dependency Tree. `parents` and `children` store references to the nodes immediately
above and below them in the Tree.
"""
node_object: Model_co
parents: Set[Node[Model_co]]
children: Set[Node[Model_co]]
def __init__(
self,
node_object: Model_co,
parents: Optional[Set[Node[Model_co]]] = None,
children: Optional[Set[Node[Model_co]]] = None,
):
self.node_object = node_object
self.parents = parents if parents else set()
self.children = children if children else set()
def get_children(
self, recursive: bool = False, children: Optional[Set[Node[Model_co]]] = None
) -> Set[Node[Model_co]]:
"""
Returns the child nodes of the current Node.
:param recursive: Indicates whether all child nodes should be returned.
Defaults to False.
:param children: Contains the nodes that have already been inspected and should
be ignored. Used for recursion only.
:return: Returns all children nodes, including children of children, if
`recursive` is True. The operation stops when all leaves are reached.
Returns only the first degree children if False.
"""
return self._get_nodes("children", recursive=recursive, nodes=children)
def get_parents(
self, recursive: bool = False, parents: Optional[Set[Node[Model_co]]] = None
) -> Set[Node[Model_co]]:
"""
Returns the parent nodes of the current Node.
:param recursive: Indicates whether all parent nodes should be returned.
Defaults to False.
:param parents: Contains the nodes that have already been inspected and should
be ignored. Used for recursion only.
:return: Returns all parent nodes, including parents of parents, if `recursive`
is True. The operation stops when roots are reached. Returns only the
first degree parents if False.
"""
return self._get_nodes("parents", recursive=recursive, nodes=parents)
def _get_nodes(
self,
nodes_attribute: str,
recursive: bool = False,
nodes: Optional[Set[Node[Model_co]]] = None,
) -> Set[Node]:
dependent_nodes = getattr(self, nodes_attribute, [])
if not recursive:
return dependent_nodes.copy()
if not nodes:
nodes = set()
for node in dependent_nodes:
if node not in nodes:
nodes.add(node)
nodes = nodes.union(node._get_nodes(nodes_attribute, recursive, nodes))
return nodes
def __hash__(self):
return self.node_object.__hash__()
def __eq__(self, other):
if not isinstance(other, Node):
return False
if not self.node_object or not other.node_object:
return False
return self.node_object.__hash__() == other.node_object.__hash__()
GetRelativesCallable = Callable[..., Set[Node[Model_co]]]
NavigationDirection = Tuple[GetRelativesCallable, GetRelativesCallable]
class NavigationType:
"""
Indicates how the Tree navigation should be done.
- ROOTS_TO_LEAVES: Starts at the roots of the tree and moves towards the leaves.
- LEAVES_TO_ROOTS: Starts at the leaves of the tree and moves towards the roots.
"""
ROOTS_TO_LEAVES: NavigationDirection = (
Node.get_parents,
Node.get_children,
)
LEAVES_TO_ROOTS: NavigationDirection = (
Node.get_children,
Node.get_parents,
)
class Tree(Generic[Model_co]):
"""
Represents a Tree in a DependencyGraph.
Each Tree stores a set of Nodes that have at least one degree of relationship with
each other. The Tree and can be traversed with callback based tasks using the
method `process`.
"""
nodes: Set[Node[Model_co]]
_canceled: bool
_processing: bool
def __init__(self, nodes: Set[Node[Model_co]]):
self.nodes = nodes
self._canceled = False
self._processing = False
async def navigate(
self,
nodes: Set[Node[Model_co]],
direction: NavigationDirection,
processed_nodes: asyncio.Queue[Node[Model_co]],
) -> AsyncGenerator[Node, bool]:
"""
Traverses the dependency Tree based on already processed nodes. The caller
should maintain the queue `processed_nodes` with the nodes that have been
processed on the past iteration. This generator will yield a Node instance when
possible, otherwise it will hang until extra nodes processed.
The Tree traversal order will depend on the `direction` specified (either from
roots to leaves or leaves to roots) and the order in which nodes are processed
by the caller.
:param nodes: The set of Node instances to be processed.
:param direction: The direction of navigation, either from LEAVES_TO_ROOTS
(navigates upwards in the tree) or ROOTS_TO_LEAVES (navigates
downwards in the tree).
:param processed_nodes: The queue containing the nodes that have just been
processed by the caller.
:raises TypeError: If the direction is invalid.
:yield: The next node in the navigation.
"""
if direction == NavigationType.LEAVES_TO_ROOTS:
entrypoint = self.get_leafs()
elif direction == NavigationType.ROOTS_TO_LEAVES:
entrypoint = self.get_roots()
else:
raise TypeError("The provided argument `direction` is invalid.")
from_direction, to_direction = direction
next_nodes = deque(n for n in entrypoint if n in nodes)
processed: Optional[Node] = None
while nodes or next_nodes:
if next_nodes:
yield next_nodes.pop()
continue
processed = await processed_nodes.get()
if isinstance(processed, Node) and processed not in nodes:
return
nodes.remove(processed)
nodes_to_direction = to_direction(processed) if processed else set()
for node_to in nodes_to_direction:
nodes_from_direction = from_direction(node_to)
if not nodes_from_direction.intersection(nodes):
next_nodes.appendleft(node_to)
def get_roots(self) -> Set[Node[Model_co]]:
"""
Returns all roots of the Tree.
:return: Set containing Node instances.
"""
return self._get_tree_roots(self.nodes)
def get_leafs(self) -> Set[Node[Model_co]]:
"""
Returns all leaves of the Tree.
:return: Set containing Node instances.
"""
return self._get_tree_leafs(self.nodes)
def get_nodes(self) -> Set[Node[Model_co]]:
"""
Returns a copy of all nodes in the Tree.
:return: Set with Node instances.
"""
return self.nodes.copy()
def cancel(self):
"""
Cancels the processing when `process` is active. Currently running tasks will
be finalized normally, and new tasks will not be scheduled.
"""
if self._processing:
self._canceled = True
@staticmethod
def _get_tree_roots(tree_nodes: Set[Node[Model_co]]) -> Set[Node[Model_co]]:
return set(filter(lambda x: not x.parents, tree_nodes))
@staticmethod
def _get_tree_leafs(tree_nodes: Set[Node[Model_co]]) -> Set[Node[Model_co]]:
return set(filter(lambda x: not x.children, tree_nodes))
class DependencyGraph(Generic[Model_co]):
"""
Represents dependency graph made of a combination of Tree instances.
The DependencyGraph stores a list of Tree instances that contain all the Nodes in
the graph. This module is also responsible to instantiate all Trees and Nodes given
a set of objects of type BaseModel.
"""
trees: List[Tree[Model_co]] = []
def __init__(self, trees: List[Tree[Model_co]]):
self.trees = trees
@classmethod
def generate_from_objects(cls, objects: Set[Model_co]) -> DependencyGraph:
"""
Generates a DependencyGraph instance based on the set of BaseModel instances
`objects`.
:param objects: The set of BaseModel instances.
:return: The DependencyGraph instance.
"""
nodes: Set[Node] = cls._get_connected_nodes(objects)
return cls.generate_from_nodes(nodes)
@classmethod
def generate_from_nodes(cls, nodes: Set[Node[Model_co]]) -> DependencyGraph:
"""
Generates a DependencyGraph instance based on the set of Node instances `nodes`.
:param nodes: The set of Node instances
:return: The DependencyGraph instance.
"""
roots: Set[Node] = Tree._get_tree_roots(nodes)
roots_children: Dict[Node, Set[Node]] = {
root: root.get_children(True) for root in roots
}
trees: List[Tree] = []
# generates trees with intersections
while roots:
root = roots.pop()
intersecting_items = {root}.union(roots_children[root])
intersecting_roots = {root}
for next_root in roots:
next_root_children = {next_root}.union(roots_children[next_root])
if intersecting_items.intersection(next_root_children):
intersecting_items = intersecting_items.union(next_root_children)
intersecting_roots.add(next_root)
trees.append(Tree(intersecting_items))
roots = roots - intersecting_roots
return cls(trees)
@staticmethod
def _get_connected_nodes(objects: Set[Model_co]) -> Set[Node]:
nodes: Dict[str, Node] = {}
# creates nodes
for node_object in objects:
add_node = Node(node_object)
nodes[str(node_object.__hash__())] = add_node
# connects nodes
for node in nodes.values():
for child in node.node_object.get_children(recursive=False):
child_node = nodes.get(str(child.__hash__()), None)
if not child_node:
continue
node.children.add(child_node)
child_node.parents.add(node)
# check for circular dependency
for node in nodes.values():
all_children = node.get_children(recursive=True)
if all_children.intersection(node.get_parents(recursive=False)):
raise RuntimeError("Circular dependency detected")
return cast(Set[Node], nodes.values())
| en | 0.872243 | # As a design decision, the classes in this file are bound to BaseModel # to facilitate the navigation across different objects by utilizing # the already built-in functionality get_children() and to allow monitoring # of model states. If future implementation depends on drawing dependency # graphs, then this file should be modified to decouple from BaseModel # implementation. The node objects in this case will have to be Hashable. Represents a Node in a Tree of a DependencyGraph. Each Node instance stores a BaseModel object that represents a model in the dependency Tree. `parents` and `children` store references to the nodes immediately above and below them in the Tree. Returns the child nodes of the current Node. :param recursive: Indicates whether all child nodes should be returned. Defaults to False. :param children: Contains the nodes that have already been inspected and should be ignored. Used for recursion only. :return: Returns all children nodes, including children of children, if `recursive` is True. The operation stops when all leaves are reached. Returns only the first degree children if False. Returns the parent nodes of the current Node. :param recursive: Indicates whether all parent nodes should be returned. Defaults to False. :param parents: Contains the nodes that have already been inspected and should be ignored. Used for recursion only. :return: Returns all parent nodes, including parents of parents, if `recursive` is True. The operation stops when roots are reached. Returns only the first degree parents if False. Indicates how the Tree navigation should be done. - ROOTS_TO_LEAVES: Starts at the roots of the tree and moves towards the leaves. - LEAVES_TO_ROOTS: Starts at the leaves of the tree and moves towards the roots. Represents a Tree in a DependencyGraph. Each Tree stores a set of Nodes that have at least one degree of relationship with each other. 
The Tree and can be traversed with callback based tasks using the method `process`. Traverses the dependency Tree based on already processed nodes. The caller should maintain the queue `processed_nodes` with the nodes that have been processed on the past iteration. This generator will yield a Node instance when possible, otherwise it will hang until extra nodes processed. The Tree traversal order will depend on the `direction` specified (either from roots to leaves or leaves to roots) and the order in which nodes are processed by the caller. :param nodes: The set of Node instances to be processed. :param direction: The direction of navigation, either from LEAVES_TO_ROOTS (navigates upwards in the tree) or ROOTS_TO_LEAVES (navigates downwards in the tree). :param processed_nodes: The queue containing the nodes that have just been processed by the caller. :raises TypeError: If the direction is invalid. :yield: The next node in the navigation. Returns all roots of the Tree. :return: Set containing Node instances. Returns all leaves of the Tree. :return: Set containing Node instances. Returns a copy of all nodes in the Tree. :return: Set with Node instances. Cancels the processing when `process` is active. Currently running tasks will be finalized normally, and new tasks will not be scheduled. Represents dependency graph made of a combination of Tree instances. The DependencyGraph stores a list of Tree instances that contain all the Nodes in the graph. This module is also responsible to instantiate all Trees and Nodes given a set of objects of type BaseModel. Generates a DependencyGraph instance based on the set of BaseModel instances `objects`. :param objects: The set of BaseModel instances. :return: The DependencyGraph instance. Generates a DependencyGraph instance based on the set of Node instances `nodes`. :param nodes: The set of Node instances :return: The DependencyGraph instance. 
# generates trees with intersections # creates nodes # connects nodes # check for circular dependency | 2.595379 | 3 |
offerTracking/customer/migrations/0001_initial.py | SefaAkdeniz/Offer-Tracking-Project-with-Django | 0 | 6619795 | # Generated by Django 3.0.8 on 2020-09-02 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(max_length=100, verbose_name='Firma Adı')),
('company_type', models.CharField(choices=[(1, 'İş Makinesi Üreticileri'), (2, 'İstif Makineleri Üreticileri'), (3, 'Tarım Makinaları Üreticileri'), (4, 'Hidrolik Pnömatikçiler')], max_length=100, verbose_name='Firma İş Alanı')),
('company_phone', models.CharField(max_length=11, verbose_name='Firma Telefon Numarası')),
('company_city', models.CharField(choices=[(6, 'ANKARA'), (34, 'İSTANBUL'), (35, 'İZMİR'), (17, 'ÇANAKKALE')], max_length=100, verbose_name='Konum')),
('company_adress', models.TextField(verbose_name='Adres')),
('first_contact', models.CharField(choices=[(1, '<NAME>'), (2, '<NAME>'), (3, '<NAME>'), (4, 'E-Mail'), (5, 'Telefon Araması'), (6, 'Diğer')], max_length=100, verbose_name='İlk İletişim')),
('related_person_name1', models.CharField(max_length=100, verbose_name='1.İlgili Kişi Adı')),
('related_person_title1', models.CharField(max_length=100, verbose_name='1.İlgili Ünvanı')),
('related_person_phone1', models.CharField(max_length=11, verbose_name='1.İlgili Kişi Cep Telefon Numarası')),
('related_person_mail1', models.CharField(max_length=100, verbose_name='1.İlgili Kişi E-Posta Adresi')),
('related_person_name2', models.CharField(max_length=100, verbose_name='2.<NAME>')),
('related_person_title2', models.CharField(max_length=100, verbose_name='2.İlgili Ünvanı')),
('related_person_phone2', models.CharField(max_length=11, verbose_name='2.İlgili Kişi Cep Telefon Numarası')),
('related_person_mail2', models.CharField(max_length=100, verbose_name='2.İlgili Kişi E-Posta Adresi')),
('first_contact_date', models.DateTimeField(auto_now_add=True, verbose_name='İlk İlişki Tarihi')),
],
options={
'verbose_name': 'Müşteri',
'verbose_name_plural': 'Müşteriler',
},
),
]
| # Generated by Django 3.0.8 on 2020-09-02 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(max_length=100, verbose_name='Firma Adı')),
('company_type', models.CharField(choices=[(1, 'İş Makinesi Üreticileri'), (2, 'İstif Makineleri Üreticileri'), (3, 'Tarım Makinaları Üreticileri'), (4, 'Hidrolik Pnömatikçiler')], max_length=100, verbose_name='Firma İş Alanı')),
('company_phone', models.CharField(max_length=11, verbose_name='Firma Telefon Numarası')),
('company_city', models.CharField(choices=[(6, 'ANKARA'), (34, 'İSTANBUL'), (35, 'İZMİR'), (17, 'ÇANAKKALE')], max_length=100, verbose_name='Konum')),
('company_adress', models.TextField(verbose_name='Adres')),
('first_contact', models.CharField(choices=[(1, '<NAME>'), (2, '<NAME>'), (3, '<NAME>'), (4, 'E-Mail'), (5, 'Telefon Araması'), (6, 'Diğer')], max_length=100, verbose_name='İlk İletişim')),
('related_person_name1', models.CharField(max_length=100, verbose_name='1.İlgili Kişi Adı')),
('related_person_title1', models.CharField(max_length=100, verbose_name='1.İlgili Ünvanı')),
('related_person_phone1', models.CharField(max_length=11, verbose_name='1.İlgili Kişi Cep Telefon Numarası')),
('related_person_mail1', models.CharField(max_length=100, verbose_name='1.İlgili Kişi E-Posta Adresi')),
('related_person_name2', models.CharField(max_length=100, verbose_name='2.<NAME>')),
('related_person_title2', models.CharField(max_length=100, verbose_name='2.İlgili Ünvanı')),
('related_person_phone2', models.CharField(max_length=11, verbose_name='2.İlgili Kişi Cep Telefon Numarası')),
('related_person_mail2', models.CharField(max_length=100, verbose_name='2.İlgili Kişi E-Posta Adresi')),
('first_contact_date', models.DateTimeField(auto_now_add=True, verbose_name='İlk İlişki Tarihi')),
],
options={
'verbose_name': 'Müşteri',
'verbose_name_plural': 'Müşteriler',
},
),
]
| en | 0.815025 | # Generated by Django 3.0.8 on 2020-09-02 17:12 | 1.908565 | 2 |
src/__init__.py | recohut/recobase | 0 | 6619796 | <reponame>recohut/recobase
from .datasets import ML1MDataset
from .sampling import RandomNegativeSampler
from .dataloader import BertDataloader
DATASETS = {
ML1MDataset.code(): ML1MDataset
}
def dataset_factory(args):
dataset = DATASETS[args.dataset_code]
return dataset(args)
NEGATIVE_SAMPLERS = {
RandomNegativeSampler.code(): RandomNegativeSampler,
}
def negative_sampler_factory(code, train, val, test, user_count, item_count, sample_size, seed, save_folder):
negative_sampler = NEGATIVE_SAMPLERS[code]
return negative_sampler(train, val, test, user_count, item_count, sample_size, seed, save_folder)
DATALOADERS = {
BertDataloader.code(): BertDataloader,
}
def dataloader_factory(args):
dataset = dataset_factory(args)
dataloader = DATALOADERS[args.dataloader_code]
dataloader = dataloader(args, dataset)
train, val, test = dataloader.get_pytorch_dataloaders()
return train, val, test | from .datasets import ML1MDataset
from .sampling import RandomNegativeSampler
from .dataloader import BertDataloader
DATASETS = {
ML1MDataset.code(): ML1MDataset
}
def dataset_factory(args):
dataset = DATASETS[args.dataset_code]
return dataset(args)
NEGATIVE_SAMPLERS = {
RandomNegativeSampler.code(): RandomNegativeSampler,
}
def negative_sampler_factory(code, train, val, test, user_count, item_count, sample_size, seed, save_folder):
negative_sampler = NEGATIVE_SAMPLERS[code]
return negative_sampler(train, val, test, user_count, item_count, sample_size, seed, save_folder)
DATALOADERS = {
BertDataloader.code(): BertDataloader,
}
def dataloader_factory(args):
dataset = dataset_factory(args)
dataloader = DATALOADERS[args.dataloader_code]
dataloader = dataloader(args, dataset)
train, val, test = dataloader.get_pytorch_dataloaders()
return train, val, test | none | 1 | 2.673028 | 3 | |
server/manage.py | NWCalvank/react-python-starter | 0 | 6619797 | <reponame>NWCalvank/react-python-starter
import unittest
from flask.cli import FlaskGroup
from app import create_app, db
from app.api.models.foo import Foo
app = create_app()
cli = FlaskGroup(create_app=create_app)
@cli.command()
def recreate_db():
db.drop_all()
db.create_all()
db.session.commit()
@cli.command()
def create_db():
db.create_all()
db.session.commit()
@cli.command()
def seed_db():
db.session.add(Foo(string_field='Hello World'))
db.session.commit()
@cli.command()
def test():
# Runs the tests without code coverage
tests = unittest.TestLoader().discover('test', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
if __name__ == '__main__':
cli()
| import unittest
from flask.cli import FlaskGroup
from app import create_app, db
from app.api.models.foo import Foo
app = create_app()
cli = FlaskGroup(create_app=create_app)
@cli.command()
def recreate_db():
db.drop_all()
db.create_all()
db.session.commit()
@cli.command()
def create_db():
db.create_all()
db.session.commit()
@cli.command()
def seed_db():
db.session.add(Foo(string_field='Hello World'))
db.session.commit()
@cli.command()
def test():
# Runs the tests without code coverage
tests = unittest.TestLoader().discover('test', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
if __name__ == '__main__':
cli() | en | 0.706286 | # Runs the tests without code coverage | 2.576673 | 3 |
Generic/functions_test.py | RodrigoMattosoSilveira/py-mfc-sim | 0 | 6619798 | <filename>Generic/functions_test.py
import unittest
import Generic.functions as f
class MyTestCase(unittest.TestCase):
orderItems = 0
pickedKitItems = 0
def test_calculate_kit_items_to_pick(self):
self.orderItems = 0
self.pickedKitItems = 0
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
self.orderItems = 1
self.pickedKitItems = 0
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 1) # add assertion here
self.orderItems = 1
self.pickedKitItems = 1
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
self.orderItems = 6
self.pickedKitItems = 0
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 1
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 2
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 3
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 4
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 5
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 1) # add assertion here
self.orderItems = 6
self.pickedKitItems = 6
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
self.orderItems = 6
self.pickedKitItems = 7
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
if __name__ == '__main__':
unittest.main()
| <filename>Generic/functions_test.py
import unittest
import Generic.functions as f
class MyTestCase(unittest.TestCase):
orderItems = 0
pickedKitItems = 0
def test_calculate_kit_items_to_pick(self):
self.orderItems = 0
self.pickedKitItems = 0
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
self.orderItems = 1
self.pickedKitItems = 0
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 1) # add assertion here
self.orderItems = 1
self.pickedKitItems = 1
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
self.orderItems = 6
self.pickedKitItems = 0
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 1
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 2
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 3
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 4
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 2) # add assertion here
self.orderItems = 6
self.pickedKitItems = 5
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 1) # add assertion here
self.orderItems = 6
self.pickedKitItems = 6
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
self.orderItems = 6
self.pickedKitItems = 7
self.assertEqual(f.calculate_kit_items_to_pick(self.orderItems, self.pickedKitItems), 0) # add assertion here
if __name__ == '__main__':
unittest.main()
| en | 0.70604 | # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here # add assertion here | 3.400505 | 3 |
eden/cli/test/config_test.py | jmswen/eden | 0 | 6619799 | <gh_stars>0
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import configparser
import io
import os
import unittest
from pathlib import Path
import toml
import toml.decoder
from eden.test_support.environment_variable import EnvironmentVariableMixin
from eden.test_support.temporary_directory import TemporaryDirectoryMixin
from .. import config as config_mod, configutil, util
from ..config import EdenInstance
from ..configinterpolator import EdenConfigInterpolator
from ..configutil import EdenConfigParser, UnexpectedType
def get_toml_test_file_invalid():
cfg_file = """
[core thisIsNotAllowed]
"""
return cfg_file
def get_toml_test_file_defaults():
cfg_file = """
[core]
systemIgnoreFile = "/etc/eden/gitignore"
ignoreFile = "/home/${USER}/.gitignore"
[clone]
default-revision = "master"
[rage]
reporter = 'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/'
"""
return cfg_file
def get_toml_test_file_fbsource_repo():
cfg_file = """
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource"
["bindmounts fbsource"]
fbcode-buck-out = "fbcode/buck-out"
buck-out = "buck-out"
"""
return cfg_file
def get_toml_test_file_user_rc():
cfg_file = """
[core]
ignoreFile = "/home/${USER}/.gitignore-override"
edenDirectory = "/home/${USER}/.eden"
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource-override"
["bindmounts fbsource"]
fbcode-buck-out = "fbcode/buck-out-override"
["repository git"]
type = "git"
path = "/home/${USER}/src/git/.git"
"""
return cfg_file
class TomlConfigTest(
unittest.TestCase, TemporaryDirectoryMixin, EnvironmentVariableMixin
):
def setUp(self) -> None:
self._test_dir = self.make_temporary_directory()
self._user = "bob"
self._state_dir = os.path.join(self._test_dir, ".eden")
self._etc_eden_dir = os.path.join(self._test_dir, "etc/eden")
self._config_d = os.path.join(self._test_dir, "etc/eden/config.d")
self._home_dir = os.path.join(self._test_dir, "home", self._user)
self._interpolate_dict = {
"USER": self._user,
"USER_ID": "42",
"HOME": self._home_dir,
}
os.mkdir(self._state_dir)
util.mkdir_p(self._config_d)
util.mkdir_p(self._home_dir)
self.unset_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD")
def copy_config_files(self) -> None:
path = os.path.join(self._config_d, "defaults.toml")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_defaults())
path = os.path.join(self._config_d, "fbsource.repo.toml")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_fbsource_repo())
path = os.path.join(self._home_dir, ".edenrc")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_user_rc())
def assert_core_config(self, cfg: EdenInstance) -> None:
self.assertEqual(
cfg.get_config_value("rage.reporter", default=""),
'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/',
)
self.assertEqual(
cfg.get_config_value("core.ignoreFile", default=""),
f"/home/{self._user}/.gitignore-override",
)
self.assertEqual(
cfg.get_config_value("core.systemIgnoreFile", default=""),
"/etc/eden/gitignore",
)
self.assertEqual(
cfg.get_config_value("core.edenDirectory", default=""),
f"/home/{self._user}/.eden",
)
def assert_git_repo_config(self, cfg: EdenInstance) -> None:
cc = cfg.find_config_for_alias("git")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/home/{self._user}/src/git/.git"))
self.assertEqual(cc.scm_type, "git")
self.assertEqual(cc.bind_mounts, {})
self.assertEqual(cc.default_revision, "master")
def assert_fbsource_repo_config(self, cfg: EdenInstance) -> None:
cc = cfg.find_config_for_alias("fbsource")
assert cc is not None
self.assertEqual(
cc.backing_repo, Path(f"/data/users/{self._user}/fbsource-override")
)
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(
cc.bind_mounts,
{"fbcode-buck-out": "fbcode/buck-out-override", "buck-out": "buck-out"},
)
self.assertEqual(cc.default_revision, "master")
def test_load_config(self) -> None:
self.copy_config_files()
cfg = self.get_config()
# Check the various config sections
self.assert_core_config(cfg)
exp_repos = ["fbsource", "git"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assert_fbsource_repo_config(cfg)
self.assert_git_repo_config(cfg)
# Check if test is for toml or cfg by cfg._user_toml_cfg
exp_rc_files = [
Path(self._config_d) / "defaults.toml",
Path(self._config_d) / "fbsource.repo.toml",
Path(self._home_dir) / ".edenrc",
]
self.assertEqual(cfg.get_rc_files(), exp_rc_files)
def test_no_dot_edenrc(self) -> None:
self.copy_config_files()
os.remove(os.path.join(self._home_dir, ".edenrc"))
cfg = self.get_config()
cfg._loadConfig()
exp_repos = ["fbsource"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assertEqual(
cfg.get_config_value("rage.reporter", default=""),
'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/',
)
self.assertEqual(
cfg.get_config_value("core.ignoreFile", default=""),
f"/home/{self._user}/.gitignore",
)
self.assertEqual(
cfg.get_config_value("core.systemIgnoreFile", default=""),
"/etc/eden/gitignore",
)
cc = cfg.find_config_for_alias("fbsource")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/data/users/{self._user}/fbsource"))
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(
cc.bind_mounts,
{"fbcode-buck-out": "fbcode/buck-out", "buck-out": "buck-out"},
)
self.assertEqual(cc.default_revision, "master")
def test_add_existing_repo(self) -> None:
self.copy_config_files()
cfg = self.get_config()
with self.assertRaisesRegex(
config_mod.UsageError,
"repository fbsource already exists. You will need to edit "
"the ~/.edenrc config file by hand to make changes to the "
"repository or remove it.",
):
cfg.add_repository("fbsource", "hg", f"/data/users/{self._user}/fbsource")
def test_add_repo(self) -> None:
self.copy_config_files()
cfg = self.get_config()
cfg.add_repository("fbandroid", "hg", f"/data/users/{self._user}/fbandroid")
# Lets reload our config
cfg = self.get_config()
# Check the various config sections
self.assert_core_config(cfg)
exp_repos = ["fbandroid", "fbsource", "git"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assert_fbsource_repo_config(cfg)
self.assert_git_repo_config(cfg)
# Check the newly added repo
cc = cfg.find_config_for_alias("fbandroid")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/data/users/{self._user}/fbandroid"))
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(cc.bind_mounts, {})
self.assertEqual(cc.default_revision, "master")
def test_missing_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "type".'
)
def test_invalid_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "invalidrepotype"
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" has unsupported type.'
)
def test_empty_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = ""
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "type".'
)
def test_missing_path_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "hg"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "path".'
)
def test_empty_path_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "hg"
path = ""
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "path".'
)
def test_toml_error(self) -> None:
self.copy_config_files()
self.write_user_config(get_toml_test_file_invalid())
cfg = self.get_config()
with self.assertRaises(toml.decoder.TomlDecodeError):
cfg._loadConfig()
def test_get_config_value_returns_default_if_section_is_missing(self) -> None:
self.assertEqual(
self.get_config().get_config_value(
"missing_section.test_option", default="test default"
),
"test default",
)
def test_get_config_value_returns_default_if_option_is_missing(self) -> None:
self.write_user_config(
"""[test_section]
other_option = "test value"
"""
)
self.assertEqual(
self.get_config().get_config_value(
"test_section.missing_option", default="test default"
),
"test default",
)
def test_get_config_value_returns_value_for_string_option(self) -> None:
self.write_user_config(
"""[test_section]
test_option = "test value"
"""
)
self.assertEqual(
self.get_config().get_config_value(
"test_section.test_option", default="test default"
),
"test value",
)
def test_experimental_systemd_is_disabled_by_default(self) -> None:
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_is_enabled_with_environment_variable(self) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "1")
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_is_enabled_with_user_config_setting(self) -> None:
self.write_user_config(
"""[service]
experimental_systemd = true
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_environment_variable_overrides_config(self) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "1")
self.write_user_config(
f"""[service]
experimental_systemd = false
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "0")
self.write_user_config(
f"""[service]
experimental_systemd = true
"""
)
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_empty_experimental_systemd_environment_variable_does_not_override_config(
self
) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "")
self.write_user_config(
f"""[service]
experimental_systemd = true
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "")
self.write_user_config(
f"""[service]
experimental_systemd = false
"""
)
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_user_id_variable_is_set_to_process_uid(self) -> None:
config = self.get_config_without_stub_variables()
self.write_user_config(
"""
[testsection]
testoption = "My user ID is ${USER_ID}."
"""
)
self.assertEqual(
config.get_config_value("testsection.testoption", default=""),
f"My user ID is {os.getuid()}.",
)
def test_default_fallback_systemd_xdg_runtime_dir_is_run_user_uid(self) -> None:
self.assertEqual(
self.get_config().get_fallback_systemd_xdg_runtime_dir(), "/run/user/42"
)
def test_configured_fallback_systemd_xdg_runtime_dir_expands_user_and_user_id(
self
) -> None:
self.write_user_config(
"""
[service]
fallback_systemd_xdg_runtime_dir = "/var/run/${USER}/${USER_ID}"
"""
)
self.assertEqual(
self.get_config().get_fallback_systemd_xdg_runtime_dir(), "/var/run/bob/42"
)
def test_printed_config_is_valid_toml(self) -> None:
self.write_user_config(
"""
[clone]
default-revision = "master"
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
printed_config.seek(0)
parsed_config = toml.load(printed_config)
self.assertIn("clone", parsed_config)
self.assertEqual(parsed_config["clone"].get("default-revision"), "master")
def test_printed_config_expands_variables(self) -> None:
self.write_user_config(
"""
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource"
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
self.assertIn("/data/users/bob/fbsource", printed_config.getvalue())
def test_printed_config_writes_booleans_as_booleans(self) -> None:
self.write_user_config(
"""
[service]
experimental_systemd = true
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
self.assertRegex(printed_config.getvalue(), r"experimental_systemd\s*=\s*true")
def get_config(self) -> EdenInstance:
return EdenInstance(
self._state_dir, self._etc_eden_dir, self._home_dir, self._interpolate_dict
)
def get_config_without_stub_variables(self) -> EdenInstance:
return EdenInstance(
self._state_dir, self._etc_eden_dir, self._home_dir, interpolate_dict=None
)
def write_user_config(self, content: str) -> None:
path = os.path.join(self._home_dir, ".edenrc")
with open(path, "w") as text_file:
text_file.write(content)
class EdenConfigParserTest(unittest.TestCase):
unsupported_value = {"dict of string to string": ""}
def test_loading_config_with_unsupported_type_is_not_an_error(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": self.unsupported_value}})
def test_querying_bool_returns_bool(self) -> None:
for value in [True, False]:
with self.subTest(value=value):
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": value}})
self.assertEqual(
parser.get_bool("test_section", "test_option", default=True), value
)
self.assertEqual(
parser.get_bool("test_section", "test_option", default=False), value
)
def test_querying_bool_with_non_boolean_value_fails(self) -> None:
for value in ["not a boolean", "", "true", "True", 0]:
with self.subTest(value=value):
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_bool("test_section", "test_option", default=False)
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, bool)
def test_querying_bool_with_value_of_unsupported_type_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": self.unsupported_value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_bool("test_section", "test_option", default=False)
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, self.unsupported_value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, bool)
def test_querying_str_with_non_string_value_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": True}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_str("test_section", "test_option", default="")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, True)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, str)
def test_querying_section_str_to_str_returns_mapping(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": "a value", "b": "b value"}})
section = parser.get_section_str_to_str("test_section")
self.assertCountEqual(section, {"a", "b"})
self.assertEqual(section["a"], "a value")
self.assertEqual(section["b"], "b value")
def test_querying_section_str_to_any_fails_if_option_has_unsupported_type(
self
) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"unsupported": self.unsupported_value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_section_str_to_any("test_section")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "unsupported")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, self.unsupported_value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertIsNone(expectation.exception.expected_type)
def test_querying_section_str_to_any_interpolates_options(self) -> None:
parser = EdenConfigParser(
interpolation=EdenConfigInterpolator({"USER": "alice"})
)
parser.read_dict({"test_section": {"test_option": "hello ${USER}"}})
section = parser.get_section_str_to_any("test_section")
self.assertEqual(section.get("test_option"), "hello alice")
def test_querying_section_str_to_any_returns_any_supported_type(self) -> None:
parser = EdenConfigParser()
parser.read_dict(
{
"test_section": {
"bool_option": True,
"string_array_option": ["hello", "world"],
"string_option": "hello",
}
}
)
section = parser.get_section_str_to_any("test_section")
self.assertEqual(section["bool_option"], True)
self.assertEqual(list(section["string_array_option"]), ["hello", "world"])
self.assertEqual(section["string_option"], "hello")
def test_querying_section_str_to_str_with_non_string_value_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": False}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_section_str_to_str("test_section")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "a")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, False)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, str)
def test_querying_section_str_to_str_of_missing_section_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": "a value"}})
with self.assertRaises(configparser.NoSectionError) as expectation:
parser.get_section_str_to_str("not_test_section")
section: str = expectation.exception.section # type: ignore
self.assertEqual(section, "not_test_section")
def test_querying_strs_with_empty_array_returns_empty_sequence(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": []}})
self.assertEqual(
list(
parser.get_strs(
"test_section", "test_option", default=["default value"]
)
),
[],
)
def test_querying_strs_with_array_of_strings_returns_strs(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": ["first", "second", "3rd"]}})
self.assertEqual(
list(parser.get_strs("test_section", "test_option", default=[])),
["first", "second", "3rd"],
)
def test_querying_strs_with_array_of_non_strings_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": [123]}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_strs("test_section", "test_option", default=[])
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, [123])
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, configutil.Strs)
def test_querying_missing_value_as_strs_returns_default(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"bogus_option": []}})
self.assertEqual(
list(
parser.get_strs(
"test_section", "missing_option", default=["default value"]
)
),
["default value"],
)
def test_str_sequences_are_interpolated(self) -> None:
parser = EdenConfigParser(
interpolation=EdenConfigInterpolator({"USER": "alice"})
)
parser.read_dict(
{
"test_section": {
"test_option": ["sudo", "-u", "${USER}", "echo", "Hello, ${USER}!"]
}
}
)
self.assertEqual(
list(parser.get_strs("test_section", "test_option", default=[])),
["sudo", "-u", "alice", "echo", "Hello, alice!"],
)
def test_unexpected_type_error_messages_are_helpful(self) -> None:
self.assertEqual(
'Expected boolean for service.experimental_systemd, but got string: "true"',
str(
UnexpectedType(
section="service",
option="experimental_systemd",
value="true",
expected_type=bool,
)
),
)
self.assertEqual(
"Expected string for repository myrepo.path, but got boolean: true",
str(
UnexpectedType(
section="repository myrepo",
option="path",
value=True,
expected_type=str,
)
),
)
self.assertRegex(
str(
UnexpectedType(
section="section", option="option", value={}, expected_type=None
)
),
r"^Unexpected dict for section.option: \{\s*\}$",
)
self.assertEqual(
"Expected array of strings for service.command, but got array: [ 123,]",
str(
UnexpectedType(
section="service",
option="command",
value=[123],
expected_type=configutil.Strs,
)
),
)
| #!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import configparser
import io
import os
import unittest
from pathlib import Path
import toml
import toml.decoder
from eden.test_support.environment_variable import EnvironmentVariableMixin
from eden.test_support.temporary_directory import TemporaryDirectoryMixin
from .. import config as config_mod, configutil, util
from ..config import EdenInstance
from ..configinterpolator import EdenConfigInterpolator
from ..configutil import EdenConfigParser, UnexpectedType
def get_toml_test_file_invalid():
cfg_file = """
[core thisIsNotAllowed]
"""
return cfg_file
def get_toml_test_file_defaults():
cfg_file = """
[core]
systemIgnoreFile = "/etc/eden/gitignore"
ignoreFile = "/home/${USER}/.gitignore"
[clone]
default-revision = "master"
[rage]
reporter = 'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/'
"""
return cfg_file
def get_toml_test_file_fbsource_repo():
cfg_file = """
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource"
["bindmounts fbsource"]
fbcode-buck-out = "fbcode/buck-out"
buck-out = "buck-out"
"""
return cfg_file
def get_toml_test_file_user_rc():
cfg_file = """
[core]
ignoreFile = "/home/${USER}/.gitignore-override"
edenDirectory = "/home/${USER}/.eden"
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource-override"
["bindmounts fbsource"]
fbcode-buck-out = "fbcode/buck-out-override"
["repository git"]
type = "git"
path = "/home/${USER}/src/git/.git"
"""
return cfg_file
class TomlConfigTest(
unittest.TestCase, TemporaryDirectoryMixin, EnvironmentVariableMixin
):
def setUp(self) -> None:
self._test_dir = self.make_temporary_directory()
self._user = "bob"
self._state_dir = os.path.join(self._test_dir, ".eden")
self._etc_eden_dir = os.path.join(self._test_dir, "etc/eden")
self._config_d = os.path.join(self._test_dir, "etc/eden/config.d")
self._home_dir = os.path.join(self._test_dir, "home", self._user)
self._interpolate_dict = {
"USER": self._user,
"USER_ID": "42",
"HOME": self._home_dir,
}
os.mkdir(self._state_dir)
util.mkdir_p(self._config_d)
util.mkdir_p(self._home_dir)
self.unset_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD")
def copy_config_files(self) -> None:
path = os.path.join(self._config_d, "defaults.toml")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_defaults())
path = os.path.join(self._config_d, "fbsource.repo.toml")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_fbsource_repo())
path = os.path.join(self._home_dir, ".edenrc")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_user_rc())
def assert_core_config(self, cfg: EdenInstance) -> None:
self.assertEqual(
cfg.get_config_value("rage.reporter", default=""),
'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/',
)
self.assertEqual(
cfg.get_config_value("core.ignoreFile", default=""),
f"/home/{self._user}/.gitignore-override",
)
self.assertEqual(
cfg.get_config_value("core.systemIgnoreFile", default=""),
"/etc/eden/gitignore",
)
self.assertEqual(
cfg.get_config_value("core.edenDirectory", default=""),
f"/home/{self._user}/.eden",
)
def assert_git_repo_config(self, cfg: EdenInstance) -> None:
cc = cfg.find_config_for_alias("git")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/home/{self._user}/src/git/.git"))
self.assertEqual(cc.scm_type, "git")
self.assertEqual(cc.bind_mounts, {})
self.assertEqual(cc.default_revision, "master")
def assert_fbsource_repo_config(self, cfg: EdenInstance) -> None:
cc = cfg.find_config_for_alias("fbsource")
assert cc is not None
self.assertEqual(
cc.backing_repo, Path(f"/data/users/{self._user}/fbsource-override")
)
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(
cc.bind_mounts,
{"fbcode-buck-out": "fbcode/buck-out-override", "buck-out": "buck-out"},
)
self.assertEqual(cc.default_revision, "master")
def test_load_config(self) -> None:
self.copy_config_files()
cfg = self.get_config()
# Check the various config sections
self.assert_core_config(cfg)
exp_repos = ["fbsource", "git"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assert_fbsource_repo_config(cfg)
self.assert_git_repo_config(cfg)
# Check if test is for toml or cfg by cfg._user_toml_cfg
exp_rc_files = [
Path(self._config_d) / "defaults.toml",
Path(self._config_d) / "fbsource.repo.toml",
Path(self._home_dir) / ".edenrc",
]
self.assertEqual(cfg.get_rc_files(), exp_rc_files)
def test_no_dot_edenrc(self) -> None:
self.copy_config_files()
os.remove(os.path.join(self._home_dir, ".edenrc"))
cfg = self.get_config()
cfg._loadConfig()
exp_repos = ["fbsource"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assertEqual(
cfg.get_config_value("rage.reporter", default=""),
'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/',
)
self.assertEqual(
cfg.get_config_value("core.ignoreFile", default=""),
f"/home/{self._user}/.gitignore",
)
self.assertEqual(
cfg.get_config_value("core.systemIgnoreFile", default=""),
"/etc/eden/gitignore",
)
cc = cfg.find_config_for_alias("fbsource")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/data/users/{self._user}/fbsource"))
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(
cc.bind_mounts,
{"fbcode-buck-out": "fbcode/buck-out", "buck-out": "buck-out"},
)
self.assertEqual(cc.default_revision, "master")
def test_add_existing_repo(self) -> None:
self.copy_config_files()
cfg = self.get_config()
with self.assertRaisesRegex(
config_mod.UsageError,
"repository fbsource already exists. You will need to edit "
"the ~/.edenrc config file by hand to make changes to the "
"repository or remove it.",
):
cfg.add_repository("fbsource", "hg", f"/data/users/{self._user}/fbsource")
def test_add_repo(self) -> None:
self.copy_config_files()
cfg = self.get_config()
cfg.add_repository("fbandroid", "hg", f"/data/users/{self._user}/fbandroid")
# Lets reload our config
cfg = self.get_config()
# Check the various config sections
self.assert_core_config(cfg)
exp_repos = ["fbandroid", "fbsource", "git"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assert_fbsource_repo_config(cfg)
self.assert_git_repo_config(cfg)
# Check the newly added repo
cc = cfg.find_config_for_alias("fbandroid")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/data/users/{self._user}/fbandroid"))
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(cc.bind_mounts, {})
self.assertEqual(cc.default_revision, "master")
def test_missing_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "type".'
)
def test_invalid_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "invalidrepotype"
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" has unsupported type.'
)
def test_empty_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = ""
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "type".'
)
def test_missing_path_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "hg"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "path".'
)
def test_empty_path_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "hg"
path = ""
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "path".'
)
def test_toml_error(self) -> None:
self.copy_config_files()
self.write_user_config(get_toml_test_file_invalid())
cfg = self.get_config()
with self.assertRaises(toml.decoder.TomlDecodeError):
cfg._loadConfig()
def test_get_config_value_returns_default_if_section_is_missing(self) -> None:
self.assertEqual(
self.get_config().get_config_value(
"missing_section.test_option", default="test default"
),
"test default",
)
def test_get_config_value_returns_default_if_option_is_missing(self) -> None:
self.write_user_config(
"""[test_section]
other_option = "test value"
"""
)
self.assertEqual(
self.get_config().get_config_value(
"test_section.missing_option", default="test default"
),
"test default",
)
def test_get_config_value_returns_value_for_string_option(self) -> None:
self.write_user_config(
"""[test_section]
test_option = "test value"
"""
)
self.assertEqual(
self.get_config().get_config_value(
"test_section.test_option", default="test default"
),
"test value",
)
def test_experimental_systemd_is_disabled_by_default(self) -> None:
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_is_enabled_with_environment_variable(self) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "1")
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_is_enabled_with_user_config_setting(self) -> None:
self.write_user_config(
"""[service]
experimental_systemd = true
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_environment_variable_overrides_config(self) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "1")
self.write_user_config(
f"""[service]
experimental_systemd = false
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "0")
self.write_user_config(
f"""[service]
experimental_systemd = true
"""
)
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_empty_experimental_systemd_environment_variable_does_not_override_config(
self
) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "")
self.write_user_config(
f"""[service]
experimental_systemd = true
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "")
self.write_user_config(
f"""[service]
experimental_systemd = false
"""
)
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_user_id_variable_is_set_to_process_uid(self) -> None:
config = self.get_config_without_stub_variables()
self.write_user_config(
"""
[testsection]
testoption = "My user ID is ${USER_ID}."
"""
)
self.assertEqual(
config.get_config_value("testsection.testoption", default=""),
f"My user ID is {os.getuid()}.",
)
def test_default_fallback_systemd_xdg_runtime_dir_is_run_user_uid(self) -> None:
self.assertEqual(
self.get_config().get_fallback_systemd_xdg_runtime_dir(), "/run/user/42"
)
def test_configured_fallback_systemd_xdg_runtime_dir_expands_user_and_user_id(
self
) -> None:
self.write_user_config(
"""
[service]
fallback_systemd_xdg_runtime_dir = "/var/run/${USER}/${USER_ID}"
"""
)
self.assertEqual(
self.get_config().get_fallback_systemd_xdg_runtime_dir(), "/var/run/bob/42"
)
def test_printed_config_is_valid_toml(self) -> None:
self.write_user_config(
"""
[clone]
default-revision = "master"
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
printed_config.seek(0)
parsed_config = toml.load(printed_config)
self.assertIn("clone", parsed_config)
self.assertEqual(parsed_config["clone"].get("default-revision"), "master")
def test_printed_config_expands_variables(self) -> None:
self.write_user_config(
"""
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource"
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
self.assertIn("/data/users/bob/fbsource", printed_config.getvalue())
def test_printed_config_writes_booleans_as_booleans(self) -> None:
self.write_user_config(
"""
[service]
experimental_systemd = true
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
self.assertRegex(printed_config.getvalue(), r"experimental_systemd\s*=\s*true")
def get_config(self) -> EdenInstance:
return EdenInstance(
self._state_dir, self._etc_eden_dir, self._home_dir, self._interpolate_dict
)
def get_config_without_stub_variables(self) -> EdenInstance:
return EdenInstance(
self._state_dir, self._etc_eden_dir, self._home_dir, interpolate_dict=None
)
def write_user_config(self, content: str) -> None:
path = os.path.join(self._home_dir, ".edenrc")
with open(path, "w") as text_file:
text_file.write(content)
class EdenConfigParserTest(unittest.TestCase):
unsupported_value = {"dict of string to string": ""}
def test_loading_config_with_unsupported_type_is_not_an_error(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": self.unsupported_value}})
def test_querying_bool_returns_bool(self) -> None:
for value in [True, False]:
with self.subTest(value=value):
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": value}})
self.assertEqual(
parser.get_bool("test_section", "test_option", default=True), value
)
self.assertEqual(
parser.get_bool("test_section", "test_option", default=False), value
)
def test_querying_bool_with_non_boolean_value_fails(self) -> None:
for value in ["not a boolean", "", "true", "True", 0]:
with self.subTest(value=value):
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_bool("test_section", "test_option", default=False)
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, bool)
def test_querying_bool_with_value_of_unsupported_type_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": self.unsupported_value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_bool("test_section", "test_option", default=False)
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, self.unsupported_value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, bool)
def test_querying_str_with_non_string_value_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": True}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_str("test_section", "test_option", default="")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, True)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, str)
def test_querying_section_str_to_str_returns_mapping(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": "a value", "b": "b value"}})
section = parser.get_section_str_to_str("test_section")
self.assertCountEqual(section, {"a", "b"})
self.assertEqual(section["a"], "a value")
self.assertEqual(section["b"], "b value")
def test_querying_section_str_to_any_fails_if_option_has_unsupported_type(
self
) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"unsupported": self.unsupported_value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_section_str_to_any("test_section")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "unsupported")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, self.unsupported_value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertIsNone(expectation.exception.expected_type)
def test_querying_section_str_to_any_interpolates_options(self) -> None:
parser = EdenConfigParser(
interpolation=EdenConfigInterpolator({"USER": "alice"})
)
parser.read_dict({"test_section": {"test_option": "hello ${USER}"}})
section = parser.get_section_str_to_any("test_section")
self.assertEqual(section.get("test_option"), "hello alice")
def test_querying_section_str_to_any_returns_any_supported_type(self) -> None:
parser = EdenConfigParser()
parser.read_dict(
{
"test_section": {
"bool_option": True,
"string_array_option": ["hello", "world"],
"string_option": "hello",
}
}
)
section = parser.get_section_str_to_any("test_section")
self.assertEqual(section["bool_option"], True)
self.assertEqual(list(section["string_array_option"]), ["hello", "world"])
self.assertEqual(section["string_option"], "hello")
def test_querying_section_str_to_str_with_non_string_value_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": False}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_section_str_to_str("test_section")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "a")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, False)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, str)
def test_querying_section_str_to_str_of_missing_section_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": "a value"}})
with self.assertRaises(configparser.NoSectionError) as expectation:
parser.get_section_str_to_str("not_test_section")
section: str = expectation.exception.section # type: ignore
self.assertEqual(section, "not_test_section")
def test_querying_strs_with_empty_array_returns_empty_sequence(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": []}})
self.assertEqual(
list(
parser.get_strs(
"test_section", "test_option", default=["default value"]
)
),
[],
)
def test_querying_strs_with_array_of_strings_returns_strs(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": ["first", "second", "3rd"]}})
self.assertEqual(
list(parser.get_strs("test_section", "test_option", default=[])),
["first", "second", "3rd"],
)
def test_querying_strs_with_array_of_non_strings_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": [123]}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_strs("test_section", "test_option", default=[])
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, [123])
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, configutil.Strs)
def test_querying_missing_value_as_strs_returns_default(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"bogus_option": []}})
self.assertEqual(
list(
parser.get_strs(
"test_section", "missing_option", default=["default value"]
)
),
["default value"],
)
def test_str_sequences_are_interpolated(self) -> None:
parser = EdenConfigParser(
interpolation=EdenConfigInterpolator({"USER": "alice"})
)
parser.read_dict(
{
"test_section": {
"test_option": ["sudo", "-u", "${USER}", "echo", "Hello, ${USER}!"]
}
}
)
self.assertEqual(
list(parser.get_strs("test_section", "test_option", default=[])),
["sudo", "-u", "alice", "echo", "Hello, alice!"],
)
def test_unexpected_type_error_messages_are_helpful(self) -> None:
self.assertEqual(
'Expected boolean for service.experimental_systemd, but got string: "true"',
str(
UnexpectedType(
section="service",
option="experimental_systemd",
value="true",
expected_type=bool,
)
),
)
self.assertEqual(
"Expected string for repository myrepo.path, but got boolean: true",
str(
UnexpectedType(
section="repository myrepo",
option="path",
value=True,
expected_type=str,
)
),
)
self.assertRegex(
str(
UnexpectedType(
section="section", option="option", value={}, expected_type=None
)
),
r"^Unexpected dict for section.option: \{\s*\}$",
)
self.assertEqual(
"Expected array of strings for service.command, but got array: [ 123,]",
str(
UnexpectedType(
section="service",
option="command",
value=[123],
expected_type=configutil.Strs,
)
),
) | en | 0.57939 | #!/usr/bin/env python3 # # Copyright (c) 2016-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. [core thisIsNotAllowed] [core] systemIgnoreFile = "/etc/eden/gitignore" ignoreFile = "/home/${USER}/.gitignore" [clone] default-revision = "master" [rage] reporter = 'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/' ["repository fbsource"] type = "hg" path = "/data/users/${USER}/fbsource" ["bindmounts fbsource"] fbcode-buck-out = "fbcode/buck-out" buck-out = "buck-out" [core] ignoreFile = "/home/${USER}/.gitignore-override" edenDirectory = "/home/${USER}/.eden" ["repository fbsource"] type = "hg" path = "/data/users/${USER}/fbsource-override" ["bindmounts fbsource"] fbcode-buck-out = "fbcode/buck-out-override" ["repository git"] type = "git" path = "/home/${USER}/src/git/.git" # Check the various config sections # Check if test is for toml or cfg by cfg._user_toml_cfg # Lets reload our config # Check the various config sections # Check the newly added repo ["repository myrepo"] path = "/tmp/myrepo" ["repository myrepo"] type = "invalidrepotype" path = "/tmp/myrepo" ["repository myrepo"] type = "" path = "/tmp/myrepo" ["repository myrepo"] type = "hg" ["repository myrepo"] type = "hg" path = "" [test_section] other_option = "test value" [test_section] test_option = "test value" [service] experimental_systemd = true [service] experimental_systemd = false [service] experimental_systemd = true [service] experimental_systemd = true [service] experimental_systemd = false [testsection] testoption = "My user ID is ${USER_ID}." 
[service] fallback_systemd_xdg_runtime_dir = "/var/run/${USER}/${USER_ID}" [clone] default-revision = "master" ["repository fbsource"] type = "hg" path = "/data/users/${USER}/fbsource" [service] experimental_systemd = true # pyre-fixme[16]: `_E` has no attribute `section`. # pyre-fixme[16]: `_E` has no attribute `option`. # pyre-fixme[16]: `_E` has no attribute `value`. # pyre-fixme[16]: `_E` has no attribute `expected_type`. # pyre-fixme[16]: `_E` has no attribute `section`. # pyre-fixme[16]: `_E` has no attribute `option`. # pyre-fixme[16]: `_E` has no attribute `value`. # pyre-fixme[16]: `_E` has no attribute `expected_type`. # pyre-fixme[16]: `_E` has no attribute `section`. # pyre-fixme[16]: `_E` has no attribute `option`. # pyre-fixme[16]: `_E` has no attribute `value`. # pyre-fixme[16]: `_E` has no attribute `expected_type`. # pyre-fixme[16]: `_E` has no attribute `section`. # pyre-fixme[16]: `_E` has no attribute `option`. # pyre-fixme[16]: `_E` has no attribute `value`. # pyre-fixme[16]: `_E` has no attribute `expected_type`. # pyre-fixme[16]: `_E` has no attribute `section`. # pyre-fixme[16]: `_E` has no attribute `option`. # pyre-fixme[16]: `_E` has no attribute `value`. # pyre-fixme[16]: `_E` has no attribute `expected_type`. # type: ignore # pyre-fixme[16]: `_E` has no attribute `section`. # pyre-fixme[16]: `_E` has no attribute `option`. # pyre-fixme[16]: `_E` has no attribute `value`. # pyre-fixme[16]: `_E` has no attribute `expected_type`. | 1.728832 | 2 |
app/forms.py | gcmaciel/django-widgets-tutorial | 0 | 6619800 | <filename>app/forms.py
from django import forms
from django.forms import ModelForm, TextInput, EmailInput
from .models import User
class UserInfoForm(ModelForm):
class Meta:
model = User
fields = ['name', 'email']
widgets = {
'name': TextInput(attrs={
'class': "form-control",
'style': 'max-width: 300px;',
'placeholder': 'Name'
}),
'email': EmailInput(attrs={
'class': "form-control",
'style': 'max-width: 300px;',
'placeholder': 'Email'
})
}
| <filename>app/forms.py
from django import forms
from django.forms import ModelForm, TextInput, EmailInput
from .models import User
class UserInfoForm(ModelForm):
class Meta:
model = User
fields = ['name', 'email']
widgets = {
'name': TextInput(attrs={
'class': "form-control",
'style': 'max-width: 300px;',
'placeholder': 'Name'
}),
'email': EmailInput(attrs={
'class': "form-control",
'style': 'max-width: 300px;',
'placeholder': 'Email'
})
}
| none | 1 | 2.361282 | 2 | |
neutron_lib/constants.py | rolaya/neutron-lib | 0 | 6619801 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(salv-orlando): Verify if a single set of operational
# status constants is achievable
NET_STATUS_ACTIVE = 'ACTIVE'
NET_STATUS_BUILD = 'BUILD'
NET_STATUS_DOWN = 'DOWN'
NET_STATUS_ERROR = 'ERROR'
PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_BUILD = 'BUILD'
PORT_STATUS_DOWN = 'DOWN'
PORT_STATUS_ERROR = 'ERROR'
PORT_STATUS_NOTAPPLICABLE = 'N/A'
FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
FLOATINGIP_STATUS_DOWN = 'DOWN'
FLOATINGIP_STATUS_ERROR = 'ERROR'
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
CREATED = "CREATED"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
DEVICE_OWNER_COMPUTE_PREFIX = "compute:"
DEVICE_OWNER_NETWORK_PREFIX = "network:"
DEVICE_OWNER_NEUTRON_PREFIX = "neutron:"
DEVICE_OWNER_BAREMETAL_PREFIX = "baremetal:"
DEVICE_OWNER_ROUTER_HA_INTF = (DEVICE_OWNER_NETWORK_PREFIX +
"router_ha_interface")
DEVICE_OWNER_HA_REPLICATED_INT = (DEVICE_OWNER_NETWORK_PREFIX +
"ha_router_replicated_interface")
DEVICE_OWNER_ROUTER_INTF = DEVICE_OWNER_NETWORK_PREFIX + "router_interface"
DEVICE_OWNER_ROUTER_GW = DEVICE_OWNER_NETWORK_PREFIX + "router_gateway"
DEVICE_OWNER_FLOATINGIP = DEVICE_OWNER_NETWORK_PREFIX + "floatingip"
DEVICE_OWNER_DHCP = DEVICE_OWNER_NETWORK_PREFIX + "dhcp"
DEVICE_OWNER_DVR_INTERFACE = (DEVICE_OWNER_NETWORK_PREFIX +
"router_interface_distributed")
DEVICE_OWNER_AGENT_GW = (DEVICE_OWNER_NETWORK_PREFIX +
"floatingip_agent_gateway")
DEVICE_OWNER_ROUTER_SNAT = (DEVICE_OWNER_NETWORK_PREFIX +
"router_centralized_snat")
# TODO(johnsom) Remove after these stop being used. Neutron-LBaaS is now
# retired (train) and these should no longer be necessary.
DEVICE_OWNER_LOADBALANCER = DEVICE_OWNER_NEUTRON_PREFIX + "LOADBALANCER"
DEVICE_OWNER_LOADBALANCERV2 = DEVICE_OWNER_NEUTRON_PREFIX + "LOADBALANCERV2"
DEVICE_OWNER_PREFIXES = (DEVICE_OWNER_NETWORK_PREFIX,
DEVICE_OWNER_NEUTRON_PREFIX)
# Collection used to identify devices owned by router interfaces.
# DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included.
ROUTER_INTERFACE_OWNERS = (DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_HA_REPLICATED_INT,
DEVICE_OWNER_DVR_INTERFACE)
ROUTER_INTERFACE_OWNERS_SNAT = (DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_HA_REPLICATED_INT,
DEVICE_OWNER_DVR_INTERFACE,
DEVICE_OWNER_ROUTER_SNAT)
DEVICE_ID_RESERVED_DHCP_PORT = 'reserved_dhcp_port'
FLOATINGIP_KEY = '_floatingips'
PORT_FORWARDING_FLOATINGIP_KEY = '_pf_floatingips'
INTERFACE_KEY = '_interfaces'
HA_INTERFACE_KEY = '_ha_interface'
IPv4 = 'IPv4'
IPv6 = 'IPv6'
IP_VERSION_4 = 4
IP_VERSION_6 = 6
IPv4_BITS = 32
IPv6_BITS = 128
INVALID_MAC_ADDRESSES = ['00:00:00:00:00:00', 'FF:FF:FF:FF:FF:FF']
IPv4_ANY = '0.0.0.0/0'
IPv6_ANY = '::/0'
IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY}
IPv6_LLA_PREFIX = 'fe80::/64'
DHCP_CLIENT_PORT = 67
DHCP_RESPONSE_PORT = 68
DHCPV6_CLIENT_PORT = 546
DHCPV6_RESPONSE_PORT = 547
FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0')
AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_METERING = 'Metering agent'
AGENT_TYPE_METADATA = 'Metadata agent'
AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent'
AGENT_TYPE_MACVTAP = 'Macvtap agent'
L2_AGENT_TOPIC = 'N/A'
L3_AGENT_MODE_DVR = 'dvr'
L3_AGENT_MODE_DVR_SNAT = 'dvr_snat'
L3_AGENT_MODE_LEGACY = 'legacy'
L3_AGENT_MODE = 'agent_mode'
L3_AGENT_MODE_DVR_NO_EXTERNAL = 'dvr_no_external'
DVR_SNAT_BOUND = 'dvr_snat_bound'
PORT_BINDING_EXT_ALIAS = 'binding'
L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
L3_HA_MODE_EXT_ALIAS = 'l3-ha'
SUBNET_ALLOCATION_EXT_ALIAS = 'subnet_allocation'
# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_AH = 'ah'
PROTO_NAME_DCCP = 'dccp'
PROTO_NAME_EGP = 'egp'
PROTO_NAME_ESP = 'esp'
PROTO_NAME_GRE = 'gre'
PROTO_NAME_HOPOPT = 'hopopt'
PROTO_NAME_ICMP = 'icmp'
PROTO_NAME_IGMP = 'igmp'
PROTO_NAME_IP = 'ip'
PROTO_NAME_IPIP = 'ipip'
PROTO_NAME_IPV6_ENCAP = 'ipv6-encap'
PROTO_NAME_IPV6_FRAG = 'ipv6-frag'
PROTO_NAME_IPV6_ICMP = 'ipv6-icmp'
# For backward-compatibility of security group rule API, we keep the old value
# for IPv6 ICMP. It should be clean up in the future.
PROTO_NAME_IPV6_ICMP_LEGACY = 'icmpv6'
PROTO_NAME_IPV6_NONXT = 'ipv6-nonxt'
PROTO_NAME_IPV6_OPTS = 'ipv6-opts'
PROTO_NAME_IPV6_ROUTE = 'ipv6-route'
PROTO_NAME_OSPF = 'ospf'
PROTO_NAME_PGM = 'pgm'
PROTO_NAME_RSVP = 'rsvp'
PROTO_NAME_SCTP = 'sctp'
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_UDP = 'udp'
PROTO_NAME_UDPLITE = 'udplite'
PROTO_NAME_VRRP = 'vrrp'
PROTO_NUM_AH = 51
PROTO_NUM_DCCP = 33
PROTO_NUM_EGP = 8
PROTO_NUM_ESP = 50
PROTO_NUM_GRE = 47
PROTO_NUM_HOPOPT = 0
PROTO_NUM_ICMP = 1
PROTO_NUM_IGMP = 2
PROTO_NUM_IP = 0
PROTO_NUM_IPIP = 4
PROTO_NUM_IPV6_ENCAP = 41
PROTO_NUM_IPV6_FRAG = 44
PROTO_NUM_IPV6_ICMP = 58
PROTO_NUM_IPV6_NONXT = 59
PROTO_NUM_IPV6_OPTS = 60
PROTO_NUM_IPV6_ROUTE = 43
PROTO_NUM_OSPF = 89
PROTO_NUM_PGM = 113
PROTO_NUM_RSVP = 46
PROTO_NUM_SCTP = 132
PROTO_NUM_TCP = 6
PROTO_NUM_UDP = 17
PROTO_NUM_UDPLITE = 136
PROTO_NUM_VRRP = 112
IP_PROTOCOL_MAP = {PROTO_NAME_AH: PROTO_NUM_AH,
PROTO_NAME_DCCP: PROTO_NUM_DCCP,
PROTO_NAME_EGP: PROTO_NUM_EGP,
PROTO_NAME_ESP: PROTO_NUM_ESP,
PROTO_NAME_GRE: PROTO_NUM_GRE,
PROTO_NAME_HOPOPT: PROTO_NUM_HOPOPT,
PROTO_NAME_ICMP: PROTO_NUM_ICMP,
PROTO_NAME_IGMP: PROTO_NUM_IGMP,
PROTO_NAME_IP: PROTO_NUM_IP,
PROTO_NAME_IPIP: PROTO_NUM_IPIP,
PROTO_NAME_IPV6_ENCAP: PROTO_NUM_IPV6_ENCAP,
PROTO_NAME_IPV6_FRAG: PROTO_NUM_IPV6_FRAG,
PROTO_NAME_IPV6_ICMP: PROTO_NUM_IPV6_ICMP,
# For backward-compatibility of security group rule API
PROTO_NAME_IPV6_ICMP_LEGACY: PROTO_NUM_IPV6_ICMP,
PROTO_NAME_IPV6_NONXT: PROTO_NUM_IPV6_NONXT,
PROTO_NAME_IPV6_OPTS: PROTO_NUM_IPV6_OPTS,
PROTO_NAME_IPV6_ROUTE: PROTO_NUM_IPV6_ROUTE,
PROTO_NAME_OSPF: PROTO_NUM_OSPF,
PROTO_NAME_PGM: PROTO_NUM_PGM,
PROTO_NAME_RSVP: PROTO_NUM_RSVP,
PROTO_NAME_SCTP: PROTO_NUM_SCTP,
PROTO_NAME_TCP: PROTO_NUM_TCP,
PROTO_NAME_UDP: PROTO_NUM_UDP,
PROTO_NAME_UDPLITE: PROTO_NUM_UDPLITE,
PROTO_NAME_VRRP: PROTO_NUM_VRRP}
# Note that this differs from IP_PROTOCOL_MAP because iptables refers to IPv6
# ICMP as 'icmp6' whereas it is 'ipv6-icmp' in IP_PROTOCOL_MAP.
IPTABLES_PROTOCOL_MAP = {PROTO_NAME_DCCP: 'dccp',
PROTO_NAME_ICMP: 'icmp',
PROTO_NAME_IPV6_ICMP: 'icmp6',
PROTO_NAME_SCTP: 'sctp',
PROTO_NAME_TCP: 'tcp',
PROTO_NAME_UDP: 'udp'}
# IP header length
IP_HEADER_LENGTH = {
4: 20,
6: 40,
}
# ICMPv6 types:
# Destination Unreachable (1)
ICMPV6_TYPE_DEST_UNREACH = 1
# Packet Too Big (2)
ICMPV6_TYPE_PKT_TOOBIG = 2
# Time Exceeded (3)
ICMPV6_TYPE_TIME_EXCEED = 3
# Parameter Problem (4)
ICMPV6_TYPE_PARAMPROB = 4
# Echo Request (128)
ICMPV6_TYPE_ECHO_REQUEST = 128
# Echo Reply (129)
ICMPV6_TYPE_ECHO_REPLY = 129
# Multicast Listener Query (130)
ICMPV6_TYPE_MLD_QUERY = 130
# Multicast Listener Report (131)
ICMPV6_TYPE_MLD_REPORT = 131
# Multicast Listener Done (132)
ICMPV6_TYPE_MLD_DONE = 132
# Router Solicitation (133)
ICMPV6_TYPE_RS = 133
# Router Advertisement (134)
ICMPV6_TYPE_RA = 134
# Neighbor Solicitation (135)
ICMPV6_TYPE_NS = 135
# Neighbor Advertisement (136)
ICMPV6_TYPE_NA = 136
# Multicast Listener v2 Report (143)
ICMPV6_TYPE_MLD2_REPORT = 143
# List of ICMPv6 types that should be allowed from the unspecified address for
# Duplicate Address Detection:
ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES = [ICMPV6_TYPE_MLD_REPORT,
ICMPV6_TYPE_NS,
ICMPV6_TYPE_MLD2_REPORT]
# Human-readable ID to which the subnetpool ID should be set to
# indicate that IPv6 Prefix Delegation is enabled for a given subnetpool
IPV6_PD_POOL_ID = 'prefix_delegation'
# Device names start with "tap"
TAP_DEVICE_PREFIX = 'tap'
# Device names start with "macvtap"
MACVTAP_DEVICE_PREFIX = 'macvtap'
# Linux interface max length
DEVICE_NAME_MAX_LEN = 15
# Time format
ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
IPV6_SLAAC = 'slaac'
IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]
ACTIVE_PENDING_STATUSES = (
ACTIVE,
PENDING_CREATE,
PENDING_UPDATE
)
# Network Type constants
TYPE_FLAT = 'flat'
TYPE_GENEVE = 'geneve'
TYPE_GRE = 'gre'
TYPE_LOCAL = 'local'
TYPE_VXLAN = 'vxlan'
TYPE_VLAN = 'vlan'
TYPE_NONE = 'none'
# List of supported network segment range types
NETWORK_SEGMENT_RANGE_TYPES = [TYPE_VLAN, TYPE_VXLAN, TYPE_GRE, TYPE_GENEVE]
# Values for network_type
# For VLAN Network
MIN_VLAN_TAG = 1
MAX_VLAN_TAG = 4094
# For Geneve Tunnel
MIN_GENEVE_VNI = 1
MAX_GENEVE_VNI = 2 ** 24 - 1
# For GRE Tunnel
MIN_GRE_ID = 1
MAX_GRE_ID = 2 ** 32 - 1
# For VXLAN Tunnel
MIN_VXLAN_VNI = 1
MAX_VXLAN_VNI = 2 ** 24 - 1
VXLAN_UDP_PORT = 4789
# Overlay (tunnel) protocol overhead
GENEVE_ENCAP_MIN_OVERHEAD = 30
GRE_ENCAP_OVERHEAD = 22
VXLAN_ENCAP_OVERHEAD = 30
# For DNS extension
DNS_DOMAIN_DEFAULT = 'openstacklocal.'
DNS_LABEL_KEYWORDS = ['project_id', 'project_name', 'user_name', 'user_id']
DNS_LABEL_MAX_LEN = 63
DNS_LABEL_REGEX = "^([a-z0-9-]{1,%d}|%s)$" % (
DNS_LABEL_MAX_LEN,
'<' + '>|<'.join(DNS_LABEL_KEYWORDS) + '>')
# max value for TCP, UDP, SCTP ports
PORT_MAX = 2**16 - 1
VALID_DSCP_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34,
36, 38, 40, 46, 48, 56]
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
VALID_DIRECTIONS = (INGRESS_DIRECTION, EGRESS_DIRECTION)
PROVISIONAL_IPV6_PD_PREFIX = '::/64'
# Traffic control
TC_QDISC_TYPE_HTB = 'htb'
TC_QDISC_TYPE_TBF = 'tbf'
TC_QDISC_TYPE_INGRESS = 'ingress'
TC_QDISC_TYPES = (TC_QDISC_TYPE_HTB, TC_QDISC_TYPE_TBF, TC_QDISC_TYPE_INGRESS)
TC_QDISC_INGRESS_ID = 'ffff:'
TC_QDISC_PARENTS = {'root': 0xffffffff,
'ingress': 0xfffffff1}
class Sentinel(object):
"""A constant object that does not change even when copied."""
def __deepcopy__(self, memo):
# Always return the same object because this is essentially a constant.
return self
def __copy__(self):
# called via copy.copy(x)
return self
#############################
# Attribute related constants
#############################
ATTR_NOT_SPECIFIED = Sentinel()
DICT_POPULATE_DEFAULTS = 'dict_populate_defaults'
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
SHARED = 'shared'
##########################
# Device related constants
##########################
# vhost-user device names start with "vhu"
VHOST_USER_DEVICE_PREFIX = 'vhu'
# The vswitch side of a veth pair for a nova iptables filter setup
VETH_DEVICE_PREFIX = 'qvo'
# prefix for SNAT interface in DVR
SNAT_INT_DEV_PREFIX = 'sg-'
ROUTER_PORT_OWNERS = ROUTER_INTERFACE_OWNERS_SNAT + (DEVICE_OWNER_ROUTER_GW,)
ROUTER_STATUS_ACTIVE = 'ACTIVE'
ROUTER_STATUS_ALLOCATING = 'ALLOCATING'
ROUTER_STATUS_ERROR = 'ERROR'
VALID_ROUTER_STATUS = (ROUTER_STATUS_ACTIVE,
ROUTER_STATUS_ALLOCATING,
ROUTER_STATUS_ERROR)
HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'
HA_PORT_NAME = 'HA port tenant %s'
HA_ROUTER_STATE_ACTIVE = 'active'
HA_ROUTER_STATE_STANDBY = 'standby'
HA_ROUTER_STATE_UNKNOWN = 'unknown'
VALID_HA_STATES = (HA_ROUTER_STATE_ACTIVE, HA_ROUTER_STATE_STANDBY,
HA_ROUTER_STATE_UNKNOWN)
PAGINATION_INFINITE = 'infinite'
SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'
ETHERTYPE_NAME_ARP = 'arp'
ETHERTYPE_ARP = 0x0806
ETHERTYPE_RARP = 0x8035
ETHERTYPE_IP = 0x0800
ETHERTYPE_IPV6 = 0x86DD
IP_PROTOCOL_NAME_ALIASES = {PROTO_NAME_IPV6_ICMP_LEGACY:
PROTO_NAME_IPV6_ICMP}
# We only want one mapping from '58' to 'ipv6-icmp' since that is the
# normalized string, the name to number mapping can have both
IP_PROTOCOL_NUM_TO_NAME_MAP = ({str(v): k for k, v in IP_PROTOCOL_MAP.items()
if k != PROTO_NAME_IPV6_ICMP_LEGACY})
# When using iptables-save we specify '-p {proto}',
# but sometimes those values are not identical. This is a map
# of known protocol numbers that require a name to be used and
# protocol names that require a different name to be used,
# because that is how iptables-save will display them.
#
# This is how the list was created, so there is a possibility
# it will need to be updated in the future:
#
# $ for num in {0..255}; do iptables -A INPUT -p $num; done
# $ iptables-save
#
# These cases are special, and were found by inspection:
# - 'ipv6-encap' uses 'ipv6'
# - 'icmpv6' uses 'ipv6-icmp'
# - 'pgm' uses '113' instead of its name
# - protocol '0' uses no -p argument
IPTABLES_PROTOCOL_NAME_MAP = {PROTO_NAME_IPV6_ENCAP: 'ipv6',
PROTO_NAME_IPV6_ICMP_LEGACY:
'ipv6-icmp',
PROTO_NAME_PGM: '113',
'0': None,
'1': 'icmp',
'2': 'igmp',
'3': 'ggp',
'4': 'ipencap',
'5': 'st',
'6': 'tcp',
'8': 'egp',
'9': 'igp',
'12': 'pup',
'17': 'udp',
'20': 'hmp',
'22': 'xns-idp',
'27': 'rdp',
'29': 'iso-tp4',
'33': 'dccp',
'36': 'xtp',
'37': 'ddp',
'38': 'idpr-cmtp',
'41': 'ipv6',
'43': 'ipv6-route',
'44': 'ipv6-frag',
'45': 'idrp',
'46': 'rsvp',
'47': 'gre',
'50': 'esp',
'51': 'ah',
'57': 'skip',
'58': 'ipv6-icmp',
'59': 'ipv6-nonxt',
'60': 'ipv6-opts',
'73': 'rspf',
'81': 'vmtp',
'88': 'eigrp',
'89': 'ospf',
'93': 'ax.25',
'94': 'ipip',
'97': 'etherip',
'98': 'encap',
'103': 'pim',
'108': 'ipcomp',
'112': 'vrrp',
'115': 'l2tp',
'124': 'isis',
'132': 'sctp',
'133': 'fc',
'135': 'mobility-header',
'136': 'udplite',
'137': 'mpls-in-ip',
'138': 'manet',
'139': 'hip',
'140': 'shim6',
'141': 'wesp',
'142': 'rohc'}
# A length of a iptables chain name must be less than or equal to 11
# characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_IPTABLES_CHAIN_LEN_WRAP = 11
MAX_IPTABLES_CHAIN_LEN_NOWRAP = 28
# Timeout in seconds for getting an IPv6 LLA
LLA_TASK_TIMEOUT = 40
# length of all device prefixes (e.g. qvo, tap, qvb)
LINUX_DEV_PREFIX_LEN = 3
# must be shorter than linux IFNAMSIZ (which is 16)
LINUX_DEV_LEN = 14
# Possible prefixes to partial port IDs in interface names used by the OVS,
# Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the
# 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details.
INTERFACE_PREFIXES = (TAP_DEVICE_PREFIX,
VETH_DEVICE_PREFIX,
SNAT_INT_DEV_PREFIX)
ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
# TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports
# Targets with multiple namespaces. Neutron will then implement callbacks
# for its RPC clients in order to support rolling upgrades.
# RPC Interface for agents to call DHCP API implemented on the plugin side
RPC_NAMESPACE_DHCP_PLUGIN = None
# RPC interface for the metadata service to get info from the plugin side
RPC_NAMESPACE_METADATA = None
# RPC interface for agent to plugin security group API
RPC_NAMESPACE_SECGROUP = None
# RPC interface for agent to plugin DVR api
RPC_NAMESPACE_DVR = None
# RPC interface for reporting state back to the plugin
RPC_NAMESPACE_STATE = None
# RPC interface for agent to plugin resources API
RPC_NAMESPACE_RESOURCES = None
# Default network MTU value when not configured
DEFAULT_NETWORK_MTU = 1500
IPV6_MIN_MTU = 1280
ROUTER_MARK_MASK = "0xffff"
VALID_ETHERTYPES = (IPv4, IPv6)
IP_ALLOWED_VERSIONS = [IP_VERSION_4, IP_VERSION_6]
PORT_RANGE_MIN = 1
PORT_RANGE_MAX = 65535
ETHERTYPE_MIN = 0
ETHERTYPE_MAX = 65535
DHCPV6_CLIENT_PORT = 546
# Configuration values for accept_ra sysctl, copied from linux kernel
# networking (netdev) tree, file Documentation/networking/ip-sysctl.txt
#
# Possible values are:
# 0 Do not accept Router Advertisements.
# 1 Accept Router Advertisements if forwarding is disabled.
# 2 Overrule forwarding behaviour. Accept Router Advertisements
# even if forwarding is enabled.
ACCEPT_RA_DISABLED = 0
ACCEPT_RA_WITHOUT_FORWARDING = 1
ACCEPT_RA_WITH_FORWARDING = 2
# Some components communicate using private address ranges, define
# them all here. These address ranges should not cause any issues
# even if they overlap since they are used in disjoint namespaces,
# but for now they are unique.
# We define the metadata cidr since it falls in the range.
PRIVATE_CIDR_RANGE = '169.254.0.0/16'
DVR_FIP_LL_CIDR = '169.254.64.0/18'
L3_HA_NET_CIDR = '169.254.192.0/18'
METADATA_CIDR = '169.254.169.254/32'
# The only defined IpamAllocation status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
IPAM_ALLOCATION_STATUS_ALLOCATED = 'ALLOCATED'
VALID_IPAM_ALLOCATION_STATUSES = (IPAM_ALLOCATION_STATUS_ALLOCATED,)
# Port binding states for Live Migration
PORT_BINDING_STATUSES = (ACTIVE,
INACTIVE)
VALID_FLOATINGIP_STATUS = (FLOATINGIP_STATUS_ACTIVE,
FLOATINGIP_STATUS_DOWN,
FLOATINGIP_STATUS_ERROR)
# Floating IP host binding states
FLOATING_IP_HOST_UNBOUND = "FLOATING_IP_HOST_UNBOUND"
FLOATING_IP_HOST_NEEDS_BINDING = "FLOATING_IP_HOST_NEEDS_BINDING"
# Possible types of values (e.g. in QoS rule types)
VALUES_TYPE_CHOICES = "choices"
VALUES_TYPE_RANGE = "range"
# Units base
SI_BASE = 1000
IEC_BASE = 1024
# Port bindings handling
NO_ACTIVE_BINDING = 'no_active_binding'
EXT_PARENT_PREFIX = 'ext_parent'
RP_BANDWIDTHS = 'resource_provider_bandwidths'
RP_INVENTORY_DEFAULTS = 'resource_provider_inventory_defaults'
| # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(salv-orlando): Verify if a single set of operational
# status constants is achievable
NET_STATUS_ACTIVE = 'ACTIVE'
NET_STATUS_BUILD = 'BUILD'
NET_STATUS_DOWN = 'DOWN'
NET_STATUS_ERROR = 'ERROR'
PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_BUILD = 'BUILD'
PORT_STATUS_DOWN = 'DOWN'
PORT_STATUS_ERROR = 'ERROR'
PORT_STATUS_NOTAPPLICABLE = 'N/A'
FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
FLOATINGIP_STATUS_DOWN = 'DOWN'
FLOATINGIP_STATUS_ERROR = 'ERROR'
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
CREATED = "CREATED"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
DEVICE_OWNER_COMPUTE_PREFIX = "compute:"
DEVICE_OWNER_NETWORK_PREFIX = "network:"
DEVICE_OWNER_NEUTRON_PREFIX = "neutron:"
DEVICE_OWNER_BAREMETAL_PREFIX = "baremetal:"
DEVICE_OWNER_ROUTER_HA_INTF = (DEVICE_OWNER_NETWORK_PREFIX +
"router_ha_interface")
DEVICE_OWNER_HA_REPLICATED_INT = (DEVICE_OWNER_NETWORK_PREFIX +
"ha_router_replicated_interface")
DEVICE_OWNER_ROUTER_INTF = DEVICE_OWNER_NETWORK_PREFIX + "router_interface"
DEVICE_OWNER_ROUTER_GW = DEVICE_OWNER_NETWORK_PREFIX + "router_gateway"
DEVICE_OWNER_FLOATINGIP = DEVICE_OWNER_NETWORK_PREFIX + "floatingip"
DEVICE_OWNER_DHCP = DEVICE_OWNER_NETWORK_PREFIX + "dhcp"
DEVICE_OWNER_DVR_INTERFACE = (DEVICE_OWNER_NETWORK_PREFIX +
"router_interface_distributed")
DEVICE_OWNER_AGENT_GW = (DEVICE_OWNER_NETWORK_PREFIX +
"floatingip_agent_gateway")
DEVICE_OWNER_ROUTER_SNAT = (DEVICE_OWNER_NETWORK_PREFIX +
"router_centralized_snat")
# TODO(johnsom) Remove after these stop being used. Neutron-LBaaS is now
# retired (train) and these should no longer be necessary.
DEVICE_OWNER_LOADBALANCER = DEVICE_OWNER_NEUTRON_PREFIX + "LOADBALANCER"
DEVICE_OWNER_LOADBALANCERV2 = DEVICE_OWNER_NEUTRON_PREFIX + "LOADBALANCERV2"
DEVICE_OWNER_PREFIXES = (DEVICE_OWNER_NETWORK_PREFIX,
DEVICE_OWNER_NEUTRON_PREFIX)
# Collection used to identify devices owned by router interfaces.
# DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included.
ROUTER_INTERFACE_OWNERS = (DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_HA_REPLICATED_INT,
DEVICE_OWNER_DVR_INTERFACE)
ROUTER_INTERFACE_OWNERS_SNAT = (DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_HA_REPLICATED_INT,
DEVICE_OWNER_DVR_INTERFACE,
DEVICE_OWNER_ROUTER_SNAT)
DEVICE_ID_RESERVED_DHCP_PORT = 'reserved_dhcp_port'
FLOATINGIP_KEY = '_floatingips'
PORT_FORWARDING_FLOATINGIP_KEY = '_pf_floatingips'
INTERFACE_KEY = '_interfaces'
HA_INTERFACE_KEY = '_ha_interface'
IPv4 = 'IPv4'
IPv6 = 'IPv6'
IP_VERSION_4 = 4
IP_VERSION_6 = 6
IPv4_BITS = 32
IPv6_BITS = 128
INVALID_MAC_ADDRESSES = ['00:00:00:00:00:00', 'FF:FF:FF:FF:FF:FF']
IPv4_ANY = '0.0.0.0/0'
IPv6_ANY = '::/0'
IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY}
IPv6_LLA_PREFIX = 'fe80::/64'
DHCP_CLIENT_PORT = 67
DHCP_RESPONSE_PORT = 68
DHCPV6_CLIENT_PORT = 546
DHCPV6_RESPONSE_PORT = 547
FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0')
AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_METERING = 'Metering agent'
AGENT_TYPE_METADATA = 'Metadata agent'
AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent'
AGENT_TYPE_MACVTAP = 'Macvtap agent'
L2_AGENT_TOPIC = 'N/A'
L3_AGENT_MODE_DVR = 'dvr'
L3_AGENT_MODE_DVR_SNAT = 'dvr_snat'
L3_AGENT_MODE_LEGACY = 'legacy'
L3_AGENT_MODE = 'agent_mode'
L3_AGENT_MODE_DVR_NO_EXTERNAL = 'dvr_no_external'
DVR_SNAT_BOUND = 'dvr_snat_bound'
PORT_BINDING_EXT_ALIAS = 'binding'
L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
L3_HA_MODE_EXT_ALIAS = 'l3-ha'
SUBNET_ALLOCATION_EXT_ALIAS = 'subnet_allocation'
# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_AH = 'ah'
PROTO_NAME_DCCP = 'dccp'
PROTO_NAME_EGP = 'egp'
PROTO_NAME_ESP = 'esp'
PROTO_NAME_GRE = 'gre'
PROTO_NAME_HOPOPT = 'hopopt'
PROTO_NAME_ICMP = 'icmp'
PROTO_NAME_IGMP = 'igmp'
PROTO_NAME_IP = 'ip'
PROTO_NAME_IPIP = 'ipip'
PROTO_NAME_IPV6_ENCAP = 'ipv6-encap'
PROTO_NAME_IPV6_FRAG = 'ipv6-frag'
PROTO_NAME_IPV6_ICMP = 'ipv6-icmp'
# For backward-compatibility of security group rule API, we keep the old value
# for IPv6 ICMP. It should be clean up in the future.
PROTO_NAME_IPV6_ICMP_LEGACY = 'icmpv6'
PROTO_NAME_IPV6_NONXT = 'ipv6-nonxt'
PROTO_NAME_IPV6_OPTS = 'ipv6-opts'
PROTO_NAME_IPV6_ROUTE = 'ipv6-route'
PROTO_NAME_OSPF = 'ospf'
PROTO_NAME_PGM = 'pgm'
PROTO_NAME_RSVP = 'rsvp'
PROTO_NAME_SCTP = 'sctp'
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_UDP = 'udp'
PROTO_NAME_UDPLITE = 'udplite'
PROTO_NAME_VRRP = 'vrrp'
PROTO_NUM_AH = 51
PROTO_NUM_DCCP = 33
PROTO_NUM_EGP = 8
PROTO_NUM_ESP = 50
PROTO_NUM_GRE = 47
PROTO_NUM_HOPOPT = 0
PROTO_NUM_ICMP = 1
PROTO_NUM_IGMP = 2
PROTO_NUM_IP = 0
PROTO_NUM_IPIP = 4
PROTO_NUM_IPV6_ENCAP = 41
PROTO_NUM_IPV6_FRAG = 44
PROTO_NUM_IPV6_ICMP = 58
PROTO_NUM_IPV6_NONXT = 59
PROTO_NUM_IPV6_OPTS = 60
PROTO_NUM_IPV6_ROUTE = 43
PROTO_NUM_OSPF = 89
PROTO_NUM_PGM = 113
PROTO_NUM_RSVP = 46
PROTO_NUM_SCTP = 132
PROTO_NUM_TCP = 6
PROTO_NUM_UDP = 17
PROTO_NUM_UDPLITE = 136
PROTO_NUM_VRRP = 112
IP_PROTOCOL_MAP = {PROTO_NAME_AH: PROTO_NUM_AH,
PROTO_NAME_DCCP: PROTO_NUM_DCCP,
PROTO_NAME_EGP: PROTO_NUM_EGP,
PROTO_NAME_ESP: PROTO_NUM_ESP,
PROTO_NAME_GRE: PROTO_NUM_GRE,
PROTO_NAME_HOPOPT: PROTO_NUM_HOPOPT,
PROTO_NAME_ICMP: PROTO_NUM_ICMP,
PROTO_NAME_IGMP: PROTO_NUM_IGMP,
PROTO_NAME_IP: PROTO_NUM_IP,
PROTO_NAME_IPIP: PROTO_NUM_IPIP,
PROTO_NAME_IPV6_ENCAP: PROTO_NUM_IPV6_ENCAP,
PROTO_NAME_IPV6_FRAG: PROTO_NUM_IPV6_FRAG,
PROTO_NAME_IPV6_ICMP: PROTO_NUM_IPV6_ICMP,
# For backward-compatibility of security group rule API
PROTO_NAME_IPV6_ICMP_LEGACY: PROTO_NUM_IPV6_ICMP,
PROTO_NAME_IPV6_NONXT: PROTO_NUM_IPV6_NONXT,
PROTO_NAME_IPV6_OPTS: PROTO_NUM_IPV6_OPTS,
PROTO_NAME_IPV6_ROUTE: PROTO_NUM_IPV6_ROUTE,
PROTO_NAME_OSPF: PROTO_NUM_OSPF,
PROTO_NAME_PGM: PROTO_NUM_PGM,
PROTO_NAME_RSVP: PROTO_NUM_RSVP,
PROTO_NAME_SCTP: PROTO_NUM_SCTP,
PROTO_NAME_TCP: PROTO_NUM_TCP,
PROTO_NAME_UDP: PROTO_NUM_UDP,
PROTO_NAME_UDPLITE: PROTO_NUM_UDPLITE,
PROTO_NAME_VRRP: PROTO_NUM_VRRP}
# Note that this differs from IP_PROTOCOL_MAP because iptables refers to IPv6
# ICMP as 'icmp6' whereas it is 'ipv6-icmp' in IP_PROTOCOL_MAP.
IPTABLES_PROTOCOL_MAP = {PROTO_NAME_DCCP: 'dccp',
PROTO_NAME_ICMP: 'icmp',
PROTO_NAME_IPV6_ICMP: 'icmp6',
PROTO_NAME_SCTP: 'sctp',
PROTO_NAME_TCP: 'tcp',
PROTO_NAME_UDP: 'udp'}
# IP header length
IP_HEADER_LENGTH = {
4: 20,
6: 40,
}
# ICMPv6 types:
# Destination Unreachable (1)
ICMPV6_TYPE_DEST_UNREACH = 1
# Packet Too Big (2)
ICMPV6_TYPE_PKT_TOOBIG = 2
# Time Exceeded (3)
ICMPV6_TYPE_TIME_EXCEED = 3
# Parameter Problem (4)
ICMPV6_TYPE_PARAMPROB = 4
# Echo Request (128)
ICMPV6_TYPE_ECHO_REQUEST = 128
# Echo Reply (129)
ICMPV6_TYPE_ECHO_REPLY = 129
# Multicast Listener Query (130)
ICMPV6_TYPE_MLD_QUERY = 130
# Multicast Listener Report (131)
ICMPV6_TYPE_MLD_REPORT = 131
# Multicast Listener Done (132)
ICMPV6_TYPE_MLD_DONE = 132
# Router Solicitation (133)
ICMPV6_TYPE_RS = 133
# Router Advertisement (134)
ICMPV6_TYPE_RA = 134
# Neighbor Solicitation (135)
ICMPV6_TYPE_NS = 135
# Neighbor Advertisement (136)
ICMPV6_TYPE_NA = 136
# Multicast Listener v2 Report (143)
ICMPV6_TYPE_MLD2_REPORT = 143
# List of ICMPv6 types that should be allowed from the unspecified address for
# Duplicate Address Detection:
ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES = [ICMPV6_TYPE_MLD_REPORT,
ICMPV6_TYPE_NS,
ICMPV6_TYPE_MLD2_REPORT]
# Human-readable ID to which the subnetpool ID should be set to
# indicate that IPv6 Prefix Delegation is enabled for a given subnetpool
IPV6_PD_POOL_ID = 'prefix_delegation'
# Device names start with "tap"
TAP_DEVICE_PREFIX = 'tap'
# Device names start with "macvtap"
MACVTAP_DEVICE_PREFIX = 'macvtap'
# Linux interface max length
DEVICE_NAME_MAX_LEN = 15
# Time format
ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
IPV6_SLAAC = 'slaac'
IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]
ACTIVE_PENDING_STATUSES = (
ACTIVE,
PENDING_CREATE,
PENDING_UPDATE
)
# Network Type constants
TYPE_FLAT = 'flat'
TYPE_GENEVE = 'geneve'
TYPE_GRE = 'gre'
TYPE_LOCAL = 'local'
TYPE_VXLAN = 'vxlan'
TYPE_VLAN = 'vlan'
TYPE_NONE = 'none'
# List of supported network segment range types
NETWORK_SEGMENT_RANGE_TYPES = [TYPE_VLAN, TYPE_VXLAN, TYPE_GRE, TYPE_GENEVE]
# Values for network_type
# For VLAN Network
MIN_VLAN_TAG = 1
MAX_VLAN_TAG = 4094
# For Geneve Tunnel
MIN_GENEVE_VNI = 1
MAX_GENEVE_VNI = 2 ** 24 - 1
# For GRE Tunnel
MIN_GRE_ID = 1
MAX_GRE_ID = 2 ** 32 - 1
# For VXLAN Tunnel
MIN_VXLAN_VNI = 1
MAX_VXLAN_VNI = 2 ** 24 - 1
VXLAN_UDP_PORT = 4789
# Overlay (tunnel) protocol overhead
GENEVE_ENCAP_MIN_OVERHEAD = 30
GRE_ENCAP_OVERHEAD = 22
VXLAN_ENCAP_OVERHEAD = 30
# For DNS extension
DNS_DOMAIN_DEFAULT = 'openstacklocal.'
DNS_LABEL_KEYWORDS = ['project_id', 'project_name', 'user_name', 'user_id']
DNS_LABEL_MAX_LEN = 63
DNS_LABEL_REGEX = "^([a-z0-9-]{1,%d}|%s)$" % (
DNS_LABEL_MAX_LEN,
'<' + '>|<'.join(DNS_LABEL_KEYWORDS) + '>')
# max value for TCP, UDP, SCTP ports
PORT_MAX = 2**16 - 1
VALID_DSCP_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34,
36, 38, 40, 46, 48, 56]
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
VALID_DIRECTIONS = (INGRESS_DIRECTION, EGRESS_DIRECTION)
PROVISIONAL_IPV6_PD_PREFIX = '::/64'
# Traffic control
TC_QDISC_TYPE_HTB = 'htb'
TC_QDISC_TYPE_TBF = 'tbf'
TC_QDISC_TYPE_INGRESS = 'ingress'
TC_QDISC_TYPES = (TC_QDISC_TYPE_HTB, TC_QDISC_TYPE_TBF, TC_QDISC_TYPE_INGRESS)
TC_QDISC_INGRESS_ID = 'ffff:'
TC_QDISC_PARENTS = {'root': 0xffffffff,
'ingress': 0xfffffff1}
class Sentinel(object):
"""A constant object that does not change even when copied."""
def __deepcopy__(self, memo):
# Always return the same object because this is essentially a constant.
return self
def __copy__(self):
# called via copy.copy(x)
return self
#############################
# Attribute related constants
#############################
ATTR_NOT_SPECIFIED = Sentinel()
DICT_POPULATE_DEFAULTS = 'dict_populate_defaults'
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
SHARED = 'shared'
##########################
# Device related constants
##########################
# vhost-user device names start with "vhu"
VHOST_USER_DEVICE_PREFIX = 'vhu'
# The vswitch side of a veth pair for a nova iptables filter setup
VETH_DEVICE_PREFIX = 'qvo'
# prefix for SNAT interface in DVR
SNAT_INT_DEV_PREFIX = 'sg-'
ROUTER_PORT_OWNERS = ROUTER_INTERFACE_OWNERS_SNAT + (DEVICE_OWNER_ROUTER_GW,)
ROUTER_STATUS_ACTIVE = 'ACTIVE'
ROUTER_STATUS_ALLOCATING = 'ALLOCATING'
ROUTER_STATUS_ERROR = 'ERROR'
VALID_ROUTER_STATUS = (ROUTER_STATUS_ACTIVE,
ROUTER_STATUS_ALLOCATING,
ROUTER_STATUS_ERROR)
HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'
HA_PORT_NAME = 'HA port tenant %s'
HA_ROUTER_STATE_ACTIVE = 'active'
HA_ROUTER_STATE_STANDBY = 'standby'
HA_ROUTER_STATE_UNKNOWN = 'unknown'
VALID_HA_STATES = (HA_ROUTER_STATE_ACTIVE, HA_ROUTER_STATE_STANDBY,
HA_ROUTER_STATE_UNKNOWN)
PAGINATION_INFINITE = 'infinite'
SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'
ETHERTYPE_NAME_ARP = 'arp'
ETHERTYPE_ARP = 0x0806
ETHERTYPE_RARP = 0x8035
ETHERTYPE_IP = 0x0800
ETHERTYPE_IPV6 = 0x86DD
IP_PROTOCOL_NAME_ALIASES = {PROTO_NAME_IPV6_ICMP_LEGACY:
PROTO_NAME_IPV6_ICMP}
# We only want one mapping from '58' to 'ipv6-icmp' since that is the
# normalized string, the name to number mapping can have both
IP_PROTOCOL_NUM_TO_NAME_MAP = ({str(v): k for k, v in IP_PROTOCOL_MAP.items()
if k != PROTO_NAME_IPV6_ICMP_LEGACY})
# When using iptables-save we specify '-p {proto}',
# but sometimes those values are not identical. This is a map
# of known protocol numbers that require a name to be used and
# protocol names that require a different name to be used,
# because that is how iptables-save will display them.
#
# This is how the list was created, so there is a possibility
# it will need to be updated in the future:
#
# $ for num in {0..255}; do iptables -A INPUT -p $num; done
# $ iptables-save
#
# These cases are special, and were found by inspection:
# - 'ipv6-encap' uses 'ipv6'
# - 'icmpv6' uses 'ipv6-icmp'
# - 'pgm' uses '113' instead of its name
# - protocol '0' uses no -p argument
IPTABLES_PROTOCOL_NAME_MAP = {PROTO_NAME_IPV6_ENCAP: 'ipv6',
PROTO_NAME_IPV6_ICMP_LEGACY:
'ipv6-icmp',
PROTO_NAME_PGM: '113',
'0': None,
'1': 'icmp',
'2': 'igmp',
'3': 'ggp',
'4': 'ipencap',
'5': 'st',
'6': 'tcp',
'8': 'egp',
'9': 'igp',
'12': 'pup',
'17': 'udp',
'20': 'hmp',
'22': 'xns-idp',
'27': 'rdp',
'29': 'iso-tp4',
'33': 'dccp',
'36': 'xtp',
'37': 'ddp',
'38': 'idpr-cmtp',
'41': 'ipv6',
'43': 'ipv6-route',
'44': 'ipv6-frag',
'45': 'idrp',
'46': 'rsvp',
'47': 'gre',
'50': 'esp',
'51': 'ah',
'57': 'skip',
'58': 'ipv6-icmp',
'59': 'ipv6-nonxt',
'60': 'ipv6-opts',
'73': 'rspf',
'81': 'vmtp',
'88': 'eigrp',
'89': 'ospf',
'93': 'ax.25',
'94': 'ipip',
'97': 'etherip',
'98': 'encap',
'103': 'pim',
'108': 'ipcomp',
'112': 'vrrp',
'115': 'l2tp',
'124': 'isis',
'132': 'sctp',
'133': 'fc',
'135': 'mobility-header',
'136': 'udplite',
'137': 'mpls-in-ip',
'138': 'manet',
'139': 'hip',
'140': 'shim6',
'141': 'wesp',
'142': 'rohc'}
# A length of a iptables chain name must be less than or equal to 11
# characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_IPTABLES_CHAIN_LEN_WRAP = 11
MAX_IPTABLES_CHAIN_LEN_NOWRAP = 28
# Timeout in seconds for getting an IPv6 LLA
LLA_TASK_TIMEOUT = 40
# length of all device prefixes (e.g. qvo, tap, qvb)
LINUX_DEV_PREFIX_LEN = 3
# must be shorter than linux IFNAMSIZ (which is 16)
LINUX_DEV_LEN = 14
# Possible prefixes to partial port IDs in interface names used by the OVS,
# Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the
# 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details.
INTERFACE_PREFIXES = (TAP_DEVICE_PREFIX,
VETH_DEVICE_PREFIX,
SNAT_INT_DEV_PREFIX)
ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
# TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports
# Targets with multiple namespaces. Neutron will then implement callbacks
# for its RPC clients in order to support rolling upgrades.
# RPC Interface for agents to call DHCP API implemented on the plugin side
RPC_NAMESPACE_DHCP_PLUGIN = None
# RPC interface for the metadata service to get info from the plugin side
RPC_NAMESPACE_METADATA = None
# RPC interface for agent to plugin security group API
RPC_NAMESPACE_SECGROUP = None
# RPC interface for agent to plugin DVR api
RPC_NAMESPACE_DVR = None
# RPC interface for reporting state back to the plugin
RPC_NAMESPACE_STATE = None
# RPC interface for agent to plugin resources API
RPC_NAMESPACE_RESOURCES = None
# Default network MTU value when not configured
DEFAULT_NETWORK_MTU = 1500
IPV6_MIN_MTU = 1280
ROUTER_MARK_MASK = "0xffff"
VALID_ETHERTYPES = (IPv4, IPv6)
IP_ALLOWED_VERSIONS = [IP_VERSION_4, IP_VERSION_6]
PORT_RANGE_MIN = 1
PORT_RANGE_MAX = 65535
ETHERTYPE_MIN = 0
ETHERTYPE_MAX = 65535
DHCPV6_CLIENT_PORT = 546
# Configuration values for accept_ra sysctl, copied from linux kernel
# networking (netdev) tree, file Documentation/networking/ip-sysctl.txt
#
# Possible values are:
# 0 Do not accept Router Advertisements.
# 1 Accept Router Advertisements if forwarding is disabled.
# 2 Overrule forwarding behaviour. Accept Router Advertisements
# even if forwarding is enabled.
ACCEPT_RA_DISABLED = 0
ACCEPT_RA_WITHOUT_FORWARDING = 1
ACCEPT_RA_WITH_FORWARDING = 2
# Some components communicate using private address ranges, define
# them all here. These address ranges should not cause any issues
# even if they overlap since they are used in disjoint namespaces,
# but for now they are unique.
# We define the metadata cidr since it falls in the range.
PRIVATE_CIDR_RANGE = '169.254.0.0/16'
DVR_FIP_LL_CIDR = '169.254.64.0/18'
L3_HA_NET_CIDR = '169.254.192.0/18'
METADATA_CIDR = '169.254.169.254/32'
# The only defined IpamAllocation status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
IPAM_ALLOCATION_STATUS_ALLOCATED = 'ALLOCATED'
VALID_IPAM_ALLOCATION_STATUSES = (IPAM_ALLOCATION_STATUS_ALLOCATED,)
# Port binding states for Live Migration
PORT_BINDING_STATUSES = (ACTIVE,
INACTIVE)
VALID_FLOATINGIP_STATUS = (FLOATINGIP_STATUS_ACTIVE,
FLOATINGIP_STATUS_DOWN,
FLOATINGIP_STATUS_ERROR)
# Floating IP host binding states
FLOATING_IP_HOST_UNBOUND = "FLOATING_IP_HOST_UNBOUND"
FLOATING_IP_HOST_NEEDS_BINDING = "FLOATING_IP_HOST_NEEDS_BINDING"
# Possible types of values (e.g. in QoS rule types)
VALUES_TYPE_CHOICES = "choices"
VALUES_TYPE_RANGE = "range"
# Units base
SI_BASE = 1000
IEC_BASE = 1024
# Port bindings handling
NO_ACTIVE_BINDING = 'no_active_binding'
EXT_PARENT_PREFIX = 'ext_parent'
RP_BANDWIDTHS = 'resource_provider_bandwidths'
RP_INVENTORY_DEFAULTS = 'resource_provider_inventory_defaults'
| en | 0.791402 | # Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO(salv-orlando): Verify if a single set of operational # status constants is achievable # Service operation status constants # TODO(johnsom) Remove after these stop being used. Neutron-LBaaS is now # retired (train) and these should no longer be necessary. # Collection used to identify devices owned by router interfaces. # DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included. # Protocol names and numbers for Security Groups/Firewalls # For backward-compatibility of security group rule API, we keep the old value # for IPv6 ICMP. It should be clean up in the future. # For backward-compatibility of security group rule API # Note that this differs from IP_PROTOCOL_MAP because iptables refers to IPv6 # ICMP as 'icmp6' whereas it is 'ipv6-icmp' in IP_PROTOCOL_MAP. 
# IP header length # ICMPv6 types: # Destination Unreachable (1) # Packet Too Big (2) # Time Exceeded (3) # Parameter Problem (4) # Echo Request (128) # Echo Reply (129) # Multicast Listener Query (130) # Multicast Listener Report (131) # Multicast Listener Done (132) # Router Solicitation (133) # Router Advertisement (134) # Neighbor Solicitation (135) # Neighbor Advertisement (136) # Multicast Listener v2 Report (143) # List of ICMPv6 types that should be allowed from the unspecified address for # Duplicate Address Detection: # Human-readable ID to which the subnetpool ID should be set to # indicate that IPv6 Prefix Delegation is enabled for a given subnetpool # Device names start with "tap" # Device names start with "macvtap" # Linux interface max length # Time format # Network Type constants # List of supported network segment range types # Values for network_type # For VLAN Network # For Geneve Tunnel # For GRE Tunnel # For VXLAN Tunnel # Overlay (tunnel) protocol overhead # For DNS extension # max value for TCP, UDP, SCTP ports # Traffic control A constant object that does not change even when copied. # Always return the same object because this is essentially a constant. # called via copy.copy(x) ############################# # Attribute related constants ############################# ########################## # Device related constants ########################## # vhost-user device names start with "vhu" # The vswitch side of a veth pair for a nova iptables filter setup # prefix for SNAT interface in DVR # We only want one mapping from '58' to 'ipv6-icmp' since that is the # normalized string, the name to number mapping can have both # When using iptables-save we specify '-p {proto}', # but sometimes those values are not identical. This is a map # of known protocol numbers that require a name to be used and # protocol names that require a different name to be used, # because that is how iptables-save will display them. 
# # This is how the list was created, so there is a possibility # it will need to be updated in the future: # # $ for num in {0..255}; do iptables -A INPUT -p $num; done # $ iptables-save # # These cases are special, and were found by inspection: # - 'ipv6-encap' uses 'ipv6' # - 'icmpv6' uses 'ipv6-icmp' # - 'pgm' uses '113' instead of its name # - protocol '0' uses no -p argument # A length of a iptables chain name must be less than or equal to 11 # characters. # <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11 # Timeout in seconds for getting an IPv6 LLA # length of all device prefixes (e.g. qvo, tap, qvb) # must be shorter than linux IFNAMSIZ (which is 16) # Possible prefixes to partial port IDs in interface names used by the OVS, # Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the # 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details. # TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports # Targets with multiple namespaces. Neutron will then implement callbacks # for its RPC clients in order to support rolling upgrades. # RPC Interface for agents to call DHCP API implemented on the plugin side # RPC interface for the metadata service to get info from the plugin side # RPC interface for agent to plugin security group API # RPC interface for agent to plugin DVR api # RPC interface for reporting state back to the plugin # RPC interface for agent to plugin resources API # Default network MTU value when not configured # Configuration values for accept_ra sysctl, copied from linux kernel # networking (netdev) tree, file Documentation/networking/ip-sysctl.txt # # Possible values are: # 0 Do not accept Router Advertisements. # 1 Accept Router Advertisements if forwarding is disabled. # 2 Overrule forwarding behaviour. Accept Router Advertisements # even if forwarding is enabled. # Some components communicate using private address ranges, define # them all here. 
These address ranges should not cause any issues # even if they overlap since they are used in disjoint namespaces, # but for now they are unique. # We define the metadata cidr since it falls in the range. # The only defined IpamAllocation status at this stage is 'ALLOCATED'. # More states will be available in the future - e.g.: RECYCLABLE # Port binding states for Live Migration # Floating IP host binding states # Possible types of values (e.g. in QoS rule types) # Units base # Port bindings handling | 1.607355 | 2 |
tests/api_resources/test_tax_rate.py | bhch/async-stripe | 8 | 6619802 | <filename>tests/api_resources/test_tax_rate.py
from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "txr_123"
class TestTaxRate(object):
async def test_is_listable(self, request_mock):
resources = await stripe.TaxRate.list()
request_mock.assert_requested("get", "/v1/tax_rates")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.TaxRate)
async def test_is_retrievable(self, request_mock):
resource = await stripe.TaxRate.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/tax_rates/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.TaxRate)
async def test_is_creatable(self, request_mock):
resource = await stripe.TaxRate.create(
display_name="name", inclusive=False, percentage=10.15
)
request_mock.assert_requested("post", "/v1/tax_rates")
assert isinstance(resource, stripe.TaxRate)
async def test_is_saveable(self, request_mock):
resource = await stripe.TaxRate.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
await resource.save()
request_mock.assert_requested(
"post", "/v1/tax_rates/%s" % TEST_RESOURCE_ID
)
async def test_is_modifiable(self, request_mock):
resource = await stripe.TaxRate.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/tax_rates/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.TaxRate)
| <filename>tests/api_resources/test_tax_rate.py
from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "txr_123"
class TestTaxRate(object):
async def test_is_listable(self, request_mock):
resources = await stripe.TaxRate.list()
request_mock.assert_requested("get", "/v1/tax_rates")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.TaxRate)
async def test_is_retrievable(self, request_mock):
resource = await stripe.TaxRate.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/tax_rates/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.TaxRate)
async def test_is_creatable(self, request_mock):
resource = await stripe.TaxRate.create(
display_name="name", inclusive=False, percentage=10.15
)
request_mock.assert_requested("post", "/v1/tax_rates")
assert isinstance(resource, stripe.TaxRate)
async def test_is_saveable(self, request_mock):
resource = await stripe.TaxRate.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
await resource.save()
request_mock.assert_requested(
"post", "/v1/tax_rates/%s" % TEST_RESOURCE_ID
)
async def test_is_modifiable(self, request_mock):
resource = await stripe.TaxRate.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/tax_rates/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.TaxRate)
| none | 1 | 2.287552 | 2 | |
networks/layers/co_attention.py | huanglf714/COMatchNet | 1 | 6619803 | <reponame>huanglf714/COMatchNet
import torch
from torch import mode, nn
import torch.nn.functional as F
class CO_Attention(nn.Module):
def __init__(self,in_dim, co_attention_dim):
super(CO_Attention, self).__init__()
# self.linear_r = nn.Bilinear(in_dim, in_dim, co_attention_dim)
# self.linear_p = nn.Bilinear(in_dim, in_dim, co_attention_dim)
self.leak_relu = nn.LeakyReLU()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(in_dim,64,kernel_size=3,padding=1)
self.conv2 = nn.Conv2d(64,co_attention_dim,kernel_size=3,padding=1)
self.pool = torch.nn.MaxPool2d(kernel_size=2)
self.gate1 = nn.Sequential(self.conv1, self.relu, self.pool, self.conv2, self.relu)
self.conv3 = nn.Conv2d(in_dim,64,kernel_size=3,padding=1)
self.conv4 = nn.Conv2d(64,co_attention_dim,kernel_size=3,padding=1)
self.gate2 = nn.Sequential(self.conv3, self.relu, self.pool, self.conv4, self.relu)
self.bilinear = torch.nn.Bilinear(co_attention_dim,co_attention_dim,1)
self.conv = nn.Conv2d(1,1,kernel_size=3,padding=1)
self.sigmoid = nn.Sigmoid()
def forward(self, query_embed, prev_embed, refer_embed, refer_label,ori_size=None,use_float16=True):
"""
Args:
query_embed:[height,width,embed_dim]
prev_embed:[height,width,embed_dim]
ref_embed:[height,width,embed_dim]
ref_label:[height,width,obj_nums]
Return:
x:[h,w,obj_num,1]
"""
h,w,_ = query_embed.size()
obj_nums = refer_label.size(2)
query_embed = query_embed.permute(2,0,1)
prev_embed = prev_embed.permute(2,1,0)
refer_embed = refer_embed.permute(2,1,0)
refer_label = refer_label.permute(2,0,1)
# all_ref_fg = torch.sum(refer_label_flat, dim=1, keepdim=True) > 0.9
# refer_label_flat = torch.masked_select(refer_label_flat,
# all_ref_fg.expand(-1, obj_nums)).view(-1, obj_nums)
r_attention = self.leak_relu(torch.bmm(refer_embed, query_embed)).unsqueeze(0)
p_attention = self.leak_relu(torch.bmm(prev_embed, query_embed)).unsqueeze(0)
r_attention = self.gate1(r_attention).squeeze(0)
p_attention = self.gate2(p_attention).squeeze(0)
attention_h = r_attention.size(1)
attention_w = r_attention.size(2)
r_attention = r_attention.reshape(attention_h*attention_w,-1)
p_attention = p_attention.reshape(attention_h*attention_w,-1)
attention = self.bilinear(r_attention,p_attention).reshape(-1,1,attention_h,attention_w)
if(attention.size(2)!=64 or attention.size(3)!=64):
attention = F.interpolate(attention, size=[64,64], mode='bilinear',align_corners=True)
attention = self.conv(attention)
attention = F.interpolate(attention, size=(h,w),
mode='bilinear', align_corners=True).squeeze(0)
x = torch.mul(attention,refer_label).unsqueeze(-1).permute(1,2, 0, 3)
attention = attention.permute(1,2,0)
x = self.sigmoid(x)
if ori_size is not None:
x = x.view(h, w, obj_nums, 1).permute(2, 3, 0, 1)
x = F.interpolate(x, size=ori_size,
mode='bilinear', align_corners=True).permute(2, 3, 0, 1).view(ori_size[0], ori_size[1], obj_nums, 1)
if use_float16:
x = x.float()
return x
if __name__=='__main__':
query_embed = torch.rand(117,117,100)
prev_embed = torch.rand(117,117,100)
refer_embed = torch.rand(117,117,100)
refer_label = torch.rand(117,117,3)
co_attention = CO_Attention(100,32)
out, attention = co_attention(query_embed,prev_embed,refer_embed,refer_label)
assert out.shape == (117,117,3,1)
assert attention.shape ==(117,117,1)
print(attention.shape)
# print(co_attention)
# from tensorboardX import SummaryWriter
# with SummaryWriter(comment='co_attention') as w:
# w.add_graph(co_attention,(query_embed,prev_embed,refer_embed,refer_label))
# print('-------')
# print('-----------------')
| import torch
from torch import mode, nn
import torch.nn.functional as F
class CO_Attention(nn.Module):
def __init__(self,in_dim, co_attention_dim):
super(CO_Attention, self).__init__()
# self.linear_r = nn.Bilinear(in_dim, in_dim, co_attention_dim)
# self.linear_p = nn.Bilinear(in_dim, in_dim, co_attention_dim)
self.leak_relu = nn.LeakyReLU()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(in_dim,64,kernel_size=3,padding=1)
self.conv2 = nn.Conv2d(64,co_attention_dim,kernel_size=3,padding=1)
self.pool = torch.nn.MaxPool2d(kernel_size=2)
self.gate1 = nn.Sequential(self.conv1, self.relu, self.pool, self.conv2, self.relu)
self.conv3 = nn.Conv2d(in_dim,64,kernel_size=3,padding=1)
self.conv4 = nn.Conv2d(64,co_attention_dim,kernel_size=3,padding=1)
self.gate2 = nn.Sequential(self.conv3, self.relu, self.pool, self.conv4, self.relu)
self.bilinear = torch.nn.Bilinear(co_attention_dim,co_attention_dim,1)
self.conv = nn.Conv2d(1,1,kernel_size=3,padding=1)
self.sigmoid = nn.Sigmoid()
def forward(self, query_embed, prev_embed, refer_embed, refer_label,ori_size=None,use_float16=True):
"""
Args:
query_embed:[height,width,embed_dim]
prev_embed:[height,width,embed_dim]
ref_embed:[height,width,embed_dim]
ref_label:[height,width,obj_nums]
Return:
x:[h,w,obj_num,1]
"""
h,w,_ = query_embed.size()
obj_nums = refer_label.size(2)
query_embed = query_embed.permute(2,0,1)
prev_embed = prev_embed.permute(2,1,0)
refer_embed = refer_embed.permute(2,1,0)
refer_label = refer_label.permute(2,0,1)
# all_ref_fg = torch.sum(refer_label_flat, dim=1, keepdim=True) > 0.9
# refer_label_flat = torch.masked_select(refer_label_flat,
# all_ref_fg.expand(-1, obj_nums)).view(-1, obj_nums)
r_attention = self.leak_relu(torch.bmm(refer_embed, query_embed)).unsqueeze(0)
p_attention = self.leak_relu(torch.bmm(prev_embed, query_embed)).unsqueeze(0)
r_attention = self.gate1(r_attention).squeeze(0)
p_attention = self.gate2(p_attention).squeeze(0)
attention_h = r_attention.size(1)
attention_w = r_attention.size(2)
r_attention = r_attention.reshape(attention_h*attention_w,-1)
p_attention = p_attention.reshape(attention_h*attention_w,-1)
attention = self.bilinear(r_attention,p_attention).reshape(-1,1,attention_h,attention_w)
if(attention.size(2)!=64 or attention.size(3)!=64):
attention = F.interpolate(attention, size=[64,64], mode='bilinear',align_corners=True)
attention = self.conv(attention)
attention = F.interpolate(attention, size=(h,w),
mode='bilinear', align_corners=True).squeeze(0)
x = torch.mul(attention,refer_label).unsqueeze(-1).permute(1,2, 0, 3)
attention = attention.permute(1,2,0)
x = self.sigmoid(x)
if ori_size is not None:
x = x.view(h, w, obj_nums, 1).permute(2, 3, 0, 1)
x = F.interpolate(x, size=ori_size,
mode='bilinear', align_corners=True).permute(2, 3, 0, 1).view(ori_size[0], ori_size[1], obj_nums, 1)
if use_float16:
x = x.float()
return x
if __name__=='__main__':
query_embed = torch.rand(117,117,100)
prev_embed = torch.rand(117,117,100)
refer_embed = torch.rand(117,117,100)
refer_label = torch.rand(117,117,3)
co_attention = CO_Attention(100,32)
out, attention = co_attention(query_embed,prev_embed,refer_embed,refer_label)
assert out.shape == (117,117,3,1)
assert attention.shape ==(117,117,1)
print(attention.shape)
# print(co_attention)
# from tensorboardX import SummaryWriter
# with SummaryWriter(comment='co_attention') as w:
# w.add_graph(co_attention,(query_embed,prev_embed,refer_embed,refer_label))
# print('-------')
# print('-----------------') | en | 0.413138 | # self.linear_r = nn.Bilinear(in_dim, in_dim, co_attention_dim) # self.linear_p = nn.Bilinear(in_dim, in_dim, co_attention_dim) Args: query_embed:[height,width,embed_dim] prev_embed:[height,width,embed_dim] ref_embed:[height,width,embed_dim] ref_label:[height,width,obj_nums] Return: x:[h,w,obj_num,1] # all_ref_fg = torch.sum(refer_label_flat, dim=1, keepdim=True) > 0.9 # refer_label_flat = torch.masked_select(refer_label_flat, # all_ref_fg.expand(-1, obj_nums)).view(-1, obj_nums) # print(co_attention) # from tensorboardX import SummaryWriter # with SummaryWriter(comment='co_attention') as w: # w.add_graph(co_attention,(query_embed,prev_embed,refer_embed,refer_label)) # print('-------') # print('-----------------') | 2.621745 | 3 |
main.py | JohnGiorgi/PyTorch-Image-Retrieval | 262 | 6619804 | # -*- coding: utf_8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from data_loader import train_data_loader, test_data_loader
# Load initial models
from networks import EmbeddingNetwork
# Load batch sampler and train loss
from datasets import BalancedBatchSampler
from losses import BlendedLoss, MAIN_LOSS_CHOICES
from trainer import fit
from inference import retrieve
def load(file_path):
model.load_state_dict(torch.load(file_path))
print('model loaded!')
return model
def infer(model, queries, db):
retrieval_results = retrieve(model, queries, db, input_size, infer_batch_size)
return list(zip(range(len(retrieval_results)), retrieval_results.items()))
def get_arguments():
args = argparse.ArgumentParser()
args.add_argument('--dataset-path', type=str)
args.add_argument('--model-save-dir', type=str)
args.add_argument('--model-to-test', type=str)
# Hyperparameters
args.add_argument('--epochs', type=int, default=20)
args.add_argument('--model', type=str,
choices=['densenet161', 'resnet101', 'inceptionv3', 'seresnext'],
default='densenet161')
args.add_argument('--input-size', type=int, default=224, help='size of input image')
args.add_argument('--num-classes', type=int, default=64, help='number of classes for batch sampler')
args.add_argument('--num-samples', type=int, default=4, help='number of samples per class for batch sampler')
args.add_argument('--embedding-dim', type=int, default=128, help='size of embedding dimension')
args.add_argument('--feature-extracting', type=bool, default=False)
args.add_argument('--use-pretrained', type=bool, default=True)
args.add_argument('--lr', type=float, default=1e-4)
args.add_argument('--scheduler', type=str, choices=['StepLR', 'MultiStepLR'])
args.add_argument('--attention', action='store_true')
args.add_argument('--loss-type', type=str, choices=MAIN_LOSS_CHOICES)
args.add_argument('--cross-entropy', action='store_true')
args.add_argument('--use-augmentation', action='store_true')
# Mode selection
args.add_argument('--mode', type=str, default='train', help='mode selection: train or test.')
return args.parse_args()
if __name__ == '__main__':
config = get_arguments()
dataset_path = config.dataset_path
# Model parameters
model_name = config.model
input_size = config.input_size
embedding_dim = config.embedding_dim
feature_extracting = config.feature_extracting
use_pretrained = config.use_pretrained
attention_flag = config.attention
# Training parameters
nb_epoch = config.epochs
loss_type = config.loss_type
cross_entropy_flag = config.cross_entropy
scheduler_name = config.scheduler
lr = config.lr
# Mini-batch parameters
num_classes = config.num_classes
num_samples = config.num_samples
use_augmentation = config.use_augmentation
infer_batch_size = 64
log_interval = 50
""" Model """
model = EmbeddingNetwork(model_name=model_name,
embedding_dim=embedding_dim,
feature_extracting=feature_extracting,
use_pretrained=use_pretrained,
attention_flag=attention_flag,
cross_entropy_flag=cross_entropy_flag)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
if config.mode == 'train':
""" Load data """
print('dataset path', dataset_path)
train_dataset_path = dataset_path + '/train/train_data'
img_dataset = train_data_loader(data_path=train_dataset_path, img_size=input_size,
use_augment=use_augmentation)
# Balanced batch sampler and online train loader
train_batch_sampler = BalancedBatchSampler(img_dataset, n_classes=num_classes, n_samples=num_samples)
online_train_loader = torch.utils.data.DataLoader(img_dataset,
batch_sampler=train_batch_sampler,
num_workers=4,
pin_memory=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Gather the parameters to be optimized/updated.
params_to_update = model.parameters()
print("Params to learn:")
if feature_extracting:
params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model.named_parameters():
if param.requires_grad:
print("\t", name)
# Send the model to GPU
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
if scheduler_name == 'StepLR':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.1)
elif scheduler_name == 'MultiStepLR':
if use_augmentation:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30], gamma=0.1)
else:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 15, 20], gamma=0.1)
else:
raise ValueError('Invalid scheduler')
# Loss function
loss_fn = BlendedLoss(loss_type, cross_entropy_flag)
# Train (fine-tune) model
fit(online_train_loader, model, loss_fn, optimizer, scheduler, nb_epoch,
device=device, log_interval=log_interval, save_model_to=config.model_save_dir)
elif config.mode == 'test':
test_dataset_path = dataset_path + '/test/test_data'
queries, db = test_data_loader(test_dataset_path)
model = load(file_path=config.model_to_test)
result_dict = infer(model, queries, db)
| # -*- coding: utf_8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from data_loader import train_data_loader, test_data_loader
# Load initial models
from networks import EmbeddingNetwork
# Load batch sampler and train loss
from datasets import BalancedBatchSampler
from losses import BlendedLoss, MAIN_LOSS_CHOICES
from trainer import fit
from inference import retrieve
def load(file_path):
model.load_state_dict(torch.load(file_path))
print('model loaded!')
return model
def infer(model, queries, db):
retrieval_results = retrieve(model, queries, db, input_size, infer_batch_size)
return list(zip(range(len(retrieval_results)), retrieval_results.items()))
def get_arguments():
args = argparse.ArgumentParser()
args.add_argument('--dataset-path', type=str)
args.add_argument('--model-save-dir', type=str)
args.add_argument('--model-to-test', type=str)
# Hyperparameters
args.add_argument('--epochs', type=int, default=20)
args.add_argument('--model', type=str,
choices=['densenet161', 'resnet101', 'inceptionv3', 'seresnext'],
default='densenet161')
args.add_argument('--input-size', type=int, default=224, help='size of input image')
args.add_argument('--num-classes', type=int, default=64, help='number of classes for batch sampler')
args.add_argument('--num-samples', type=int, default=4, help='number of samples per class for batch sampler')
args.add_argument('--embedding-dim', type=int, default=128, help='size of embedding dimension')
args.add_argument('--feature-extracting', type=bool, default=False)
args.add_argument('--use-pretrained', type=bool, default=True)
args.add_argument('--lr', type=float, default=1e-4)
args.add_argument('--scheduler', type=str, choices=['StepLR', 'MultiStepLR'])
args.add_argument('--attention', action='store_true')
args.add_argument('--loss-type', type=str, choices=MAIN_LOSS_CHOICES)
args.add_argument('--cross-entropy', action='store_true')
args.add_argument('--use-augmentation', action='store_true')
# Mode selection
args.add_argument('--mode', type=str, default='train', help='mode selection: train or test.')
return args.parse_args()
if __name__ == '__main__':
config = get_arguments()
dataset_path = config.dataset_path
# Model parameters
model_name = config.model
input_size = config.input_size
embedding_dim = config.embedding_dim
feature_extracting = config.feature_extracting
use_pretrained = config.use_pretrained
attention_flag = config.attention
# Training parameters
nb_epoch = config.epochs
loss_type = config.loss_type
cross_entropy_flag = config.cross_entropy
scheduler_name = config.scheduler
lr = config.lr
# Mini-batch parameters
num_classes = config.num_classes
num_samples = config.num_samples
use_augmentation = config.use_augmentation
infer_batch_size = 64
log_interval = 50
""" Model """
model = EmbeddingNetwork(model_name=model_name,
embedding_dim=embedding_dim,
feature_extracting=feature_extracting,
use_pretrained=use_pretrained,
attention_flag=attention_flag,
cross_entropy_flag=cross_entropy_flag)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
if config.mode == 'train':
""" Load data """
print('dataset path', dataset_path)
train_dataset_path = dataset_path + '/train/train_data'
img_dataset = train_data_loader(data_path=train_dataset_path, img_size=input_size,
use_augment=use_augmentation)
# Balanced batch sampler and online train loader
train_batch_sampler = BalancedBatchSampler(img_dataset, n_classes=num_classes, n_samples=num_samples)
online_train_loader = torch.utils.data.DataLoader(img_dataset,
batch_sampler=train_batch_sampler,
num_workers=4,
pin_memory=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Gather the parameters to be optimized/updated.
params_to_update = model.parameters()
print("Params to learn:")
if feature_extracting:
params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model.named_parameters():
if param.requires_grad:
print("\t", name)
# Send the model to GPU
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
if scheduler_name == 'StepLR':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.1)
elif scheduler_name == 'MultiStepLR':
if use_augmentation:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30], gamma=0.1)
else:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 15, 20], gamma=0.1)
else:
raise ValueError('Invalid scheduler')
# Loss function
loss_fn = BlendedLoss(loss_type, cross_entropy_flag)
# Train (fine-tune) model
fit(online_train_loader, model, loss_fn, optimizer, scheduler, nb_epoch,
device=device, log_interval=log_interval, save_model_to=config.model_save_dir)
elif config.mode == 'test':
test_dataset_path = dataset_path + '/test/test_data'
queries, db = test_data_loader(test_dataset_path)
model = load(file_path=config.model_to_test)
result_dict = infer(model, queries, db)
| en | 0.462013 | # -*- coding: utf_8 -*- # Load initial models # Load batch sampler and train loss # Hyperparameters # Mode selection # Model parameters # Training parameters # Mini-batch parameters Model Load data # Balanced batch sampler and online train loader # Gather the parameters to be optimized/updated. # Send the model to GPU # Loss function # Train (fine-tune) model | 2.016132 | 2 |
Medium/523.py | Hellofafar/Leetcode | 6 | 6619805 | <filename>Medium/523.py
# ------------------------------
# 523. Continuous Subarray Sum
#
# Description:
# Given a list of non-negative numbers and a target integer k, write a function to check if
# the array has a continuous subarray of size at least 2 that sums up to a multiple of k,
# that is, sums up to n*k where n is also an integer.
#
# Example 1:
# Input: [23, 2, 4, 6, 7], k=6
# Output: True
# Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6.
#
# Example 2:
# Input: [23, 2, 6, 4, 7], k=6
# Output: True
# Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and sums up to 42.
#
# Note:
# The length of the array won't exceed 10,000.
# You may assume the sum of all the numbers is in the range of a signed 32-bit integer.
#
# Version: 1.0
# 10/28/19 by Jianfa
# ------------------------------
class Solution:
def checkSubarraySum(self, nums: List[int], k: int) -> bool:
modDict = {0: -1} # if an array sums up to n*k, an initial value is required
runningSum = 0
for i, n in enumerate(nums):
runningSum += n
if k != 0:
runningSum %= k
if runningSum in modDict:
# for i > j, if sum(i) % k == sum(j) % k
# (sum(i) - sum(j)) % k == 0, then just check the size
# of subarray (j, i]
if i - modDict[runningSum] > 1:
return True
else:
modDict[runningSum] = i
return False
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Map solution from https://leetcode.com/problems/continuous-subarray-sum/discuss/99499/Java-O(n)-time-O(k)-space
# iterate through the input array exactly once, keeping track of the running sum mod k of
# the elements in the process. If we find that a running sum value at index j has been
# previously seen before in some earlier index i in the array, then we know that the
# sub-array (i,j] contains a desired sum.
#
# O(n) time, O(k) space | <filename>Medium/523.py
# ------------------------------
# 523. Continuous Subarray Sum
#
# Description:
# Given a list of non-negative numbers and a target integer k, write a function to check if
# the array has a continuous subarray of size at least 2 that sums up to a multiple of k,
# that is, sums up to n*k where n is also an integer.
#
# Example 1:
# Input: [23, 2, 4, 6, 7], k=6
# Output: True
# Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6.
#
# Example 2:
# Input: [23, 2, 6, 4, 7], k=6
# Output: True
# Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and sums up to 42.
#
# Note:
# The length of the array won't exceed 10,000.
# You may assume the sum of all the numbers is in the range of a signed 32-bit integer.
#
# Version: 1.0
# 10/28/19 by Jianfa
# ------------------------------
class Solution:
def checkSubarraySum(self, nums: List[int], k: int) -> bool:
modDict = {0: -1} # if an array sums up to n*k, an initial value is required
runningSum = 0
for i, n in enumerate(nums):
runningSum += n
if k != 0:
runningSum %= k
if runningSum in modDict:
# for i > j, if sum(i) % k == sum(j) % k
# (sum(i) - sum(j)) % k == 0, then just check the size
# of subarray (j, i]
if i - modDict[runningSum] > 1:
return True
else:
modDict[runningSum] = i
return False
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Map solution from https://leetcode.com/problems/continuous-subarray-sum/discuss/99499/Java-O(n)-time-O(k)-space
# iterate through the input array exactly once, keeping track of the running sum mod k of
# the elements in the process. If we find that a running sum value at index j has been
# previously seen before in some earlier index i in the array, then we know that the
# sub-array (i,j] contains a desired sum.
#
# O(n) time, O(k) space | en | 0.782381 | # ------------------------------ # 523. Continuous Subarray Sum # # Description: # Given a list of non-negative numbers and a target integer k, write a function to check if # the array has a continuous subarray of size at least 2 that sums up to a multiple of k, # that is, sums up to n*k where n is also an integer. # # Example 1: # Input: [23, 2, 4, 6, 7], k=6 # Output: True # Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6. # # Example 2: # Input: [23, 2, 6, 4, 7], k=6 # Output: True # Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and sums up to 42. # # Note: # The length of the array won't exceed 10,000. # You may assume the sum of all the numbers is in the range of a signed 32-bit integer. # # Version: 1.0 # 10/28/19 by Jianfa # ------------------------------ # if an array sums up to n*k, an initial value is required # for i > j, if sum(i) % k == sum(j) % k # (sum(i) - sum(j)) % k == 0, then just check the size # of subarray (j, i] # Used for testing # ------------------------------ # Summary: # Map solution from https://leetcode.com/problems/continuous-subarray-sum/discuss/99499/Java-O(n)-time-O(k)-space # iterate through the input array exactly once, keeping track of the running sum mod k of # the elements in the process. If we find that a running sum value at index j has been # previously seen before in some earlier index i in the array, then we know that the # sub-array (i,j] contains a desired sum. # # O(n) time, O(k) space | 3.899974 | 4 |
tests/nails/views/test_delete_feedback_view.py | borislavstoychev/nails_project | 0 | 6619806 | from django.urls import reverse
from nails_project.nails.models import Feedback
from tests.base.mixins import NailsTestUtils, UserTestUtils
from tests.base.tests import NailsProjectTestCase
class NailsDeleteTest(NailsTestUtils, UserTestUtils, NailsProjectTestCase):
def test_NailsDeleteVieName_and_templateName(self):
self.client.force_login(self.user)
nails = self.create_feedback(
type=Feedback.MANICURE,
feedback='Test',
description='Test nails description',
image='path/to/image.png',
user=self.user,
)
response = self.client.get(reverse('feedback delete', kwargs={'pk': nails.id}))
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, template_name='nails/feedback_delete.html')
def test_deleteNails_whenNailsDoesNotExists_shouldBeNotFound(self):
self.client.force_login(self.user)
response = self.client.get(reverse('feedback delete', kwargs={
'pk': 1,
}))
self.assertEqual(404, response.status_code)
def test_deleteNails_whenNailsExistsAndIsOwner_shouldReturnAllNails(self):
self.client.force_login(self.user)
nails = self.create_feedback(
type=Feedback.MANICURE,
feedback='Test',
description='Test nails description',
image='path/to/image.png',
user=self.user,
)
response = self.client.post(reverse('feedback delete', kwargs={
'pk': nails.id,
}))
self.assertEqual(302, response.status_code)
nails_exists = Feedback.objects.filter(
id=nails.id
) \
.exists()
self.assertFalse(nails_exists)
self.assertEqual('/feedback/', response.url)
def test_deleteNails_whenNailsExistsAndNotOwner_shouldReturnForbidden(self):
self.client.force_login(self.user)
nails_user = self.create_user(email='<EMAIL>', password='12345qwe', is_active=True)
nails = self.create_feedback(
type=Feedback.MANICURE,
feedback='Test',
description='Test nails description',
image='path/to/image.png',
user=nails_user,
)
response = self.client.get(reverse('feedback delete', kwargs={
'pk': nails.id,
}))
self.assertEqual(403, response.status_code)
nails_exists = Feedback.objects.filter(
id=nails.id
) \
.exists()
self.assertTrue(nails_exists)
| from django.urls import reverse
from nails_project.nails.models import Feedback
from tests.base.mixins import NailsTestUtils, UserTestUtils
from tests.base.tests import NailsProjectTestCase
class NailsDeleteTest(NailsTestUtils, UserTestUtils, NailsProjectTestCase):
def test_NailsDeleteVieName_and_templateName(self):
self.client.force_login(self.user)
nails = self.create_feedback(
type=Feedback.MANICURE,
feedback='Test',
description='Test nails description',
image='path/to/image.png',
user=self.user,
)
response = self.client.get(reverse('feedback delete', kwargs={'pk': nails.id}))
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, template_name='nails/feedback_delete.html')
def test_deleteNails_whenNailsDoesNotExists_shouldBeNotFound(self):
self.client.force_login(self.user)
response = self.client.get(reverse('feedback delete', kwargs={
'pk': 1,
}))
self.assertEqual(404, response.status_code)
def test_deleteNails_whenNailsExistsAndIsOwner_shouldReturnAllNails(self):
self.client.force_login(self.user)
nails = self.create_feedback(
type=Feedback.MANICURE,
feedback='Test',
description='Test nails description',
image='path/to/image.png',
user=self.user,
)
response = self.client.post(reverse('feedback delete', kwargs={
'pk': nails.id,
}))
self.assertEqual(302, response.status_code)
nails_exists = Feedback.objects.filter(
id=nails.id
) \
.exists()
self.assertFalse(nails_exists)
self.assertEqual('/feedback/', response.url)
def test_deleteNails_whenNailsExistsAndNotOwner_shouldReturnForbidden(self):
self.client.force_login(self.user)
nails_user = self.create_user(email='<EMAIL>', password='12345qwe', is_active=True)
nails = self.create_feedback(
type=Feedback.MANICURE,
feedback='Test',
description='Test nails description',
image='path/to/image.png',
user=nails_user,
)
response = self.client.get(reverse('feedback delete', kwargs={
'pk': nails.id,
}))
self.assertEqual(403, response.status_code)
nails_exists = Feedback.objects.filter(
id=nails.id
) \
.exists()
self.assertTrue(nails_exists)
| none | 1 | 2.147599 | 2 | |
test/optimizer/test_assign_name_optimizer.py | bluesheeptoken/PyGolf | 7 | 6619807 | from unittest import TestCase
import astroid
from pygolf.name_finder import NameFinder
from pygolf.optimizers.assign_name_optimizer import AssignNameOptimizer
from pygolf.rules import RenameAssignName, RenameName
class TestAssignNameOptimizer(TestCase):
def test_visit_assign_name(self):
assign_name_optimizer = AssignNameOptimizer(NameFinder())
assign_name = astroid.extract_node("a=5").targets[0]
assign_name_optimizer.visit(assign_name)
self.assertEqual(assign_name_optimizer.names, ["a"])
def test_generate_rules(self):
assign_name_optimizer = AssignNameOptimizer(NameFinder())
next_name = assign_name_optimizer.name_finder.next_name()
assign_name_optimizer.visit(astroid.extract_node("long_name=3").targets[0])
assign_name_optimizer.visit(astroid.extract_node("a=3").targets[0])
rules = list(assign_name_optimizer.generate_rules())
self.assertEqual(rules, [RenameAssignName("long_name", next_name), RenameName("long_name", next_name)])
| from unittest import TestCase
import astroid
from pygolf.name_finder import NameFinder
from pygolf.optimizers.assign_name_optimizer import AssignNameOptimizer
from pygolf.rules import RenameAssignName, RenameName
class TestAssignNameOptimizer(TestCase):
def test_visit_assign_name(self):
assign_name_optimizer = AssignNameOptimizer(NameFinder())
assign_name = astroid.extract_node("a=5").targets[0]
assign_name_optimizer.visit(assign_name)
self.assertEqual(assign_name_optimizer.names, ["a"])
def test_generate_rules(self):
assign_name_optimizer = AssignNameOptimizer(NameFinder())
next_name = assign_name_optimizer.name_finder.next_name()
assign_name_optimizer.visit(astroid.extract_node("long_name=3").targets[0])
assign_name_optimizer.visit(astroid.extract_node("a=3").targets[0])
rules = list(assign_name_optimizer.generate_rules())
self.assertEqual(rules, [RenameAssignName("long_name", next_name), RenameName("long_name", next_name)])
| none | 1 | 2.829406 | 3 | |
hip-edit-infra/services.py | 3pillarlabs/hip-edit | 1 | 6619808 | <gh_stars>1-10
"""
Builds the infrastructure for Hip Edit backing services.
"""
from __future__ import print_function
import logging
from os import path
from hip_edit import activemq
from hip_edit import cli_arg_parser
from hip_edit import cf_template_builder
from hip_edit import cf_driver
from hip_edit import log
from hip_edit.build_context import BuildContext
LOGGER = log.get_stream_logger(__name__)
def main():
    """
    Entry point
    """
    # Parse CLI flags; --verbose raises the root logger to DEBUG.
    cli_options = cli_arg_parser.services_arg_parser().parse_args()
    logging.root.setLevel(logging.DEBUG if cli_options.verbose else logging.INFO)
    if not cli_options.stack_down():
        if cli_options.stack_halt():
            # Halting stops the ActiveMQ instance and releases its Elastic IP,
            # so demand explicit confirmation before building the template.
            if confirm("""You are going to stop the ActveMQ instance and release the EIP forever.
Is this what you want?""") != 'yes':
                LOGGER.info('No changes made.')
                return
        template = cf_template_builder.build(cli_options)
    else:
        # Tearing the stack down is irreversible — confirm first.
        if confirm("""You are going to destroy all stack resources and
this operation can not be done. Is this what you want?""") != 'yes':
            LOGGER.info('No changes made.')
            return
        # A None template tells the driver to delete rather than create/update.
        template = None
    outputs = cf_driver.execute(cli_options, template)
    if outputs is None or cli_options.stack_down():
        # Nothing further to record after a failed run or a teardown.
        return
    # Persist the CloudFormation outputs for later invocations.
    build_ctx = BuildContext()
    build_ctx.add('services', outputs).save()
    activemq_instance_id = build_ctx.get('MessageServerInstanceId', group_key='services')
    if cli_options.stack_up():
        # Bring-up path: wait for the instance, then configure ActiveMQ users
        # from the bundled templates and store them in the build context.
        activemq.check_instance_status(instance_id=activemq_instance_id)
        hostname = build_ctx.get('npm_config_messaging_host')
        outputs = activemq.configure(cli_options, hostname,
                                     templates_path=path.abspath('./artifacts/activemq'),
                                     distribution_type='bitnami')
        build_ctx.add(('services', 'activemq', 'users'), outputs).save()
    else:
        # Halt path: stop the instance without reconfiguring it.
        activemq.halt_instance(instance_id=activemq_instance_id)
def confirm(message, prompt=' ([no]/yes) '):
    """Strip per-line indentation from *message*, print it, and return the user's reply."""
    lines = message.split("\n")
    print("\n".join(s.strip() for s in lines), end='')
    return raw_input(prompt)
if __name__ == '__main__':
main()
| """
Builds the infrastructure for Hip Edit backing services.
"""
from __future__ import print_function
import logging
from os import path
from hip_edit import activemq
from hip_edit import cli_arg_parser
from hip_edit import cf_template_builder
from hip_edit import cf_driver
from hip_edit import log
from hip_edit.build_context import BuildContext
LOGGER = log.get_stream_logger(__name__)
def main():
"""
Entry point
"""
cli_options = cli_arg_parser.services_arg_parser().parse_args()
logging.root.setLevel(logging.DEBUG if cli_options.verbose else logging.INFO)
if not cli_options.stack_down():
if cli_options.stack_halt():
if confirm("""You are going to stop the ActveMQ instance and release the EIP forever.
Is this what you want?""") != 'yes':
LOGGER.info('No changes made.')
return
template = cf_template_builder.build(cli_options)
else:
if confirm("""You are going to destroy all stack resources and
this operation can not be done. Is this what you want?""") != 'yes':
LOGGER.info('No changes made.')
return
template = None
outputs = cf_driver.execute(cli_options, template)
if outputs is None or cli_options.stack_down():
return
build_ctx = BuildContext()
build_ctx.add('services', outputs).save()
activemq_instance_id = build_ctx.get('MessageServerInstanceId', group_key='services')
if cli_options.stack_up():
activemq.check_instance_status(instance_id=activemq_instance_id)
hostname = build_ctx.get('npm_config_messaging_host')
outputs = activemq.configure(cli_options, hostname,
templates_path=path.abspath('./artifacts/activemq'),
distribution_type='bitnami')
build_ctx.add(('services', 'activemq', 'users'), outputs).save()
else:
activemq.halt_instance(instance_id=activemq_instance_id)
def confirm(message, prompt=' ([no]/yes) '):
"""Prints a message and returns user input."""
print("\n".join((s.strip() for s in message.split("\n"))), end='')
return raw_input(prompt)
if __name__ == '__main__':
main() | en | 0.899702 | Builds the infrastructure for Hip Edit backing services. Entry point You are going to stop the ActveMQ instance and release the EIP forever. Is this what you want? You are going to destroy all stack resources and this operation can not be done. Is this what you want? Prints a message and returns user input. | 1.994547 | 2 |
server/webserver/views/__init__.py | htl-anichstrasse/HTLCatcher | 5 | 6619809 | <reponame>htl-anichstrasse/HTLCatcher
from .api import *
from .home import * | from .api import *
from .home import * | none | 1 | 1.130734 | 1 | |
remove_symbol_wiki.py | interxuxing/qa_education | 1 | 6619810 | <reponame>interxuxing/qa_education
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import re
import io
reload(sys)
sys.setdefaultencoding('utf-8')
def pre_process(input_file, output_file):
multi_version = re.compile(ur'-\{.*?(zh-hans|zh-cn):([^;]*?)(;.*?)?\}-')
punctuation = re.compile(u"[-~!@#$%^&*()_+`=\[\]\\\{\}\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
with io.open(output_file, mode = 'w', encoding = 'utf-8') as outfile:
with io.open(input_file, mode = 'r', encoding ='utf-8') as infile:
for line in infile:
line = multi_version.sub(ur'\2', line)
line = punctuation.sub('', line.decode('utf8'))
outfile.write(line)
# Script usage: python script.py input_file output_file
if __name__ == '__main__':
    if len(sys.argv) != 3:
        # Expect exactly two positional arguments besides the script name.
        print "Usage: python script.py input_file output_file"
        sys.exit()
    input_file, output_file = sys.argv[1], sys.argv[2]
    pre_process(input_file, output_file)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import re
import io
reload(sys)
sys.setdefaultencoding('utf-8')
def pre_process(input_file, output_file):
multi_version = re.compile(ur'-\{.*?(zh-hans|zh-cn):([^;]*?)(;.*?)?\}-')
punctuation = re.compile(u"[-~!@#$%^&*()_+`=\[\]\\\{\}\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
with io.open(output_file, mode = 'w', encoding = 'utf-8') as outfile:
with io.open(input_file, mode = 'r', encoding ='utf-8') as infile:
for line in infile:
line = multi_version.sub(ur'\2', line)
line = punctuation.sub('', line.decode('utf8'))
outfile.write(line)
if __name__ == '__main__':
if len(sys.argv) != 3:
print "Usage: python script.py input_file output_file"
sys.exit()
input_file, output_file = sys.argv[1], sys.argv[2]
pre_process(input_file, output_file) | zh | 0.515172 | #!/usr/bin/python # -*- coding: utf-8 -*- #$%^&*()_+`=\[\]\\\{\}\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]") | 3.158092 | 3 |
web-server/data_logger/models_old.py | HampusHellstrom/home-automation | 0 | 6619811 | <reponame>HampusHellstrom/home-automation
from django.db import models
from django.contrib.auth.models import User
from datetime.datetime import now
# Create your models here.
class Location(models.Model):
    """A named place belonging to a user (e.g. a room or a greenhouse)."""

    class Meta:
        # A user cannot have two locations with the same name.
        unique_together = (("user", "name"),)
        index_together = (("user", "name"),)

    def __str__(self):
        return f"{self.user.username} - {self.name}"

    # Owning user; deleting the user cascades to their locations.
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             related_name="locations")
    name = models.CharField(max_length=32)
class Device(models.Model):
    """A physical device installed at a location."""

    # Deleting a location removes its devices as well.
    location = models.ForeignKey(Location,
                                 on_delete=models.CASCADE,
                                 related_name="devices")
    name = models.CharField(max_length=32)

    class Meta:
        # Device names are unique within a location.
        unique_together = (("location", "name"),)
        index_together = (("location", "name"),)

    def __str__(self):
        return f"{self.location.name} - {self.name}"
class Probe(models.Model):
    """A single sensor channel on a device, measuring one physical unit."""

    device = models.ForeignKey(Device,
                               on_delete=models.CASCADE,
                               related_name="probes")
    name = models.CharField(max_length=32)

    class Units(models.IntegerChoices):
        # NOTE(review): "CESLIUS" looks like a typo for CELSIUS; renaming it
        # would change the stored choice label/name, so it is only flagged here.
        CESLIUS = 1
        HUMIDITY = 2
        LUMEN = 3
        MOISTURE = 4

    # Which of the Units this probe reports its values in.
    unit = models.IntegerField(choices=Units.choices)

    def __str__(self):
        return f"{self.device.location.name} - {self.device.name} - {self.name}"
class Measurement(models.Model):
    """One timestamped sensor reading from a probe."""

    probe = models.ForeignKey(Probe,
                              on_delete=models.CASCADE,
                              related_name="measurements")
    # Set automatically by Django on first save (auto_now_add).
    datetime = models.DateTimeField(auto_now_add=True)
    value = models.DecimalField(max_digits=16, decimal_places=3)

    def __str__(self):
        return f"{self.datetime}"

    def save(self, *args, **kwargs):
        # The original tested the bare (unbound) name ``datetime`` and called
        # ``super(MyModel, ...)`` with an undefined class; both are fixed so a
        # missing timestamp is backfilled on the instance before saving.
        # NOTE: with auto_now_add=True Django normally fills this field
        # itself, so this is only a safety net for manual instantiation.
        if self.datetime is None:
            self.datetime = now()
        super(Measurement, self).save(*args, **kwargs)
| from django.db import models
from django.contrib.auth.models import User
from datetime.datetime import now
# Create your models here.
class Location(models.Model):
class Meta:
unique_together = (("user", "name"),)
index_together = (("user", "name"),)
def __str__(self):
return f"{self.user.username} - {self.name}"
user = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="locations")
name = models.CharField(max_length=32)
class Device(models.Model):
location = models.ForeignKey(Location,
on_delete=models.CASCADE,
related_name="devices")
name = models.CharField(max_length=32)
class Meta:
unique_together = (("location", "name"),)
index_together = (("location", "name"),)
def __str__(self):
return f"{self.location.name} - {self.name}"
class Probe(models.Model):
device = models.ForeignKey(Device,
on_delete=models.CASCADE,
related_name="probes")
name = models.CharField(max_length=32)
class Units(models.IntegerChoices):
CESLIUS = 1
HUMIDITY = 2
LUMEN = 3
MOISTURE = 4
unit = models.IntegerField(choices=Units.choices)
def __str__(self):
return f"{self.device.location.name} - {self.device.name} - {self.name}"
class Measurement(models.Model):
probe = models.ForeignKey(Probe,
on_delete=models.CASCADE,
related_name="measurements")
datetime = models.DateTimeField(auto_now_add=True)
value = models.DecimalField(max_digits=16, decimal_places=3)
def __str__(self):
return f"{self.datetime}"
def save(self, *args, **kwargs):
if datetime is None:
datetime = now()
super(MyModel, self).save(*args, **kwargs) | en | 0.963489 | # Create your models here. | 2.617872 | 3 |
source/rttov_test/profile-datasets-py/standard101lev_allgas/004.py | bucricket/projectMAScorrection | 0 | 6619812 | <gh_stars>0
"""
Profile ../profile-datasets-py/standard101lev_allgas/004.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/standard101lev_allgas/004.py"
self["Q"] = numpy.array([ 1.39778500e+00, 2.18846500e+00, 2.83725700e+00,
3.37677000e+00, 3.93222000e+00, 4.31446500e+00,
4.60207500e+00, 4.81701700e+00, 4.89678000e+00,
4.94801600e+00, 4.99466700e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.98947600e+00,
4.97174200e+00, 4.95470000e+00, 4.93853300e+00,
4.92304600e+00, 4.90810600e+00, 4.88444100e+00,
4.85003500e+00, 4.83672500e+00, 4.82086500e+00,
4.80227200e+00, 4.74755700e+00, 4.68929600e+00,
4.63291700e+00, 4.57798000e+00, 4.52435100e+00,
4.44448700e+00, 4.34310900e+00, 4.23025100e+00,
4.10958300e+00, 4.03841800e+00, 4.01555400e+00,
3.99998400e+00, 3.99998400e+00, 3.99998400e+00,
3.99998400e+00, 3.99998400e+00, 4.10476000e+00,
4.27987700e+00, 4.45471400e+00, 5.03792900e+00,
5.60934500e+00, 6.79580500e+00, 9.37504000e+00,
1.19043100e+01, 1.76134000e+01, 2.72870900e+01,
3.67805900e+01, 5.37274400e+01, 8.17325000e+01,
1.09234400e+02, 1.49947500e+02, 2.34515500e+02,
3.17607200e+02, 3.99266000e+02, 5.20933200e+02,
6.40626300e+02, 7.58310800e+02, 9.02251400e+02,
1.05746100e+03, 1.21013400e+03, 1.38397100e+03,
1.64054500e+03, 1.89299000e+03, 2.14141300e+03,
2.44337700e+03, 2.76403300e+03, 3.07966600e+03,
3.39602900e+03, 3.78104800e+03, 4.16012900e+03,
4.53340800e+03, 4.94005700e+03, 5.43776400e+03,
5.92790300e+03, 6.41064500e+03, 6.89169100e+03,
7.37441400e+03, 7.84999800e+03, 8.31860000e+03,
8.88759300e+03, 9.65833000e+03, 1.04175000e+04,
1.11653500e+04, 1.19021300e+04, 1.26280700e+04,
1.33434100e+04, 1.40483500e+04])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017200e+02,
1.10236600e+02, 1.17777500e+02, 1.25645600e+02,
1.33846200e+02, 1.42384800e+02, 1.51266400e+02,
1.60495900e+02, 1.70078400e+02, 1.80018300e+02,
1.90320300e+02, 2.00988700e+02, 2.12027700e+02,
2.23441500e+02, 2.35233800e+02, 2.47408500e+02,
2.59969100e+02, 2.72919100e+02, 2.86261700e+02,
3.00000000e+02, 3.14136900e+02, 3.28675300e+02,
3.43617600e+02, 3.58966500e+02, 3.74724100e+02,
3.90892600e+02, 4.07473800e+02, 4.24469800e+02,
4.41881900e+02, 4.59711800e+02, 4.77960700e+02,
4.96629800e+02, 5.15720000e+02, 5.35232200e+02,
5.55166900e+02, 5.75524800e+02, 5.96306200e+02,
6.17511200e+02, 6.39139800e+02, 6.61192000e+02,
6.83667300e+02, 7.06565400e+02, 7.29885700e+02,
7.53627500e+02, 7.77789700e+02, 8.02371400e+02,
8.27371300e+02, 8.52788000e+02, 8.78620100e+02,
9.04865900e+02, 9.31523600e+02, 9.58591100e+02,
9.86066600e+02, 1.01394800e+03, 1.04223200e+03,
1.07091700e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 395.303 , 398.5376, 399.9989, 399.9986, 399.9984, 399.9983,
399.9982, 399.9981, 399.998 , 399.998 , 399.998 , 399.998 ,
399.998 , 399.998 , 399.998 , 399.998 , 399.998 , 399.998 ,
399.998 , 399.998 , 399.998 , 399.998 , 399.998 , 399.998 ,
399.998 , 399.998 , 399.998 , 399.998 , 399.998 , 399.998 ,
399.9981, 399.9981, 399.9981, 399.9981, 399.9981, 399.9981,
399.9981, 399.9982, 399.9982, 399.9982, 399.9983, 399.9983,
399.9984, 399.9984, 399.9984, 399.9984, 399.9984, 399.9984,
399.9984, 399.9984, 399.9984, 399.9983, 399.9982, 399.998 ,
399.9978, 399.9973, 399.9962, 399.9952, 399.993 , 399.9891,
399.9853, 399.9785, 399.9673, 399.9563, 399.94 , 399.9062,
399.873 , 399.8403, 399.7916, 399.7437, 399.6967, 399.6391,
399.577 , 399.5159, 399.4464, 399.3438, 399.2428, 399.1434,
399.0226, 398.8944, 398.7681, 398.6416, 398.4876, 398.3359,
398.1866, 398.024 , 397.8249, 397.6288, 397.4357, 397.2433,
397.0502, 396.86 , 396.6726, 396.445 , 396.1367, 395.833 ,
395.5339, 395.2391, 394.9488, 394.6626, 394.3807])
self["CO"] = numpy.array([ 1.718423 , 0.6585194 , 0.2608156 , 0.1116929 , 0.06896279,
0.05251643, 0.0431507 , 0.03738407, 0.03448724, 0.03228408,
0.03022448, 0.02856004, 0.02721113, 0.02606151, 0.02504537,
0.02411293, 0.02323233, 0.02238782, 0.02155986, 0.02071359,
0.01990682, 0.01913014, 0.01838728, 0.01774604, 0.01718211,
0.01664016, 0.01617181, 0.01574126, 0.01532594, 0.01475815,
0.01400121, 0.01338374, 0.01284213, 0.01237733, 0.01231994,
0.0124257 , 0.01298385, 0.01378951, 0.01495864, 0.01664696,
0.01876071, 0.02111869, 0.02364307, 0.0262315 , 0.02888374,
0.03184939, 0.03559238, 0.03925051, 0.04385469, 0.04840493,
0.05339075, 0.05864039, 0.0637837 , 0.06920194, 0.07451056,
0.0793932 , 0.0834562 , 0.08744049, 0.0911178 , 0.09443462,
0.09768966, 0.1008795 , 0.1040035 , 0.1070713 , 0.1100576 ,
0.1129032 , 0.1156991 , 0.1184468 , 0.120334 , 0.1221875 ,
0.1240099 , 0.125402 , 0.1265778 , 0.1277343 , 0.128716 ,
0.129117 , 0.1295114 , 0.1298996 , 0.1301604 , 0.1303675 ,
0.1305714 , 0.1308251 , 0.1317672 , 0.1326948 , 0.1336082 ,
0.1346063 , 0.1358348 , 0.1370446 , 0.1382361 , 0.1394278 ,
0.1406307 , 0.1418157 , 0.1429834 , 0.1441187 , 0.1452071 ,
0.1462791 , 0.1473352 , 0.1483756 , 0.1494007 , 0.1504109 ,
0.1514063 ])
self["T"] = numpy.array([ 162.6005, 176.7926, 198.8618, 219.1338, 237.4638, 254.1662,
265.9963, 272.9359, 275.4977, 277.1373, 276.3062, 274.2434,
271.376 , 267.4988, 262.519 , 257.9483, 253.6863, 249.7804,
246.1547, 242.8788, 239.8364, 237.8228, 235.8969, 234.2391,
232.7849, 231.3874, 230.3363, 229.4381, 228.5715, 227.6339,
226.6017, 225.6724, 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 226.1054, 228.3435, 230.5416, 232.7185, 234.9151,
237.0737, 239.1955, 241.3109, 243.391 , 245.4367, 247.4919,
249.5347, 251.5447, 253.5406, 255.5692, 257.5663, 259.5325,
261.1752, 262.6731, 264.1485, 265.6035, 267.0577, 268.4907,
269.9027, 271.3045, 272.7116, 274.0987, 275.4662, 276.8245,
278.1799, 279.5165, 280.8347, 282.1522, 283.4855, 284.8009,
286.0987, 287.3791, 288.6426, 289.8895, 291.12 ])
self["N2O"] = numpy.array([ 0.00040468, 0.00048883, 0.00058166, 0.00069874, 0.00086617,
0.00109252, 0.00137472, 0.0017131 , 0.00224707, 0.00277481,
0.0043708 , 0.00585835, 0.00775866, 0.0100121 , 0.01252824,
0.01647644, 0.02082706, 0.02654339, 0.03194876, 0.03705368,
0.04213625, 0.04947973, 0.05650347, 0.06232 , 0.06723582,
0.07195995, 0.07658348, 0.08106857, 0.0853951 , 0.09122328,
0.09891654, 0.1063967 , 0.1137228 , 0.1209119 , 0.1287189 ,
0.1373174 , 0.1495517 , 0.162199 , 0.1756064 , 0.1877479 ,
0.1987982 , 0.2075157 , 0.2144662 , 0.2202816 , 0.2249916 ,
0.2291698 , 0.2323032 , 0.2353657 , 0.2386974 , 0.2419715 ,
0.2447193 , 0.2470931 , 0.2494157 , 0.251485 , 0.2535125 ,
0.2554543 , 0.2572556 , 0.2590219 , 0.2607514 , 0.2624443 ,
0.2641057 , 0.2657583 , 0.2674133 , 0.2690387 , 0.2706742 ,
0.2724082 , 0.2741119 , 0.2757862 , 0.2776826 , 0.2795473 ,
0.2813808 , 0.2836142 , 0.2860189 , 0.2883843 , 0.2909395 ,
0.2942789 , 0.2975645 , 0.3007978 , 0.3027535 , 0.3041777 ,
0.3055795 , 0.3068945 , 0.3073393 , 0.3077773 , 0.3082086 ,
0.3084686 , 0.3083143 , 0.3081623 , 0.3080127 , 0.3078636 ,
0.3077139 , 0.3075665 , 0.3074212 , 0.3072448 , 0.3070059 ,
0.3067706 , 0.3065387 , 0.3063103 , 0.3060853 , 0.3058635 ,
0.305645 ])
self["O3"] = numpy.array([ 0.602447 , 0.1853845 , 0.2457545 , 0.4438738 , 0.7612741 ,
1.051582 , 1.345846 , 1.652907 , 2.074425 , 2.484314 ,
3.125666 , 3.952513 , 4.850892 , 5.832665 , 6.909374 ,
7.44281 , 7.787614 , 7.734832 , 7.583818 , 7.219831 ,
6.859912 , 6.366786 , 5.89513 , 5.61598 , 5.47411 ,
5.337771 , 5.116889 , 4.869094 , 4.630059 , 4.406766 ,
4.200328 , 3.868694 , 3.578486 , 3.330589 , 2.985461 ,
2.635894 , 2.297619 , 2.011996 , 1.797477 , 1.589007 ,
1.386249 , 1.216317 , 1.071516 , 0.9652995 , 0.8967071 ,
0.8300509 , 0.7656645 , 0.702738 , 0.6589931 , 0.6170167 ,
0.5767139 , 0.5377987 , 0.4997229 , 0.4658583 , 0.432679 ,
0.3985501 , 0.3614503 , 0.3250691 , 0.2877994 , 0.2492341 ,
0.2113872 , 0.1829111 , 0.1678796 , 0.1531182 , 0.1382119 ,
0.1222441 , 0.106555 , 0.09113652, 0.0868852 , 0.0827293 ,
0.07864314, 0.07497781, 0.07154376, 0.06816581, 0.06477232,
0.0611795 , 0.05764448, 0.0541658 , 0.05146155, 0.04909367,
0.04676288, 0.04451503, 0.04291227, 0.04133422, 0.03978033,
0.0382993 , 0.03696362, 0.03564824, 0.03435271, 0.03313363,
0.0320238 , 0.03093038, 0.02985302, 0.02870847, 0.02741848,
0.02614785, 0.02489615, 0.02366299, 0.02244797, 0.0212507 ,
0.02007082])
self["CH4"] = numpy.array([ 0.1649998, 0.1649996, 0.1649995, 0.1649994, 0.1649994,
0.1649993, 0.1649992, 0.1649992, 0.1732367, 0.1822543,
0.2160282, 0.2568352, 0.3015865, 0.3504176, 0.4034186,
0.4564692, 0.5056263, 0.5482428, 0.5867455, 0.6191768,
0.6497108, 0.6747534, 0.6987056, 0.7204886, 0.740542 ,
0.7598135, 0.778272 , 0.7960234, 0.8131473, 0.8407853,
0.8813573, 0.922313 , 0.9630486, 1.003679 , 1.050962 ,
1.098925 , 1.149593 , 1.199763 , 1.249907 , 1.294975 ,
1.335678 , 1.371403 , 1.40326 , 1.432225 , 1.458382 ,
1.483069 , 1.505261 , 1.526949 , 1.548191 , 1.568969 ,
1.58687 , 1.602708 , 1.61821 , 1.632696 , 1.646888 ,
1.660906 , 1.674893 , 1.68861 , 1.702203 , 1.715717 ,
1.728979 , 1.741928 , 1.75454 , 1.766925 , 1.777374 ,
1.782055 , 1.786655 , 1.791175 , 1.797693 , 1.804106 ,
1.810411 , 1.81729 , 1.824383 , 1.83136 , 1.837692 ,
1.841994 , 1.846226 , 1.850391 , 1.853351 , 1.855798 ,
1.858207 , 1.860424 , 1.86059 , 1.860754 , 1.860916 ,
1.860762 , 1.859831 , 1.858915 , 1.858012 , 1.857113 ,
1.85621 , 1.855321 , 1.854444 , 1.85338 , 1.851939 ,
1.850519 , 1.849121 , 1.847743 , 1.846386 , 1.845048 , 1.84373 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 4
self["ISH"] = 2
self["ELEVATION"] = 0.5
self["S2M"]["T"] = 287.2
self["S2M"]["Q"] = 12774.695441
self["S2M"]["O"] = 0.0208501984323
self["S2M"]["P"] = 1050.0
self["S2M"]["U"] = 4.0
self["S2M"]["V"] = 1.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 0
self["SKIN"]["T"] = 295.2
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.2
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 55.0
self["AZANGLE"] = 25.0
self["SUNZENANGLE"] = 15.0
self["SUNAZANGLE"] = 120.0
self["LATITUDE"] = 60.0
self["GAS_UNITS"] = 2
self["BE"] = 0.2
self["COSBK"] = 0.5
self["DATE"] = numpy.array([1966, 7, 1])
self["TIME"] = numpy.array([23, 0, 0])
| """
Profile ../profile-datasets-py/standard101lev_allgas/004.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/standard101lev_allgas/004.py"
self["Q"] = numpy.array([ 1.39778500e+00, 2.18846500e+00, 2.83725700e+00,
3.37677000e+00, 3.93222000e+00, 4.31446500e+00,
4.60207500e+00, 4.81701700e+00, 4.89678000e+00,
4.94801600e+00, 4.99466700e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.99997500e+00,
4.99997500e+00, 4.99997500e+00, 4.98947600e+00,
4.97174200e+00, 4.95470000e+00, 4.93853300e+00,
4.92304600e+00, 4.90810600e+00, 4.88444100e+00,
4.85003500e+00, 4.83672500e+00, 4.82086500e+00,
4.80227200e+00, 4.74755700e+00, 4.68929600e+00,
4.63291700e+00, 4.57798000e+00, 4.52435100e+00,
4.44448700e+00, 4.34310900e+00, 4.23025100e+00,
4.10958300e+00, 4.03841800e+00, 4.01555400e+00,
3.99998400e+00, 3.99998400e+00, 3.99998400e+00,
3.99998400e+00, 3.99998400e+00, 4.10476000e+00,
4.27987700e+00, 4.45471400e+00, 5.03792900e+00,
5.60934500e+00, 6.79580500e+00, 9.37504000e+00,
1.19043100e+01, 1.76134000e+01, 2.72870900e+01,
3.67805900e+01, 5.37274400e+01, 8.17325000e+01,
1.09234400e+02, 1.49947500e+02, 2.34515500e+02,
3.17607200e+02, 3.99266000e+02, 5.20933200e+02,
6.40626300e+02, 7.58310800e+02, 9.02251400e+02,
1.05746100e+03, 1.21013400e+03, 1.38397100e+03,
1.64054500e+03, 1.89299000e+03, 2.14141300e+03,
2.44337700e+03, 2.76403300e+03, 3.07966600e+03,
3.39602900e+03, 3.78104800e+03, 4.16012900e+03,
4.53340800e+03, 4.94005700e+03, 5.43776400e+03,
5.92790300e+03, 6.41064500e+03, 6.89169100e+03,
7.37441400e+03, 7.84999800e+03, 8.31860000e+03,
8.88759300e+03, 9.65833000e+03, 1.04175000e+04,
1.11653500e+04, 1.19021300e+04, 1.26280700e+04,
1.33434100e+04, 1.40483500e+04])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017200e+02,
1.10236600e+02, 1.17777500e+02, 1.25645600e+02,
1.33846200e+02, 1.42384800e+02, 1.51266400e+02,
1.60495900e+02, 1.70078400e+02, 1.80018300e+02,
1.90320300e+02, 2.00988700e+02, 2.12027700e+02,
2.23441500e+02, 2.35233800e+02, 2.47408500e+02,
2.59969100e+02, 2.72919100e+02, 2.86261700e+02,
3.00000000e+02, 3.14136900e+02, 3.28675300e+02,
3.43617600e+02, 3.58966500e+02, 3.74724100e+02,
3.90892600e+02, 4.07473800e+02, 4.24469800e+02,
4.41881900e+02, 4.59711800e+02, 4.77960700e+02,
4.96629800e+02, 5.15720000e+02, 5.35232200e+02,
5.55166900e+02, 5.75524800e+02, 5.96306200e+02,
6.17511200e+02, 6.39139800e+02, 6.61192000e+02,
6.83667300e+02, 7.06565400e+02, 7.29885700e+02,
7.53627500e+02, 7.77789700e+02, 8.02371400e+02,
8.27371300e+02, 8.52788000e+02, 8.78620100e+02,
9.04865900e+02, 9.31523600e+02, 9.58591100e+02,
9.86066600e+02, 1.01394800e+03, 1.04223200e+03,
1.07091700e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 395.303 , 398.5376, 399.9989, 399.9986, 399.9984, 399.9983,
399.9982, 399.9981, 399.998 , 399.998 , 399.998 , 399.998 ,
399.998 , 399.998 , 399.998 , 399.998 , 399.998 , 399.998 ,
399.998 , 399.998 , 399.998 , 399.998 , 399.998 , 399.998 ,
399.998 , 399.998 , 399.998 , 399.998 , 399.998 , 399.998 ,
399.9981, 399.9981, 399.9981, 399.9981, 399.9981, 399.9981,
399.9981, 399.9982, 399.9982, 399.9982, 399.9983, 399.9983,
399.9984, 399.9984, 399.9984, 399.9984, 399.9984, 399.9984,
399.9984, 399.9984, 399.9984, 399.9983, 399.9982, 399.998 ,
399.9978, 399.9973, 399.9962, 399.9952, 399.993 , 399.9891,
399.9853, 399.9785, 399.9673, 399.9563, 399.94 , 399.9062,
399.873 , 399.8403, 399.7916, 399.7437, 399.6967, 399.6391,
399.577 , 399.5159, 399.4464, 399.3438, 399.2428, 399.1434,
399.0226, 398.8944, 398.7681, 398.6416, 398.4876, 398.3359,
398.1866, 398.024 , 397.8249, 397.6288, 397.4357, 397.2433,
397.0502, 396.86 , 396.6726, 396.445 , 396.1367, 395.833 ,
395.5339, 395.2391, 394.9488, 394.6626, 394.3807])
self["CO"] = numpy.array([ 1.718423 , 0.6585194 , 0.2608156 , 0.1116929 , 0.06896279,
0.05251643, 0.0431507 , 0.03738407, 0.03448724, 0.03228408,
0.03022448, 0.02856004, 0.02721113, 0.02606151, 0.02504537,
0.02411293, 0.02323233, 0.02238782, 0.02155986, 0.02071359,
0.01990682, 0.01913014, 0.01838728, 0.01774604, 0.01718211,
0.01664016, 0.01617181, 0.01574126, 0.01532594, 0.01475815,
0.01400121, 0.01338374, 0.01284213, 0.01237733, 0.01231994,
0.0124257 , 0.01298385, 0.01378951, 0.01495864, 0.01664696,
0.01876071, 0.02111869, 0.02364307, 0.0262315 , 0.02888374,
0.03184939, 0.03559238, 0.03925051, 0.04385469, 0.04840493,
0.05339075, 0.05864039, 0.0637837 , 0.06920194, 0.07451056,
0.0793932 , 0.0834562 , 0.08744049, 0.0911178 , 0.09443462,
0.09768966, 0.1008795 , 0.1040035 , 0.1070713 , 0.1100576 ,
0.1129032 , 0.1156991 , 0.1184468 , 0.120334 , 0.1221875 ,
0.1240099 , 0.125402 , 0.1265778 , 0.1277343 , 0.128716 ,
0.129117 , 0.1295114 , 0.1298996 , 0.1301604 , 0.1303675 ,
0.1305714 , 0.1308251 , 0.1317672 , 0.1326948 , 0.1336082 ,
0.1346063 , 0.1358348 , 0.1370446 , 0.1382361 , 0.1394278 ,
0.1406307 , 0.1418157 , 0.1429834 , 0.1441187 , 0.1452071 ,
0.1462791 , 0.1473352 , 0.1483756 , 0.1494007 , 0.1504109 ,
0.1514063 ])
self["T"] = numpy.array([ 162.6005, 176.7926, 198.8618, 219.1338, 237.4638, 254.1662,
265.9963, 272.9359, 275.4977, 277.1373, 276.3062, 274.2434,
271.376 , 267.4988, 262.519 , 257.9483, 253.6863, 249.7804,
246.1547, 242.8788, 239.8364, 237.8228, 235.8969, 234.2391,
232.7849, 231.3874, 230.3363, 229.4381, 228.5715, 227.6339,
226.6017, 225.6724, 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 225.2 , 225.2 , 225.2 , 225.2 , 225.2 ,
225.2 , 226.1054, 228.3435, 230.5416, 232.7185, 234.9151,
237.0737, 239.1955, 241.3109, 243.391 , 245.4367, 247.4919,
249.5347, 251.5447, 253.5406, 255.5692, 257.5663, 259.5325,
261.1752, 262.6731, 264.1485, 265.6035, 267.0577, 268.4907,
269.9027, 271.3045, 272.7116, 274.0987, 275.4662, 276.8245,
278.1799, 279.5165, 280.8347, 282.1522, 283.4855, 284.8009,
286.0987, 287.3791, 288.6426, 289.8895, 291.12 ])
self["N2O"] = numpy.array([ 0.00040468, 0.00048883, 0.00058166, 0.00069874, 0.00086617,
0.00109252, 0.00137472, 0.0017131 , 0.00224707, 0.00277481,
0.0043708 , 0.00585835, 0.00775866, 0.0100121 , 0.01252824,
0.01647644, 0.02082706, 0.02654339, 0.03194876, 0.03705368,
0.04213625, 0.04947973, 0.05650347, 0.06232 , 0.06723582,
0.07195995, 0.07658348, 0.08106857, 0.0853951 , 0.09122328,
0.09891654, 0.1063967 , 0.1137228 , 0.1209119 , 0.1287189 ,
0.1373174 , 0.1495517 , 0.162199 , 0.1756064 , 0.1877479 ,
0.1987982 , 0.2075157 , 0.2144662 , 0.2202816 , 0.2249916 ,
0.2291698 , 0.2323032 , 0.2353657 , 0.2386974 , 0.2419715 ,
0.2447193 , 0.2470931 , 0.2494157 , 0.251485 , 0.2535125 ,
0.2554543 , 0.2572556 , 0.2590219 , 0.2607514 , 0.2624443 ,
0.2641057 , 0.2657583 , 0.2674133 , 0.2690387 , 0.2706742 ,
0.2724082 , 0.2741119 , 0.2757862 , 0.2776826 , 0.2795473 ,
0.2813808 , 0.2836142 , 0.2860189 , 0.2883843 , 0.2909395 ,
0.2942789 , 0.2975645 , 0.3007978 , 0.3027535 , 0.3041777 ,
0.3055795 , 0.3068945 , 0.3073393 , 0.3077773 , 0.3082086 ,
0.3084686 , 0.3083143 , 0.3081623 , 0.3080127 , 0.3078636 ,
0.3077139 , 0.3075665 , 0.3074212 , 0.3072448 , 0.3070059 ,
0.3067706 , 0.3065387 , 0.3063103 , 0.3060853 , 0.3058635 ,
0.305645 ])
self["O3"] = numpy.array([ 0.602447 , 0.1853845 , 0.2457545 , 0.4438738 , 0.7612741 ,
1.051582 , 1.345846 , 1.652907 , 2.074425 , 2.484314 ,
3.125666 , 3.952513 , 4.850892 , 5.832665 , 6.909374 ,
7.44281 , 7.787614 , 7.734832 , 7.583818 , 7.219831 ,
6.859912 , 6.366786 , 5.89513 , 5.61598 , 5.47411 ,
5.337771 , 5.116889 , 4.869094 , 4.630059 , 4.406766 ,
4.200328 , 3.868694 , 3.578486 , 3.330589 , 2.985461 ,
2.635894 , 2.297619 , 2.011996 , 1.797477 , 1.589007 ,
1.386249 , 1.216317 , 1.071516 , 0.9652995 , 0.8967071 ,
0.8300509 , 0.7656645 , 0.702738 , 0.6589931 , 0.6170167 ,
0.5767139 , 0.5377987 , 0.4997229 , 0.4658583 , 0.432679 ,
0.3985501 , 0.3614503 , 0.3250691 , 0.2877994 , 0.2492341 ,
0.2113872 , 0.1829111 , 0.1678796 , 0.1531182 , 0.1382119 ,
0.1222441 , 0.106555 , 0.09113652, 0.0868852 , 0.0827293 ,
0.07864314, 0.07497781, 0.07154376, 0.06816581, 0.06477232,
0.0611795 , 0.05764448, 0.0541658 , 0.05146155, 0.04909367,
0.04676288, 0.04451503, 0.04291227, 0.04133422, 0.03978033,
0.0382993 , 0.03696362, 0.03564824, 0.03435271, 0.03313363,
0.0320238 , 0.03093038, 0.02985302, 0.02870847, 0.02741848,
0.02614785, 0.02489615, 0.02366299, 0.02244797, 0.0212507 ,
0.02007082])
self["CH4"] = numpy.array([ 0.1649998, 0.1649996, 0.1649995, 0.1649994, 0.1649994,
0.1649993, 0.1649992, 0.1649992, 0.1732367, 0.1822543,
0.2160282, 0.2568352, 0.3015865, 0.3504176, 0.4034186,
0.4564692, 0.5056263, 0.5482428, 0.5867455, 0.6191768,
0.6497108, 0.6747534, 0.6987056, 0.7204886, 0.740542 ,
0.7598135, 0.778272 , 0.7960234, 0.8131473, 0.8407853,
0.8813573, 0.922313 , 0.9630486, 1.003679 , 1.050962 ,
1.098925 , 1.149593 , 1.199763 , 1.249907 , 1.294975 ,
1.335678 , 1.371403 , 1.40326 , 1.432225 , 1.458382 ,
1.483069 , 1.505261 , 1.526949 , 1.548191 , 1.568969 ,
1.58687 , 1.602708 , 1.61821 , 1.632696 , 1.646888 ,
1.660906 , 1.674893 , 1.68861 , 1.702203 , 1.715717 ,
1.728979 , 1.741928 , 1.75454 , 1.766925 , 1.777374 ,
1.782055 , 1.786655 , 1.791175 , 1.797693 , 1.804106 ,
1.810411 , 1.81729 , 1.824383 , 1.83136 , 1.837692 ,
1.841994 , 1.846226 , 1.850391 , 1.853351 , 1.855798 ,
1.858207 , 1.860424 , 1.86059 , 1.860754 , 1.860916 ,
1.860762 , 1.859831 , 1.858915 , 1.858012 , 1.857113 ,
1.85621 , 1.855321 , 1.854444 , 1.85338 , 1.851939 ,
1.850519 , 1.849121 , 1.847743 , 1.846386 , 1.845048 , 1.84373 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 4
self["ISH"] = 2
self["ELEVATION"] = 0.5
self["S2M"]["T"] = 287.2
self["S2M"]["Q"] = 12774.695441
self["S2M"]["O"] = 0.0208501984323
self["S2M"]["P"] = 1050.0
self["S2M"]["U"] = 4.0
self["S2M"]["V"] = 1.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 0
self["SKIN"]["T"] = 295.2
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.2
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 55.0
self["AZANGLE"] = 25.0
self["SUNZENANGLE"] = 15.0
self["SUNAZANGLE"] = 120.0
self["LATITUDE"] = 60.0
self["GAS_UNITS"] = 2
self["BE"] = 0.2
self["COSBK"] = 0.5
self["DATE"] = numpy.array([1966, 7, 1])
self["TIME"] = numpy.array([23, 0, 0]) | en | 0.584371 | Profile ../profile-datasets-py/standard101lev_allgas/004.py file automaticaly created by prof_gen.py script | 1.588235 | 2 |
certbot_plugin_edgedns/edgedns_test.py | akamai/certbot-plugin-edgedns | 2 | 6619813 | """Tests for certbot_plugin_edgedns.edgedns."""
import unittest
import copy
import mock
import json
import requests_mock
import requests
from certbot import errors
from certbot.compat import os
from certbot.errors import PluginError
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
# Dummy EdgeGrid credentials used throughout the tests.  The real values were
# redacted to "<KEY>" when the file was anonymised; only their presence matters.
FAKE_ACCESS_TOKEN = "<KEY>"
FAKE_CLIENT_TOKEN = "<KEY>"
FAKE_CLIENT_SECRET = "<KEY>"  # fix: closing quote was lost during redaction (SyntaxError)
FAKE_HOST = "akab-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.luna.akamaiapis.net"
PATH = ""
class AuthenticatorTest(
    test_util.TempDirTestCase, dns_test_common.BaseAuthenticatorTest
):
    """Unit tests for the EdgeDNS Authenticator.

    Two authenticators are exercised: one configured from a plugin-style
    credentials .ini file (edgedns_* keys) and one configured indirectly
    through an Akamai .edgerc file referenced by a second .ini file.
    """

    def setUp(self):
        super(AuthenticatorTest, self).setUp()

        from certbot_plugin_edgedns.edgedns import Authenticator

        # Plugin-style credentials file (edgedns_* keys).
        creds_ini_path = os.path.join(self.tempdir, "file_creds.ini")
        dns_test_common.write(
            {
                "edgedns_client_token": FAKE_CLIENT_TOKEN,
                "edgedns_client_secret": FAKE_CLIENT_SECRET,
                "edgedns_access_token": FAKE_ACCESS_TOKEN,
                "edgedns_host": FAKE_HOST,
            },
            creds_ini_path,
        )

        # Akamai .edgerc file, plus an .ini that points the plugin at it.
        dot_edgerc_path = os.path.join(self.tempdir, ".edgerc")
        with open(dot_edgerc_path, 'w') as h:
            h.write('[default]')
        dns_test_common.write(
            {
                "client_token": FAKE_CLIENT_TOKEN,
                "client_secret": FAKE_CLIENT_SECRET,
                "access_token": FAKE_ACCESS_TOKEN,
                "host": FAKE_HOST,
            },
            dot_edgerc_path,
        )
        edgerc_ini_path = os.path.join(self.tempdir, "file_edgerc.ini")
        dns_test_common.write(
            {
                "edgedns_edgerc_path": dot_edgerc_path,
                "edgedns_edgerc_section": "default",
            },
            edgerc_ini_path,
        )

        # fix: the original called super().setUp() a second time here, which
        # re-ran the tempdir fixture initialisation for no reason.

        # Authenticator configured from the credentials .ini.
        self.config = mock.MagicMock(
            edgedns_credentials=creds_ini_path, _edgedns_propagation_seconds=0
        )  # don't wait during tests
        self.auth = Authenticator(self.config, "edgedns")

        # Authenticator configured via the .edgerc indirection.
        # fix: the original reused creds_ini_path here, so edgerc_ini_path was
        # built but never used and the .edgerc code path was never exercised.
        self.config_edgerc = mock.MagicMock(
            edgedns_credentials=edgerc_ini_path, _edgedns_propagation_seconds=0
        )  # don't wait during tests
        self.auth_edgerc = Authenticator(self.config_edgerc, "edgedns")

        # Replace the real EdgeDNS clients with mocks so no HTTP happens.
        # _get_edgedns_client | pylint: disable=protected-access
        self.mock_client = mock.MagicMock()
        self.auth._get_edgedns_client = mock.MagicMock(return_value=self.mock_client)
        self.mock_client_edgerc = mock.MagicMock()
        self.auth_edgerc._get_edgedns_client = mock.MagicMock(return_value=self.mock_client_edgerc)

    def test_perform(self):
        """perform() must create one TXT record per challenge, for both authenticators."""
        for auth, client in ((self.auth, self.mock_client),
                             (self.auth_edgerc, self.mock_client_edgerc)):
            auth.perform([self.achall])
            expected = [
                mock.call.add_txt_record(
                    DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
                )
            ]
            self.assertEqual(expected, client.mock_calls)

    def test_cleanup(self):
        """cleanup() must delete the TXT record, for both authenticators."""
        # _attempt_cleanup | pylint: disable=protected-access
        for auth, client in ((self.auth, self.mock_client),
                             (self.auth_edgerc, self.mock_client_edgerc)):
            auth._attempt_cleanup = True
            auth.cleanup([self.achall])
            expected = [
                mock.call.del_txt_record(
                    DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
                )
            ]
            self.assertEqual(expected, client.mock_calls)
class EdgeDNSClientTest(unittest.TestCase):
    """Tests for _EdgeDNSClient against a mocked HTTP transport.

    A requests_mock.Adapter is mounted on a requests.Session so no real
    network traffic happens; each test registers the canned EdgeDNS
    Config DNS v2 API responses it needs, then drives the client.
    """
    # Base URL of the EdgeDNS Config DNS v2 API on the fake EdgeGrid host.
    FAKE_ENDPOINT = "https://" + FAKE_HOST + "/config-dns/v2"
    TEST_ZONE = "certbottest.zone"
    RECORD_NAME = "certbot_txt"
    RECORD_CONTENT = "1234567890abcdefghijklmnopqrstuvwxyz"
    RECORD_ADDTL_CONTENT = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
    RECORD_TTL = 400
    # Canned response body for GET /zones/{zone}.
    GET_ZONE_RESP = {
        "contractId": "1-2ABCDE",
        "zone": TEST_ZONE,
        "type": "primary",
        "aliasCount": 1,
        "signAndServe": True,
        "signAndServeAlgorithm": "RSA_SHA256",
        "versionId": "ae02357c-693d-4ac4-b33d-8352d9b7c786",
        "lastModifiedDate": "2017-01-03T12:00:00Z",
        "lastModifiedBy": "user28",
        "lastActivationDate": "2017-01-03T12:00:00Z",
        "activationState": "ACTIVE"
    }
    # Canned response body for GET /zones/{zone}/names/{name}/types/TXT.
    TXT_GET_RECSET_RESP = {
        "name": "{0}.{1}".format(RECORD_NAME, TEST_ZONE),
        "type": "TXT",
        "ttl": RECORD_TTL,
        "rdata": [ RECORD_CONTENT ]
    }
    def setUp(self):
        """Build an _EdgeDNSClient whose HTTPS traffic routes into a mock adapter."""
        from certbot_plugin_edgedns.edgedns import _EdgeDNSClient
        self.session = requests.Session()
        self.adapter = requests_mock.Adapter()
        self.session.mount('https://', self.adapter)
        EDGEGRID_CREDS = {"client_token": FAKE_CLIENT_TOKEN,
                          "client_secret": FAKE_CLIENT_SECRET,
                          "access_token": FAKE_ACCESS_TOKEN,
                          "host": FAKE_HOST}
        self.client = _EdgeDNSClient(EDGEGRID_CREDS)
    def _register_response(
        self, req_op, req_uri, url_params=None, response=None, message=None, additional_matcher=None, **kwargs
    ):
        """Register one canned response on the mock adapter.

        req_op/req_uri are the HTTP method and URL to match; url_params, if
        given, are appended as a query string.  `response` becomes the
        response body text; remaining kwargs (e.g. status_code) are passed
        straight through to Adapter.register_uri.
        """
        # NOTE(review): this envelope dict is built but never used —
        # register_uri below only sends `text=response`.  Leftover scaffolding?
        resp = {"code": "ok", "message": message, "response": response}
        if message is not None:
            resp["code"] = "remote_failure"
        url = req_uri
        if url_params is not None:
            url += "?" + url_params
        self.adapter.register_uri(
            req_op,
            "{0}".format(url),
            text=response,
            **kwargs
        )
    def test_add_txt_record(self):
        """Create a TXT record when no recordset exists yet (GET 404 -> POST)."""
        print("*** test_add_txt_record ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
        zone_resp_json = json.dumps(self.GET_ZONE_RESP)
        self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
        # Get Recordset... Doesn't exist
        recset_resp_json = json.dumps(self.TXT_GET_RECSET_RESP)
        recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
        self._register_response('GET', recordset_get_url, message='Not Found', status_code=404)
        # Add Recordset (create recordset via POST)
        # NOTE(review): recordset_json below is computed but never used.
        recordset_json = json.dumps(self.TXT_GET_RECSET_RESP)
        self._register_response('POST', recordset_get_url, response=recset_resp_json, status_code=201)
        # add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL)
        self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_CONTENT, self.RECORD_TTL)
    def test_add_existing_txt_record(self):
        """Append to an existing TXT recordset (GET 200 -> PUT with extended rdata)."""
        print("*** test_add_existing_txt_record ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
        zone_resp_json = json.dumps(self.GET_ZONE_RESP)
        self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
        # Get Recordset
        recset_resp_json = json.dumps(self.TXT_GET_RECSET_RESP)
        recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
        self._register_response('GET', recordset_get_url, response=recset_resp_json, status_code=200)
        # Update Recordset (append the extra rdata entry via PUT)
        recset_copy = copy.deepcopy(self.TXT_GET_RECSET_RESP)
        recset_copy["rdata"].append(self.RECORD_ADDTL_CONTENT)
        recset_resp_json = json.dumps(recset_copy)
        self._register_response('PUT', recordset_get_url, response=recset_resp_json, status_code=201)
        # add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL)
        self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT, self.RECORD_TTL)
    def test_add_txt_record_fail_to_find_domain(self):
        """A zone lookup failure must surface as a certbot PluginError."""
        print("*** test_add_txt_record_fail_to_find_domain ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        # NOTE(review): the stub is registered for zone "FailDomainGet" but the
        # call below uses TEST_ZONE, so the request misses the mock entirely;
        # confirm the client maps that connection error to PluginError as intended.
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, "FailDomainGet")
        self._register_response('GET', zone_get_url, message="Not Found", status_code=404)
        with self.assertRaises(errors.PluginError) as context:
            self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT, self.RECORD_TTL)
    def test_add_txt_record_fail_to_authenticate(self):
        """A 403 on the zone lookup must surface as a certbot PluginError."""
        print("*** test_add_txt_record_fail_to_authenticate ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
        self._register_response('GET', zone_get_url, message="Not Authorized", status_code=403)
        with self.assertRaises(errors.PluginError) as context:
            self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT, self.RECORD_TTL)
    def test_del_txt_record(self):
        """Delete a TXT recordset whose only rdata entry is the challenge (DELETE)."""
        print("*** test_del_txt_record ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
        zone_resp_json = json.dumps(self.GET_ZONE_RESP)
        self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
        # Get Recordset
        recset_resp_json = json.dumps(self.TXT_GET_RECSET_RESP)
        recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
        self._register_response('GET', recordset_get_url, response=recset_resp_json, status_code=200)
        # Delete Recordset (delete recordset)
        self._register_response('DELETE', recordset_get_url, status_code=204)
        # del_txt_record(self, domain, record_name, record_content)
        self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_CONTENT)
    def test_del_existing_txt_record(self):
        """Remove one rdata entry from a multi-entry recordset (PUT, not DELETE)."""
        print("*** test_del_existing_txt_record ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
        zone_resp_json = json.dumps(self.GET_ZONE_RESP)
        self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
        # Get Recordset (two rdata entries, so deletion becomes an update)
        recset_copy = copy.deepcopy(self.TXT_GET_RECSET_RESP)
        recset_copy["rdata"].append(self.RECORD_ADDTL_CONTENT)
        recset_resp_json = json.dumps(recset_copy)
        recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
        self._register_response('GET', recordset_get_url, response=recset_resp_json, status_code=200)
        # Delete Recordset (update via PUT)
        # NOTE(review): recordset_json below is computed but never used.
        recordset_json = json.dumps(self.TXT_GET_RECSET_RESP)
        self._register_response('PUT', recordset_get_url, response=recset_resp_json, status_code=200)
        # del_txt_record(self, domain, record_name, record_content)
        self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT)
    def test_del_txt_record_fail_to_find_domain(self):
        """A zone lookup failure during cleanup must surface as PluginError."""
        print("*** test_del_txt_record_fail_to_find_domain ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        # NOTE(review): stub registered for "FailDomainGet" while the call uses
        # TEST_ZONE — see the matching note in the add-variant above.
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, "FailDomainGet")
        zone_resp_json = json.dumps(self.GET_ZONE_RESP)
        self._register_response('GET', zone_get_url, message="Not Found", status_code=404)
        with self.assertRaises(errors.PluginError) as context:
            self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT)
    def test_del_txt_record_fail_to_authenticate(self):
        """A 403 during cleanup must surface as a certbot PluginError."""
        print("*** test_del_txt_record_fail_to_authenticate ***")
        self.adapter.reset() # clear any existing uri mappings
        self.client.set_session(self.session)
        # Get Zone
        zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
        zone_resp_json = json.dumps(self.GET_ZONE_RESP)
        self._register_response('GET', zone_get_url, message="Unauthorized", status_code=403)
        with self.assertRaises(errors.PluginError) as context:
            self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT)
# Allow running this test module directly: `python edgedns_test.py`.
if __name__ == "__main__":
    unittest.main() # pragma: no cover
| """Tests for certbot_plugin_edgedns.edgedns."""
import unittest
import copy
import mock
import json
import requests_mock
import requests
from certbot import errors
from certbot.compat import os
from certbot.errors import PluginError
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
FAKE_ACCESS_TOKEN = "<KEY>"
FAKE_CLIENT_TOKEN = "<KEY>"
FAKE_CLIENT_SECRET = "<KEY>
FAKE_HOST = "akab-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.luna.akamaiapis.net"
PATH = ""
class AuthenticatorTest(
test_util.TempDirTestCase, dns_test_common.BaseAuthenticatorTest
):
def setUp(self):
super(AuthenticatorTest, self).setUp()
from certbot_plugin_edgedns.edgedns import Authenticator
creds_ini_path = os.path.join(self.tempdir, "file_creds.ini")
dns_test_common.write(
{
"edgedns_client_token": FAKE_CLIENT_TOKEN,
"edgedns_client_secret": FAKE_CLIENT_SECRET,
"edgedns_access_token": FAKE_ACCESS_TOKEN,
"edgedns_host": FAKE_HOST,
},
creds_ini_path,
)
dot_edgerc_path = os.path.join(self.tempdir, ".edgerc")
with open(dot_edgerc_path, 'w') as h:
h.write('[default]')
dns_test_common.write(
{
"client_token": FAKE_CLIENT_TOKEN,
"client_secret": FAKE_CLIENT_SECRET,
"access_token": FAKE_ACCESS_TOKEN,
"host": FAKE_HOST,
},
dot_edgerc_path,
)
edgerc_ini_path = os.path.join(self.tempdir, "file_edgerc.ini")
dns_test_common.write(
{
"edgedns_edgerc_path": dot_edgerc_path,
"edgedns_edgerc_section": "default",
},
edgerc_ini_path,
)
super(AuthenticatorTest, self).setUp()
# creds ini path
self.config = mock.MagicMock(
edgedns_credentials=creds_ini_path, _edgedns_propagation_seconds=0
) # don't wait during tests
self.auth = Authenticator(self.config, "edgedns")
# creds edgerc file
self.config_edgerc = mock.MagicMock(
edgedns_credentials=creds_ini_path, _edgedns_propagation_seconds=0
) # don't wait during tests
self.auth_edgerc = Authenticator(self.config_edgerc, "edgedns")
# ini creds
self.mock_client = mock.MagicMock()
# _get_edgedns_client | pylint: disable=protected-access
self.auth._get_edgedns_client = mock.MagicMock(return_value=self.mock_client)
# edgerc
self.mock_client_edgerc = mock.MagicMock()
# _get_edgedns_client | pylint: disable=protected-access
self.auth_edgerc._get_edgedns_client = mock.MagicMock(return_value=self.mock_client_edgerc)
def test_perform(self):
# creds ini
self.auth.perform([self.achall])
expected = [
mock.call.add_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client.mock_calls)
# edgerc
self.auth_edgerc.perform([self.achall])
expected = [
mock.call.add_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client_edgerc.mock_calls)
def test_cleanup(self):
# _attempt_cleanup | pylint: disable=protected-access
# creds ini
self.auth._attempt_cleanup = True
self.auth.cleanup([self.achall])
expected = [
mock.call.del_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client.mock_calls)
# edgerc
self.auth_edgerc._attempt_cleanup = True
self.auth_edgerc.cleanup([self.achall])
expected = [
mock.call.del_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY,
)
]
self.assertEqual(expected, self.mock_client_edgerc.mock_calls)
class EdgeDNSClientTest(unittest.TestCase):
FAKE_ENDPOINT = "https://" + FAKE_HOST + "/config-dns/v2"
TEST_ZONE = "certbottest.zone"
RECORD_NAME = "certbot_txt"
RECORD_CONTENT = "1234567890abcdefghijklmnopqrstuvwxyz"
RECORD_ADDTL_CONTENT = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
RECORD_TTL = 400
GET_ZONE_RESP = {
"contractId": "1-2ABCDE",
"zone": TEST_ZONE,
"type": "primary",
"aliasCount": 1,
"signAndServe": True,
"signAndServeAlgorithm": "RSA_SHA256",
"versionId": "ae02357c-693d-4ac4-b33d-8352d9b7c786",
"lastModifiedDate": "2017-01-03T12:00:00Z",
"lastModifiedBy": "user28",
"lastActivationDate": "2017-01-03T12:00:00Z",
"activationState": "ACTIVE"
}
TXT_GET_RECSET_RESP = {
"name": "{0}.{1}".format(RECORD_NAME, TEST_ZONE),
"type": "TXT",
"ttl": RECORD_TTL,
"rdata": [ RECORD_CONTENT ]
}
def setUp(self):
from certbot_plugin_edgedns.edgedns import _EdgeDNSClient
self.session = requests.Session()
self.adapter = requests_mock.Adapter()
self.session.mount('https://', self.adapter)
EDGEGRID_CREDS = {"client_token": FAKE_CLIENT_TOKEN,
"client_secret": FAKE_CLIENT_SECRET,
"access_token": FAKE_ACCESS_TOKEN,
"host": FAKE_HOST}
self.client = _EdgeDNSClient(EDGEGRID_CREDS)
def _register_response(
self, req_op, req_uri, url_params=None, response=None, message=None, additional_matcher=None, **kwargs
):
resp = {"code": "ok", "message": message, "response": response}
if message is not None:
resp["code"] = "remote_failure"
url = req_uri
if url_params is not None:
url += "?" + url_params
self.adapter.register_uri(
req_op,
"{0}".format(url),
text=response,
**kwargs
)
def test_add_txt_record(self):
print("*** test_add_txt_record ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
zone_resp_json = json.dumps(self.GET_ZONE_RESP)
self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
# Get Recordset... Doesn't exist
recset_resp_json = json.dumps(self.TXT_GET_RECSET_RESP)
recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
self._register_response('GET', recordset_get_url, message='Not Found', status_code=404)
# Add Recordset (create recordset)
recordset_json = json.dumps(self.TXT_GET_RECSET_RESP)
self._register_response('POST', recordset_get_url, response=recset_resp_json, status_code=201)
# add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL)
self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_CONTENT, self.RECORD_TTL)
def test_add_existing_txt_record(self):
print("*** test_add_existing_txt_record ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
zone_resp_json = json.dumps(self.GET_ZONE_RESP)
self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
# Get Recordset
recset_resp_json = json.dumps(self.TXT_GET_RECSET_RESP)
recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
self._register_response('GET', recordset_get_url, response=recset_resp_json, status_code=200)
# Add Recordset (create recordset)
recset_copy = copy.deepcopy(self.TXT_GET_RECSET_RESP)
recset_copy["rdata"].append(self.RECORD_ADDTL_CONTENT)
recset_resp_json = json.dumps(recset_copy)
self._register_response('PUT', recordset_get_url, response=recset_resp_json, status_code=201)
# add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL)
self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT, self.RECORD_TTL)
def test_add_txt_record_fail_to_find_domain(self):
print("*** test_add_txt_record_fail_to_find_domain ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, "FailDomainGet")
self._register_response('GET', zone_get_url, message="Not Found", status_code=404)
with self.assertRaises(errors.PluginError) as context:
self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT, self.RECORD_TTL)
def test_add_txt_record_fail_to_authenticate(self):
print("*** test_add_txt_record_fail_to_authenticate ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
self._register_response('GET', zone_get_url, message="Not Authorized", status_code=403)
with self.assertRaises(errors.PluginError) as context:
self.client.add_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT, self.RECORD_TTL)
def test_del_txt_record(self):
print("*** test_del_txt_record ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
zone_resp_json = json.dumps(self.GET_ZONE_RESP)
self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
# Get Recordset
recset_resp_json = json.dumps(self.TXT_GET_RECSET_RESP)
recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
self._register_response('GET', recordset_get_url, response=recset_resp_json, status_code=200)
# Delete Recordset (delete recordset)
self._register_response('DELETE', recordset_get_url, status_code=204)
# del_txt_record(self, domain, record_name, record_content)
self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_CONTENT)
def test_del_existing_txt_record(self):
print("*** test_del_existing_txt_record ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
zone_resp_json = json.dumps(self.GET_ZONE_RESP)
self._register_response('GET', zone_get_url, response=zone_resp_json, status_code=200)
# Get Recordset
recset_copy = copy.deepcopy(self.TXT_GET_RECSET_RESP)
recset_copy["rdata"].append(self.RECORD_ADDTL_CONTENT)
recset_resp_json = json.dumps(recset_copy)
recordset_get_url = "{0}/zones/{1}/names/{2}/types/TXT".format(self.FAKE_ENDPOINT, self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"])
self._register_response('GET', recordset_get_url, response=recset_resp_json, status_code=200)
# Delete Recordset (update)
recordset_json = json.dumps(self.TXT_GET_RECSET_RESP)
self._register_response('PUT', recordset_get_url, response=recset_resp_json, status_code=200)
# add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL)
self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT)
def test_del_txt_record_fail_to_find_domain(self):
print("*** test_del_txt_record_fail_to_find_domain ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, "FailDomainGet")
zone_resp_json = json.dumps(self.GET_ZONE_RESP)
self._register_response('GET', zone_get_url, message="Not Found", status_code=404)
with self.assertRaises(errors.PluginError) as context:
self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT)
def test_del_txt_record_fail_to_authenticate(self):
print("*** test_del_txt_record_fail_to_authenticate ***")
self.adapter.reset() # clear any existing uri mappings
self.client.set_session(self.session)
# Get Zone
zone_get_url = "{0}/zones/{1}".format(self.FAKE_ENDPOINT, self.TEST_ZONE)
zone_resp_json = json.dumps(self.GET_ZONE_RESP)
self._register_response('GET', zone_get_url, message="Unauthorized", status_code=403)
with self.assertRaises(errors.PluginError) as context:
self.client.del_txt_record(self.TEST_ZONE, self.TXT_GET_RECSET_RESP["name"], self.RECORD_ADDTL_CONTENT)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| en | 0.454914 | Tests for certbot_plugin_edgedns.edgedns. # creds ini path # don't wait during tests # creds edgerc file # don't wait during tests # ini creds # _get_edgedns_client | pylint: disable=protected-access # edgerc # _get_edgedns_client | pylint: disable=protected-access # creds ini # edgerc # _attempt_cleanup | pylint: disable=protected-access # creds ini # edgerc # clear any existing uri mappings # Get Zone # Get Recordset... Doesn't exist # Add Recordset (create recordset) # add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL) # clear any existing uri mappings # Get Zone # Get Recordset # Add Recordset (create recordset) # add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL) # clear any existing uri mappings # Get Zone # clear any existing uri mappings # Get Zone # clear any existing uri mappings # Get Zone # Get Recordset # Delete Recordset (delete recordset) # del_txt_record(self, domain, record_name, record_content) # clear any existing uri mappings # Get Zone # Get Recordset # Delete Recordset (update) # add_txt_record(self, domain, record_name, record_content, record_ttl=RECORD_TTL) # clear any existing uri mappings # Get Zone # clear any existing uri mappings # Get Zone # pragma: no cover | 2.235984 | 2 |
3.2_tuple_&_set.py | codernayeem/python-cheat-sheet | 0 | 6619814 | # ::: Tuple :::
# A tuple is just like a list, but read-only (immutable).
# Almost everything else works just like a list.
# defining a tuple
marks = (454, 657, 587, 345, 893) # just like a list, but with '()' instead of '[]'
marks = 454, 657, 587, 345, 893 # Python still takes this as a tuple (parentheses optional)
marks = tuple("Helo World") # tuple() builds a tuple from any iterable -- here, the string's characters
# unpacking
marks = [45, 63, 96]
first, second, third = marks
# swapping values using a tuple
x = 4
y = 6
x, y = y, x # same as: x, y = (y, x)
# 'y, x' is first packed into a tuple, which is then unpacked into x and y
# defining several variables on one line works the same way
x, y = 8, 6 # packed into a tuple, then unpacked
# ====================================================================================
# ====================================================================================
# ::: Set :::
# A set is also like a list, with some differences:
# a set is an unordered collection of unique items.
# defining a set
num = {6, 8, 9, 12, 36} # just like a list, but with '{}' instead of '[]'
marks = set([1, 6, 9, 6, 3, 3, 4, 7]) # set from another iterable (duplicates are removed)
# Adding an item
num.add(6) # unlike a list, a set has no append/insert
# Removing an item
num.remove(12)
# Unlike a list, a set has no indexing,
# so we cannot access an item by index -- num[2] would not work --
# but we can still iterate over the set, loop through it, etc.
for i in num:
    print(i)
has_12 = 12 in num # 12 was removed above, so this returns False
# Some set mathematics
first = {3, 2, 4, 6, 7}
second = {1, 2, 4, 5}
union = first | second # items in first or second (or both): {1, 2, 3, 4, 5, 6, 7}
intersection = first & second # items in both first and second: {2, 4}
difference = first - second # items of first that are not in second: {3, 6, 7}
semetric_difference = first ^ second # items in exactly one of the two sets: {1, 3, 5, 6, 7} (symmetric difference)
# unlike lists, sets do not support the + or * operators
# first = {45, 85, 69} + {85, 32, 45} : this would raise a TypeError
| # ::: Tuple :::
# tuple is just like list but read only
# almost all the thing is judt like list.
# defining a tuple
marks = (454, 657, 587, 345, 893) # just like list, only '[]' to '()'
marks = 454, 657, 587, 345, 893 # python will take it as a tuple
marks = tuple("Helo World")
# unpacking
marks = [45, 63, 96]
first, second, third = marks
# swaping values using tuple
x = 4
y = 6
x, y = y, x # same to : x, y = (y, x)
# at first 'y, x' is taken as a tuple and then unpack it
# this also works same way when define more than one variable at a line
x, y = 8, 6 # take as a tuple and then unpack it
# ====================================================================================
# ====================================================================================
# ::: Set :::
# set is also like list with some difference
# set is a list of unique items
# defining a set
num = {6, 8, 9, 12, 36} # just like list, only '[]' to '{}'
marks = set([1, 6, 9, 6, 3, 3, 4, 7]) # set from other iterables (set removes the duplicate)
# Adding item
num.add(6) # unlike list, set do not have append/insert
# Modify / Remove item
num.remove(12)
# Unlike list, set do not have index
# So, we can not access item with their index, like num[2], this will not work
# but we can iterate over the set, loop through a set etc.
for i in num:
print(i)
has_12 = 12 in num # 12 is not in num, so it will return False
# Some mathematics
first = {3, 2, 4, 6, 7}
second = {1, 2, 4, 5}
union = first | second # retuns all items that is in first or second set : {1, 2, 3, 4, 5, 6, 7}
intersection = first & second # retuns items that is both in first and second set : {2, 4}
difference = first - second # retuns first set after removing items that is in second set : {3, 6, 7}
semetric_difference = first ^ second # retuns items that is either in first or second set : {1, 3, 5, 6, 7}
# unlike list, we can not use + or * operator in set
# first = {45, 85, 69} + {85, 32, 45} : this will through error
| en | 0.855283 | # ::: Tuple ::: # tuple is just like list but read only # almost all the thing is judt like list. # defining a tuple # just like list, only '[]' to '()' # python will take it as a tuple # unpacking # swaping values using tuple # same to : x, y = (y, x) # at first 'y, x' is taken as a tuple and then unpack it # this also works same way when define more than one variable at a line # take as a tuple and then unpack it # ==================================================================================== # ==================================================================================== # ::: Set ::: # set is also like list with some difference # set is a list of unique items # defining a set # just like list, only '[]' to '{}' # set from other iterables (set removes the duplicate) # Adding item # unlike list, set do not have append/insert # Modify / Remove item # Unlike list, set do not have index # So, we can not access item with their index, like num[2], this will not work # but we can iterate over the set, loop through a set etc. # 12 is not in num, so it will return False # Some mathematics # retuns all items that is in first or second set : {1, 2, 3, 4, 5, 6, 7} # retuns items that is both in first and second set : {2, 4} # retuns first set after removing items that is in second set : {3, 6, 7} # retuns items that is either in first or second set : {1, 3, 5, 6, 7} # unlike list, we can not use + or * operator in set # first = {45, 85, 69} + {85, 32, 45} : this will through error | 4.277744 | 4 |
publication/SIMU/synthetic_obs.py | antoinemarchal/ROHSA | 9 | 6619815 | #!/home/amarchal/py2env/bin/python
'''This program build synthetic obs (21cm line) from T,n and vz which are the three-dimensional
field of the numerical simulation based on the work of Saury et al. 2014'''
import numpy as np
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import units
from astropy import constants as const
from scipy import ndimage
import scipy.integrate as integrate
import FITS_tools
plt.ion()  # interactive matplotlib mode: figures display without blocking the script
plot = False  # master switch for diagnostic plotting
cm = plt.get_cmap('viridis')
cm.set_bad(color='black')  # masked/NaN pixels are drawn in black
imkw = dict(origin='lower', interpolation='none', cmap=cm)  # shared imshow keyword arguments
def I_Tb(params):
    """Integrate the optically-thin emission along the line of sight.

    params is a tuple (u, vz, n_Delta, T, C, Delta2, dz):
      u        -- velocity channel (scalar, km/s)
      vz       -- 3D line-of-sight velocity field (km/s)
      n_Delta  -- density divided by sqrt(Delta2)
      T        -- temperature cube (accepted for interface parity; unused here)
      C        -- 21cm conversion constant
      Delta2   -- thermal velocity dispersion squared (km/s)^2
      dz       -- cell depth (cm), integration step along axis 0

    Returns the 2D brightness map obtained by Simpson integration over
    axis 0 (the line of sight).
    """
    (u, vz, n_Delta, T, C, Delta2, dz) = params
    dI = n_Delta * np.exp(- (u - (vz))**2 / (2.*Delta2))
    # Cells outside the selected temperature phase are NaN; treat them as
    # contributing no emission.
    dI[np.isnan(dI)] = 0.
    # SciPy renamed `simps` to `simpson` (alias removed in SciPy 1.14);
    # prefer the new name, fall back to the old one for older installs.
    _simpson = getattr(integrate, "simpson", None) or integrate.simps
    I = 1./(C * np.sqrt(2.*np.pi)) * _simpson(dI, dx=dz, axis=0)
    return I
# Constants (SI / CGS mix; units noted per line)
m_h = 1.6737236e-27 #kg  (hydrogen atom mass)
C = 1.82243e18 #K-1cm-2 / (km.s-1)  (21cm column-density conversion)
pc2cm = units.pc.to(units.m) * 1.e2
box_size = 40. # pc  (simulation box side)
resolution = 1024.
dz = (box_size / resolution) * pc2cm  # depth of one cell in cm
# Open data: density, temperature and line-of-sight velocity cubes
path_simu = '/data/amarchal/ROHSA_paper/data/Saury2014/'
path_out = '/data/amarchal/ROHSA_paper/data/synthetic_obs/'
hdu_list_rho = fits.open(path_simu + 'rho_016_subgrid_256.fits')
hdu_list_T = fits.open(path_simu + 'T_016_subgrid_256.fits')
hdu_list_vz = fits.open(path_simu + 'vz_016_subgrid_256.fits')
reso = 0.8 #km.s-1  (spectral channel width)
rho_cube = hdu_list_rho[0].data #g.cm-3
T_cube = hdu_list_T[0].data
vz_cube = hdu_list_vz[0].data * 1.e-5 #km.s-1 ATTENTION (input is cm/s)
## CUT TEMPERATURE: keep only cells inside the [Tk_lim_inf, Tk_lim_sup] phase;
## cells outside the cut stay zero in the *_phase cubes.
Tk_lim_inf = 0
Tk_lim_sup = np.inf
idx_phase = np.where((T_cube > Tk_lim_inf) & (T_cube < Tk_lim_sup))
rho_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
T_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
vz_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
rho_cube_phase[idx_phase] = rho_cube[idx_phase]
T_cube_phase[idx_phase] = T_cube[idx_phase]
vz_cube_phase[idx_phase] = vz_cube[idx_phase]
##
# Preliminary calculation: thermal dispersion^2 (km/s)^2 and number density
Delta2 = ((const.k_B.value * T_cube_phase / m_h)) * 1.e-6 #km.s-1
n = rho_cube_phase/(m_h*1.e3)
n_Delta = n / np.sqrt(Delta2)
# Spectral range: channels from -40 to +40 km/s; map_u replicates the
# channel axis over every sky pixel for vectorized computation.
u = np.arange(-40,40+reso, reso)
map_u = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
for i in np.arange(T_cube_phase.shape[1]):
    for j in np.arange(T_cube_phase.shape[2]):
        map_u[:,i,j] = u
Tb = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
Tb_thin = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
tau_in_front = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
# Radiative transfer along axis 0: each slab emits attenuated by the
# accumulated opacity of slabs in front of it; Tb_thin is the optically
# thin limit (tau * T summed without attenuation).
for i in tqdm(range(T_cube_phase.shape[0])):
    Tb_z = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
    tau_z = 1. / (C * np.sqrt(2.*np.pi)) * n_Delta[i] / T_cube_phase[i] * np.exp(- (map_u - (vz_cube_phase[i]))**2 / (2.*Delta2[i])) * dz
    idx_nonzero = ~np.isnan(tau_z[0])
    Tb_z[:,idx_nonzero] = T_cube_phase[i,idx_nonzero] * (1. - np.exp(-1.*tau_z[:,idx_nonzero])) * np.exp(-1.*tau_in_front[:,idx_nonzero])
    tau_in_front[:,idx_nonzero] += tau_z[:,idx_nonzero]
    Tb += Tb_z
    Tb_thin[:,idx_nonzero] += tau_z[:,idx_nonzero] * T_cube_phase[i,idx_nonzero]
# Tb_thin_fast = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
# for i in tqdm(range(len(u))):
#     Tb_thin_fast[i] = I_Tb((u[i], vz_cube_phase, n_Delta, T_cube_phase, C, Delta2, dz))
fileout = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA.fits'
fileout_thin = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA_thin.fits'
# Write PPV cube
# NOTE(review): CRPIX3=40 pairs with CRVAL3=u[40]; with reso=0.8 km/s,
# u[40] = -8 km/s rather than the band centre -- confirm the intended
# reference pixel before relying on the WCS.
hdu0 = fits.PrimaryHDU(Tb)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb.shape[1]
hdu0.header['NAXIS2'] = Tb.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout, overwrite=True)
# Write PPV cube thin limit (same WCS as above)
hdu0 = fits.PrimaryHDU(Tb_thin)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb_thin.shape[1]
hdu0.header['NAXIS2'] = Tb_thin.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout_thin, overwrite=True)
| #!/home/amarchal/py2env/bin/python
'''This program build synthetic obs (21cm line) from T,n and vz which are the three-dimensional
field of the numerical simulation based on the work of Saury et al. 2014'''
import numpy as np
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import units
from astropy import constants as const
from scipy import ndimage
import scipy.integrate as integrate
import FITS_tools
plt.ion()
plot = False
cm = plt.get_cmap('viridis')
cm.set_bad(color='black')
imkw = dict(origin='lower', interpolation='none', cmap=cm)
def I_Tb(params):
(u, vz, n_Delta, T, C, Delta2, dz) = params
dI = n_Delta * np.exp(- (u - (vz))**2 / (2.*Delta2))
dI[np.where(dI != dI)] = 0.
I = 1./(C * np.sqrt(2.*np.pi)) * integrate.simps(dI, dx=dz, axis=0)
return I
# Constant
m_h = 1.6737236e-27 #kg
C = 1.82243e18 #K-1cm-2 / (km.s-1)
pc2cm = units.pc.to(units.m) * 1.e2
box_size = 40. # pc
resolution = 1024.
dz = (box_size / resolution) * pc2cm
# Open data
path_simu = '/data/amarchal/ROHSA_paper/data/Saury2014/'
path_out = '/data/amarchal/ROHSA_paper/data/synthetic_obs/'
hdu_list_rho = fits.open(path_simu + 'rho_016_subgrid_256.fits')
hdu_list_T = fits.open(path_simu + 'T_016_subgrid_256.fits')
hdu_list_vz = fits.open(path_simu + 'vz_016_subgrid_256.fits')
reso = 0.8 #km.s-1
rho_cube = hdu_list_rho[0].data #g.cm-3
T_cube = hdu_list_T[0].data
vz_cube = hdu_list_vz[0].data * 1.e-5 #km.s-1 ATTENTION
## CUT TEMPERATURE
Tk_lim_inf = 0
Tk_lim_sup = np.inf
idx_phase = np.where((T_cube > Tk_lim_inf) & (T_cube < Tk_lim_sup))
rho_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
T_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
vz_cube_phase = np.zeros((rho_cube.shape[0], rho_cube.shape[1], rho_cube.shape[2]))
rho_cube_phase[idx_phase] = rho_cube[idx_phase]
T_cube_phase[idx_phase] = T_cube[idx_phase]
vz_cube_phase[idx_phase] = vz_cube[idx_phase]
##
# Preliminary calculation
Delta2 = ((const.k_B.value * T_cube_phase / m_h)) * 1.e-6 #km.s-1
n = rho_cube_phase/(m_h*1.e3)
n_Delta = n / np.sqrt(Delta2)
# Spectral range
u = np.arange(-40,40+reso, reso)
map_u = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
for i in np.arange(T_cube_phase.shape[1]):
for j in np.arange(T_cube_phase.shape[2]):
map_u[:,i,j] = u
Tb = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
Tb_thin = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
tau_in_front = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
for i in tqdm(range(T_cube_phase.shape[0])):
Tb_z = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
tau_z = 1. / (C * np.sqrt(2.*np.pi)) * n_Delta[i] / T_cube_phase[i] * np.exp(- (map_u - (vz_cube_phase[i]))**2 / (2.*Delta2[i])) * dz
idx_nonzero = ~np.isnan(tau_z[0])
Tb_z[:,idx_nonzero] = T_cube_phase[i,idx_nonzero] * (1. - np.exp(-1.*tau_z[:,idx_nonzero])) * np.exp(-1.*tau_in_front[:,idx_nonzero])
tau_in_front[:,idx_nonzero] += tau_z[:,idx_nonzero]
Tb += Tb_z
Tb_thin[:,idx_nonzero] += tau_z[:,idx_nonzero] * T_cube_phase[i,idx_nonzero]
# Tb_thin_fast = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2]))
# for i in tqdm(range(len(u))):
# Tb_thin_fast[i] = I_Tb((u[i], vz_cube_phase, n_Delta, T_cube_phase, C, Delta2, dz))
fileout = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA.fits'
fileout_thin = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA_thin.fits'
# Write PPV cube
hdu0 = fits.PrimaryHDU(Tb)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb.shape[1]
hdu0.header['NAXIS2'] = Tb.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout, overwrite=True)
# Write PPV cube thin limit
hdu0 = fits.PrimaryHDU(Tb_thin)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb_thin.shape[1]
hdu0.header['NAXIS2'] = Tb_thin.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout_thin, overwrite=True)
| en | 0.687598 | #!/home/amarchal/py2env/bin/python This program build synthetic obs (21cm line) from T,n and vz which are the three-dimensional field of the numerical simulation based on the work of Saury et al. 2014 # Constant #kg #K-1cm-2 / (km.s-1) # pc # Open data #km.s-1 #g.cm-3 #km.s-1 ATTENTION ## CUT TEMPERATURE ## # Preliminary calculation #km.s-1 # Spectral range # Tb_thin_fast = np.zeros((len(u), T_cube_phase.shape[1], T_cube_phase.shape[2])) # for i in tqdm(range(len(u))): # Tb_thin_fast[i] = I_Tb((u[i], vz_cube_phase, n_Delta, T_cube_phase, C, Delta2, dz)) # Write PPV cube # Write PPV cube thin limit | 2.016838 | 2 |
examples/movies-data-web/app/models/movies.py | sunyunxian/flask-examples | 0 | 6619816 | <filename>examples/movies-data-web/app/models/movies.py<gh_stars>0
from sqlalchemy import Column, Integer, String
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Movies(db.Model):
    """ORM model for one movie record in the `movies` table."""
    __tablename__ = 'movies'

    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(length=50), nullable=False)
    author = Column(String(length=30), default='Unknown author')
    binding = Column(String(length=30), default='Unknown binding')
    publisher = Column(String(length=30), default='Unknown publisher')
    # Bug fix: the original declared default='Unknown page' on an Integer
    # column; a string is not a valid integer default and would fail on
    # insert. Leave the column nullable with no default instead.
    page = Column(Integer)
    pubdate = Column(String(length=20))
    # Widened from length=8 to length=13: real ISBNs are 10 or 13
    # characters, so the original column could not hold a valid ISBN.
    isbn = Column(String(length=13), nullable=False, unique=True)
    summary = Column(String(length=1000), default='Unknown summary')
    image = Column(String(length=100))

    def __repr__(self) -> str:
        return f'{self.id}: {self.title}'

    def foo(self):
        # Kept for interface compatibility; currently a no-op placeholder.
        pass
| <filename>examples/movies-data-web/app/models/movies.py<gh_stars>0
from sqlalchemy import Column, Integer, String
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Movies(db.Model):
__tablename__ = 'movies'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(length=50), nullable=False)
author = Column(String(length=30), default='Unknown author')
binding = Column(String(length=30), default='Unknown binding')
publisher = Column(String(length=30), default='Unknown publisher')
page = Column(Integer, default='Unknown page')
pubdate = Column(String(length=20))
isbn = Column(String(length=8), nullable=False, unique=True)
summary = Column(String(length=1000), default='Unknown summary')
image = Column(String(length=100))
def __repr__(self) -> str:
return f'{self.id}: {self.title}'
def foo(self):
pass
| none | 1 | 2.865233 | 3 | |
data_mine/nlp/allen_ai_drop/translators.py | SebiSebi/DataMine | 9 | 6619817 | import numpy as np
import pandas as pd
import pyhash
import six
from copy import deepcopy
from data_mine.utils import is_integer, num_decimal_places
from numpy.random import RandomState
from six import string_types
from .utils import serialize_date
def hash_as_int32(thing):
    """Return the CityHash32 of *thing*'s text representation (0..2**32-1)."""
    text = unicode(thing) if six.PY2 else str(thing)  # noqa: F821
    return pyhash.city_32()(text)
def DROP2MC(df):
    """
    Convert DROP questions into 4-way multiple-choice items.

    Keeps only rows whose answer is a number or a date, generates three
    deterministic distractors per question (the PRNG is seeded from
    query_id + question, so the output is reproducible), shuffles the
    four choices by a per-question hash, and returns a new dataframe
    with columns: query_id, question, passage, answers (list of 4
    strings) and correct (one of 'A'..'D').
    """
    # Returns true if we can alter the question so as to make it a
    # multiple-choice question. Currently this is possible if and only
    # if the answer is a number or a date.
    def is_good_question(row):
        answer_type = row.answer_type
        assert(answer_type in ["number", "date", "spans"])
        return answer_type in ["number", "date"]

    # Build 4 numeric choices: 3 distractors near the true value plus
    # the true answer. Returns (choices, index_of_correct).
    def alter_number(row):
        assert(row.answer_type == "number")
        number = row.parsed_answer
        float(number)  # Check that the answer is a number.
        # Cases:
        # int and > 20 then generate numbers of the same range.
        # for years: 1948, do not generate 15xx, but only 19xx.

        # Use this random generator (instead of the global state) to get
        # determinstic results. We seed the generator depending on the
        # query_id and the question. Be aware that numpy requires the seed
        # to be an integer between 0 and 2**32 - 1 inclusive.
        seed = hash_as_int32(row.query_id + " - " + row.question)
        assert(0 <= seed <= (2 ** 32) - 1)
        prng = RandomState(seed)

        choices = []
        if is_integer(number) and num_decimal_places(number) == 0:
            number = int(number)
            # Step size scales with the answer's magnitude so distractors
            # stay plausible (e.g. years change by a few units only).
            step_interval = None  # both ends are inclusive.
            if abs(number) <= 20:
                step_interval = (1, 1)
            elif abs(number) <= 99:
                step_interval = (1, 4)
            elif abs(number) <= 999:
                step_interval = (1, 7)
            else:
                step_interval = (1, number // 100)
            upper_bound = number
            lower_bound = number
            for _ in range(0, 3):
                if prng.randint(0, 10000) % 2 == 0:
                    upper_bound += prng.randint(step_interval[0], step_interval[1] + 1)  # noqa: E501
                    choices.append(upper_bound)
                else:
                    lower_bound -= prng.randint(step_interval[0], step_interval[1] + 1)  # noqa: E501
                    choices.append(lower_bound)
            choices.append(row.parsed_answer)
            choices = list(map(str, choices))
        else:
            # Most float numbers are percentages.
            number = float(number)
            step_interval = (number / 100.0, number / 25.0)
            if step_interval[0] < 0.1:
                step_interval = (0.1, 0.5)
            upper_bound = number
            lower_bound = number
            for _ in range(0, 3):
                if prng.randint(0, 10000) % 2 == 0:
                    upper_bound += prng.uniform(step_interval[0], step_interval[1])  # noqa: E501
                    choices.append(upper_bound)
                else:
                    lower_bound -= prng.uniform(step_interval[0], step_interval[1])  # noqa: E501
                    choices.append(lower_bound)
            # Limit to the same number of decimal points.
            precision = num_decimal_places(row.parsed_answer)
            assert(precision >= 1)
            choices = list(map(lambda x: '{:.{prec}f}'.format(x, prec=precision), choices))  # noqa: E501
            # Some answers in DROP are listed as '.1', '.2'. Make sure we have
            # such numbers in the dataset to remove the possibility of
            # guessing the correct answer just by looking if the number starts
            # with '.'.
            if row.parsed_answer[:1] == "." or row.parsed_answer[:2] == "0.":
                for i, choice in enumerate(choices):
                    if choice[:2] == "0.":
                        if prng.randint(0, 10) <= 7:
                            choices[i] = choice[1:]
            choices.append(row.parsed_answer)
        # All choices must be strings.
        for choice in choices:
            assert(isinstance(choice, string_types))
        assert(len(set(choices)) == 4)  # Choices are distinct.
        assert(len(set([float(x) for x in choices])) == 4)  # 0.1 vs .1
        assert(choices[-1] == row.parsed_answer)
        # Deterministic per-question shuffle via hashing.
        choices.sort(key=lambda choice: hash_as_int32(choice + " ! " + row.query_id))  # noqa: E501
        assert(choices.count(row.parsed_answer) == 1)
        return choices, choices.index(row.parsed_answer)

    # Build 4 date choices: 3 distractors obtained by perturbing a random
    # subset of the day/month/year fields. Returns (choices, index_of_correct).
    def alter_date(row):
        assert(row.answer_type == "date")
        # Use this random generator (instead of the global state) to get
        # determinstic results. We seed the generator depending on the
        # query_id and the question. Be aware that numpy requires the seed
        # to be an integer between 0 and 2**32 - 1 inclusive.
        seed = hash_as_int32(row.query_id + " - " + row.question)
        assert(0 <= seed <= (2 ** 32) - 1)
        prng = RandomState(seed)

        original_date = row.original_answer["date"]
        assert(len(original_date) == 3)
        year_upper_bound = int(str(original_date['year']) or 2020)
        year_lower_bound = year_upper_bound
        choices = [serialize_date(original_date)]
        while len(choices) < 4:
            date = deepcopy(original_date)
            # `mask` selects which of day/month/year get perturbed (bits 1/2/4).
            mask = prng.randint(low=1, high=8)  # 8 is exclusive.
            if len(date['day']) > 0 and (mask & 1) != 0:
                day = int(date['day'])
                # Ignore issues with February.
                possible_days = np.arange(1, 31).tolist()  # without 31.
                if day in possible_days:
                    possible_days.remove(day)
                date['day'] = str(prng.choice(possible_days))
            if len(date['month']) > 0 and (mask & 2) != 0:
                month = date['month']
                possible_months = [
                    "January",
                    "February",
                    "March",
                    "April",
                    "May",
                    "June",
                    "July",
                    "August",
                    "September",
                    "October",
                    "November",
                    "December",
                ]
                if month in possible_months:
                    possible_months.remove(month)
                date['month'] = prng.choice(possible_months)
            if len(date['year']) > 0 and (mask & 4) != 0:
                year = None
                if prng.randint(0, 10000) % 2 == 0:
                    year_upper_bound += prng.randint(1, 3)
                    year = year_upper_bound
                else:
                    year_lower_bound -= prng.randint(1, 3)
                    year = year_lower_bound
                date['year'] = str(year)
            # Skip duplicate choices
            date = serialize_date(date)
            if date not in choices:
                choices.append(date)
        assert(len(set(choices)) == 4)  # Unique choices.
        assert(choices[0] == row.parsed_answer)
        choices.sort(key=lambda choice: hash_as_int32(choice + " ! " + row.query_id))  # noqa: E501
        assert(choices.count(row.parsed_answer) == 1)
        return choices, choices.index(row.parsed_answer)

    all_data = []
    # Drop span-type answers: only numbers and dates can be converted.
    df = df[df.apply(is_good_question, axis=1)].reset_index(drop=True)
    for _, row in df.iterrows():
        choices, correct_answer = None, None
        answer_type = row.answer_type
        if answer_type == "number":
            choices, correct_answer = alter_number(row)
        else:
            assert(answer_type == "date")
            choices, correct_answer = alter_date(row)
        assert(choices[correct_answer] == row.parsed_answer)
        correct_answer = chr(ord('A') + correct_answer)
        assert(correct_answer in ['A', 'B', 'C', 'D'])
        all_data.append({
            'query_id': row.query_id,
            'question': row.question,
            'passage': row.passage,
            'answers': choices,
            'correct': correct_answer
        })
    df = pd.DataFrame(all_data)
    return df
| import numpy as np
import pandas as pd
import pyhash
import six
from copy import deepcopy
from data_mine.utils import is_integer, num_decimal_places
from numpy.random import RandomState
from six import string_types
from .utils import serialize_date
def hash_as_int32(thing):
hasher = pyhash.city_32()
if six.PY2: # pragma: no cover
thing = unicode(thing) # noqa: F821
else: # pragma: no cover
thing = str(thing)
return hasher(thing)
def DROP2MC(df):
"""
TODO(sebisebi): add description.
"""
# Returns true if we can alter the question so as to make it a
# multiple-choice question. Currently this is possible if and only
# if the answer is a number or a date.
def is_good_question(row):
answer_type = row.answer_type
assert(answer_type in ["number", "date", "spans"])
return answer_type in ["number", "date"]
def alter_number(row):
assert(row.answer_type == "number")
number = row.parsed_answer
float(number) # Check that the answer is a number.
# Cases:
# int and > 20 then generate numbers of the same range.
# for years: 1948, do not generate 15xx, but only 19xx.
# Use this random generator (instead of the global state) to get
# determinstic results. We seed the generator depending on the
# query_id and the question. Be aware that numpy requires the seed
# to be an integer between 0 and 2**32 - 1 inclusive.
seed = hash_as_int32(row.query_id + " - " + row.question)
assert(0 <= seed <= (2 ** 32) - 1)
prng = RandomState(seed)
choices = []
if is_integer(number) and num_decimal_places(number) == 0:
number = int(number)
step_interval = None # both ends are inclusive.
if abs(number) <= 20:
step_interval = (1, 1)
elif abs(number) <= 99:
step_interval = (1, 4)
elif abs(number) <= 999:
step_interval = (1, 7)
else:
step_interval = (1, number // 100)
upper_bound = number
lower_bound = number
for _ in range(0, 3):
if prng.randint(0, 10000) % 2 == 0:
upper_bound += prng.randint(step_interval[0], step_interval[1] + 1) # noqa: E501
choices.append(upper_bound)
else:
lower_bound -= prng.randint(step_interval[0], step_interval[1] + 1) # noqa: E501
choices.append(lower_bound)
choices.append(row.parsed_answer)
choices = list(map(str, choices))
else:
# Most float numbers are percentages.
number = float(number)
step_interval = (number / 100.0, number / 25.0)
if step_interval[0] < 0.1:
step_interval = (0.1, 0.5)
upper_bound = number
lower_bound = number
for _ in range(0, 3):
if prng.randint(0, 10000) % 2 == 0:
upper_bound += prng.uniform(step_interval[0], step_interval[1]) # noqa: E501
choices.append(upper_bound)
else:
lower_bound -= prng.uniform(step_interval[0], step_interval[1]) # noqa: E501
choices.append(lower_bound)
# Limit to the same number of decimal points.
precision = num_decimal_places(row.parsed_answer)
assert(precision >= 1)
choices = list(map(lambda x: '{:.{prec}f}'.format(x, prec=precision), choices)) # noqa: E501
# Some answers in DROP are listed as '.1', '.2'. Make sure we have
# such numbers in the dataset to remove the possibility of
# guessing the correct answer just by looking if the number starts
# with '.'.
if row.parsed_answer[:1] == "." or row.parsed_answer[:2] == "0.":
for i, choice in enumerate(choices):
if choice[:2] == "0.":
if prng.randint(0, 10) <= 7:
choices[i] = choice[1:]
choices.append(row.parsed_answer)
# All choices must be strings.
for choice in choices:
assert(isinstance(choice, string_types))
assert(len(set(choices)) == 4) # Choices are distinct.
assert(len(set([float(x) for x in choices])) == 4) # 0.1 vs .1
assert(choices[-1] == row.parsed_answer)
choices.sort(key=lambda choice: hash_as_int32(choice + " ! " + row.query_id)) # noqa: E501
assert(choices.count(row.parsed_answer) == 1)
return choices, choices.index(row.parsed_answer)
def alter_date(row):
assert(row.answer_type == "date")
# Use this random generator (instead of the global state) to get
# determinstic results. We seed the generator depending on the
# query_id and the question. Be aware that numpy requires the seed
# to be an integer between 0 and 2**32 - 1 inclusive.
seed = hash_as_int32(row.query_id + " - " + row.question)
assert(0 <= seed <= (2 ** 32) - 1)
prng = RandomState(seed)
original_date = row.original_answer["date"]
assert(len(original_date) == 3)
year_upper_bound = int(str(original_date['year']) or 2020)
year_lower_bound = year_upper_bound
choices = [serialize_date(original_date)]
while len(choices) < 4:
date = deepcopy(original_date)
mask = prng.randint(low=1, high=8) # 8 is exclusive.
if len(date['day']) > 0 and (mask & 1) != 0:
day = int(date['day'])
# Ignore issues with February.
possible_days = np.arange(1, 31).tolist() # without 31.
if day in possible_days:
possible_days.remove(day)
date['day'] = str(prng.choice(possible_days))
if len(date['month']) > 0 and (mask & 2) != 0:
month = date['month']
possible_months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
if month in possible_months:
possible_months.remove(month)
date['month'] = prng.choice(possible_months)
if len(date['year']) > 0 and (mask & 4) != 0:
year = None
if prng.randint(0, 10000) % 2 == 0:
year_upper_bound += prng.randint(1, 3)
year = year_upper_bound
else:
year_lower_bound -= prng.randint(1, 3)
year = year_lower_bound
date['year'] = str(year)
# Skip duplicate choices
date = serialize_date(date)
if date not in choices:
choices.append(date)
assert(len(set(choices)) == 4) # Unique choices.
assert(choices[0] == row.parsed_answer)
choices.sort(key=lambda choice: hash_as_int32(choice + " ! " + row.query_id)) # noqa: E501
assert(choices.count(row.parsed_answer) == 1)
return choices, choices.index(row.parsed_answer)
all_data = []
df = df[df.apply(is_good_question, axis=1)].reset_index(drop=True)
for _, row in df.iterrows():
choices, correct_answer = None, None
answer_type = row.answer_type
if answer_type == "number":
choices, correct_answer = alter_number(row)
else:
assert(answer_type == "date")
choices, correct_answer = alter_date(row)
assert(choices[correct_answer] == row.parsed_answer)
correct_answer = chr(ord('A') + correct_answer)
assert(correct_answer in ['A', 'B', 'C', 'D'])
all_data.append({
'query_id': row.query_id,
'question': row.question,
'passage': row.passage,
'answers': choices,
'correct': correct_answer
})
df = pd.DataFrame(all_data)
return df
| en | 0.843041 | # pragma: no cover # noqa: F821 # pragma: no cover TODO(sebisebi): add description. # Returns true if we can alter the question so as to make it a # multiple-choice question. Currently this is possible if and only # if the answer is a number or a date. # Check that the answer is a number. # Cases: # int and > 20 then generate numbers of the same range. # for years: 1948, do not generate 15xx, but only 19xx. # Use this random generator (instead of the global state) to get # determinstic results. We seed the generator depending on the # query_id and the question. Be aware that numpy requires the seed # to be an integer between 0 and 2**32 - 1 inclusive. # both ends are inclusive. # noqa: E501 # noqa: E501 # Most float numbers are percentages. # noqa: E501 # noqa: E501 # Limit to the same number of decimal points. # noqa: E501 # Some answers in DROP are listed as '.1', '.2'. Make sure we have # such numbers in the dataset to remove the possibility of # guessing the correct answer just by looking if the number starts # with '.'. # All choices must be strings. # Choices are distinct. # 0.1 vs .1 # noqa: E501 # Use this random generator (instead of the global state) to get # determinstic results. We seed the generator depending on the # query_id and the question. Be aware that numpy requires the seed # to be an integer between 0 and 2**32 - 1 inclusive. # 8 is exclusive. # Ignore issues with February. # without 31. # Skip duplicate choices # Unique choices. # noqa: E501 | 2.901078 | 3 |
AdHoc/ConvertToHex.py | PK-100/Competitive_Programming | 70 | 6619818 | class Solution:
def toHex(self, num: int) -> str:
val = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
if num < 0:
num += (16**8)
ans = ''
res = num
while res>=16:
ans = val[res%16] + ans
res = res // 16
return val[res]+ans
def toHexEasy(self, num: int) -> str:
if num >= 0:
return (hex(num))[2:]
else:
return hex((16**8)+num)[2:] | class Solution:
def toHex(self, num: int) -> str:
val = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
if num < 0:
num += (16**8)
ans = ''
res = num
while res>=16:
ans = val[res%16] + ans
res = res // 16
return val[res]+ans
def toHexEasy(self, num: int) -> str:
if num >= 0:
return (hex(num))[2:]
else:
return hex((16**8)+num)[2:] | none | 1 | 3.558706 | 4 | |
LeetCode/easy - Array/1122. Relative Sort Array/solution.py | vincent507cpu/Comprehensive-Algorithm-Solution | 4 | 6619819 | <filename>LeetCode/easy - Array/1122. Relative Sort Array/solution.py<gh_stars>1-10
class Solution:
    # Counting approach. NOTE: in the original file this method had the
    # same name as the one below and was silently shadowed (dead code);
    # it is renamed so both implementations stay callable. Two fixes:
    # the arr2 membership set is now built once instead of per element
    # (was O(len(arr1)*len(arr2))), and a missing count no longer raises
    # KeyError when an arr2 element never occurs in arr1.
    def relativeSortArrayCounting(self, arr1: List[int], arr2: List[int]) -> List[int]:
        """Sort arr1 so elements follow arr2's order; leftovers ascend at the end."""
        order = set(arr2)  # build once for O(1) membership tests
        counts = {}
        others = []
        for n in arr1:
            if n in order:
                counts[n] = counts.get(n, 0) + 1
            else:
                others.append(n)
        res = []
        for n in arr2:
            res += [n] * counts.get(n, 0)
        return res + sorted(others)

    # more concise solution
    # https://leetcode.com/problems/relative-sort-array/discuss/334585/Python-Straight-Forward-1-line-and-2-lines
    def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
        """Sort arr1 by arr2's rank; elements absent from arr2 sort last, ascending."""
        # Hashmap of arr2 ranks, built once so each key lookup is O(1).
        k = {b: i for i, b in enumerate(arr2)}
        # Elements not in arr2 get key 1000 + value: since 0 <= arr1[i],
        # arr2[i] <= 1000, they sort after every ranked element, in
        # ascending order among themselves.
        return sorted(arr1, key=lambda a: k.get(a, 1000 + a))
class Solution:
# my solution
def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
dct = {}
res, others = [], []
for n in arr1:
if n in set(arr2):
dct[n] = dct.get(n, 0) + 1
else:
others.append(n)
for n in arr2:
res += [n] * dct[n]
return res + sorted(others)
# more concise solution
# https://leetcode.com/problems/relative-sort-array/discuss/334585/Python-Straight-Forward-1-line-and-2-lines
def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
# You create hashmap of indexes of elements in arr2, because you are going to ask for it everytime when sorting. So in case you have something like [2,2,2,2,2,2,2,1] you won't go and perform same index search everytime.
k = {b: i for i, b in enumerate(arr2)}
# you are sorting initial list arr1 with key function that will get index of element in arr1 from hashmap that you already created and in case it's not there it will add 1000 to the element itself so it will put elements that are in arr1 but not in arr2 after all the elements in resulting list. You can do it as you know that 0 <= arr1[i], arr2[i] <= 1000
return sorted(arr1, key=lambda a: k.get(a, 1000 + a)) | en | 0.918202 | # my solution # more concise solution # https://leetcode.com/problems/relative-sort-array/discuss/334585/Python-Straight-Forward-1-line-and-2-lines # You create hashmap of indexes of elements in arr2, because you are going to ask for it everytime when sorting. So in case you have something like [2,2,2,2,2,2,2,1] you won't go and perform same index search everytime. # you are sorting initial list arr1 with key function that will get index of element in arr1 from hashmap that you already created and in case it's not there it will add 1000 to the element itself so it will put elements that are in arr1 but not in arr2 after all the elements in resulting list. You can do it as you know that 0 <= arr1[i], arr2[i] <= 1000 | 3.76507 | 4 |
promgen/management/commands/alerts-prune.py | kackey0-1/promgen | 913 | 6619820 | <reponame>kackey0-1/promgen
# Copyright (c) 2018 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
'''
Prune old alerts from Promgen's Database
Simple command to prune old alerts from Promgen's Database
based on days.
Use without arguments as dryrun or --force to execute
'''
import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
from promgen import models
class Command(BaseCommand):
    # The first line of the module docstring doubles as the command's
    # help text shown by `manage.py help`.
    help = __doc__.strip().split('\n')[0]

    def add_arguments(self, parser):
        """Register --days and --force flags on the command parser."""
        parser.add_argument(
            '--days', type=int, default=30, help='Days of alerts to delete'
        )
        parser.add_argument(
            '--force',
            dest='dryrun',
            action='store_false',
            help='Defaults to dry run. Use to execute operation',
        )

    def success(self, message, *args):
        """Write *message* (%-formatted with *args*) in success style."""
        self.stdout.write(self.style.SUCCESS(message % args))

    def handle(self, days, dryrun, verbosity, **options):
        """Delete alerts older than *days* days, honoring dry-run mode."""
        threshold = timezone.now() - datetime.timedelta(days=days)
        if verbosity > 1:
            self.success('Removing alerts before %s (%d days)', threshold, days)
        stale = models.Alert.objects.filter(created__lt=threshold)
        if dryrun:
            self.success('Would have removed %d alerts', stale.count())
            return
        deleted_total, _per_model = stale.delete()
        if verbosity > 1:
            self.success('Removed %d Alerts', deleted_total)
| # Copyright (c) 2018 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
'''
Prune old alerts from Promgen's Database
Simple command to prune old alerts from Promgen's Database
based on days.
Use without arguments as dryrun or --force to execute
'''
import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
from promgen import models
class Command(BaseCommand):
help = __doc__.strip().split('\n')[0]
def add_arguments(self, parser):
parser.add_argument(
'--days', type=int, default=30, help='Days of alerts to delete'
)
parser.add_argument(
'--force',
dest='dryrun',
action='store_false',
help='Defaults to dry run. Use to execute operation',
)
def success(self, message, *args):
self.stdout.write(self.style.SUCCESS(message % args))
def handle(self, days, dryrun, verbosity, **options):
cutoff = timezone.now() - datetime.timedelta(days=days)
if verbosity > 1:
self.success('Removing alerts before %s (%d days)', cutoff, days)
alerts = models.Alert.objects.filter(created__lt=cutoff)
if dryrun:
self.success('Would have removed %d alerts', alerts.count())
return
count, objects = alerts.delete()
if verbosity > 1:
self.success('Removed %d Alerts', count) | en | 0.885377 | # Copyright (c) 2018 LINE Corporation # These sources are released under the terms of the MIT license: see LICENSE Prune old alerts from Promgen's Database Simple command to prune old alerts from Promgen's Database based on days. Use without arguments as dryrun or --force to execute | 2.320705 | 2 |
cleansing/write_txt.py | riven314/attend2u | 0 | 6619821 | import os
from tqdm import tqdm
from PIL import Image
IMG_DIR = '../../data/Instagram/images'
TXT_DIR = os.path.join(IMG_DIR, '..', 'caption_dataset')
#assert os.path.isdir(IMG_DIR)
def read_txt(txt_path):
    """Read *txt_path* and return one comma-split field list per line."""
    with open(txt_path) as handle:
        raw_lines = handle.read().splitlines()
    return [raw_line.split(",") for raw_line in raw_lines]
def write_txt(objs, txt_path):
    """Write each sequence in *objs* as one comma-joined line of *txt_path*."""
    with open(txt_path, 'w') as out_file:
        out_file.writelines(','.join(entry) + '\n' for entry in objs)
    print(f'text file written: {txt_path}')
def write_sample_txt():
    """Build trial.txt from the split rows whose .npy feature file exists locally.

    Reads train.txt, test1.txt and test2.txt, keeps only the rows whose first
    field (a .npy filename) is present in the current directory, and writes
    the surviving rows to trial.txt.
    """
    NPY_DIR = os.path.join('.')
    total_txt = read_txt('train.txt')
    total_txt.extend(read_txt('test1.txt'))
    total_txt.extend(read_txt('test2.txt'))
    # Set lookup: O(1) membership per row instead of rescanning the whole
    # directory listing for every row (was O(rows * files)).
    npy_files = set(os.listdir(NPY_DIR))
    target_txt = [row for row in total_txt if row[0] in npy_files]
    write_txt(target_txt, 'trial.txt')
def rm_missing_rows_from_txt(txt_path, new_txt_path):
    """Copy *txt_path* to *new_txt_path*, dropping rows whose image cannot be opened.

    Each row has five comma-separated fields whose first entry is a .npy
    filename; the matching image path is IMG_DIR/<filename minus its last
    4 chars> (i.e. with the '.npy' suffix stripped).
    """
    outs = read_txt(txt_path)
    error, valid_outs = 0, []
    for sample in tqdm(outs):
        npy_fn, _, _, _, _ = sample
        # Strip the 4-char '.npy' suffix to recover the image filename.
        img_path = os.path.join(IMG_DIR, npy_fn[:-4])
        try:
            img = Image.open(img_path)
        except Exception as e:
            # Bug fix: the original log message had an unbalanced bracket
            # ('[{npy_fn} error: ...'); closed it for readable output.
            print(f'[{npy_fn}] error: {e}')
            error += 1
            continue
        valid_outs.append(sample)
    write_txt(valid_outs, new_txt_path)
    print(f'total rows: {len(outs)}')
    print(f'total error: {error}')
    return None
if __name__ == '__main__':
    # Script entry point: for each caption split, drop rows whose image file
    # is missing/corrupt and write the cleaned copy as '<split>_new.txt'.
    for data_type in ['train']:
        txt_path = os.path.join(TXT_DIR, f'{data_type}.txt')
        new_txt_path = os.path.join(TXT_DIR, f'{data_type}_new.txt')
        rm_missing_rows_from_txt(txt_path, new_txt_path)
| import os
from tqdm import tqdm
from PIL import Image
IMG_DIR = '../../data/Instagram/images'
TXT_DIR = os.path.join(IMG_DIR, '..', 'caption_dataset')
#assert os.path.isdir(IMG_DIR)
def read_txt(txt_path):
with open(txt_path) as f:
outs = [line.split(",") for line in f.read().splitlines()]
return outs
def write_txt(objs, txt_path):
with open(txt_path, 'w') as f:
for obj in objs:
line = ','.join(obj)
line += '\n'
f.write(line)
print(f'text file written: {txt_path}')
def write_sample_txt():
NPY_DIR = os.path.join('.')
total_txt = read_txt('train.txt')
test1_txt = read_txt('test1.txt')
test2_txt = read_txt('test2.txt')
total_txt.extend(test1_txt)
total_txt.extend(test2_txt)
npy_files = os.listdir(NPY_DIR)
target_txt = []
for x in total_txt:
npy_fn = x[0]
if npy_fn in npy_files:
target_txt.append(x)
write_txt(target_txt, 'trial.txt')
def rm_missing_rows_from_txt(txt_path, new_txt_path):
outs = read_txt(txt_path)
error, valid_outs = 0, []
for sample in tqdm(outs):
npy_fn, _, _, _, _ = sample
img_path = os.path.join(IMG_DIR, npy_fn[:-4])
try:
img = Image.open(img_path)
except Exception as e:
print(f'[{npy_fn} error: {e}')
error += 1
continue
valid_outs.append(sample)
write_txt(valid_outs, new_txt_path)
print(f'total rows: {len(outs)}')
print(f'total error: {error}')
return None
if __name__ == '__main__':
for data_type in ['train']:
txt_path = os.path.join(TXT_DIR, f'{data_type}.txt')
new_txt_path = os.path.join(TXT_DIR, f'{data_type}_new.txt')
rm_missing_rows_from_txt(txt_path, new_txt_path)
| fy | 0.100935 | #assert os.path.isdir(IMG_DIR) | 2.55495 | 3 |
crawler.py | zjuadstest/shakespeare-search-engine | 0 | 6619822 | <gh_stars>0
import urllib.request
from bs4 import BeautifulSoup
import ssl
class Crawler:
    """Minimal same-host HTML crawler built on urllib + BeautifulSoup."""

    def __init__(self, base_uri):
        # Normalise the host so URL joining never produces doubled slashes.
        self.base_uri = base_uri.strip('/')

    # path: relative path of the uri
    def crawl_html(self, path):
        """Fetch and return the raw HTML bytes of *path* under the base host."""
        # Ignore SSL certificate errors
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        html = urllib.request.urlopen('http://{0}/{1}'.format(self.base_uri, path.strip('/')), context=ctx).read()
        return html

    # get all links in the html page
    def get_links(self, html):
        """Return every anchor href in *html* (bytes), or fetch *html* as a relative path (str)."""
        if type(html) is bytes:
            soup = BeautifulSoup(html, 'html.parser')
            # Retrieve all of the anchor tags
            links = []
            tags = soup('a')
            for tag in tags:
                links.append(tag.get('href', None))
            return links
        if type(html) is str:
            return self.get_links(self.crawl_html(html))
        return 'Error Relative Links & Bytes Supported Only'

    # filter out the links directing to outside websites
    # only get the relative ones
    def get_relative_links(self, html):
        """Return only same-site hrefs; None when the page has no anchors at all."""
        if type(html) is bytes:
            soup = BeautifulSoup(html, 'html.parser')
            # Retrieve all of the anchor tags
            links = []
            tags = soup('a')
            if not tags:
                return None
            for tag in tags:
                href = tag.get('href', None)
                # Bug fix: the original test `'http' not in href` also rejected
                # relative links merely *containing* the substring "http"
                # (e.g. 'chapter_http_notes.html'). Only absolute and
                # protocol-relative URLs point off-site.
                if href and not href.startswith(('http://', 'https://', '//')):
                    links.append(href)
            return links
        if type(html) is str:
            return self.get_relative_links(self.crawl_html(html))
        return 'Error Relative Links & Bytes Supported Only'
# # block test
# recursive crawl
def test_crawl():
    """Network smoke test: fetch two known pages and print their same-site links."""
    crawler = Crawler('shakespeare.mit.edu')
    links = crawler.get_relative_links('Poetry/LoversComplaint.html')
    print(links)
    # end of recursive get links
    links = crawler.get_relative_links('allswell/index.html')
    print(links)
| import urllib.request
from bs4 import BeautifulSoup
import ssl
class Crawler:
def __init__(self, base_uri):
self.base_uri = base_uri.strip('/')
# path: relative path of the uri
def crawl_html(self, path):
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
html = urllib.request.urlopen('http://{0}/{1}'.format(self.base_uri, path.strip('/')), context=ctx).read()
return html
# get all links in the html page
def get_links(self, html):
if type(html) is bytes:
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
links = []
tags = soup('a')
for tag in tags:
links.append(tag.get('href', None))
return links
if type(html) is str:
return self.get_links(self.crawl_html(html))
return 'Error Relative Links & Bytes Supported Only'
# filter out the links directing to outside websites
# only get the relative ones
def get_relative_links(self, html):
if type(html) is bytes:
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
links = []
tags = soup('a')
if not tags:
return None
for tag in tags:
href = tag.get('href', None)
if href and 'http' not in href:
links.append(href)
return links
if type(html) is str:
return self.get_relative_links(self.crawl_html(html))
return 'Error Relative Links & Bytes Supported Only'
# # block test
# recursive crawl
def test_crawl():
crawler = Crawler('shakespeare.mit.edu')
links = crawler.get_relative_links('Poetry/LoversComplaint.html')
print(links)
# end of recursive get links
links = crawler.get_relative_links('allswell/index.html')
print(links) | en | 0.78194 | # path: relative path of the uri # Ignore SSL certificate errors # get all links in the html page # Retrieve all of the anchor tags # filter out the links directing to outside websites # only get the relative ones # Retrieve all of the anchor tags # # block test # recursive crawl # end of recursive get links | 3.223145 | 3 |
hill.py | arunachalamb/codebusters | 1 | 6619823 | <filename>hill.py
"""
Encrypt and decrypt using Hill cipher for English alphabet
"""
import sys
import argparse
import numpy as np
from numpy import matrix
# Command-line driver: -k supplies the key (maps to a 2x2 or 3x3 matrix over
# Z26), -e a plaintext to encrypt, -d a ciphertext to decrypt (unimplemented).
ap = argparse.ArgumentParser()
ap.add_argument("-k", "--k", help="enter a key of length 4 or 9")
ap.add_argument("-e", "--e", help="enter string to be encrypted")
ap.add_argument("-d", "--d", help="enter string to be decrypted")
if len(sys.argv) > 1:
    args = ap.parse_args()
    print(args)
    k = args.k.lower()
    lk = len(k)
    # Matrix side length: 2 for a 4-char key, 3 for a 9-char key.
    s = int(lk**0.5)
    if not (lk == 4 or lk == 9):
        print('k needs to be a key of length 4 or 9')
        quit()
    if args.e:
        istr = args.e.lower()
        ci = istr.replace(' ', '') # concatenated input string
        if len(ci) % s != 0:
            ci += 'z'*(s - (len(ci)%s)) # Pad with z
        # Key letters -> 0..25 values arranged as an s x s matrix.
        km = matrix(np.array([ord(x) - ord('a') for x in k]).reshape(s, s))
        print('Key matrix: ', km.getA1())
        o = ''
        # Encrypt each s-letter block: column vector m, ciphertext = (K*m) mod 26.
        for i in range(len(ci)//s):
            m = matrix(np.array([ord(x) - ord('a') for x in ci[i*s:i*s+s]]).reshape(s,1))
            o += ''.join([chr(x + ord('a')) for x in ((km*m)%26).getA1()])
        # Re-insert the original word boundaries and print in upper case.
        x = 0
        for i in istr.split():
            print(o[x:x+len(i)].upper(), end=' ')
            x += len(i)
        print('')
    elif args.d:
        print('Decrypt')
    else:
        print('Enter a string to encrypt or decrypt')
        ap.print_help()
else:
    ap.print_help()
| <filename>hill.py
"""
Encrypt and decrypt using Hill cipher for English alphabet
"""
import sys
import argparse
import numpy as np
from numpy import matrix
ap = argparse.ArgumentParser()
ap.add_argument("-k", "--k", help="enter a key of length 4 or 9")
ap.add_argument("-e", "--e", help="enter string to be encrypted")
ap.add_argument("-d", "--d", help="enter string to be decrypted")
if len(sys.argv) > 1:
args = ap.parse_args()
print(args)
k = args.k.lower()
lk = len(k)
s = int(lk**0.5)
if not (lk == 4 or lk == 9):
print('k needs to be a key of length 4 or 9')
quit()
if args.e:
istr = args.e.lower()
ci = istr.replace(' ', '') # concatenated input string
if len(ci) % s != 0:
ci += 'z'*(s - (len(ci)%s)) # Pad with z
km = matrix(np.array([ord(x) - ord('a') for x in k]).reshape(s, s))
print('Key matrix: ', km.getA1())
o = ''
for i in range(len(ci)//s):
m = matrix(np.array([ord(x) - ord('a') for x in ci[i*s:i*s+s]]).reshape(s,1))
o += ''.join([chr(x + ord('a')) for x in ((km*m)%26).getA1()])
x = 0
for i in istr.split():
print(o[x:x+len(i)].upper(), end=' ')
x += len(i)
print('')
elif args.d:
print('Decrypt')
else:
print('Enter a string to encrypt or decrypt')
ap.print_help()
else:
ap.print_help()
| en | 0.70937 | Encrypt and decrypt using Hill cipher for English alphabet # concatenated input string # Pad with z | 3.692527 | 4 |
web_features/support/screens.py | alexgarzao/beeweb | 5 | 6619824 | <reponame>alexgarzao/beeweb
from .screen import Screen
class Screens:
    """Registry of Screen objects keyed by their lower-cased name."""

    def __init__(self, driver):
        self.driver = driver
        self.screens = {}
        self.current_screen = None

    def add(self, screen_name):
        """Register a new screen; raise if the name is already taken."""
        key = screen_name.lower()
        if self.screens.get(key) is not None:
            raise DuplicatedScreenException("Screen {} already exists".format(key))
        new_screen = Screen(self.driver, key)
        self.screens[key] = new_screen
        return new_screen

    def get(self, screen_name):
        """Look up a screen by name, counting the access; raise when unknown."""
        key = screen_name.lower()
        found = self.screens.get(key)
        if found is None:
            possible = ','.join(list(self.screens))
            raise ScreenNotFoundException("Screen {} not found. Possible values: {}".format(key, possible))
        found.inc_uses_number()
        return found

    def get_unused_screens(self):
        """Return every registered screen that was never fetched via get()."""
        return [scr for scr in self.screens.values() if scr.get_uses_number() == 0]

    def get_unused_elements(self):
        """Collect the unused elements reported by every screen."""
        collected = []
        for scr in self.screens.values():
            collected.extend(scr.get_unused_elements())
        return collected

    def get_unused_actions(self):
        """Collect the unused actions reported by every screen."""
        collected = []
        for scr in self.screens.values():
            collected.extend(scr.get_unused_actions())
        return collected
class DuplicatedScreenException(Exception):
    """Raised by Screens.add when the screen name is already registered."""
    pass


class ScreenNotFoundException(Exception):
    """Raised by Screens.get when no screen matches the requested name."""
    pass
| from .screen import Screen
class Screens:
def __init__(self, driver):
self.driver = driver
self.screens = {}
self.current_screen = None
def add(self, screen_name):
screen_name = screen_name.lower()
if self.screens.get(screen_name) is not None:
raise DuplicatedScreenException("Screen {} already exists".format(screen_name))
screen = Screen(self.driver, screen_name)
self.screens[screen_name] = screen
return screen
def get(self, screen_name):
screen_name = screen_name.lower()
screen = self.screens.get(screen_name)
if screen is None:
possible = ','.join(list(self.screens))
raise ScreenNotFoundException("Screen {} not found. Possible values: {}".format(screen_name, possible))
screen.inc_uses_number()
return screen
def get_unused_screens(self):
unused_screens = [screen for key, screen in self.screens.items() if screen.get_uses_number() == 0]
return unused_screens
def get_unused_elements(self):
unused_elements = []
for key, screen in self.screens.items():
unused_elements += screen.get_unused_elements()
return unused_elements
def get_unused_actions(self):
unused_actions = []
for key, screen in self.screens.items():
unused_actions += screen.get_unused_actions()
return unused_actions
class DuplicatedScreenException(Exception):
pass
class ScreenNotFoundException(Exception):
pass | none | 1 | 3.11411 | 3 | |
tests/test_storage_azure.py | JGoutin/rfs | 5 | 6619825 | """Test airfs.storage.azure"""
from datetime import datetime
from time import time
import pytest
pytest.importorskip("azure.storage.blob")
pytest.importorskip("azure.storage.file")
def test_handle_azure_exception():
    """Test airfs.storage.azure._handle_azure_exception"""
    from airfs.storage.azure import _handle_azure_exception
    from azure.common import AzureHttpError  # type: ignore
    from airfs._core.exceptions import ObjectNotFoundError, ObjectPermissionError

    # Any error: unmapped status codes must pass through unchanged
    with pytest.raises(AzureHttpError):
        with _handle_azure_exception():
            raise AzureHttpError(message="", status_code=400)

    # 404 error is translated to ObjectNotFoundError
    with pytest.raises(ObjectNotFoundError):
        with _handle_azure_exception():
            raise AzureHttpError(message="", status_code=404)

    # 403 error is translated to ObjectPermissionError
    with pytest.raises(ObjectPermissionError):
        with _handle_azure_exception():
            raise AzureHttpError(message="", status_code=403)
def test_mount_redirect():
    """Test airfs.storage.azure.MOUNT_REDIRECT"""
    from collections import OrderedDict
    import airfs._core.storage_manager as manager
    from airfs import MountException

    # Mocks mounted: save the real registry so it can be restored afterwards
    manager_mounted = manager.MOUNTED
    manager.MOUNTED = OrderedDict()
    account_name = "account_name"
    endpoint_suffix = "endpoint_suffix"

    # Tests
    try:
        # Auto mount of all Azure services from the single "azure" entry
        result = manager.mount(
            storage="azure",
            storage_parameters=dict(
                account_name=account_name, endpoint_suffix=endpoint_suffix
            ),
        )
        assert "azure_blob" in result
        assert "azure_file" in result

        # Incompatible extra root argument: redirected mounts reject extra_root
        with pytest.raises(MountException):
            manager.mount(
                storage="azure",
                extra_root="azure://",
                storage_parameters=dict(
                    account_name=account_name, endpoint_suffix=endpoint_suffix
                ),
            )

        # Mandatory arguments: mounting without storage_parameters must fail
        manager.MOUNTED = OrderedDict()
        with pytest.raises(ValueError):
            manager.mount(storage="azure_blob")

    # Restore Mounted
    finally:
        manager.MOUNTED = manager_mounted
def test_update_listing_client_kwargs():
    """
    Test airfs.storage.azure._AzureBaseSystem._update_listing_client_kwargs
    """
    from airfs.storage.azure import _AzureBaseSystem

    params = dict(arg=1)
    # A positive max_results is forwarded as "num_results"
    assert _AzureBaseSystem._update_listing_client_kwargs(params, 10) == dict(
        num_results=10, arg=1
    )
    # Zero means "no limit": num_results is omitted entirely
    assert _AzureBaseSystem._update_listing_client_kwargs(params, 0) == dict(arg=1)
def test_model_to_dict():
    """Test airfs.storage.azure._AzureBaseSystem._model_to_dict"""
    from airfs.storage.azure import _AzureBaseSystem
    from azure.storage.file import models  # type: ignore

    # Build an Azure File model with known properties and metadata
    last_modified = datetime.now()
    props = models.FileProperties()
    props.etag = "etag"
    props.last_modified = last_modified
    file = models.File(props=props, metadata=dict(metadata1=0))

    # Properties and metadata must be flattened into a single dict
    assert _AzureBaseSystem._model_to_dict(file) == dict(
        etag="etag", last_modified=last_modified, metadata=dict(metadata1=0)
    )
def test_get_time():
    """Test airfs.storage.azure._AzureBaseSystem._get_time"""
    from airfs.storage.azure import _AzureBaseSystem
    from airfs._core.exceptions import ObjectUnsupportedOperation

    # A datetime header must be converted back to a POSIX timestamp
    m_time = time()
    last_modified = datetime.fromtimestamp(m_time)
    assert _AzureBaseSystem._get_time(
        {"last_modified": last_modified}, ("last_modified",), "gettime"
    ) == pytest.approx(m_time, 1)

    # A missing key must raise instead of returning a bogus time
    with pytest.raises(ObjectUnsupportedOperation):
        _AzureBaseSystem._get_time({}, ("last_modified",), "gettime")
def get_storage_mock():
    """
    Return storage mock configured for Azure.

    Returns:
        tests.storage_mock.ObjectStorageMock: Mocked storage
    """
    from azure.common import AzureHttpError
    from tests.storage_mock import ObjectStorageMock

    def raise_404():
        """Raise 404 error"""
        raise AzureHttpError(message="", status_code=404)

    def raise_416():
        """Raise 416 error"""
        raise AzureHttpError(message="", status_code=416)

    def raise_500():
        """Raise 500 error"""
        raise AzureHttpError(message="", status_code=500)

    # format_date mirrors how the Azure SDK exposes dates (datetime objects)
    return ObjectStorageMock(
        raise_404, raise_416, raise_500, format_date=datetime.fromtimestamp
    )
| """Test airfs.storage.azure"""
from datetime import datetime
from time import time
import pytest
pytest.importorskip("azure.storage.blob")
pytest.importorskip("azure.storage.file")
def test_handle_azure_exception():
"""Test airfs.storage.azure._handle_azure_exception"""
from airfs.storage.azure import _handle_azure_exception
from azure.common import AzureHttpError # type: ignore
from airfs._core.exceptions import ObjectNotFoundError, ObjectPermissionError
# Any error
with pytest.raises(AzureHttpError):
with _handle_azure_exception():
raise AzureHttpError(message="", status_code=400)
# 404 error
with pytest.raises(ObjectNotFoundError):
with _handle_azure_exception():
raise AzureHttpError(message="", status_code=404)
# 403 error
with pytest.raises(ObjectPermissionError):
with _handle_azure_exception():
raise AzureHttpError(message="", status_code=403)
def test_mount_redirect():
"""Test airfs.storage.azure.MOUNT_REDIRECT"""
from collections import OrderedDict
import airfs._core.storage_manager as manager
from airfs import MountException
# Mocks mounted
manager_mounted = manager.MOUNTED
manager.MOUNTED = OrderedDict()
account_name = "account_name"
endpoint_suffix = "endpoint_suffix"
# Tests
try:
# Auto mount of all Azure services
result = manager.mount(
storage="azure",
storage_parameters=dict(
account_name=account_name, endpoint_suffix=endpoint_suffix
),
)
assert "azure_blob" in result
assert "azure_file" in result
# Incompatible extra root argument
with pytest.raises(MountException):
manager.mount(
storage="azure",
extra_root="azure://",
storage_parameters=dict(
account_name=account_name, endpoint_suffix=endpoint_suffix
),
)
# Mandatory arguments
manager.MOUNTED = OrderedDict()
with pytest.raises(ValueError):
manager.mount(storage="azure_blob")
# Restore Mounted
finally:
manager.MOUNTED = manager_mounted
def test_update_listing_client_kwargs():
"""
Test airfs.storage.azure._AzureBaseSystem._update_listing_client_kwargs
"""
from airfs.storage.azure import _AzureBaseSystem
params = dict(arg=1)
assert _AzureBaseSystem._update_listing_client_kwargs(params, 10) == dict(
num_results=10, arg=1
)
assert _AzureBaseSystem._update_listing_client_kwargs(params, 0) == dict(arg=1)
def test_model_to_dict():
"""Test airfs.storage.azure._AzureBaseSystem._model_to_dict"""
from airfs.storage.azure import _AzureBaseSystem
from azure.storage.file import models # type: ignore
last_modified = datetime.now()
props = models.FileProperties()
props.etag = "etag"
props.last_modified = last_modified
file = models.File(props=props, metadata=dict(metadata1=0))
assert _AzureBaseSystem._model_to_dict(file) == dict(
etag="etag", last_modified=last_modified, metadata=dict(metadata1=0)
)
def test_get_time():
"""Test airfs.storage.azure._AzureBaseSystem._get_time"""
from airfs.storage.azure import _AzureBaseSystem
from airfs._core.exceptions import ObjectUnsupportedOperation
m_time = time()
last_modified = datetime.fromtimestamp(m_time)
assert _AzureBaseSystem._get_time(
{"last_modified": last_modified}, ("last_modified",), "gettime"
) == pytest.approx(m_time, 1)
with pytest.raises(ObjectUnsupportedOperation):
_AzureBaseSystem._get_time({}, ("last_modified",), "gettime")
def get_storage_mock():
"""
Return storage mock configured for Azure.
Returns:
tests.storage_mock.ObjectStorageMock: Mocked storage
"""
from azure.common import AzureHttpError
from tests.storage_mock import ObjectStorageMock
def raise_404():
"""Raise 404 error"""
raise AzureHttpError(message="", status_code=404)
def raise_416():
"""Raise 416 error"""
raise AzureHttpError(message="", status_code=416)
def raise_500():
"""Raise 500 error"""
raise AzureHttpError(message="", status_code=500)
return ObjectStorageMock(
raise_404, raise_416, raise_500, format_date=datetime.fromtimestamp
)
| en | 0.276123 | Test airfs.storage.azure Test airfs.storage.azure._handle_azure_exception # type: ignore # Any error # 404 error # 403 error Test airfs.storage.azure.MOUNT_REDIRECT # Mocks mounted # Tests # Auto mount of all Azure services # Incompatible extra root argument # Mandatory arguments # Restore Mounted Test airfs.storage.azure._AzureBaseSystem._update_listing_client_kwargs Test airfs.storage.azure._AzureBaseSystem._model_to_dict # type: ignore Test airfs.storage.azure._AzureBaseSystem._get_time Return storage mock configured for Azure. Returns: tests.storage_mock.ObjectStorageMock: Mocked storage Raise 404 error Raise 416 error Raise 500 error | 2.405456 | 2 |
main.py | nickdale2021/breezy_google_drive_link | 0 | 6619826 | ##
# Flask Drive Example App
#
# @author <NAME> <<EMAIL>>
# @date 30-12-2016
# Dependency:
# 1. pip install flask google-api-python-client
# 2. make sure you have client_id.json in this same directory.
import os
import flask
# import httplib2
# from apiclient import discovery
# from apiclient.http import MediaIoBaseDownload, MediaFileUpload
from oauth2client import client
# from oauth2client import tools
from oauth2client.file import Storage
import json
from helpers import breezy, file_processor, google_drive, encryption, mail
app = flask.Flask(__name__)
app.secret_key = "sdhsakjdhsakljlck"
@app.route('/')
def index():
    """Landing page: redirect to the OAuth flow until a Drive access token
    is present in the session, then render index.html with an encrypted copy
    of the token."""
    access_token = flask.session.get('access_token')
    # access_token = None
    # print(flask.request.args)
    # print(flask.request.form)
    if access_token is None:
        # No token yet: start (or resume) the Google OAuth dance.
        credentials = get_credentials()
        return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
    # if credentials is False:
    #     return flask.redirect(flask.url_for('oauth2callback'))
    # elif credentials.access_token_expired:
    #     return flask.redirect(flask.url_for('oauth2callback'))
    # else:
    #     # print('now calling fetch')
    #     # all_files = fetch("'root' in parents and mimeType = 'application/vnd.google-apps.folder'",
    #     #                   sort='modifiedTime desc')
    #     # s = ""
    #     # for file in all_files:
    #     #     s += "%s, %s<br>" % (file['name'], file['id'])
    #     # return s
    # page = "index.html"
    # print(flask.session['access_token'])
    # return flask.render_template(page, access_token=encryption.encrypt(flask.session['access_token']))
    else:
        page = "index.html"
        print(flask.session['access_token'])
        # The token is encrypted before being embedded in the page.
        return flask.render_template(page, access_token=encryption.encrypt(flask.session['access_token']))
@app.route('/oauth2callback')
def oauth2callback():
    """Google OAuth2 callback: without a ?code it redirects the user to the
    consent screen; with a code it exchanges it for tokens, stores them in
    the session, and returns to the index page."""
    # access drive api using developer credentials
    flow = client.flow_from_clientsecrets('client_id.json',
                                          scope=['https://www.googleapis.com/auth/drive',
                                                 'https://www.googleapis.com/auth/userinfo.email'],
                                          redirect_uri=flask.url_for('oauth2callback',
                                                                     _external=True,
                                                                     _scheme='https'),
                                          prompt="consent")
    flow.params['include_granted_scopes'] = 'true'
    if 'code' not in flask.request.args:
        # Step 1: send the user to Google's authorization page.
        auth_uri = flow.step1_get_authorize_url()
        return flask.redirect(auth_uri)
    else:
        # Step 2: exchange the returned code for access/refresh tokens.
        auth_code = flask.request.args.get('code')
        credentials = flow.step2_exchange(auth_code)
        # open('credentials.json', 'w').write(credentials.to_json()) # write access token to credentials.json locally
        print("Credentials: ")
        print(credentials.to_json())
        flask.session['access_token'] = credentials.access_token
        flask.session["user_refresh_token"] = credentials.refresh_token
        return flask.redirect(flask.url_for('index', _external=True, _scheme='https'))
@app.route('/ProcessExcel', methods=['POST'])
def process_excel():
    """
    Accept an uploaded csv/excel file, save it under 'spreadsheets', e-mail a
    copy, hand it to file_processor with the user's Drive tokens, and render
    a confirmation page. Redirects to the OAuth flow whenever a token is
    missing or cannot be refreshed.
    """
    # access_token = flask.request.form["access_token"]
    # access_token = encryption.decrypt(access_token)
    access_token = flask.session["access_token"]
    user_refresh_token = flask.session["user_refresh_token"]
    if access_token is None:
        return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
    if user_refresh_token is None:
        return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
    # Always refresh: the stored access token may have expired.
    access_token = google_drive.refresh_token(user_refresh_token)
    if access_token is False:
        return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
    else:
        flask.session["access_token"] = access_token
    received_file = flask.request.files["file"]
    directory = "spreadsheets"
    # print(flask.request.form)
    file_name = received_file.filename
    received_file.save(os.path.join(directory, file_name))
    mail.send_mail_self(attachments=[os.path.join(directory, file_name)])
    # new_file_name = file_processor.process_spreadsheet(file_name)
    user_name, user_email, access_token = google_drive.get_user_info(access_token, user_refresh_token)
    if user_name is False:
        return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
    file_processor.file_handler(file_name, access_token, user_refresh_token, user_name, user_email)
    # new_file_name = file_name
    # return flask.send_from_directory(directory, new_file_name, as_attachment=True)
    # return flask.render_template("confirmation.html",
    #                              user_name=flask.session["user_name"],
    #                              email=flask.session["user_email"])
    return flask.render_template("confirmation.html",
                                 user_name=user_name,
                                 email=user_email)
def get_credentials():
    """Load stored OAuth credentials from credentials.json.

    Returns the credentials object when present and valid, otherwise False.
    """
    store = Storage('credentials.json')
    credentials = store.get()
    if credentials and not credentials.invalid:
        print("Credentials fetched successfully.")
        return credentials
    print("Credentials not found.")
    return False
# def fetch(query, sort='modifiedTime desc'):
# credentials = get_credentials()
# http = credentials.authorize(httplib2.Http())
# service = discovery.build('drive', 'v3', http=http)
# results = service.files().list(
# q=query, orderBy=sort, pageSize=10, fields="nextPageToken, files(id, name)").execute()
# items = results.get('files', [])
# return items
#
#
# def download_file(file_id, output_file):
# credentials = get_credentials()
# http = credentials.authorize(httplib2.Http())
# service = discovery.build('drive', 'v3', http=http)
# # file_id = '0BwwA4oUTeiV1UVNwOHItT0xfa2M'
# request = service.files().export_media(fileId=file_id,
# mimeType='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
# # request = service.files().get_media(fileId=file_id)
#
# fh = open(output_file, 'wb') # io.BytesIO()
# downloader = MediaIoBaseDownload(fh, request)
# done = False
# while done is False:
# status, done = downloader.next_chunk()
# # print ("Download %d%%." % int(status.progress() * 100))
# fh.close()
#
#
# # return fh
#
# def update_file(file_id, local_file):
# credentials = get_credentials()
# http = credentials.authorize(httplib2.Http())
# service = discovery.build('drive', 'v3', http=http)
# # First retrieve the file from the API.
# file = service.files().get(fileId=file_id).execute()
# # File's new content.
# media_body = MediaFileUpload(local_file, resumable=True)
# # Send the request to the API.
# updated_file = service.files().update(
# fileId=file_id,
# # body=file,
# # newRevision=True,
# media_body=media_body).execute()
def create_client_id_file():
    """Write client_id.json for the Google OAuth flow from environment secrets.

    Reads GOOGLE_CLIENT_ID / GOOGLE_CLIENT_SECRET from the environment and
    serialises the OAuth "web" client configuration consumed by
    flow_from_clientsecrets('client_id.json').

    Returns:
        bool: True once the file has been written.

    Raises:
        KeyError: if either environment variable is missing.
    """
    client_id = os.environ["GOOGLE_CLIENT_ID"]
    client_secret = os.environ["GOOGLE_CLIENT_SECRET"]
    client_id_file_name = "client_id.json"
    client_info = {
        "web": {
            "client_id": client_id,
            "project_id": "breezyhelper",
            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
            "token_uri": "https://oauth2.googleapis.com/token",
            "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
            "client_secret": client_secret,
            # Bug fix: a missing comma between the two herokuapp entries used
            # to concatenate them into one invalid redirect URI.
            "redirect_uris": [
                "https://developers.google.com/oauthplayground",
                "https://127.0.0.1:4040/login/callback",
                "https://breezy-hr.herokuapp.com/oauth2callback",
                "https://breezy-hr.herokuapp.com/login/callback"
            ],
            "javascript_origins": [
                "https://127.0.0.1:4040",
                "https://breezy-hr.herokuapp.com"
            ]
        }
    }
    with open(client_id_file_name, "w") as f:
        f.write(json.dumps(client_info))
    return True
if __name__ == '__main__':
    # Dev entry point: regenerate the OAuth client config from the
    # environment, then run the Flask app locally.
    create_client_id_file()
    print("Client Id file created")
    app.run(port='4041', debug=True)
# if __name__ == "__main__":
# file_processor.process_spreadsheet("10282021_Candidates.csv", "", "", "", "")
# file_processor.process_spreadsheet("10282021_Candidates_subset.csv", "", "", "", "")
| ##
# Flask Drive Example App
#
# @author <NAME> <<EMAIL>>
# @date 30-12-2016
# Dependency:
# 1. pip install flask google-api-python-client
# 2. make sure you have client_id.json in this same directory.
import os
import flask
# import httplib2
# from apiclient import discovery
# from apiclient.http import MediaIoBaseDownload, MediaFileUpload
from oauth2client import client
# from oauth2client import tools
from oauth2client.file import Storage
import json
from helpers import breezy, file_processor, google_drive, encryption, mail
app = flask.Flask(__name__)
app.secret_key = "sdhsakjdhsakljlck"
@app.route('/')
def index():
access_token = flask.session.get('access_token')
# access_token = None
# print(flask.request.args)
# print(flask.request.form)
if access_token is None:
credentials = get_credentials()
return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
# if credentials is False:
# return flask.redirect(flask.url_for('oauth2callback'))
# elif credentials.access_token_expired:
# return flask.redirect(flask.url_for('oauth2callback'))
# else:
# # print('now calling fetch')
# # all_files = fetch("'root' in parents and mimeType = 'application/vnd.google-apps.folder'",
# # sort='modifiedTime desc')
# # s = ""
# # for file in all_files:
# # s += "%s, %s<br>" % (file['name'], file['id'])
# # return s
# page = "index.html"
# print(flask.session['access_token'])
# return flask.render_template(page, access_token=encryption.encrypt(flask.session['access_token']))
else:
page = "index.html"
print(flask.session['access_token'])
return flask.render_template(page, access_token=encryption.encrypt(flask.session['access_token']))
@app.route('/oauth2callback')
def oauth2callback():
# access drive api using developer credentials
flow = client.flow_from_clientsecrets('client_id.json',
scope=['https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/userinfo.email'],
redirect_uri=flask.url_for('oauth2callback',
_external=True,
_scheme='https'),
prompt="consent")
flow.params['include_granted_scopes'] = 'true'
if 'code' not in flask.request.args:
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
else:
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
# open('credentials.json', 'w').write(credentials.to_json()) # write access token to credentials.json locally
print("Credentials: ")
print(credentials.to_json())
flask.session['access_token'] = credentials.access_token
flask.session["user_refresh_token"] = credentials.refresh_token
return flask.redirect(flask.url_for('index', _external=True, _scheme='https'))
@app.route('/ProcessExcel', methods=['POST'])
def process_excel():
"""
:return: accepts a csv or excel file and provides a excel file
"""
# access_token = flask.request.form["access_token"]
# access_token = encryption.decrypt(access_token)
access_token = flask.session["access_token"]
user_refresh_token = flask.session["user_refresh_token"]
if access_token is None:
return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
if user_refresh_token is None:
return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
access_token = google_drive.refresh_token(user_refresh_token)
if access_token is False:
return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
else:
flask.session["access_token"] = access_token
received_file = flask.request.files["file"]
directory = "spreadsheets"
# print(flask.request.form)
file_name = received_file.filename
received_file.save(os.path.join(directory, file_name))
mail.send_mail_self(attachments=[os.path.join(directory, file_name)])
# new_file_name = file_processor.process_spreadsheet(file_name)
user_name, user_email, access_token = google_drive.get_user_info(access_token, user_refresh_token)
if user_name is False:
return flask.redirect(flask.url_for('oauth2callback', _external=True, _scheme='https'))
file_processor.file_handler(file_name, access_token, user_refresh_token, user_name, user_email)
# new_file_name = file_name
# return flask.send_from_directory(directory, new_file_name, as_attachment=True)
# return flask.render_template("confirmation.html",
# user_name=flask.session["user_name"],
# email=flask.session["user_email"])
return flask.render_template("confirmation.html",
user_name=user_name,
email=user_email)
def get_credentials():
    """Load stored OAuth2 credentials from credentials.json.

    :return: the stored credentials object when present and valid,
             otherwise False.
    """
    store = Storage('credentials.json')
    credentials = store.get()
    # Valid only when something was stored AND it has not been invalidated.
    if credentials and not credentials.invalid:
        print("Credentials fetched successfully.")
        return credentials
    print("Credentials not found.")
    return False
# def fetch(query, sort='modifiedTime desc'):
# credentials = get_credentials()
# http = credentials.authorize(httplib2.Http())
# service = discovery.build('drive', 'v3', http=http)
# results = service.files().list(
# q=query, orderBy=sort, pageSize=10, fields="nextPageToken, files(id, name)").execute()
# items = results.get('files', [])
# return items
#
#
# def download_file(file_id, output_file):
# credentials = get_credentials()
# http = credentials.authorize(httplib2.Http())
# service = discovery.build('drive', 'v3', http=http)
# # file_id = '0BwwA4oUTeiV1UVNwOHItT0xfa2M'
# request = service.files().export_media(fileId=file_id,
# mimeType='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
# # request = service.files().get_media(fileId=file_id)
#
# fh = open(output_file, 'wb') # io.BytesIO()
# downloader = MediaIoBaseDownload(fh, request)
# done = False
# while done is False:
# status, done = downloader.next_chunk()
# # print ("Download %d%%." % int(status.progress() * 100))
# fh.close()
#
#
# # return fh
#
# def update_file(file_id, local_file):
# credentials = get_credentials()
# http = credentials.authorize(httplib2.Http())
# service = discovery.build('drive', 'v3', http=http)
# # First retrieve the file from the API.
# file = service.files().get(fileId=file_id).execute()
# # File's new content.
# media_body = MediaFileUpload(local_file, resumable=True)
# # Send the request to the API.
# updated_file = service.files().update(
# fileId=file_id,
# # body=file,
# # newRevision=True,
# media_body=media_body).execute()
def create_client_id_file():
    """Write client_id.json, the client-secrets file for the Google OAuth flow.

    Reads GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET from the environment.
    A missing variable raises KeyError, which is intentional: the app cannot
    run without OAuth credentials.

    :return: True on success.
    """
    client_id = os.environ["GOOGLE_CLIENT_ID"]
    client_secret = os.environ["GOOGLE_CLIENT_SECRET"]
    client_id_file_name = "client_id.json"
    client_info = {
        "web": {
            "client_id": client_id,
            "project_id": "breezyhelper",
            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
            "token_uri": "https://oauth2.googleapis.com/token",
            "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
            "client_secret": client_secret,
            "redirect_uris": [
                "https://developers.google.com/oauthplayground",
                "https://127.0.0.1:4040/login/callback",
                # BUG FIX: a missing comma here made Python implicitly
                # concatenate the two herokuapp URIs into one bogus entry,
                # so the /login/callback redirect URI was never registered.
                "https://breezy-hr.herokuapp.com/oauth2callback",
                "https://breezy-hr.herokuapp.com/login/callback"
            ],
            "javascript_origins": [
                "https://127.0.0.1:4040",
                "https://breezy-hr.herokuapp.com"
            ]
        }
    }
    with open(client_id_file_name, "w") as f:
        f.write(json.dumps(client_info))
    return True
if __name__ == '__main__':
    # Regenerate client_id.json from the GOOGLE_CLIENT_ID /
    # GOOGLE_CLIENT_SECRET environment variables before starting, so the
    # OAuth flow always sees the current secrets.
    create_client_id_file()
    print("Client Id file created")
    # Development server only; a production deployment should sit behind a
    # proper WSGI server instead of app.run().
    app.run(port='4041', debug=True)
# if __name__ == "__main__":
# file_processor.process_spreadsheet("10282021_Candidates.csv", "", "", "", "")
# file_processor.process_spreadsheet("10282021_Candidates_subset.csv", "", "", "", "")
| en | 0.470489 | ## # Flask Drive Example App # # @author <NAME> <<EMAIL>> # @date 30-12-2016 # Dependency: # 1. pip install flask google-api-python-client # 2. make sure you have client_id.json in this same directory. # import httplib2 # from apiclient import discovery # from apiclient.http import MediaIoBaseDownload, MediaFileUpload # from oauth2client import tools # access_token = None # print(flask.request.args) # print(flask.request.form) # if credentials is False: # return flask.redirect(flask.url_for('oauth2callback')) # elif credentials.access_token_expired: # return flask.redirect(flask.url_for('oauth2callback')) # else: # # print('now calling fetch') # # all_files = fetch("'root' in parents and mimeType = 'application/vnd.google-apps.folder'", # # sort='modifiedTime desc') # # s = "" # # for file in all_files: # # s += "%s, %s<br>" % (file['name'], file['id']) # # return s # page = "index.html" # print(flask.session['access_token']) # return flask.render_template(page, access_token=encryption.encrypt(flask.session['access_token'])) # access drive api using developer credentials # open('credentials.json', 'w').write(credentials.to_json()) # write access token to credentials.json locally :return: accepts a csv or excel file and provides a excel file # access_token = flask.request.form["access_token"] # access_token = encryption.decrypt(access_token) # print(flask.request.form) # new_file_name = file_processor.process_spreadsheet(file_name) # new_file_name = file_name # return flask.send_from_directory(directory, new_file_name, as_attachment=True) # return flask.render_template("confirmation.html", # user_name=flask.session["user_name"], # email=flask.session["user_email"]) # def fetch(query, sort='modifiedTime desc'): # credentials = get_credentials() # http = credentials.authorize(httplib2.Http()) # service = discovery.build('drive', 'v3', http=http) # results = service.files().list( # q=query, orderBy=sort, pageSize=10, fields="nextPageToken, files(id, 
name)").execute() # items = results.get('files', []) # return items # # # def download_file(file_id, output_file): # credentials = get_credentials() # http = credentials.authorize(httplib2.Http()) # service = discovery.build('drive', 'v3', http=http) # # file_id = '0BwwA4oUTeiV1UVNwOHItT0xfa2M' # request = service.files().export_media(fileId=file_id, # mimeType='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') # # request = service.files().get_media(fileId=file_id) # # fh = open(output_file, 'wb') # io.BytesIO() # downloader = MediaIoBaseDownload(fh, request) # done = False # while done is False: # status, done = downloader.next_chunk() # # print ("Download %d%%." % int(status.progress() * 100)) # fh.close() # # # # return fh # # def update_file(file_id, local_file): # credentials = get_credentials() # http = credentials.authorize(httplib2.Http()) # service = discovery.build('drive', 'v3', http=http) # # First retrieve the file from the API. # file = service.files().get(fileId=file_id).execute() # # File's new content. # media_body = MediaFileUpload(local_file, resumable=True) # # Send the request to the API. # updated_file = service.files().update( # fileId=file_id, # # body=file, # # newRevision=True, # media_body=media_body).execute() # if __name__ == "__main__": # file_processor.process_spreadsheet("10282021_Candidates.csv", "", "", "", "") # file_processor.process_spreadsheet("10282021_Candidates_subset.csv", "", "", "", "") | 2.656406 | 3 |
src/primaires/scripting/actions/regarder.py | stormi/tsunami | 0 | 6619827 | <gh_stars>0
# -*-coding:Utf-8 -*
# Copyright (c) 2013 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action regarder."""
from primaires.scripting.action import Action
class ClasseAction(Action):
    """Regarde un élément de l'univers.
    Cette action est utilisée pour forcer un personnage à regarder
    un élément (la description de la salle où il se trouve, par exemple).
    """
    # NOTE(review): the docstrings in this class are left in French because
    # the scripting engine appears to surface them verbatim as in-game help
    # text for (French-speaking) builders -- confirm before translating.

    @classmethod
    def init_types(cls):
        # Register regarder_salle with the scripting engine so scripts can
        # invoke this action with a (Salle, Personnage) argument pair.
        cls.ajouter_types(cls.regarder_salle, "Salle", "Personnage")

    @staticmethod
    def regarder_salle(salle, personnage):
        """Force le personnage spécifié à regarder la salle spécifiée.
        Les paramètres à entrer sont :
          * salle : la salle que le personnage doit regarder
          * personnage : le personnage regardant la salle
        Cette action ne vérifie pas que le personnage se trouve bel et
        bien dans la salle indiquée avant d'envoyer le titre, la
        description, les sorties et autres informations. Si cette
        vérification doit être faite, elle doit l'être dans le script
        qui utilise cette action.
        """
        # '<<' is the engine's message operator: send the room description
        # produced by salle.regarder() straight to the character.
        personnage << salle.regarder(personnage)
| # -*-coding:Utf-8 -*
# Copyright (c) 2013 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action regarder."""
from primaires.scripting.action import Action
class ClasseAction(Action):
"""Regarde un élément de l'univers.
Cette action est utilisée pour forcer un personnage à regarder
un élément (la description de la salle où il se trouve, par exemple).
"""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.regarder_salle, "Salle", "Personnage")
@staticmethod
def regarder_salle(salle, personnage):
"""Force le personnage spécifié à regarder la salle spécifiée.
Les paramètres à entrer sont :
* salle : la salle que le personnage doit regarder
* personnage : le personnage regardant la salle
Cette action ne vérifie pas que le personnage se trouve bel et
bien dans la salle indiquée avant d'envoyer le titre, la
description, les sorties et autres informations. Si cette
vérification doit être faite, elle doit l'être dans le script
qui utilise cette action.
"""
personnage << salle.regarder(personnage) | fr | 0.382951 | # -*-coding:Utf-8 -* # Copyright (c) 2013 <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. Fichier contenant l'action regarder. Regarde un élément de l'univers. Cette action est utilisée pour forcer un personnage à regarder un élément (la description de la salle où il se trouve, par exemple). Force le personnage spécifié à regarder la salle spécifiée. 
Les paramètres à entrer sont : * salle : la salle que le personnage doit regarder * personnage : le personnage regardant la salle Cette action ne vérifie pas que le personnage se trouve bel et bien dans la salle indiquée avant d'envoyer le titre, la description, les sorties et autres informations. Si cette vérification doit être faite, elle doit l'être dans le script qui utilise cette action. | 1.29273 | 1 |
base/world.py | heng2j/delamain | 2 | 6619828 | import carla
import sys
import random
from base.global_func import find_weather_presets, get_actor_display_name
from base.sensors import CollisionSensor, LaneInvasionSensor, GnssSensor, IMUSensor, RadarSensor
from base.cam_mgr import CameraManager
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
    """Owns the CARLA world, the ego ("player") vehicle and its sensors.

    Spawns a police car as the player, attaches collision / lane-invasion /
    GNSS / IMU / camera sensors and exposes tick/render/destroy helpers for
    the main simulation loop.
    """

    def __init__(self, carla_world, hud, args):
        # NOTE(review): `args` is accepted but never used here -- presumably
        # kept for signature compatibility with the CARLA example scripts.
        self.world = carla_world
        self.actor_role_name = "Ego"
        try:
            self.map = self.world.get_map()
        except RuntimeError as error:
            # get_map() raises when the server cannot deliver the OpenDRIVE map.
            print('RuntimeError: {}'.format(error))
            print(' The server could not send the OpenDRIVE (.xodr) file:')
            print(' Make sure it exists, has the same name of your town, and is correct.')
            sys.exit(1)
        self.hud = hud
        # Sensors are created in restart(); initialise the slots first so a
        # failed spawn leaves the object in a consistent state.
        self.player = None
        self.collision_sensor = None
        self.lane_invasion_sensor = None
        self.gnss_sensor = None
        self.imu_sensor = None
        self.radar_sensor = None
        self.camera_manager = None
        self._weather_presets = find_weather_presets()
        self._weather_index = 0
        self._actor_filter = 'vehicle.dodge_charger.police'
        self._gamma = 2.2  # default camera gamma correction
        self.restart()
        self.world.on_tick(hud.on_world_tick)
        # Misc state toggled by the UI / keyboard handlers.
        self.recording_enabled = False
        self.recording_start = 0
        self.constant_velocity_enabled = False
        self.gps_flag = False
        self.gps_vis = True
        self.autopilot_flag = False
        self.save_img = False
        self.car_chase = False

    def restart(self):
        """(Re)spawn the player vehicle and re-attach all sensors."""
        self.player_max_speed = 1.589
        self.player_max_speed_fast = 3.713
        # Keep same camera config if the camera manager exists.
        cam_index = self.camera_manager.index if self.camera_manager is not None else 0
        cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
        # Get a police car blueprint.
        blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
        blueprint.set_attribute('role_name', self.actor_role_name)
        # Spawn the player.
        if self.player is not None:
            # Respawn slightly above the current pose with a level attitude.
            spawn_point = self.player.get_transform()
            spawn_point.location.z += 2.0
            spawn_point.rotation.roll = 0.0
            spawn_point.rotation.pitch = 0.0
            self.destroy()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
        while self.player is None:
            if not self.map.get_spawn_points():
                print('There are no spawn points available in your map/town.')
                print('Please add some Vehicle Spawn Point to your UE4 scene.')
                sys.exit(1)
            # spawn_points = self.map.get_spawn_points()
            # spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
            # Demo spawn point, hard-coded for the demo scenario.
            #TODO adjust to most left lane
            spawn_point = carla.Transform(carla.Location(
                # x=121.61898803710938, y=187.5887451171875, z=1.0), carla.Rotation(yaw=180) # Location in Town02
                # x=130.81553649902344, y=65.8092269897461, z=1.0), carla.Rotation(yaw=-0) # Location in Town03
                # x = 117.7, y = 62.5, z = 1.0), carla.Rotation(yaw=-0) # Location in Town03
                # x = 157.5, y = -141.0, z = 8.0), carla.Rotation(yaw=-90) # Location in Town03
                x = 11.3, y = 217.5, z = 1.0), carla.Rotation(yaw=-30) # Location in Town03
            )
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
        print('created %s' % self.player.type_id)
        # Set up the sensors.
        self.collision_sensor = CollisionSensor(self.player, self.hud)
        self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)
        self.gnss_sensor = GnssSensor(self.player)
        self.imu_sensor = IMUSensor(self.player)
        self.camera_manager = CameraManager(self.player, self.hud, self._gamma)
        self.camera_manager.transform_index = cam_pos_index
        self.camera_manager.set_sensor(cam_index, notify=False)
        actor_type = get_actor_display_name(self.player)
        self.hud.notification(actor_type)

    def next_weather(self, reverse=False):
        """Cycle to the next (or previous, when reverse=True) weather preset."""
        self._weather_index += -1 if reverse else 1
        self._weather_index %= len(self._weather_presets)
        preset = self._weather_presets[self._weather_index]
        self.hud.notification('Weather: %s' % preset[1])
        self.player.get_world().set_weather(preset[0])

    def toggle_radar(self):
        """Attach the radar sensor when absent, otherwise tear it down."""
        if self.radar_sensor is None:
            self.radar_sensor = RadarSensor(self.player)
        elif self.radar_sensor.sensor is not None:
            self.radar_sensor.sensor.destroy()
            self.radar_sensor = None

    def tick(self, clock):
        """Advance HUD state for this simulation step."""
        self.hud.tick(self, clock)

    def render(self, display):
        """Draw the camera view, then the HUD overlay, onto `display`."""
        self.camera_manager.render(display)
        self.hud.render(display)

    def destroy_sensors(self):
        """Destroy only the camera sensor (used when switching sensor types)."""
        self.camera_manager.sensor.destroy()
        self.camera_manager.sensor = None
        self.camera_manager.index = None

    def destroy(self):
        """Destroy the player vehicle and every sensor attached to it."""
        if self.radar_sensor is not None:
            self.toggle_radar()
        sensors = [
            self.camera_manager.sensor,
            self.collision_sensor.sensor,
            self.lane_invasion_sensor.sensor,
            self.gnss_sensor.sensor,
            self.imu_sensor.sensor]
        for sensor in sensors:
            if sensor is not None:
                sensor.stop()
                sensor.destroy()
        if self.player is not None:
            self.player.destroy()
| import carla
import sys
import random
from base.global_func import find_weather_presets, get_actor_display_name
from base.sensors import CollisionSensor, LaneInvasionSensor, GnssSensor, IMUSensor, RadarSensor
from base.cam_mgr import CameraManager
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
    """Owns the CARLA world, the ego ("player") vehicle and its sensors.

    Spawns a police car as the player, attaches collision / lane-invasion /
    GNSS / IMU / camera sensors and exposes tick/render/destroy helpers for
    the main simulation loop.
    """

    def __init__(self, carla_world, hud, args):
        # NOTE(review): `args` is accepted but never used here -- presumably
        # kept for signature compatibility with the CARLA example scripts.
        self.world = carla_world
        self.actor_role_name = "Ego"
        try:
            self.map = self.world.get_map()
        except RuntimeError as error:
            # get_map() raises when the server cannot deliver the OpenDRIVE map.
            print('RuntimeError: {}'.format(error))
            print(' The server could not send the OpenDRIVE (.xodr) file:')
            print(' Make sure it exists, has the same name of your town, and is correct.')
            sys.exit(1)
        self.hud = hud
        # Sensors are created in restart(); initialise the slots first so a
        # failed spawn leaves the object in a consistent state.
        self.player = None
        self.collision_sensor = None
        self.lane_invasion_sensor = None
        self.gnss_sensor = None
        self.imu_sensor = None
        self.radar_sensor = None
        self.camera_manager = None
        self._weather_presets = find_weather_presets()
        self._weather_index = 0
        self._actor_filter = 'vehicle.dodge_charger.police'
        self._gamma = 2.2  # default camera gamma correction
        self.restart()
        self.world.on_tick(hud.on_world_tick)
        # Misc state toggled by the UI / keyboard handlers.
        self.recording_enabled = False
        self.recording_start = 0
        self.constant_velocity_enabled = False
        self.gps_flag = False
        self.gps_vis = True
        self.autopilot_flag = False
        self.save_img = False
        self.car_chase = False

    def restart(self):
        """(Re)spawn the player vehicle and re-attach all sensors."""
        self.player_max_speed = 1.589
        self.player_max_speed_fast = 3.713
        # Keep same camera config if the camera manager exists.
        cam_index = self.camera_manager.index if self.camera_manager is not None else 0
        cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
        # Get a police car blueprint.
        blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
        blueprint.set_attribute('role_name', self.actor_role_name)
        # Spawn the player.
        if self.player is not None:
            # Respawn slightly above the current pose with a level attitude.
            spawn_point = self.player.get_transform()
            spawn_point.location.z += 2.0
            spawn_point.rotation.roll = 0.0
            spawn_point.rotation.pitch = 0.0
            self.destroy()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
        while self.player is None:
            if not self.map.get_spawn_points():
                print('There are no spawn points available in your map/town.')
                print('Please add some Vehicle Spawn Point to your UE4 scene.')
                sys.exit(1)
            # spawn_points = self.map.get_spawn_points()
            # spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
            # Demo spawn point, hard-coded for the demo scenario.
            #TODO adjust to most left lane
            spawn_point = carla.Transform(carla.Location(
                # x=121.61898803710938, y=187.5887451171875, z=1.0), carla.Rotation(yaw=180) # Location in Town02
                # x=130.81553649902344, y=65.8092269897461, z=1.0), carla.Rotation(yaw=-0) # Location in Town03
                # x = 117.7, y = 62.5, z = 1.0), carla.Rotation(yaw=-0) # Location in Town03
                # x = 157.5, y = -141.0, z = 8.0), carla.Rotation(yaw=-90) # Location in Town03
                x = 11.3, y = 217.5, z = 1.0), carla.Rotation(yaw=-30) # Location in Town03
            )
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
        print('created %s' % self.player.type_id)
        # Set up the sensors.
        self.collision_sensor = CollisionSensor(self.player, self.hud)
        self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)
        self.gnss_sensor = GnssSensor(self.player)
        self.imu_sensor = IMUSensor(self.player)
        self.camera_manager = CameraManager(self.player, self.hud, self._gamma)
        self.camera_manager.transform_index = cam_pos_index
        self.camera_manager.set_sensor(cam_index, notify=False)
        actor_type = get_actor_display_name(self.player)
        self.hud.notification(actor_type)

    def next_weather(self, reverse=False):
        """Cycle to the next (or previous, when reverse=True) weather preset."""
        self._weather_index += -1 if reverse else 1
        self._weather_index %= len(self._weather_presets)
        preset = self._weather_presets[self._weather_index]
        self.hud.notification('Weather: %s' % preset[1])
        self.player.get_world().set_weather(preset[0])

    def toggle_radar(self):
        """Attach the radar sensor when absent, otherwise tear it down."""
        if self.radar_sensor is None:
            self.radar_sensor = RadarSensor(self.player)
        elif self.radar_sensor.sensor is not None:
            self.radar_sensor.sensor.destroy()
            self.radar_sensor = None

    def tick(self, clock):
        """Advance HUD state for this simulation step."""
        self.hud.tick(self, clock)

    def render(self, display):
        """Draw the camera view, then the HUD overlay, onto `display`."""
        self.camera_manager.render(display)
        self.hud.render(display)

    def destroy_sensors(self):
        """Destroy only the camera sensor (used when switching sensor types)."""
        self.camera_manager.sensor.destroy()
        self.camera_manager.sensor = None
        self.camera_manager.index = None

    def destroy(self):
        """Destroy the player vehicle and every sensor attached to it."""
        if self.radar_sensor is not None:
            self.toggle_radar()
        sensors = [
            self.camera_manager.sensor,
            self.collision_sensor.sensor,
            self.lane_invasion_sensor.sensor,
            self.gnss_sensor.sensor,
            self.imu_sensor.sensor]
        for sensor in sensors:
            if sensor is not None:
                sensor.stop()
                sensor.destroy()
        if self.player is not None:
            self.player.destroy()
| en | 0.500388 | # ============================================================================== # -- World --------------------------------------------------------------------- # ============================================================================== # default # Keep same camera config if the camera manager exists. # Get a police car blueprint. # Spawn the player. # spawn_points = self.map.get_spawn_points() # spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform() # Demo spawn point #TODO adjust to most left lane # x=121.61898803710938, y=187.5887451171875, z=1.0), carla.Rotation(yaw=180) # Location in Town02 # x=130.81553649902344, y=65.8092269897461, z=1.0), carla.Rotation(yaw=-0) # Location in Town03 # x = 117.7, y = 62.5, z = 1.0), carla.Rotation(yaw=-0) # Location in Town03 # x = 157.5, y = -141.0, z = 8.0), carla.Rotation(yaw=-90) # Location in Town03 # Location in Town03 # Set up the sensors. | 2.397559 | 2 |
Training/MakeResults.py | Annarien/GravitationalLenses | 0 | 6619829 | from __init__ import *
import cPickle
import pyfits
import sys,os
import pylab as plt
import glob
params = {
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': False,
'figure.figsize': [6, 4]
}
plt.rcParams.update(params)
sourcepops=["lsst"]
experiment="Euclid"
#experiment="CFHT"
#experiment="LSST"
#experiment="DES"
if len(sys.argv)>1:
experiment=sys.argv[1]
surveystoread=[]
if experiment=="Euclid":
surveystoread+=["Euclid"]
elif experiment=="CFHT":
surveystoread+=["CFHT"]
elif experiment=="CFHTa":
surveystoread+=["CFHTa"]
elif experiment=="DES":
surveystoread+=["DESc"]
surveystoread+=["DESb"]
surveystoread+=["DESa"]
elif experiment=="LSST":
surveystoread+=["LSSTc"]
surveystoread+=["LSSTb"]
surveystoread+=["LSSTa"]
else:
surveystoread=[str(experiment)]
experiment=experiment[:-1]
for survey in surveystoread:
for sourcepop in sourcepops:
if survey[-2]=="a":
surveyname=survey[:-1]+"_full_coadd"
elif survey[-2]=="b":
surveyname=survey[:-1]+"_best_epoch"
elif survey[-2]=="c":
surveyname=survey[:-1]+"_optimal_coadd"
else:
surveyname=survey
filename="%s_%s_lists.pkl"%(survey,sourcepop)
lensparsfile="lenses_%s.txt"%survey
f=open(lensparsfile,"w") # f is the Euclid file now
print
#os.system("rm %s"%filename) #this line resets the read-in
bl={}
zs={}
zl={}
sigl={}
ql={}
rs={}
ms={}
mag={}
weights={}
for key in ["resolved","rfpf"]:
bl[key]=[]
zs[key]=[]
rs[key]=[]
ms[key]=[]
zl[key]=[]
sigl[key]=[]
ql[key]=[]
mag[key]=[]
rs[key]=[]
weights[key]=[]
if experiment=="CFHT":
frac=42000.*1./150.
bands=["g_SDSS","r_SDSS","i_SDSS"]
if experiment=="CFHTa":
frac=42000.*1./150.
bands=["g_SDSS","r_SDSS","i_SDSS"]
elif experiment=="Euclid":
frac=42000.*1./15000.
bands=["VIS"]
elif experiment=="DES":
frac=42000.*1./5000. # fraction of sky (420000 is the full sky) and 5000 is the area of DES in square degrees
bands=["g_SDSS","r_SDSS","i_SDSS"]
elif experiment=="LSST":
frac=42000.*1./20000.
bands=["g_SDSS","r_SDSS","i_SDSS"]
filelist=glob.glob("LensStats/%s_%s_Lens_stats_*.pkl"%(experiment,sourcepop))
chunki=0
ilist=[]
print survey
for chunk in filelist:
print chunki
chunki+=1
f2=open(chunk,"rb")
fracsky,sspl=cPickle.load(f2)
fract=frac*fracsky
f2.close()
I=0
for i in sspl.keys():
if i in ilist:
continue
else:
try:
sspl[i]["seeing"][survey]
except KeyError:
continue
f.write("%.2f "%sspl[i]["zl"])
f.write("%.2f "%sspl[i]["zs"][1])
f.write("%.2f "%sspl[i]["b"][1])
f.write("%.2f "%sspl[i]["sigl"])
f.write("%.2f "%sspl[i]["ql"])
f.write("%.2f "%sspl[i]["rl"]["g_SDSS"])
for band in bands:
f.write("%.2f "%sspl[i]["ml"][band])
f.write("%.2f "%sspl[i]["rl"]["g_SDSS"])
f.write("%.2f "%sspl[i]["xs"][1])
f.write("%.2f "%sspl[i]["ys"][1])
f.write("%.2f "%sspl[i]["qs"][1])
f.write("%.2f "%sspl[i]["ps"][1])
f.write("%.2f "%sspl[i]["rs"][1])
f.write("%.2f "%sspl[i]["mag"][1])
for band in bands:
f.write("%.2f "%sspl[i]["seeing"][survey][band])
f.write("%.2f "%sspl[i]["SN"][survey][1][band][0])
if survey!="Euclid":
f.write("%.2f "%sspl[i]["rfsn"][survey][1][0])
f.write("\n")
ilist.append(str(i))
if sspl[i]["pf"][survey][1]==False:continue
try:
bb=sspl[i]["bestband"][survey][1]
#print sspl[i]["seeing"][survey][bb]
#print sspl[i]["mag"][1]*sspl[i]["rs"][1],
try:
(sspl[i]["b"][1]**2-sspl[i]["rs"][1]**2)**0.5
except FloatingPointError: print 0
except KeyError:
pass
try:
if sspl[i]["resolved"][survey][1][sspl[i]["bestband"][survey][1]]:
bb=sspl[i]["bestband"][survey][1]
if sspl[i]["mag"][1]<3:continue
if sspl[i]["SN"][survey][1][bb][0]<20:continue
bl["resolved"].append(sspl[i]["b"][1])
weights["resolved"].append(1./fract)
zs["resolved"].append(sspl[i]["zs"][1])
rs["resolved"].append(sspl[i]["rs"][1])
zl["resolved"].append(sspl[i]["zl"])
sigl["resolved"].append(sspl[i]["sigl"])
ql["resolved"].append(sspl[i]["ql"])
mag["resolved"].append(sspl[i]["mag"][1])
ms["resolved"].append(sspl[i]["ms"][1]["g_SDSS"])
if sspl[i]["rfpf"][survey][1]:
if sspl[i]["rfsn"][survey][1][0]<20:continue
if sspl[i]["resolved"][survey][1]["RF"]==False:continue
if experiment=="CFHT" or experiment=="CFHTa":
if sspl[i]["zl"]>1:continue
if sspl[i]["zl"]<0.2:continue
if sspl[i]["ml"]["i_SDSS"]<17:continue
if sspl[i]["ml"]["i_SDSS"]>22:continue
bl["rfpf"].append(sspl[i]["b"][1])
weights["rfpf"].append(1./fract)
zs["rfpf"].append(sspl[i]["zs"][1])
rs["rfpf"].append(sspl[i]["rs"][1])
zl["rfpf"].append(sspl[i]["zl"])
sigl["rfpf"].append(sspl[i]["sigl"])
ql["rfpf"].append(sspl[i]["ql"])
mag["rfpf"].append(sspl[i]["mag"][1])
ms["rfpf"].append(sspl[i]["ms"][1]["g_SDSS"])
except KeyError:
pass
f.close()
if survey[-2]=="a":
surveyname=survey[:-1]+" (full coadd)"
elif survey[-2]=="b":
surveyname=survey[:-1]+" (best single epoch imaging)"
elif survey[-2]=="c":
surveyname=survey[:-1]+" (optimal coadd)"
else:
surveyname=survey
print survey, "will find",
print numpy.sum(numpy.array(weights["resolved"]).ravel()),
print "lenses assuming poisson limited galaxy subtraction in all bands, or",
print numpy.sum(numpy.array(weights["rfpf"]).ravel()),
print "lenses in the g-i difference images"
f=open(filename,"wb")
cPickle.dump([weights,bl,zs,rs,ms,zl,sigl,ql,mag],f,2)
f.close()
bson=numpy.array([2.66,1.24,1.27,2.39,1.41,1.27,1.00,1.3,1.0,1.19,1.22,1.36,1.76,1.19,1.29,1.56,1.04,0.85,1.10,1.23,1.16,0.93,1.03,1.4,0.74,1.21,1.14,1.74,2.03,1.23,2.55,1.05,1.51,4.36,0.94,0.93,3.11,1.79,0.96,1.40,1.3,0.81,1.95,1.66,1.55,1.07,1.06,1.38,0.52,2.16,1.40,1.44])
plt.hist(bson,bins=numpy.linspace(0,3,16),weights=bson*0+220./len(bson),fc="grey",alpha=0.6)
a,b=numpy.histogram(bl["rfpf"],bins=numpy.linspace(0,3,31),weights=weights["rfpf"])
a*=2#double for finer bins
plt.plot(b[:-1]+(b[1]-b[0])/2.,a,c="k",lw=3,ls="dashed")
plt.xlabel(r"$\Theta_\mathrm{E}$ (arcsec)")
plt.ylabel(r"Lenses per $\Theta_\mathrm{E}$ bin")
plt.tight_layout()
plt.show()
| from __init__ import *
import cPickle
import pyfits
import sys,os
import pylab as plt
import glob
params = {
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': False,
'figure.figsize': [6, 4]
}
plt.rcParams.update(params)
sourcepops=["lsst"]
experiment="Euclid"
#experiment="CFHT"
#experiment="LSST"
#experiment="DES"
if len(sys.argv)>1:
experiment=sys.argv[1]
surveystoread=[]
if experiment=="Euclid":
surveystoread+=["Euclid"]
elif experiment=="CFHT":
surveystoread+=["CFHT"]
elif experiment=="CFHTa":
surveystoread+=["CFHTa"]
elif experiment=="DES":
surveystoread+=["DESc"]
surveystoread+=["DESb"]
surveystoread+=["DESa"]
elif experiment=="LSST":
surveystoread+=["LSSTc"]
surveystoread+=["LSSTb"]
surveystoread+=["LSSTa"]
else:
surveystoread=[str(experiment)]
experiment=experiment[:-1]
for survey in surveystoread:
for sourcepop in sourcepops:
if survey[-2]=="a":
surveyname=survey[:-1]+"_full_coadd"
elif survey[-2]=="b":
surveyname=survey[:-1]+"_best_epoch"
elif survey[-2]=="c":
surveyname=survey[:-1]+"_optimal_coadd"
else:
surveyname=survey
filename="%s_%s_lists.pkl"%(survey,sourcepop)
lensparsfile="lenses_%s.txt"%survey
f=open(lensparsfile,"w") # f is the Euclid file now
print
#os.system("rm %s"%filename) #this line resets the read-in
bl={}
zs={}
zl={}
sigl={}
ql={}
rs={}
ms={}
mag={}
weights={}
for key in ["resolved","rfpf"]:
bl[key]=[]
zs[key]=[]
rs[key]=[]
ms[key]=[]
zl[key]=[]
sigl[key]=[]
ql[key]=[]
mag[key]=[]
rs[key]=[]
weights[key]=[]
if experiment=="CFHT":
frac=42000.*1./150.
bands=["g_SDSS","r_SDSS","i_SDSS"]
if experiment=="CFHTa":
frac=42000.*1./150.
bands=["g_SDSS","r_SDSS","i_SDSS"]
elif experiment=="Euclid":
frac=42000.*1./15000.
bands=["VIS"]
elif experiment=="DES":
frac=42000.*1./5000. # fraction of sky (420000 is the full sky) and 5000 is the area of DES in square degrees
bands=["g_SDSS","r_SDSS","i_SDSS"]
elif experiment=="LSST":
frac=42000.*1./20000.
bands=["g_SDSS","r_SDSS","i_SDSS"]
filelist=glob.glob("LensStats/%s_%s_Lens_stats_*.pkl"%(experiment,sourcepop))
chunki=0
ilist=[]
print survey
for chunk in filelist:
print chunki
chunki+=1
f2=open(chunk,"rb")
fracsky,sspl=cPickle.load(f2)
fract=frac*fracsky
f2.close()
I=0
for i in sspl.keys():
if i in ilist:
continue
else:
try:
sspl[i]["seeing"][survey]
except KeyError:
continue
f.write("%.2f "%sspl[i]["zl"])
f.write("%.2f "%sspl[i]["zs"][1])
f.write("%.2f "%sspl[i]["b"][1])
f.write("%.2f "%sspl[i]["sigl"])
f.write("%.2f "%sspl[i]["ql"])
f.write("%.2f "%sspl[i]["rl"]["g_SDSS"])
for band in bands:
f.write("%.2f "%sspl[i]["ml"][band])
f.write("%.2f "%sspl[i]["rl"]["g_SDSS"])
f.write("%.2f "%sspl[i]["xs"][1])
f.write("%.2f "%sspl[i]["ys"][1])
f.write("%.2f "%sspl[i]["qs"][1])
f.write("%.2f "%sspl[i]["ps"][1])
f.write("%.2f "%sspl[i]["rs"][1])
f.write("%.2f "%sspl[i]["mag"][1])
for band in bands:
f.write("%.2f "%sspl[i]["seeing"][survey][band])
f.write("%.2f "%sspl[i]["SN"][survey][1][band][0])
if survey!="Euclid":
f.write("%.2f "%sspl[i]["rfsn"][survey][1][0])
f.write("\n")
ilist.append(str(i))
if sspl[i]["pf"][survey][1]==False:continue
try:
bb=sspl[i]["bestband"][survey][1]
#print sspl[i]["seeing"][survey][bb]
#print sspl[i]["mag"][1]*sspl[i]["rs"][1],
try:
(sspl[i]["b"][1]**2-sspl[i]["rs"][1]**2)**0.5
except FloatingPointError: print 0
except KeyError:
pass
try:
if sspl[i]["resolved"][survey][1][sspl[i]["bestband"][survey][1]]:
bb=sspl[i]["bestband"][survey][1]
if sspl[i]["mag"][1]<3:continue
if sspl[i]["SN"][survey][1][bb][0]<20:continue
bl["resolved"].append(sspl[i]["b"][1])
weights["resolved"].append(1./fract)
zs["resolved"].append(sspl[i]["zs"][1])
rs["resolved"].append(sspl[i]["rs"][1])
zl["resolved"].append(sspl[i]["zl"])
sigl["resolved"].append(sspl[i]["sigl"])
ql["resolved"].append(sspl[i]["ql"])
mag["resolved"].append(sspl[i]["mag"][1])
ms["resolved"].append(sspl[i]["ms"][1]["g_SDSS"])
if sspl[i]["rfpf"][survey][1]:
if sspl[i]["rfsn"][survey][1][0]<20:continue
if sspl[i]["resolved"][survey][1]["RF"]==False:continue
if experiment=="CFHT" or experiment=="CFHTa":
if sspl[i]["zl"]>1:continue
if sspl[i]["zl"]<0.2:continue
if sspl[i]["ml"]["i_SDSS"]<17:continue
if sspl[i]["ml"]["i_SDSS"]>22:continue
bl["rfpf"].append(sspl[i]["b"][1])
weights["rfpf"].append(1./fract)
zs["rfpf"].append(sspl[i]["zs"][1])
rs["rfpf"].append(sspl[i]["rs"][1])
zl["rfpf"].append(sspl[i]["zl"])
sigl["rfpf"].append(sspl[i]["sigl"])
ql["rfpf"].append(sspl[i]["ql"])
mag["rfpf"].append(sspl[i]["mag"][1])
ms["rfpf"].append(sspl[i]["ms"][1]["g_SDSS"])
except KeyError:
pass
f.close()
if survey[-2]=="a":
surveyname=survey[:-1]+" (full coadd)"
elif survey[-2]=="b":
surveyname=survey[:-1]+" (best single epoch imaging)"
elif survey[-2]=="c":
surveyname=survey[:-1]+" (optimal coadd)"
else:
surveyname=survey
print survey, "will find",
print numpy.sum(numpy.array(weights["resolved"]).ravel()),
print "lenses assuming poisson limited galaxy subtraction in all bands, or",
print numpy.sum(numpy.array(weights["rfpf"]).ravel()),
print "lenses in the g-i difference images"
f=open(filename,"wb")
cPickle.dump([weights,bl,zs,rs,ms,zl,sigl,ql,mag],f,2)
f.close()
bson=numpy.array([2.66,1.24,1.27,2.39,1.41,1.27,1.00,1.3,1.0,1.19,1.22,1.36,1.76,1.19,1.29,1.56,1.04,0.85,1.10,1.23,1.16,0.93,1.03,1.4,0.74,1.21,1.14,1.74,2.03,1.23,2.55,1.05,1.51,4.36,0.94,0.93,3.11,1.79,0.96,1.40,1.3,0.81,1.95,1.66,1.55,1.07,1.06,1.38,0.52,2.16,1.40,1.44])
plt.hist(bson,bins=numpy.linspace(0,3,16),weights=bson*0+220./len(bson),fc="grey",alpha=0.6)
a,b=numpy.histogram(bl["rfpf"],bins=numpy.linspace(0,3,31),weights=weights["rfpf"])
a*=2#double for finer bins
plt.plot(b[:-1]+(b[1]-b[0])/2.,a,c="k",lw=3,ls="dashed")
plt.xlabel(r"$\Theta_\mathrm{E}$ (arcsec)")
plt.ylabel(r"Lenses per $\Theta_\mathrm{E}$ bin")
plt.tight_layout()
plt.show()
| en | 0.616288 | #experiment="CFHT" #experiment="LSST" #experiment="DES" # f is the Euclid file now #os.system("rm %s"%filename) #this line resets the read-in # fraction of sky (420000 is the full sky) and 5000 is the area of DES in square degrees #print sspl[i]["seeing"][survey][bb] #print sspl[i]["mag"][1]*sspl[i]["rs"][1], #double for finer bins | 2.015972 | 2 |
roadsearch/utils/trapezoidal_integration.py | ERATOMMSD/roadsearch | 0 | 6619830 | import numpy as np
def frenet_to_cartesian(x0, y0, theta0, ss, kappas):
xs = np.zeros(len(kappas))
ys = np.zeros(len(kappas))
thetas = np.zeros(len(kappas))
xs[0] = x0
ys[0] = y0
thetas[0] = theta0
for i in range(thetas.shape[0] - 1):
ss_diff_half = (ss[i + 1] - ss[i]) / 2.0
thetas[i + 1] = thetas[i] + (kappas[i + 1] + kappas[i]) * ss_diff_half
xs[i + 1] = xs[i] + (np.cos(thetas[i + 1]) + np.cos(thetas[i])) * ss_diff_half
ys[i + 1] = ys[i] + (np.sin(thetas[i + 1]) + np.sin(thetas[i])) * ss_diff_half
return list(zip(xs, ys))
def thetas_to_cartesian(x0, y0, theta0, ss_deltas, delta_thetas):
xs = np.zeros(len(delta_thetas) + 1)
ys = np.zeros(len(delta_thetas) + 1)
thetas = np.zeros(len(delta_thetas) + 1)
xs[0] = x0
ys[0] = y0
thetas[0] = theta0
for i in range(thetas.shape[0] - 1):
ss_diff_half = ss_deltas[i] / 2.0
thetas[i + 1] = thetas[i] + delta_thetas[i]
xs[i + 1] = xs[i] + (np.cos(thetas[i + 1]) + np.cos(thetas[i])) * ss_diff_half
ys[i + 1] = ys[i] + (np.sin(thetas[i + 1]) + np.sin(thetas[i])) * ss_diff_half
return list(zip(xs, ys)) | import numpy as np
def frenet_to_cartesian(x0, y0, theta0, ss, kappas):
xs = np.zeros(len(kappas))
ys = np.zeros(len(kappas))
thetas = np.zeros(len(kappas))
xs[0] = x0
ys[0] = y0
thetas[0] = theta0
for i in range(thetas.shape[0] - 1):
ss_diff_half = (ss[i + 1] - ss[i]) / 2.0
thetas[i + 1] = thetas[i] + (kappas[i + 1] + kappas[i]) * ss_diff_half
xs[i + 1] = xs[i] + (np.cos(thetas[i + 1]) + np.cos(thetas[i])) * ss_diff_half
ys[i + 1] = ys[i] + (np.sin(thetas[i + 1]) + np.sin(thetas[i])) * ss_diff_half
return list(zip(xs, ys))
def thetas_to_cartesian(x0, y0, theta0, ss_deltas, delta_thetas):
xs = np.zeros(len(delta_thetas) + 1)
ys = np.zeros(len(delta_thetas) + 1)
thetas = np.zeros(len(delta_thetas) + 1)
xs[0] = x0
ys[0] = y0
thetas[0] = theta0
for i in range(thetas.shape[0] - 1):
ss_diff_half = ss_deltas[i] / 2.0
thetas[i + 1] = thetas[i] + delta_thetas[i]
xs[i + 1] = xs[i] + (np.cos(thetas[i + 1]) + np.cos(thetas[i])) * ss_diff_half
ys[i + 1] = ys[i] + (np.sin(thetas[i + 1]) + np.sin(thetas[i])) * ss_diff_half
return list(zip(xs, ys)) | none | 1 | 2.656268 | 3 | |
test/test_eap.py | gizmoguy/chewie | 0 | 6619831 | <reponame>gizmoguy/chewie
import unittest
from netils import build_byte_string
from chewie.eap import Eap, EapIdentity, EapMd5Challenge, EapSuccess, EapFailure
class EapTestCase(unittest.TestCase):
def test_eap_identity_parses(self):
packed_message = build_byte_string("0101000501")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertEqual(message.identity, "")
def test_eap_md5_challenge_parses(self):
packed_message = build_byte_string("0201002204103a535f0ee8c6b34fe714aa7dad9a0e154a6f686e2e4d63477569726b")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertEqual(message.challenge, build_byte_string("3a535f0ee8c6b34fe714aa7dad9a0e15"))
self.assertEqual(message.extra_data, b"John.McGuirk")
def test_eap_identity_packs(self):
expected_packed_message = build_byte_string("0101000501")
eap = EapIdentity(Eap.REQUEST, 1, "")
packed_message = eap.pack()
self.assertEqual(expected_packed_message, packed_message)
def test_eap_md5_challenge_packs(self):
expected_packed_message = build_byte_string("0201002204103a535f0ee8c6b34fe714aa7dad9a0e154a6f686e2e4d63477569726b")
eap = EapMd5Challenge(Eap.RESPONSE, 1, build_byte_string("3a535f0ee8c6b34fe714aa7dad9a0e15"), b"John.McGuirk")
packed_message = eap.pack()
self.assertEqual(expected_packed_message, packed_message)
def test_eap_success_parses(self):
packed_message = build_byte_string("03010004")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertTrue(isinstance(message, EapSuccess))
def test_eap_failure_parses(self):
packed_message = build_byte_string("04010004")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertTrue(isinstance(message, EapFailure))
| import unittest
from netils import build_byte_string
from chewie.eap import Eap, EapIdentity, EapMd5Challenge, EapSuccess, EapFailure
class EapTestCase(unittest.TestCase):
def test_eap_identity_parses(self):
packed_message = build_byte_string("0101000501")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertEqual(message.identity, "")
def test_eap_md5_challenge_parses(self):
packed_message = build_byte_string("0201002204103a535f0ee8c6b34fe714aa7dad9a0e154a6f686e2e4d63477569726b")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertEqual(message.challenge, build_byte_string("3a535f0ee8c6b34fe714aa7dad9a0e15"))
self.assertEqual(message.extra_data, b"John.McGuirk")
def test_eap_identity_packs(self):
expected_packed_message = build_byte_string("0101000501")
eap = EapIdentity(Eap.REQUEST, 1, "")
packed_message = eap.pack()
self.assertEqual(expected_packed_message, packed_message)
def test_eap_md5_challenge_packs(self):
expected_packed_message = build_byte_string("0201002204103a535f0ee8c6b34fe714aa7dad9a0e154a6f686e2e4d63477569726b")
eap = EapMd5Challenge(Eap.RESPONSE, 1, build_byte_string("3a535f0ee8c6b34fe714aa7dad9a0e15"), b"John.McGuirk")
packed_message = eap.pack()
self.assertEqual(expected_packed_message, packed_message)
def test_eap_success_parses(self):
packed_message = build_byte_string("03010004")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertTrue(isinstance(message, EapSuccess))
def test_eap_failure_parses(self):
packed_message = build_byte_string("04010004")
message = Eap.parse(packed_message)
self.assertEqual(message.packet_id, 1)
self.assertTrue(isinstance(message, EapFailure)) | none | 1 | 2.271389 | 2 | |
mojo/public/tools/mojom/mojom/generate/translate_unittest.py | sarang-apps/darshan_browser | 575 | 6619832 | <reponame>sarang-apps/darshan_browser
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
import unittest
from mojom.generate import module as mojom
from mojom.generate import translate
from mojom.parse import ast
class TranslateTest(unittest.TestCase):
"""Tests |parser.Parse()|."""
def testSimpleArray(self):
"""Tests a simple int32[]."""
# pylint: disable=W0212
self.assertEquals(translate._MapKind("int32[]"), "a:i32")
def testAssociativeArray(self):
"""Tests a simple uint8{string}."""
# pylint: disable=W0212
self.assertEquals(translate._MapKind("uint8{string}"), "m[s][u8]")
def testLeftToRightAssociativeArray(self):
"""Makes sure that parsing is done from right to left on the internal kinds
in the presence of an associative array."""
# pylint: disable=W0212
self.assertEquals(translate._MapKind("uint8[]{string}"), "m[s][a:u8]")
def testTranslateSimpleUnions(self):
"""Makes sure that a simple union is translated correctly."""
tree = ast.Mojom(None, ast.ImportList(), [
ast.Union(
"SomeUnion", None,
ast.UnionBody([
ast.UnionField("a", None, None, "int32"),
ast.UnionField("b", None, None, "string")
]))
])
translation = translate.OrderedModule(tree, "mojom_tree", [])
self.assertEqual(1, len(translation.unions))
union = translation.unions[0]
self.assertTrue(isinstance(union, mojom.Union))
self.assertEqual("SomeUnion", union.mojom_name)
self.assertEqual(2, len(union.fields))
self.assertEqual("a", union.fields[0].mojom_name)
self.assertEqual(mojom.INT32.spec, union.fields[0].kind.spec)
self.assertEqual("b", union.fields[1].mojom_name)
self.assertEqual(mojom.STRING.spec, union.fields[1].kind.spec)
def testMapKindRaisesWithDuplicate(self):
"""Verifies _MapTreeForType() raises when passed two values with the same
name."""
methods = [
ast.Method('dup', None, None, ast.ParameterList(), None),
ast.Method('dup', None, None, ast.ParameterList(), None)
]
with self.assertRaises(Exception):
translate._ElemsOfType(methods, ast.Method, 'scope')
def testAssociatedKinds(self):
"""Tests type spec translation of associated interfaces and requests."""
# pylint: disable=W0212
self.assertEquals(
translate._MapKind("asso<SomeInterface>?"), "?asso:x:SomeInterface")
self.assertEquals(
translate._MapKind("asso<SomeInterface&>?"), "?asso:r:x:SomeInterface")
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
import unittest
from mojom.generate import module as mojom
from mojom.generate import translate
from mojom.parse import ast
class TranslateTest(unittest.TestCase):
"""Tests |parser.Parse()|."""
def testSimpleArray(self):
"""Tests a simple int32[]."""
# pylint: disable=W0212
self.assertEquals(translate._MapKind("int32[]"), "a:i32")
def testAssociativeArray(self):
"""Tests a simple uint8{string}."""
# pylint: disable=W0212
self.assertEquals(translate._MapKind("uint8{string}"), "m[s][u8]")
def testLeftToRightAssociativeArray(self):
"""Makes sure that parsing is done from right to left on the internal kinds
in the presence of an associative array."""
# pylint: disable=W0212
self.assertEquals(translate._MapKind("uint8[]{string}"), "m[s][a:u8]")
def testTranslateSimpleUnions(self):
"""Makes sure that a simple union is translated correctly."""
tree = ast.Mojom(None, ast.ImportList(), [
ast.Union(
"SomeUnion", None,
ast.UnionBody([
ast.UnionField("a", None, None, "int32"),
ast.UnionField("b", None, None, "string")
]))
])
translation = translate.OrderedModule(tree, "mojom_tree", [])
self.assertEqual(1, len(translation.unions))
union = translation.unions[0]
self.assertTrue(isinstance(union, mojom.Union))
self.assertEqual("SomeUnion", union.mojom_name)
self.assertEqual(2, len(union.fields))
self.assertEqual("a", union.fields[0].mojom_name)
self.assertEqual(mojom.INT32.spec, union.fields[0].kind.spec)
self.assertEqual("b", union.fields[1].mojom_name)
self.assertEqual(mojom.STRING.spec, union.fields[1].kind.spec)
def testMapKindRaisesWithDuplicate(self):
"""Verifies _MapTreeForType() raises when passed two values with the same
name."""
methods = [
ast.Method('dup', None, None, ast.ParameterList(), None),
ast.Method('dup', None, None, ast.ParameterList(), None)
]
with self.assertRaises(Exception):
translate._ElemsOfType(methods, ast.Method, 'scope')
def testAssociatedKinds(self):
"""Tests type spec translation of associated interfaces and requests."""
# pylint: disable=W0212
self.assertEquals(
translate._MapKind("asso<SomeInterface>?"), "?asso:x:SomeInterface")
self.assertEquals(
translate._MapKind("asso<SomeInterface&>?"), "?asso:r:x:SomeInterface") | en | 0.788753 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Tests |parser.Parse()|. Tests a simple int32[]. # pylint: disable=W0212 Tests a simple uint8{string}. # pylint: disable=W0212 Makes sure that parsing is done from right to left on the internal kinds in the presence of an associative array. # pylint: disable=W0212 Makes sure that a simple union is translated correctly. Verifies _MapTreeForType() raises when passed two values with the same name. Tests type spec translation of associated interfaces and requests. # pylint: disable=W0212 | 2.146698 | 2 |
agw/FourWaySplitter.py | iubica/wx-portfolio | 3 | 6619833 | #!/usr/bin/env python
import wx
import os
import sys
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import fourwaysplitter as FWS
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.fourwaysplitter as FWS
import images
#----------------------------------------------------------------------
class SamplePane(wx.Panel):
"""
Just a simple test window to put into the splitter.
"""
def __init__(self, parent, colour, label):
wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
self.SetBackgroundColour(colour)
wx.StaticText(self, -1, label, (5,5))
def SetOtherLabel(self, label):
wx.StaticText(self, -1, label, (5, 30))
class ControlPane(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
luCheck = wx.CheckBox(self, -1, "Live Update")
luCheck.SetValue(True)
self.Bind(wx.EVT_CHECKBOX, self.OnSetLiveUpdate, luCheck)
btn1 = wx.Button(self, -1, "Swap 2 && 4")
self.Bind(wx.EVT_BUTTON, self.OnSwapButton24, btn1)
btn2 = wx.Button(self, -1, "Swap 1 && 3")
self.Bind(wx.EVT_BUTTON, self.OnSwapButton13, btn2)
static = wx.StaticText(self, -1, "Expand A Window")
combo = wx.ComboBox(self, -1, choices=["None", "1", "2", "3", "4"],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
combo.SetStringSelection("None")
self.Bind(wx.EVT_COMBOBOX, self.OnExpandWindow)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(luCheck, 0, wx.TOP, 5)
sizer.Add(btn1, 0, wx.TOP, 5)
sizer.Add(btn2, 0, wx.TOP, 5)
sizer.Add(static, 0, wx.TOP, 10)
sizer.Add(combo, 0, wx.EXPAND|wx.RIGHT|wx.TOP, 2)
border = wx.BoxSizer()
border.Add(sizer, 1, wx.EXPAND|wx.ALL, 5)
self.SetSizer(border)
def OnSetLiveUpdate(self, evt):
check = evt.GetEventObject()
self.GetParent().SetLiveUpdate(check.GetValue())
def OnSwapButton24(self, evt):
self.GetParent().Swap2and4()
def OnSwapButton13(self, evt):
self.GetParent().Swap1and3()
def OnExpandWindow(self, event):
self.GetParent().ExpandWindow(event.GetSelection())
class FWSPanel(wx.Panel):
def __init__(self, parent, log):
wx.Panel.__init__(self, parent, -1)
self.log = log
cp = ControlPane(self)
splitter = FWS.FourWaySplitter(self, agwStyle=wx.SP_LIVE_UPDATE)
self.splitter = splitter
self.log = log
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(cp)
sizer.Add(splitter, 1, wx.EXPAND)
self.SetSizer(sizer)
p1 = SamplePane(splitter, "pink", "Panel One")
p1.SetOtherLabel(
"There are three ways\n"
"to drag sashes. Try\n"
"dragging the horizontal\n"
"sash, the vertical sash\n"
"or position the mouse at\n"
"the intersection of the\n"
"two sashes."
)
splitter.AppendWindow(p1)
p2 = SamplePane(splitter, "sky blue", "Panel Two")
p2.SetOtherLabel("Hello From wxPython!")
p2.SetMinSize(p2.GetBestSize())
splitter.AppendWindow(p2)
p3 = SamplePane(splitter, "yellow", "Panel Three")
splitter.AppendWindow(p3)
p4 = SamplePane(splitter, "<NAME>", "Panel Four")
splitter.AppendWindow(p4)
self.log.write("Welcome to the FourWaySplitterDemo!\n")
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.OnChanged)
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGING, self.OnChanging)
def GetSashIdx(self, event):
if event.GetSashIdx() == wx.HORIZONTAL:
idx = "Horizontal"
elif event.GetSashIdx() == wx.VERTICAL:
idx = "Vertical"
else:
idx = "Horizontal & Vertical"
return idx
def OnChanging(self, event):
idx = self.GetSashIdx(event)
self.log.write("Changing sash: %s %s\n" %(idx, event.GetSashPosition()))
# This is one way to control the sash limits
#if event.GetSashPosition().x < 50:
# event.Veto()
event.Skip()
def OnChanged(self, event):
idx = self.GetSashIdx(event)
self.log.write("Changed sash: %s %s\n" %(idx, event.GetSashPosition()))
event.Skip()
def SetLiveUpdate(self, enable):
if enable:
self.splitter.SetAGWWindowStyleFlag(wx.SP_LIVE_UPDATE)
else:
self.splitter.SetAGWWindowStyleFlag(0)
def Swap2and4(self):
win2 = self.splitter.GetWindow(1)
win4 = self.splitter.GetWindow(3)
self.splitter.ExchangeWindows(win2, win4)
def Swap1and3(self):
win1 = self.splitter.GetWindow(0)
win3 = self.splitter.GetWindow(2)
self.splitter.ExchangeWindows(win1, win3)
def ExpandWindow(self, selection):
self.splitter.SetExpanded(selection-1)
class FourWaySplitterDemo(wx.Frame):
def __init__(self, parent, log, id=wx.ID_ANY, title="FourWaySplitter Demo",
size=(700, 500)):
wx.Frame.__init__(self, parent, id, title, size=size)
self.log = log
panel = FWSPanel(self, log)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(panel, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Layout()
statusbar = self.CreateStatusBar(2)
statusbar.SetStatusWidths([-2, -1])
# statusbar fields
statusbar_fields = [("FourWaySplitter wxPython Demo, <NAME> @ 03 Nov 2006"),
("Welcome To wxPython!")]
for i in range(len(statusbar_fields)):
statusbar.SetStatusText(statusbar_fields[i], i)
self.CreateMenu()
self.SetIcon(images.Mondrian.GetIcon())
self.CenterOnScreen()
def CreateMenu(self):
menuBar = wx.MenuBar(wx.MB_DOCKABLE)
fileMenu = wx.Menu()
helpMenu = wx.Menu()
item = wx.MenuItem(fileMenu, wx.ID_ANY, "E&xit")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
fileMenu.Append(item)
item = wx.MenuItem(helpMenu, wx.ID_ANY, "About")
self.Bind(wx.EVT_MENU, self.OnAbout, item)
helpMenu.Append(item)
menuBar.Append(fileMenu, "&File")
menuBar.Append(helpMenu, "&Help")
self.SetMenuBar(menuBar)
def OnQuit(self, event):
self.Destroy()
def OnAbout(self, event):
msg = "This Is The About Dialog Of The FourWaySplitter Demo.\n\n" + \
"Author: <NAME> @ 03 Nov 2006\n\n" + \
"Please Report Any Bug/Requests Of Improvements\n" + \
"To Me At The Following Adresses:\n\n" + \
"<EMAIL>\n" + "<EMAIL>\n\n" + \
"Welcome To wxPython " + wx.VERSION_STRING + "!!"
dlg = wx.MessageDialog(self, msg, "FourWaySplitter wxPython Demo",
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, " Test FourWaySplitter ", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, evt):
self.win = FourWaySplitterDemo(self, self.log)
self.win.Show(True)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = FWS.__doc__
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| #!/usr/bin/env python
import wx
import os
import sys
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import fourwaysplitter as FWS
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.fourwaysplitter as FWS
import images
#----------------------------------------------------------------------
class SamplePane(wx.Panel):
"""
Just a simple test window to put into the splitter.
"""
def __init__(self, parent, colour, label):
wx.Panel.__init__(self, parent, style=wx.BORDER_SUNKEN)
self.SetBackgroundColour(colour)
wx.StaticText(self, -1, label, (5,5))
def SetOtherLabel(self, label):
wx.StaticText(self, -1, label, (5, 30))
class ControlPane(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
luCheck = wx.CheckBox(self, -1, "Live Update")
luCheck.SetValue(True)
self.Bind(wx.EVT_CHECKBOX, self.OnSetLiveUpdate, luCheck)
btn1 = wx.Button(self, -1, "Swap 2 && 4")
self.Bind(wx.EVT_BUTTON, self.OnSwapButton24, btn1)
btn2 = wx.Button(self, -1, "Swap 1 && 3")
self.Bind(wx.EVT_BUTTON, self.OnSwapButton13, btn2)
static = wx.StaticText(self, -1, "Expand A Window")
combo = wx.ComboBox(self, -1, choices=["None", "1", "2", "3", "4"],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
combo.SetStringSelection("None")
self.Bind(wx.EVT_COMBOBOX, self.OnExpandWindow)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(luCheck, 0, wx.TOP, 5)
sizer.Add(btn1, 0, wx.TOP, 5)
sizer.Add(btn2, 0, wx.TOP, 5)
sizer.Add(static, 0, wx.TOP, 10)
sizer.Add(combo, 0, wx.EXPAND|wx.RIGHT|wx.TOP, 2)
border = wx.BoxSizer()
border.Add(sizer, 1, wx.EXPAND|wx.ALL, 5)
self.SetSizer(border)
def OnSetLiveUpdate(self, evt):
check = evt.GetEventObject()
self.GetParent().SetLiveUpdate(check.GetValue())
def OnSwapButton24(self, evt):
self.GetParent().Swap2and4()
def OnSwapButton13(self, evt):
self.GetParent().Swap1and3()
def OnExpandWindow(self, event):
self.GetParent().ExpandWindow(event.GetSelection())
class FWSPanel(wx.Panel):
def __init__(self, parent, log):
wx.Panel.__init__(self, parent, -1)
self.log = log
cp = ControlPane(self)
splitter = FWS.FourWaySplitter(self, agwStyle=wx.SP_LIVE_UPDATE)
self.splitter = splitter
self.log = log
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(cp)
sizer.Add(splitter, 1, wx.EXPAND)
self.SetSizer(sizer)
p1 = SamplePane(splitter, "pink", "Panel One")
p1.SetOtherLabel(
"There are three ways\n"
"to drag sashes. Try\n"
"dragging the horizontal\n"
"sash, the vertical sash\n"
"or position the mouse at\n"
"the intersection of the\n"
"two sashes."
)
splitter.AppendWindow(p1)
p2 = SamplePane(splitter, "sky blue", "Panel Two")
p2.SetOtherLabel("Hello From wxPython!")
p2.SetMinSize(p2.GetBestSize())
splitter.AppendWindow(p2)
p3 = SamplePane(splitter, "yellow", "Panel Three")
splitter.AppendWindow(p3)
p4 = SamplePane(splitter, "<NAME>", "Panel Four")
splitter.AppendWindow(p4)
self.log.write("Welcome to the FourWaySplitterDemo!\n")
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.OnChanged)
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGING, self.OnChanging)
def GetSashIdx(self, event):
if event.GetSashIdx() == wx.HORIZONTAL:
idx = "Horizontal"
elif event.GetSashIdx() == wx.VERTICAL:
idx = "Vertical"
else:
idx = "Horizontal & Vertical"
return idx
def OnChanging(self, event):
idx = self.GetSashIdx(event)
self.log.write("Changing sash: %s %s\n" %(idx, event.GetSashPosition()))
# This is one way to control the sash limits
#if event.GetSashPosition().x < 50:
# event.Veto()
event.Skip()
def OnChanged(self, event):
idx = self.GetSashIdx(event)
self.log.write("Changed sash: %s %s\n" %(idx, event.GetSashPosition()))
event.Skip()
def SetLiveUpdate(self, enable):
if enable:
self.splitter.SetAGWWindowStyleFlag(wx.SP_LIVE_UPDATE)
else:
self.splitter.SetAGWWindowStyleFlag(0)
def Swap2and4(self):
win2 = self.splitter.GetWindow(1)
win4 = self.splitter.GetWindow(3)
self.splitter.ExchangeWindows(win2, win4)
def Swap1and3(self):
win1 = self.splitter.GetWindow(0)
win3 = self.splitter.GetWindow(2)
self.splitter.ExchangeWindows(win1, win3)
def ExpandWindow(self, selection):
self.splitter.SetExpanded(selection-1)
class FourWaySplitterDemo(wx.Frame):
def __init__(self, parent, log, id=wx.ID_ANY, title="FourWaySplitter Demo",
size=(700, 500)):
wx.Frame.__init__(self, parent, id, title, size=size)
self.log = log
panel = FWSPanel(self, log)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(panel, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Layout()
statusbar = self.CreateStatusBar(2)
statusbar.SetStatusWidths([-2, -1])
# statusbar fields
statusbar_fields = [("FourWaySplitter wxPython Demo, <NAME> @ 03 Nov 2006"),
("Welcome To wxPython!")]
for i in range(len(statusbar_fields)):
statusbar.SetStatusText(statusbar_fields[i], i)
self.CreateMenu()
self.SetIcon(images.Mondrian.GetIcon())
self.CenterOnScreen()
def CreateMenu(self):
menuBar = wx.MenuBar(wx.MB_DOCKABLE)
fileMenu = wx.Menu()
helpMenu = wx.Menu()
item = wx.MenuItem(fileMenu, wx.ID_ANY, "E&xit")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
fileMenu.Append(item)
item = wx.MenuItem(helpMenu, wx.ID_ANY, "About")
self.Bind(wx.EVT_MENU, self.OnAbout, item)
helpMenu.Append(item)
menuBar.Append(fileMenu, "&File")
menuBar.Append(helpMenu, "&Help")
self.SetMenuBar(menuBar)
def OnQuit(self, event):
self.Destroy()
def OnAbout(self, event):
msg = "This Is The About Dialog Of The FourWaySplitter Demo.\n\n" + \
"Author: <NAME> @ 03 Nov 2006\n\n" + \
"Please Report Any Bug/Requests Of Improvements\n" + \
"To Me At The Following Adresses:\n\n" + \
"<EMAIL>\n" + "<EMAIL>\n\n" + \
"Welcome To wxPython " + wx.VERSION_STRING + "!!"
dlg = wx.MessageDialog(self, msg, "FourWaySplitter wxPython Demo",
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, " Test FourWaySplitter ", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, evt):
self.win = FourWaySplitterDemo(self, self.log)
self.win.Show(True)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = FWS.__doc__
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| en | 0.263258 | #!/usr/bin/env python # if it's not there locally, try the wxPython lib. #---------------------------------------------------------------------- Just a simple test window to put into the splitter. # This is one way to control the sash limits #if event.GetSashPosition().x < 50: # event.Veto() # statusbar fields #--------------------------------------------------------------------------- #---------------------------------------------------------------------- #---------------------------------------------------------------------- | 2.184924 | 2 |
core/utils/task.py | msramalho/election-watch | 32 | 6619834 | <reponame>msramalho/election-watch
from slugify import slugify
import uuid
from .misc import get_filter_by_day
class Task:
# handles a collection task_name
# each document is {_id: uuid, day: day of analysis, data: what to store for this day}
def __init__(self, db, name, create=True, only_one=False):
self.name = "task_%s" % slugify(name, separator="_")
self.db = db
self.only_one = only_one
if create or self.exists(): self.create()
def create(self):
self.collection = self.db[self.name]
# self.collection.create_index("day") # create index on day
def exists(self):
return self.name in self.db.list_collection_names()
def find_day(self, day):
return self.collection.find_one({"day": get_filter_by_day(day)})
def exists_day(self, day):
return self.find_day(day) is not None
def insert(self, day, data):
if self.only_one:
return self.collection.find_one_and_update({}, {"$set": {"day": day, "data": data}}, upsert=True)
else:
return self.collection.find_one_and_update({"day": get_filter_by_day(day)}, {"$set": {"day": day, "data": data}}, upsert=True)
def get_last_n(self, n=30, withId=True):
assert n > 0, "n must be greater than 0"
# retrieve last n entries concatenated
# self.collection.find({})
res = list(self.collection.find({}, {"_id": withId, "day": 1, "data": 1}).sort([("day", -1)]).limit(n))
print(res)
return res
def unzip_last_n(self, n=30, withId=True):
# returns two lists [days], [datas]
unzip = list(zip(*[(x["day"], x["data"]) for x in self.get_last_n(n, withId)]))
if len(unzip): return unzip[0], unzip[1]
return [], []
def get_last(self):
last = self.get_last_n(1)
if len(last): return [0]
def drop(self): self.collection.remove({})
def drop_day(self, day): self.collection.remove({"day": day})
def get_api_n(self, n):
# returns a standardized dict that can be returned directly by the api
res = self.unzip_last_n(n, False)
res = [[d.strftime("%Y-%m-%d") for d in res[0]], res[1]]
# res = map(lambda r: {"day": r["day"].strftime("%Y-%m-%d"), "data": r["data"]}, self.unzip_last_n(n, False))
return {"history": res}
| from slugify import slugify
import uuid
from .misc import get_filter_by_day
class Task:
# handles a collection task_name
# each document is {_id: uuid, day: day of analysis, data: what to store for this day}
def __init__(self, db, name, create=True, only_one=False):
self.name = "task_%s" % slugify(name, separator="_")
self.db = db
self.only_one = only_one
if create or self.exists(): self.create()
def create(self):
self.collection = self.db[self.name]
# self.collection.create_index("day") # create index on day
def exists(self):
return self.name in self.db.list_collection_names()
def find_day(self, day):
return self.collection.find_one({"day": get_filter_by_day(day)})
def exists_day(self, day):
return self.find_day(day) is not None
def insert(self, day, data):
if self.only_one:
return self.collection.find_one_and_update({}, {"$set": {"day": day, "data": data}}, upsert=True)
else:
return self.collection.find_one_and_update({"day": get_filter_by_day(day)}, {"$set": {"day": day, "data": data}}, upsert=True)
def get_last_n(self, n=30, withId=True):
assert n > 0, "n must be greater than 0"
# retrieve last n entries concatenated
# self.collection.find({})
res = list(self.collection.find({}, {"_id": withId, "day": 1, "data": 1}).sort([("day", -1)]).limit(n))
print(res)
return res
def unzip_last_n(self, n=30, withId=True):
# returns two lists [days], [datas]
unzip = list(zip(*[(x["day"], x["data"]) for x in self.get_last_n(n, withId)]))
if len(unzip): return unzip[0], unzip[1]
return [], []
def get_last(self):
last = self.get_last_n(1)
if len(last): return [0]
def drop(self): self.collection.remove({})
def drop_day(self, day): self.collection.remove({"day": day})
def get_api_n(self, n):
# returns a standardized dict that can be returned directly by the api
res = self.unzip_last_n(n, False)
res = [[d.strftime("%Y-%m-%d") for d in res[0]], res[1]]
# res = map(lambda r: {"day": r["day"].strftime("%Y-%m-%d"), "data": r["data"]}, self.unzip_last_n(n, False))
return {"history": res} | en | 0.517624 | # handles a collection task_name # each document is {_id: uuid, day: day of analysis, data: what to store for this day} # self.collection.create_index("day") # create index on day # retrieve last n entries concatenated # self.collection.find({}) # returns two lists [days], [datas] # returns a standardized dict that can be returned directly by the api # res = map(lambda r: {"day": r["day"].strftime("%Y-%m-%d"), "data": r["data"]}, self.unzip_last_n(n, False)) | 2.740765 | 3 |
a2c_ppo_acktr/algo/ppo.py | zplizzi/pytorch-a2c-ppo-acktr-gail | 0 | 6619835 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class PPO():
    """Proximal Policy Optimization (clipped surrogate objective).

    Runs ``ppo_epoch`` passes of minibatch SGD over each rollout batch,
    minimizing the clipped policy surrogate plus a (optionally clipped)
    value loss minus an entropy bonus, with global-norm gradient clipping.
    """

    def __init__(self,
                 actor_critic,
                 clip_param,
                 ppo_epoch,
                 num_mini_batch,
                 value_loss_coef,
                 entropy_coef,
                 lr=None,
                 eps=None,
                 max_grad_norm=None,
                 use_clipped_value_loss=True,
                 tracker=None):
        # Model exposing evaluate_actions() and is_recurrent.
        self.actor_critic = actor_critic
        # Clip radius used for both the probability ratio and the value update.
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
        # Metrics sink with add_scalar()/add_histogram() (TensorBoard-like).
        self.tracker = tracker

    def update(self, rollouts, i):
        """Run one PPO update over *rollouts*; *i* is the global iteration index.

        Returns (value_loss, action_loss, dist_entropy), each averaged over
        the ppo_epoch * num_mini_batch minibatch updates performed here.
        """
        # Advantage estimates, normalized to zero mean / unit std for
        # gradient stability (1e-5 guards against division by zero).
        advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
        advantages = (advantages - advantages.mean()) / (
            advantages.std() + 1e-5)

        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        for e in range(self.ppo_epoch):
            # Recurrent policies need sequence-preserving minibatches.
            if self.actor_critic.is_recurrent:
                data_generator = rollouts.recurrent_generator(
                    advantages, self.num_mini_batch)
            else:
                data_generator = rollouts.feed_forward_generator(
                    advantages, self.num_mini_batch)

            for sample in data_generator:
                obs_batch, recurrent_hidden_states_batch, actions_batch, \
                   value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
                        adv_targ = sample

                # Reshape to do in a single forward pass for all steps
                values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
                    obs_batch, recurrent_hidden_states_batch, masks_batch,
                    actions_batch)

                # r(theta) = pi_new(a|s) / pi_old(a|s), computed in log space.
                ratio = torch.exp(action_log_probs -
                                  old_action_log_probs_batch)
                surr1 = ratio * adv_targ
                surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
                                    1.0 + self.clip_param) * adv_targ
                # Pessimistic (clipped) surrogate; negated for gradient descent.
                action_loss = -torch.min(surr1, surr2).mean()

                if self.use_clipped_value_loss:
                    # Clip the value update around the old prediction and take
                    # the worse of the clipped/unclipped squared errors.
                    value_pred_clipped = value_preds_batch + \
                        (values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (
                        value_pred_clipped - return_batch).pow(2)
                    value_loss = 0.5 * torch.max(value_losses,
                                                 value_losses_clipped).mean()
                else:
                    value_loss = 0.5 * (return_batch - values).pow(2).mean()

                self.optimizer.zero_grad()
                # Entropy enters negatively: higher entropy lowers the loss.
                loss = value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef
                loss.backward()
                # Global-norm gradient clipping.
                nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
                                         self.max_grad_norm)
                self.optimizer.step()

                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()

        num_updates = self.ppo_epoch * self.num_mini_batch

        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates

        # NOTE(review): leftover debug print.
        print(i)
        # Logging below uses tensors from the *last* minibatch only.
        # NOTE(review): `i % 1 == 0` is always true, so this logs every call.
        if i % 1 == 0:
            self.tracker.add_scalar("loss/ppo loss", action_loss, i)
            self.tracker.add_scalar("loss/value_loss", value_loss*self.value_loss_coef, i)
            self.tracker.add_scalar("loss/entropy_loss", -1*dist_entropy*self.entropy_coef, i)
            self.tracker.add_scalar("loss/loss", loss, i)
            self.tracker.add_scalar("rewards/mean value", torch.mean(values), i)
            self.tracker.add_scalar("rewards/mean rewards to go", torch.mean(return_batch), i)
            self.tracker.add_scalar("rewards/mean prob ratio", torch.mean(ratio), i)
            self.tracker.add_scalar("policy/policy entropy", dist_entropy, i)
            # self.tracker.log_iteration_time(NUM_WORKERS * NUM_STEPS, i)
        if i % 5 == 0:
            # Per-dimension action histograms.
            for k in range(actions_batch.shape[1]):
                self.tracker.add_histogram(f"policy/actions_{k}", actions_batch[:, k], i)
            self.tracker.add_histogram("rewards/values", values, i)
            self.tracker.add_histogram("rewards/advantages", advantages, i)
            self.tracker.add_histogram("rewards/rewards to go", return_batch, i)
            self.tracker.add_histogram("rewards/prob ratio", ratio, i)
            self.tracker.add_histogram("loss/ppo_loss_hist", -1 * torch.min(surr1, surr2), i)
            # self.tracker.add_histogram("policy/cov", self.actor_critic.dist.logstd._bias.exp(), i)

        return value_loss_epoch, action_loss_epoch, dist_entropy_epoch
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class PPO():
def __init__(self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
entropy_coef,
lr=None,
eps=None,
max_grad_norm=None,
use_clipped_value_loss=True,
tracker=None):
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.num_mini_batch = num_mini_batch
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.use_clipped_value_loss = use_clipped_value_loss
self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
self.tracker = tracker
def update(self, rollouts, i):
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (
advantages.std() + 1e-5)
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
for e in range(self.ppo_epoch):
if self.actor_critic.is_recurrent:
data_generator = rollouts.recurrent_generator(
advantages, self.num_mini_batch)
else:
data_generator = rollouts.feed_forward_generator(
advantages, self.num_mini_batch)
for sample in data_generator:
obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
obs_batch, recurrent_hidden_states_batch, masks_batch,
actions_batch)
ratio = torch.exp(action_log_probs -
old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (
value_pred_clipped - return_batch).pow(2)
value_loss = 0.5 * torch.max(value_losses,
value_losses_clipped).mean()
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
self.optimizer.zero_grad()
loss = value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef
loss.backward()
nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.max_grad_norm)
self.optimizer.step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
dist_entropy_epoch += dist_entropy.item()
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
print(i)
if i % 1 == 0:
self.tracker.add_scalar("loss/ppo loss", action_loss, i)
self.tracker.add_scalar("loss/value_loss", value_loss*self.value_loss_coef, i)
self.tracker.add_scalar("loss/entropy_loss", -1*dist_entropy*self.entropy_coef, i)
self.tracker.add_scalar("loss/loss", loss, i)
self.tracker.add_scalar("rewards/mean value", torch.mean(values), i)
self.tracker.add_scalar("rewards/mean rewards to go", torch.mean(return_batch), i)
self.tracker.add_scalar("rewards/mean prob ratio", torch.mean(ratio), i)
self.tracker.add_scalar("policy/policy entropy", dist_entropy, i)
# self.tracker.log_iteration_time(NUM_WORKERS * NUM_STEPS, i)
if i % 5 == 0:
for k in range(actions_batch.shape[1]):
self.tracker.add_histogram(f"policy/actions_{k}", actions_batch[:, k], i)
self.tracker.add_histogram("rewards/values", values, i)
self.tracker.add_histogram("rewards/advantages", advantages, i)
self.tracker.add_histogram("rewards/rewards to go", return_batch, i)
self.tracker.add_histogram("rewards/prob ratio", ratio, i)
self.tracker.add_histogram("loss/ppo_loss_hist", -1 * torch.min(surr1, surr2), i)
# self.tracker.add_histogram("policy/cov", self.actor_critic.dist.logstd._bias.exp(), i)
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch
| en | 0.461918 | # Reshape to do in a single forward pass for all steps # self.tracker.log_iteration_time(NUM_WORKERS * NUM_STEPS, i) # self.tracker.add_histogram("policy/cov", self.actor_critic.dist.logstd._bias.exp(), i) | 2.073366 | 2 |
tiktok_dl/archive.py | skyme5/tiktok-dl-old | 0 | 6619836 | import os
class ArchiveManager:
def __init__(self, download_archive=None):
self.download_archive = download_archive
self.is_init = False
self.archive = self._read_archive()
def _read_archive(self):
if os.path.isfile(self.download_archive):
with open(self.download_archive) as f:
data = f.read()
return data.split("\n")
return list()
def _write_archive(self, items: list):
with open(self.download_archive, "a", encoding="utf-8") as f:
for video_id in items:
f.write("%s\n" % video_id)
def exist(self, video_id: str):
return video_id in self.archive
def append(self, video_id):
self._write_archive(list(video_id)) | import os
class ArchiveManager:
    """Track already-downloaded video ids in a plain-text archive file,
    one id per line."""

    def __init__(self, download_archive=None):
        self.download_archive = download_archive
        self.is_init = False
        # In-memory copy of the archive; kept in sync by append().
        self.archive = self._read_archive()

    def _read_archive(self):
        """Load ids from the archive file; empty list when it does not exist."""
        if os.path.isfile(self.download_archive):
            with open(self.download_archive, encoding="utf-8") as f:
                # splitlines() avoids the trailing empty entry that
                # split("\n") produced for files ending in a newline.
                return f.read().splitlines()
        return []

    def _write_archive(self, items: list):
        """Append each id in *items* to the archive file, one per line."""
        with open(self.download_archive, "a", encoding="utf-8") as f:
            for video_id in items:
                f.write("%s\n" % video_id)

    def exist(self, video_id: str):
        return video_id in self.archive

    def append(self, video_id):
        # BUG FIX: list(video_id) exploded a string id into single
        # characters; wrap it in a one-element list instead.
        self._write_archive([video_id])
        # Keep the in-memory view consistent so exist() sees the new id.
        self.archive.append(video_id)
cpp/gtest.gyp | parastoog/libaddressinput | 2,151 | 6619837 | <reponame>parastoog/libaddressinput
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# Default include directories. Override with your system's include paths or
# paths to your own implementations.
'gtest_dir%': '/usr/include',
'gtest_src_dir%': '/usr/src/gtest',
},
'targets': [
{
'target_name': 'main',
'type': 'static_library',
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/src/gtest-all.cc',
],
'include_dirs': [
'<(gtest_dir)',
'<(gtest_src_dir)',
],
'copies': [
{
'destination': '<(SHARED_INTERMEDIATE_DIR)/src',
'files': [
'<(gtest_src_dir)/src/gtest-all.cc',
'<(gtest_src_dir)/src/gtest_main.cc',
],
},
],
'direct_dependent_settings': {
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/src/gtest_main.cc',
],
'include_dirs': [
'<(gtest_dir)',
],
'conditions': [
['OS == "linux"', {
'ldflags': [
'-pthread', # GTest needs to link to pthread on Linux.
],
}],
],
},
},
],
}
| # Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# Default include directories. Override with your system's include paths or
# paths to your own implementations.
'gtest_dir%': '/usr/include',
'gtest_src_dir%': '/usr/src/gtest',
},
'targets': [
{
'target_name': 'main',
'type': 'static_library',
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/src/gtest-all.cc',
],
'include_dirs': [
'<(gtest_dir)',
'<(gtest_src_dir)',
],
'copies': [
{
'destination': '<(SHARED_INTERMEDIATE_DIR)/src',
'files': [
'<(gtest_src_dir)/src/gtest-all.cc',
'<(gtest_src_dir)/src/gtest_main.cc',
],
},
],
'direct_dependent_settings': {
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/src/gtest_main.cc',
],
'include_dirs': [
'<(gtest_dir)',
],
'conditions': [
['OS == "linux"', {
'ldflags': [
'-pthread', # GTest needs to link to pthread on Linux.
],
}],
],
},
},
],
} | en | 0.876018 | # Copyright (C) 2013 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Default include directories. Override with your system's include paths or # paths to your own implementations. # GTest needs to link to pthread on Linux. | 1.287447 | 1 |
natrixclient/backends/mqtt.py | creditease-natrix/natrixclient | 9 | 6619838 | # -*- coding: utf-8 -*-
"""
"""
import ssl, threading, json, time
import paho.mqtt.client as mqtt
from natrixclient.common.config import NatrixConfig
from natrixclient.command.ncheck import CheckTest
from natrixclient.command.check.network import NetInfo
from natrixclient.backends.base import SingletonService
from natrixclient.backends.command_processor import processor
from natrixclient.command.check.hardware import HardwareInfo
from natrixclient.backends.base import logger
CONFIG_TOPIC = 'MQTT'
config = NatrixConfig()
host = config.get_value(CONFIG_TOPIC, 'host')
port = int(config.get_value(CONFIG_TOPIC, 'port'))
vhost = config.get_value(CONFIG_TOPIC, 'vhost')
username = HardwareInfo.get_sn()
password = config.get_value(CONFIG_TOPIC, 'password')
client_id = config.get_value(CONFIG_TOPIC, 'client_id')
is_ssl = config.get_value(CONFIG_TOPIC, 'ssl')
keepalive = int(config.get_value(CONFIG_TOPIC, 'keepalive'))
COMMAND_SUBSCRIBE_QOS = 1
COMMAND_RESPONSE_QOS = 1
DEVICE_BASIC_QOS = 0
DEVICE_ADVANCED_QOS = 1
def natrix_mqttclient():
    """Generate a natrix mqtt client.

    This function encapsulates all configurations about natrix mqtt client.
    Include:
    - client_id
        The unique id about mqtt connection (clean_session=False keeps the
        broker-side session and queued QoS>0 messages across reconnects).
    - username & password
        Username is device serial number which used to identify who am I.
    - TLS
        Enabled only when the config flag `ssl` is the string 'TRUE'
        (case-insensitive).
    - last will
        On an ungraceful disconnect the broker publishes this client's
        identity to natrix/disconnect/<username>.
    :return: a configured (not yet connected) paho.mqtt.client.Client
    """
    client = mqtt.Client(client_id=client_id, clean_session=False)
    client.username_pw_set(username, password=password)
    if is_ssl.upper() == 'TRUE':
        ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        client.tls_set_context(ssl_context)

    # connection break: register the last-will message before connecting.
    will_message = json.dumps({'username': username, 'client_id': client_id})
    client.will_set(topic='natrix/disconnect/{}'.format(username),
                    payload=will_message,
                    retain=False)

    return client
client = natrix_mqttclient()
def publish_result_analyse(res, service=None):
    """Log the outcome of a paho-mqtt ``client.publish()`` call.

    res: MQTTMessageInfo returned by publish(); ``rc`` is the immediate
    result code and ``is_published()`` tells whether the broker handoff
    completed.
    service: human-readable label used in the log lines.
    """
    if res.rc == mqtt.MQTT_ERR_SUCCESS:
        if res.is_published():
            logger.info('Publish({})-mid({}) MQTT_ERR_SUCCESS(successfully)'.format(service, res.mid))
        else:
            # Accepted locally but not yet confirmed as sent to the broker.
            if service == 'Device Basic Service':
                logger.info('Reconncting for basic publish error!')
            logger.error('Publish({})-mid({}) MQTT_ERR_SUCCESS, but is_published is False'.format(service, res.mid))
    elif res.rc == mqtt.MQTT_ERR_NO_CONN:
        logger.error('Publish({}) MQTT_ERR_NO_CONN'.format(service))
    elif res.rc == mqtt.MQTT_ERR_QUEUE_SIZE:
        logger.error('Publish({}) MQTT_ERR_QUEUE_SIZE'.format(service))
    else:
        # NOTE(review): "unkown" typo is in a runtime log string; left
        # untouched in this documentation-only pass.
        logger.error('Publish({}) operation with an unkown error'.format(service))
class CommandProcessor(threading.Thread):
def __init__(self, message, client):
super(CommandProcessor, self).__init__()
self.message = message
self.command_info = message.payload
self.client = client
def run(self):
# TODO: add connection checking
logger.debug('CommandProcessor message : {}'.format(self.message))
try:
command = json.loads(str(self.command_info, encoding='utf-8'))
except TypeError:
command = json.loads(self.command_info)
test_data = processor(command)
if test_data is None:
logger.error('Process Command with error: {}'.format(command))
topic = self.message.topic.split('/')
# TODO: add exception process
terminal = topic[-1]
test_data['command'] = {
'uuid': command.get('uuid', None),
'terminal': terminal
}
logger.debug('Dial test result : {}'.format(test_data))
res = self.client.publish(topic='natrix/response',
payload=json.dumps(test_data),
qos=COMMAND_RESPONSE_QOS,
retain=False)
publish_result_analyse(res, 'command response')
class DeviceBasicService(threading.Thread):
"""
"""
def __init__(self, client=client):
super(DeviceBasicService, self).__init__()
self.client = client
logger.info('Basic Service : client state ({}) | ping_t ({})'.format(client._state, client._ping_t))
self.topic_str = 'natrix/basic/{}'.format(username)
def run(self):
try:
device_check = CheckTest(request_parameters={'type': 'basic'})
device_info = device_check.check()
res = self.client.publish(topic=self.topic_str,
payload=json.dumps(device_info),
qos=DEVICE_BASIC_QOS,
retain=False)
publish_result_analyse(res, 'Device Basic Service')
except Exception as e:
logger.error('There is an exception fro advanced reporter: {}'.format(e))
class DeviceAdvancedService(threading.Thread):
"""
"""
def __init__(self, client=client):
super(DeviceAdvancedService, self).__init__()
self.client = client
self.topic_str = 'natrix/advanced/{}'.format(username)
def run(self):
try:
device_check = CheckTest(request_parameters={'type': 'advance'})
device_info = device_check.check()
res = self.client.publish(topic=self.topic_str,
payload=json.dumps(device_info),
qos=DEVICE_ADVANCED_QOS,
retain=False)
publish_result_analyse(res, 'Device Advance Service')
except Exception as e:
logger.error('There is an exception fro advanced reporter: {}'.format(e))
class CommandSubscribe(threading.Thread):
def __init__(self, client=client):
super(CommandSubscribe, self).__init__()
def run(self):
time.sleep(5)
macs = NetInfo().get_macs()
for mac in macs:
client.subscribe('natrix/benchmark/{}'.format(mac), qos=COMMAND_SUBSCRIBE_QOS)
client.message_callback_add('natrix/benchmark/#', SubscribeProcess.benchmark_process)
class SubscribeProcess:
"""Include all subscribe process
"""
@staticmethod
def benchmark_process(client, userdata, message):
try:
command_processor = CommandProcessor(message, client)
command_processor.start()
except Exception as e:
logger.error('process benchmark command occur an error: {}'.format(e))
def on_connect(client, userdata, flags, rc):
if rc > 0:
logger.error(mqtt.connack_string(rc))
return
main_thread = threading.current_thread()
# start to post terminal information
single_service = SingletonService()
single_service.init(main_thread, DeviceBasicService, DeviceAdvancedService)
single_service.start()
# subscribe pocess
CommandSubscribe().start()
def disconnect():
logger.error('occur a Disconnection Action!')
def socket_close_callback(client, userdata, sock):
logger.error('Socket is closed for reading : {}, will reconnect after!'.format(sock))
def on_socket_unregister_write(client, userdata, sock):
logger.error('Socket is closed for writing : {}, will after!'.format(sock))
def on_subscribe(client, userdata, mid, granted_qos):
logger.info('subscribe ...... {} {}'.format(client, userdata))
def on_publish_callback(client, userdata, mid):
logger.info('publish data mid({})'.format(mid))
def log_callback(client, userdata, level, buf):
logger.info('MQTT LOGGER : {}'.format(buf))
def start():
try:
client.on_connect = on_connect
client.disconnect = disconnect
client.on_subscribe = on_subscribe
client.on_publish = on_publish_callback
client.on_socket_close = socket_close_callback
client.on_log = log_callback
client.enable_logger()
client.connect(host, port, keepalive)
while True:
rc = client.loop_forever()
logger.error('loop_forever stop : {}'.format(mqtt.connack_string(rc)))
client.reconnect()
except KeyboardInterrupt as e:
logger.info('Service End!')
def stop():
pass
if __name__ == '__main__':
start() | # -*- coding: utf-8 -*-
"""
"""
import ssl, threading, json, time
import paho.mqtt.client as mqtt
from natrixclient.common.config import NatrixConfig
from natrixclient.command.ncheck import CheckTest
from natrixclient.command.check.network import NetInfo
from natrixclient.backends.base import SingletonService
from natrixclient.backends.command_processor import processor
from natrixclient.command.check.hardware import HardwareInfo
from natrixclient.backends.base import logger
CONFIG_TOPIC = 'MQTT'
config = NatrixConfig()
host = config.get_value(CONFIG_TOPIC, 'host')
port = int(config.get_value(CONFIG_TOPIC, 'port'))
vhost = config.get_value(CONFIG_TOPIC, 'vhost')
username = HardwareInfo.get_sn()
password = config.get_value(CONFIG_TOPIC, 'password')
client_id = config.get_value(CONFIG_TOPIC, 'client_id')
is_ssl = config.get_value(CONFIG_TOPIC, 'ssl')
keepalive = int(config.get_value(CONFIG_TOPIC, 'keepalive'))
COMMAND_SUBSCRIBE_QOS = 1
COMMAND_RESPONSE_QOS = 1
DEVICE_BASIC_QOS = 0
DEVICE_ADVANCED_QOS = 1
def natrix_mqttclient():
"""Generate a natrix mqtt client.
This function encapsulates all configurations about natrix mqtt client.
Include:
- client_id
The unique id about mqtt connection.
- username & password
Username is device serial number which used to identify who am I;
:return:
"""
client = mqtt.Client(client_id=client_id, clean_session=False)
client.username_pw_set(username, password=password)
if is_ssl.upper() == 'TRUE':
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
client.tls_set_context(ssl_context)
# connection break
will_message = json.dumps({'username': username, 'client_id': client_id})
client.will_set(topic='natrix/disconnect/{}'.format(username),
payload=will_message,
retain=False)
return client
client = natrix_mqttclient()
def publish_result_analyse(res, service=None):
if res.rc == mqtt.MQTT_ERR_SUCCESS:
if res.is_published():
logger.info('Publish({})-mid({}) MQTT_ERR_SUCCESS(successfully)'.format(service, res.mid))
else:
if service == 'Device Basic Service':
logger.info('Reconncting for basic publish error!')
logger.error('Publish({})-mid({}) MQTT_ERR_SUCCESS, but is_published is False'.format(service, res.mid))
elif res.rc == mqtt.MQTT_ERR_NO_CONN:
logger.error('Publish({}) MQTT_ERR_NO_CONN'.format(service))
elif res.rc == mqtt.MQTT_ERR_QUEUE_SIZE:
logger.error('Publish({}) MQTT_ERR_QUEUE_SIZE'.format(service))
else:
logger.error('Publish({}) operation with an unkown error'.format(service))
class CommandProcessor(threading.Thread):
def __init__(self, message, client):
super(CommandProcessor, self).__init__()
self.message = message
self.command_info = message.payload
self.client = client
def run(self):
# TODO: add connection checking
logger.debug('CommandProcessor message : {}'.format(self.message))
try:
command = json.loads(str(self.command_info, encoding='utf-8'))
except TypeError:
command = json.loads(self.command_info)
test_data = processor(command)
if test_data is None:
logger.error('Process Command with error: {}'.format(command))
topic = self.message.topic.split('/')
# TODO: add exception process
terminal = topic[-1]
test_data['command'] = {
'uuid': command.get('uuid', None),
'terminal': terminal
}
logger.debug('Dial test result : {}'.format(test_data))
res = self.client.publish(topic='natrix/response',
payload=json.dumps(test_data),
qos=COMMAND_RESPONSE_QOS,
retain=False)
publish_result_analyse(res, 'command response')
class DeviceBasicService(threading.Thread):
"""
"""
def __init__(self, client=client):
super(DeviceBasicService, self).__init__()
self.client = client
logger.info('Basic Service : client state ({}) | ping_t ({})'.format(client._state, client._ping_t))
self.topic_str = 'natrix/basic/{}'.format(username)
def run(self):
try:
device_check = CheckTest(request_parameters={'type': 'basic'})
device_info = device_check.check()
res = self.client.publish(topic=self.topic_str,
payload=json.dumps(device_info),
qos=DEVICE_BASIC_QOS,
retain=False)
publish_result_analyse(res, 'Device Basic Service')
except Exception as e:
logger.error('There is an exception fro advanced reporter: {}'.format(e))
class DeviceAdvancedService(threading.Thread):
"""
"""
def __init__(self, client=client):
super(DeviceAdvancedService, self).__init__()
self.client = client
self.topic_str = 'natrix/advanced/{}'.format(username)
def run(self):
try:
device_check = CheckTest(request_parameters={'type': 'advance'})
device_info = device_check.check()
res = self.client.publish(topic=self.topic_str,
payload=json.dumps(device_info),
qos=DEVICE_ADVANCED_QOS,
retain=False)
publish_result_analyse(res, 'Device Advance Service')
except Exception as e:
logger.error('There is an exception fro advanced reporter: {}'.format(e))
class CommandSubscribe(threading.Thread):
def __init__(self, client=client):
super(CommandSubscribe, self).__init__()
def run(self):
time.sleep(5)
macs = NetInfo().get_macs()
for mac in macs:
client.subscribe('natrix/benchmark/{}'.format(mac), qos=COMMAND_SUBSCRIBE_QOS)
client.message_callback_add('natrix/benchmark/#', SubscribeProcess.benchmark_process)
class SubscribeProcess:
"""Include all subscribe process
"""
@staticmethod
def benchmark_process(client, userdata, message):
try:
command_processor = CommandProcessor(message, client)
command_processor.start()
except Exception as e:
logger.error('process benchmark command occur an error: {}'.format(e))
def on_connect(client, userdata, flags, rc):
if rc > 0:
logger.error(mqtt.connack_string(rc))
return
main_thread = threading.current_thread()
# start to post terminal information
single_service = SingletonService()
single_service.init(main_thread, DeviceBasicService, DeviceAdvancedService)
single_service.start()
# subscribe pocess
CommandSubscribe().start()
def disconnect():
logger.error('occur a Disconnection Action!')
def socket_close_callback(client, userdata, sock):
logger.error('Socket is closed for reading : {}, will reconnect after!'.format(sock))
def on_socket_unregister_write(client, userdata, sock):
logger.error('Socket is closed for writing : {}, will after!'.format(sock))
def on_subscribe(client, userdata, mid, granted_qos):
logger.info('subscribe ...... {} {}'.format(client, userdata))
def on_publish_callback(client, userdata, mid):
logger.info('publish data mid({})'.format(mid))
def log_callback(client, userdata, level, buf):
logger.info('MQTT LOGGER : {}'.format(buf))
def start():
try:
client.on_connect = on_connect
client.disconnect = disconnect
client.on_subscribe = on_subscribe
client.on_publish = on_publish_callback
client.on_socket_close = socket_close_callback
client.on_log = log_callback
client.enable_logger()
client.connect(host, port, keepalive)
while True:
rc = client.loop_forever()
logger.error('loop_forever stop : {}'.format(mqtt.connack_string(rc)))
client.reconnect()
except KeyboardInterrupt as e:
logger.info('Service End!')
def stop():
pass
if __name__ == '__main__':
start() | en | 0.702225 | # -*- coding: utf-8 -*- Generate a natrix mqtt client. This function encapsulates all configurations about natrix mqtt client. Include: - client_id The unique id about mqtt connection. - username & password Username is device serial number which used to identify who am I; :return: # connection break # TODO: add connection checking # TODO: add exception process #', SubscribeProcess.benchmark_process) Include all subscribe process # start to post terminal information # subscribe pocess | 2.431541 | 2 |
cuaternios.py | wirms/katarina-remastered-electric-boogaloo | 0 | 6619839 | <filename>cuaternios.py
import numpy as np
def ang2cua(v,theta,rad=0):
    """Axis-angle -> quaternion [w, x, y, z].

    v: 3-component rotation axis (normalized internally).
    theta: rotation angle, in degrees when rad=0 or radians when rad=1.
    Returns a normalized quaternion, or a Spanish error string on invalid
    arguments (this module's error-reporting convention).
    """
    if rad==0:
        # Degrees -> radians.
        theta = theta * np.pi/180
    elif rad==1:
        pass
    else:
        return "Error en cuaternios/ang2cua, rad debe ser 0 o 1"
    if len(v) == 3:
        pass
    else:
        return "Error en cuaternios/ang2cua, v debe ser una lista de 3 elementos"
    # Quaternions encode the half angle.
    theta=theta/2
    modv = norma(v)
    # Guard against a (near-)zero axis: divide by 1 instead.
    if modv <=1e-8:
        modv=1
    q=[0,0,0,0]
    q[0] = np.cos(theta)
    q[1] = np.sin(theta)* v[0]/modv
    q[2] = np.sin(theta)* v[1]/modv
    q[3] = np.sin(theta)* v[2]/modv
    # NOTE(review): when norma() returns the squared norm for 4-element
    # input (as in some revisions of this module), this renormalization
    # divides by |q|^2 rather than |q|; for the unit quaternion built
    # above |q| ~= 1 so the difference is negligible.
    modq = norma(q)
    if modq <=1e-8:
        pass
    else:
        for i in range(0,4):
            q[i] = q[i]/modq
    return q
def pro(p, q):
    """Hamilton product of two quaternions [w, x, y, z].

    Returns the product p*q as a 4-element list, or a Spanish error string
    (this module's error convention) when either argument does not have
    exactly 4 components.
    """
    # BUG FIX: the original check was `len(p) and len(q) == 4`, which only
    # validated q's length (p merely had to be non-empty), so a malformed
    # p crashed with IndexError below instead of returning the error.
    if len(p) == 4 and len(q) == 4:
        pass
    else:
        return "Error en cuaternios/pro, deben ser dos cuaternios de 4 elementos"
    u = [0, 0, 0, 0]
    u[0] = p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]
    u[1] = p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2]
    u[2] = p[0]*q[2] + p[2]*q[0] + p[3]*q[1] - p[1]*q[3]
    u[3] = p[0]*q[3] + p[3]*q[0] + p[1]*q[2] - p[2]*q[1]
    return u
def conj(q):
    """Conjugate of quaternion q = [w, x, y, z] -> [w, -x, -y, -z].

    Returns a Spanish error string (module convention) when q does not
    have exactly 4 components.
    """
    if len(q) != 4:
        return "Error en cuaternios/conj, debe ser una lista de 4 elementos"
    w, x, y, z = q
    return [w, -x, -y, -z]
def norma(q):
    """Euclidean norm of a 3-component vector or 4-component quaternion.

    BUG FIX: the 4-element branch used to return the *squared* norm
    (no sqrt), unlike the 3-element branch. Both branches now return the
    true norm. Also avoids shadowing this function's own name with a
    local variable. Invalid lengths return a Spanish error string
    (module convention).
    """
    if len(q) == 4:
        return np.sqrt(q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)
    elif len(q) == 3:
        return np.sqrt(q[0]**2 + q[1]**2 + q[2]**2)
    else:
        return "Error en cuaternios/norma, debe ser una lista de 4 elementos"
def conv(v, q):
    """Rotate 3-vector v by quaternion q via q * (0, v) * conj(q)."""
    embedded = [0, v[0], v[1], v[2]]
    rotated = pro(q, pro(embedded, conj(q)))
    if len(rotated) == 4:
        # Drop the scalar part; keep only the vector components.
        rotated = [rotated[1], rotated[2], rotated[3]]
    return rotated
def giro(u, v, theta, rad=0):
    """Rotate vector u by angle theta around axis v (degrees unless rad=1)."""
    return conv(u, ang2cua(v, theta, rad=rad))
def girar(u, q):
    """Rotate vector u by the quaternion q."""
    rotado = conv(u, q)
    return [rotado[0], rotado[1], rotado[2]]
def giro2cua(u,v): # Quaternion for the rotation that takes u onto v
    """Return the quaternion rotating vector u onto vector v.

    Angle comes from the normalized dot product, axis from the cross
    product u x v (normalized). When u and v are (anti)parallel the cross
    product is zero and the axis degenerates to [0, 0, 0].
    """
    # cos(theta) = (u . v) / (|u| |v|)
    theta = (u[0]*v[0] + u[1]*v[1] + u[2]*v[2])
    theta = theta/norma(u)
    theta = theta/norma(v)
    theta = np.arccos(theta)
    # Rotation axis: cross product u x v.
    g = [0,0,0]
    g[0] = u[1]*v[2] - u[2]*v[1]
    g[1] = u[2]*v[0] - u[0]*v[2]
    g[2] = u[0]*v[1] - u[1]*v[0]
    modg = norma(g)
    # NOTE(review): exact float comparison with 0; near-parallel inputs
    # with a tiny (but nonzero) cross product are not caught here
    # (ang2cua has its own 1e-8 guard downstream).
    if modg == 0:
        modg=1
    for i in range(0,3):
        g[i] = g[i]/modg
    # theta is already in radians at this point.
    q=ang2cua(g,theta,rad=1)
    return q
def verticallocal(x):
    """Unit vector pointing along the 3-vector x, as a numpy array."""
    modv = norma(x)
    return np.array([x[0] / modv, x[1] / modv, x[2] / modv])
def productovectorial(u, v):
    """Cross product u x v of two 3-element vectors, as a numpy array."""
    # BUG FIX: the original test was `len(u) and len(v) == 3`, which only
    # checked that u is non-empty; both lengths are now validated.
    if len(u) == 3 and len(v) == 3:
        pass
    else:
        return "Error en cuaternios/productovectorial, los dos vectores deben ser una lista de 3 elementos"
    g0 = u[1]*v[2] - u[2]*v[1]
    g1 = u[2]*v[0] - u[0]*v[2]
    g2 = u[0]*v[1] - u[1]*v[0]
    return np.array([g0, g1, g2])
def productoescalar(u, v):
    """Dot product of two 3-element vectors."""
    return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]
def director(u, v):
    """Component-wise difference u - v (direction vector from v to u)."""
    return [u[i] - v[i] for i in range(3)]
##############################################
| <filename>cuaternios.py
import numpy as np
def ang2cua(v, theta, rad=0):
    """Build the quaternion for a rotation of `theta` around axis `v`.

    `rad=0` means theta is given in degrees, `rad=1` in radians; any
    other value yields the module's error string.  `v` must be a
    3-element axis; a near-zero axis is left unscaled to avoid 0/0.
    """
    if rad == 0:
        theta = theta * np.pi / 180  # degrees -> radians
    elif rad != 1:
        return "Error en cuaternios/ang2cua, rad debe ser 0 o 1"
    if len(v) != 3:
        return "Error en cuaternios/ang2cua, v debe ser una lista de 3 elementos"
    half = theta / 2
    modv = norma(v)
    if modv <= 1e-8:  # degenerate axis: skip normalization
        modv = 1
    s = np.sin(half)
    q = [np.cos(half), s * v[0] / modv, s * v[1] / modv, s * v[2] / modv]
    modq = norma(q)
    if modq > 1e-8:  # renormalize using the module's norma() convention
        q = [c / modq for c in q]
    return q
def pro(p, q):
    """Hamilton product of two quaternions p and q (4-element lists).

    Returns the 4-element product p*q, or the module's error string when
    either argument is not of length 4.
    """
    # BUG FIX: the original test was `len(p) and len(q) == 4`, which only
    # checked that p is non-empty; both lengths are now validated.
    if len(p) == 4 and len(q) == 4:
        pass
    else:
        return "Error en cuaternios/pro, deben ser dos cuaternios de 4 elementos"
    u = [0, 0, 0, 0]
    # Standard Hamilton product (scalar, i, j, k components).
    u[0] = p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]
    u[1] = p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2]
    u[2] = p[0]*q[2] + p[2]*q[0] + p[3]*q[1] - p[1]*q[3]
    u[3] = p[0]*q[3] + p[3]*q[0] + p[1]*q[2] - p[2]*q[1]
    return u
def conj(q):
    """Conjugate of quaternion q: scalar part kept, vector part negated."""
    if len(q) != 4:
        return "Error en cuaternios/conj, debe ser una lista de 4 elementos"
    return [q[0], -q[1], -q[2], -q[3]]
def norma(q):
    """Euclidean norm of a quaternion (4 elements) or 3-vector.

    Returns the module's error string for any other length.
    """
    # BUG FIX: the 4-element branch previously returned the *squared* norm
    # (the sqrt was missing), inconsistent with the 3-element branch.  In
    # practice quaternions built by ang2cua are already (near) unit length,
    # so the numeric impact on existing callers is negligible.
    if len(q) == 4:
        norma = np.sqrt(q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)
    elif len(q) == 3:
        norma = np.sqrt(q[0]**2 + q[1]**2 + q[2]**2)
    else:
        return "Error en cuaternios/norma, debe ser una lista de 4 elementos"
    return norma
def conv(v, q):
    """Rotate 3-vector v by quaternion q via q * (0, v) * conj(q)."""
    pure = [0, v[0], v[1], v[2]]  # embed v as a pure quaternion
    rotated = pro(q, pro(pure, conj(q)))
    if len(rotated) == 4:
        # drop the (zero) scalar part, keep the vector components
        return [rotated[1], rotated[2], rotated[3]]
    return rotated  # error string propagated from pro/conj
def giro(u, v, theta, rad=0):
    """Rotate vector u by angle theta (degrees unless rad=1) about axis v."""
    return conv(u, ang2cua(v, theta, rad=rad))
def girar(u, q):
    """Rotate vector u by an already-built quaternion q; returns a 3-list."""
    rotated = conv(u, q)
    return list(rotated[:3])
def giro2cua(u, v):
    """Quaternion of the rotation that takes direction u onto direction v."""
    # angle between u and v from the normalized dot product
    cos_t = (u[0]*v[0] + u[1]*v[1] + u[2]*v[2]) / norma(u) / norma(v)
    theta = np.arccos(cos_t)
    # rotation axis = u x v
    g = [u[1]*v[2] - u[2]*v[1],
         u[2]*v[0] - u[0]*v[2],
         u[0]*v[1] - u[1]*v[0]]
    modg = norma(g)
    if modg == 0:  # parallel vectors: keep zero axis, avoid 0/0
        modg = 1
    g = [c / modg for c in g]
    return ang2cua(g, theta, rad=1)
def verticallocal(x):
    """Unit vector pointing along the 3-vector x, as a numpy array."""
    modv = norma(x)
    return np.array([x[0] / modv, x[1] / modv, x[2] / modv])
def productovectorial(u, v):
    """Cross product u x v of two 3-element vectors, as a numpy array."""
    # BUG FIX: the original test was `len(u) and len(v) == 3`, which only
    # checked that u is non-empty; both lengths are now validated.
    if len(u) == 3 and len(v) == 3:
        pass
    else:
        return "Error en cuaternios/productovectorial, los dos vectores deben ser una lista de 3 elementos"
    g0 = u[1]*v[2] - u[2]*v[1]
    g1 = u[2]*v[0] - u[0]*v[2]
    g2 = u[0]*v[1] - u[1]*v[0]
    return np.array([g0, g1, g2])
def productoescalar(u, v):
    """Dot product of two 3-element vectors."""
    return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]
def director(u, v):
    """Component-wise difference u - v (direction vector from v to u)."""
    return [u[i] - v[i] for i in range(3)]
##############################################
| es | 0.846173 | #Producto de dos cuaterniones, producto de Hamilton #Giro de u, theta grados alrrededor de v #Giro necesario para llevar u a v ############################################## | 3.342741 | 3 |
tools/perf/core/perf_benchmark_unittest.py | zipated/src | 2,151 | 6619840 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import tempfile
import unittest
from telemetry import decorators
from telemetry.testing import options_for_unittests
from core import perf_benchmark
class PerfBenchmarkTest(unittest.TestCase):
  """Tests PerfBenchmark.CustomizeBrowserOptions side effects.

  Covers the variation (--enable-features) flags and the ad-tagging
  ruleset / Local State profile files scheduled for copying.
  """
  def setUp(self):
    # Scratch dir used as a fake Chromium output directory by the
    # ad-tagging tests; removed in tearDown.
    self._output_dir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self._output_dir, ignore_errors=True)
  def _ExpectAdTaggingProfileFiles(self, browser_options, expect_present):
    # Asserts that exactly one 'Local State' entry and one '...Ruleset Data'
    # entry are scheduled for copy when expect_present is True, and none
    # otherwise.
    files_to_copy = browser_options.profile_files_to_copy
    local_state_to_copy = [
        (s, d) for (s, d) in files_to_copy if d == 'Local State']
    ruleset_data_to_copy = [
        (s, d) for (s, d) in files_to_copy if d.endswith('Ruleset Data')]
    num_expected_matches = 1 if expect_present else 0
    self.assertEqual(num_expected_matches, len(local_state_to_copy))
    self.assertEqual(num_expected_matches, len(ruleset_data_to_copy))
  @decorators.Disabled('chromeos') # http://crbug.com/844863
  def testVariationArgs(self):
    # Non-reference browsers get exactly one --enable-features flag.
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    benchmark.CustomizeBrowserOptions(options.browser_options)
    extra_args = options.browser_options.extra_browser_args
    feature_args = [a for a in extra_args if a.startswith('--enable-features')]
    self.assertEqual(1, len(feature_args))
  def testVariationArgsReference(self):
    # Reference builds must not receive any feature flags.
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    options.browser_options.browser_type = 'reference'
    benchmark.CustomizeBrowserOptions(options.browser_options)
    extra_args = options.browser_options.extra_browser_args
    feature_args = [a for a in extra_args if a.startswith('--enable-features')]
    self.assertEqual(0, len(feature_args))
  def testNoAdTaggingRuleset(self):
    # Without a generated ruleset in the output dir, no ad-tagging
    # profile files should be scheduled for copy.
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    benchmark.CustomizeBrowserOptions(options.browser_options)
    self._ExpectAdTaggingProfileFiles(options.browser_options, False)
  def testAdTaggingRulesetReference(self):
    # Ruleset exists, but reference builds still must not copy it.
    os.makedirs(os.path.join(
        self._output_dir, 'gen', 'components', 'subresource_filter',
        'tools','GeneratedRulesetData'))
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    options.browser_options.browser_type = 'reference'
    # Careful, do not parse the command line flag for 'chromium-output-dir', as
    # that sets the global os environment variable CHROMIUM_OUTPUT_DIR,
    # affecting other tests. See http://crbug.com/843994.
    options.chromium_output_dir = self._output_dir
    benchmark.CustomizeBrowserOptions(options.browser_options)
    self._ExpectAdTaggingProfileFiles(options.browser_options, False)
  def testAdTaggingRuleset(self):
    # Ruleset exists and this is not a reference build: files are copied.
    os.makedirs(os.path.join(
        self._output_dir, 'gen', 'components', 'subresource_filter',
        'tools','GeneratedRulesetData'))
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    # Careful, do not parse the command line flag for 'chromium-output-dir', as
    # that sets the global os environment variable CHROMIUM_OUTPUT_DIR,
    # affecting other tests. See http://crbug.com/843994.
    options.chromium_output_dir = self._output_dir
    benchmark.CustomizeBrowserOptions(options.browser_options)
    self._ExpectAdTaggingProfileFiles(options.browser_options, True)
| # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import tempfile
import unittest
from telemetry import decorators
from telemetry.testing import options_for_unittests
from core import perf_benchmark
class PerfBenchmarkTest(unittest.TestCase):
  """Tests PerfBenchmark.CustomizeBrowserOptions side effects.

  Covers the variation (--enable-features) flags and the ad-tagging
  ruleset / Local State profile files scheduled for copying.
  """
  def setUp(self):
    # Scratch dir used as a fake Chromium output directory by the
    # ad-tagging tests; removed in tearDown.
    self._output_dir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self._output_dir, ignore_errors=True)
  def _ExpectAdTaggingProfileFiles(self, browser_options, expect_present):
    # Asserts that exactly one 'Local State' entry and one '...Ruleset Data'
    # entry are scheduled for copy when expect_present is True, and none
    # otherwise.
    files_to_copy = browser_options.profile_files_to_copy
    local_state_to_copy = [
        (s, d) for (s, d) in files_to_copy if d == 'Local State']
    ruleset_data_to_copy = [
        (s, d) for (s, d) in files_to_copy if d.endswith('Ruleset Data')]
    num_expected_matches = 1 if expect_present else 0
    self.assertEqual(num_expected_matches, len(local_state_to_copy))
    self.assertEqual(num_expected_matches, len(ruleset_data_to_copy))
  @decorators.Disabled('chromeos') # http://crbug.com/844863
  def testVariationArgs(self):
    # Non-reference browsers get exactly one --enable-features flag.
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    benchmark.CustomizeBrowserOptions(options.browser_options)
    extra_args = options.browser_options.extra_browser_args
    feature_args = [a for a in extra_args if a.startswith('--enable-features')]
    self.assertEqual(1, len(feature_args))
  def testVariationArgsReference(self):
    # Reference builds must not receive any feature flags.
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    options.browser_options.browser_type = 'reference'
    benchmark.CustomizeBrowserOptions(options.browser_options)
    extra_args = options.browser_options.extra_browser_args
    feature_args = [a for a in extra_args if a.startswith('--enable-features')]
    self.assertEqual(0, len(feature_args))
  def testNoAdTaggingRuleset(self):
    # Without a generated ruleset in the output dir, no ad-tagging
    # profile files should be scheduled for copy.
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    benchmark.CustomizeBrowserOptions(options.browser_options)
    self._ExpectAdTaggingProfileFiles(options.browser_options, False)
  def testAdTaggingRulesetReference(self):
    # Ruleset exists, but reference builds still must not copy it.
    os.makedirs(os.path.join(
        self._output_dir, 'gen', 'components', 'subresource_filter',
        'tools','GeneratedRulesetData'))
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    options.browser_options.browser_type = 'reference'
    # Careful, do not parse the command line flag for 'chromium-output-dir', as
    # that sets the global os environment variable CHROMIUM_OUTPUT_DIR,
    # affecting other tests. See http://crbug.com/843994.
    options.chromium_output_dir = self._output_dir
    benchmark.CustomizeBrowserOptions(options.browser_options)
    self._ExpectAdTaggingProfileFiles(options.browser_options, False)
  def testAdTaggingRuleset(self):
    # Ruleset exists and this is not a reference build: files are copied.
    os.makedirs(os.path.join(
        self._output_dir, 'gen', 'components', 'subresource_filter',
        'tools','GeneratedRulesetData'))
    benchmark = perf_benchmark.PerfBenchmark()
    options = options_for_unittests.GetCopy()
    # Careful, do not parse the command line flag for 'chromium-output-dir', as
    # that sets the global os environment variable CHROMIUM_OUTPUT_DIR,
    # affecting other tests. See http://crbug.com/843994.
    options.chromium_output_dir = self._output_dir
    benchmark.CustomizeBrowserOptions(options.browser_options)
    self._ExpectAdTaggingProfileFiles(options.browser_options, True)
| en | 0.828944 | # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # http://crbug.com/844863 # Careful, do not parse the command line flag for 'chromium-output-dir', as # that sets the global os environment variable CHROMIUM_OUTPUT_DIR, # affecting other tests. See http://crbug.com/843994. # Careful, do not parse the command line flag for 'chromium-output-dir', as # that sets the global os environment variable CHROMIUM_OUTPUT_DIR, # affecting other tests. See http://crbug.com/843994. | 2.133166 | 2 |
dbtemplate/tests/test_app.py | subuk/django-dbtemplate | 0 | 6619841 | from mixer.backend.django import mixer
from django.test import TestCase
from django.test.utils import override_settings
# Template-loader setting that wraps the database loader in Django's
# caching loader; used to verify cache invalidation when a DB template
# changes.
CACHED_LOADER_CONFIG = (
    ('django.template.loaders.cached.Loader', [
        'dbtemplate.loader.DatabaseLoader',
    ]),
)
@override_settings(
    ROOT_URLCONF='dbtemplate.tests.app.urls',
)
class GetTestContextTestCase(TestCase):
    """Integration tests for the database-backed template loader."""

    def test_loader_works(self):
        """A template stored in the DB is rendered by the loader."""
        mixer.blend(
            'dbtemplate.Template', slug='test.html', data='<h1>hello</h1>')
        self.assertEqual(self.client.get('/').content, b'<h1>hello</h1>')

    @override_settings(TEMPLATE_LOADERS=CACHED_LOADER_CONFIG)
    def test_change_template_with_cached_loader_works(self):
        """Editing a DB template invalidates the cached loader's entry."""
        tpl = mixer.blend(
            'dbtemplate.Template', slug='test.html', data='<h1>hello</h1>')
        self.client.get('/')  # prime the cached loader
        tpl.data = "<h2>Bye</h2>"
        tpl.save()
        self.assertEqual(self.client.get('/').content, b'<h2>Bye</h2>')
| from mixer.backend.django import mixer
from django.test import TestCase
from django.test.utils import override_settings
# Template-loader setting that wraps the database loader in Django's
# caching loader; used to verify cache invalidation when a DB template
# changes.
CACHED_LOADER_CONFIG = (
    ('django.template.loaders.cached.Loader', [
        'dbtemplate.loader.DatabaseLoader',
    ]),
)
@override_settings(
    ROOT_URLCONF='dbtemplate.tests.app.urls',
)
class GetTestContextTestCase(TestCase):
    """Integration tests for the database-backed template loader."""

    def test_loader_works(self):
        """A template stored in the DB is rendered by the loader."""
        mixer.blend(
            'dbtemplate.Template', slug='test.html', data='<h1>hello</h1>')
        self.assertEqual(self.client.get('/').content, b'<h1>hello</h1>')

    @override_settings(TEMPLATE_LOADERS=CACHED_LOADER_CONFIG)
    def test_change_template_with_cached_loader_works(self):
        """Editing a DB template invalidates the cached loader's entry."""
        tpl = mixer.blend(
            'dbtemplate.Template', slug='test.html', data='<h1>hello</h1>')
        self.client.get('/')  # prime the cached loader
        tpl.data = "<h2>Bye</h2>"
        tpl.save()
        self.assertEqual(self.client.get('/').content, b'<h2>Bye</h2>')
| none | 1 | 2.183133 | 2 | |
tests/integration/test_with_stomp.py | thiagopena/python-mcollective | 1 | 6619842 | from . import base
class TestWithStompMCo22x(base.MCollective20x, base.IntegrationTestCase):
    '''MCollective integration test case.'''
    # Connection settings for a local STOMP broker; the integration base
    # classes merge this dict into the MCollective client configuration.
    CTXT = {
        'connector': 'stomp',
        'plugin.stomp.host': 'localhost',
        'plugin.stomp.port': '61614',
        'plugin.stomp.user': 'mcollective',
        'plugin.stomp.password': '<PASSWORD>',
        'topicprefix': 'topic',
    }
| from . import base
class TestWithStompMCo22x(base.MCollective20x, base.IntegrationTestCase):
    '''MCollective integration test case.'''
    # Connection settings for a local STOMP broker; the integration base
    # classes merge this dict into the MCollective client configuration.
    CTXT = {
        'connector': 'stomp',
        'plugin.stomp.host': 'localhost',
        'plugin.stomp.port': '61614',
        'plugin.stomp.user': 'mcollective',
        'plugin.stomp.password': '<PASSWORD>',
        'topicprefix': 'topic',
    }
| en | 0.668169 | MCollective integration test case. | 1.268701 | 1 |
core/admin.py | savoloon/django_test1 | 0 | 6619843 | <filename>core/admin.py<gh_stars>0
from django.contrib import admin
import core.models
# Make the Book model manageable through the Django admin site
# (default ModelAdmin options).
admin.site.register(core.models.Book)
| <filename>core/admin.py<gh_stars>0
from django.contrib import admin
import core.models
# Make the Book model manageable through the Django admin site
# (default ModelAdmin options).
admin.site.register(core.models.Book)
| none | 1 | 1.207134 | 1 | |
taskmator/manager.py | altenia/taskmator | 2 | 6619844 | <reponame>altenia/taskmator<gh_stars>1-10
import collections
import logging
import json
from taskmator.context import TaskContainer, ExecutionContext
class TaskManager:
    """Loads task specifications and runs their root tasks.

    Containers are cached per specification URI, so a single manager can
    handle multiple loaded specifications.
    """
    logger = logging.getLogger(__name__)

    def __init__(self):
        # spec_uri -> TaskContainer cache
        self.task_containers = {}

    def start_task(self, spec_uri):
        """Execute the root task of the given specification.

        Loads (and caches) the spec on first use and returns the
        ExecutionContext of the run.
        """
        if spec_uri in self.task_containers:
            container = self.task_containers[spec_uri]
        else:
            container = TaskContainer(self.load_spec(spec_uri))
            self.task_containers[spec_uri] = container
        root = container.get_root()
        context = self.create_context(container)
        self.logger.info("Starting " + spec_uri)
        context.mark_start()
        root.execute(context)
        context.mark_stop()
        return context

    def create_context(self, task_container):
        """Build a fresh execution context for one run."""
        return ExecutionContext(task_container)

    def load_spec(self, spec_uri):
        """Read a JSON task specification file, preserving key order."""
        self.logger.info("Loading " + spec_uri)
        with open(spec_uri, "r") as spec_file:
            spec = json.loads(spec_file.read(),
                              object_pairs_hook=collections.OrderedDict)
        return spec
| import collections
import logging
import json
from taskmator.context import TaskContainer, ExecutionContext
class TaskManager:
    """Loads task specifications and runs their root tasks.

    Containers are cached per specification URI, so a single manager can
    handle multiple loaded specifications.
    """
    logger = logging.getLogger(__name__)

    def __init__(self):
        # spec_uri -> TaskContainer cache
        self.task_containers = {}

    def start_task(self, spec_uri):
        """Execute the root task of the given specification.

        Loads (and caches) the spec on first use and returns the
        ExecutionContext of the run.
        """
        if spec_uri in self.task_containers:
            container = self.task_containers[spec_uri]
        else:
            container = TaskContainer(self.load_spec(spec_uri))
            self.task_containers[spec_uri] = container
        root = container.get_root()
        context = self.create_context(container)
        self.logger.info("Starting " + spec_uri)
        context.mark_start()
        root.execute(context)
        context.mark_stop()
        return context

    def create_context(self, task_container):
        """Build a fresh execution context for one run."""
        return ExecutionContext(task_container)

    def load_spec(self, spec_uri):
        """Read a JSON task specification file, preserving key order."""
        self.logger.info("Loading " + spec_uri)
        with open(spec_uri, "r") as spec_file:
            spec = json.loads(spec_file.read(),
                              object_pairs_hook=collections.OrderedDict)
        return spec
anonymizer.py | qiyuangong/Relational_Transaction_Anon | 1 | 6619845 | """
run DA and AA with given parameters
"""
#!/usr/bin/env python
# coding=utf-8
from RT_ANON import rt_anon
from utils.read_informs_data import read_data as read_informs
from utils.read_informs_data import read_tree as read_informs_tree
from utils.read_youtube_data import read_data as read_youtube
from utils.read_youtube_data import read_tree as read_youtube_tree
from models.gentree import GenTree
from utils.maketree import gen_gh_trees
from utils.save_result import save_to_file
import sys
import copy
import random
import cProfile
import pdb
sys.setrecursionlimit(50000)
TYPE_ALG = 'RMR'
DEFAULT_M = 2
M_MAX = 161
DEFAULT_K = 10
DEFAULT_T = 0.65
def get_result_one(att_tree, data, type_alg, k=DEFAULT_K, m=DEFAULT_M, threshold=DEFAULT_T):
    """
    Run RT_ANON once with the given k, m and threshold, printing the
    RNCP/TNCP information loss and the running time.
    """
    print "K=%d" % k
    print "Size of Data", len(data)
    print "m=%d" % m
    print "Threshold=%.2f" % threshold
    # eval_result = (RNCP %, TNCP %, running time in seconds)
    result, eval_result = rt_anon(att_tree, data, type_alg, k, m, threshold)
    # save_to_file((att_tree, data, result, k, m))
    print "RNCP %0.2f" % eval_result[0] + "%"
    print "TNCP %0.2f" % eval_result[1] + "%"
    print "Running time %0.2f" % eval_result[2] + " seconds"
def get_result_k(att_tree, data, type_alg, m=DEFAULT_M, threshold=DEFAULT_T):
    """
    Vary k over a fixed dataset and report RNCP/TNCP information loss
    and running time for each value.
    """
    # rt_anon may mutate `data`, so keep a pristine copy to restore
    # between runs.
    data_back = copy.deepcopy(data)
    # for k in range(5, 105, 5):
    print "m=%d" % m
    print "Threshold=%.2f" % threshold
    print "Size of Data", len(data)
    all_rncp = []
    all_tncp = []
    all_rtime = []
    # for k in range(5, 55, 5):
    # if k in [2, 5, 10, 25, 50, 100]:
    # continue
    k_range = [2, 5, 10, 25, 50, 100]
    for k in k_range:
        print '#' * 30
        print "K=%d" % k
        result, eval_result = rt_anon(att_tree, data, type_alg, k, m, threshold)
        # save_to_file((att_tree, data, result, k, m))
        # restore the dataset for the next run
        data = copy.deepcopy(data_back)
        print "RNCP %0.2f" % eval_result[0] + "%"
        all_rncp.append(round(eval_result[0], 2))
        print "TNCP %0.2f" % eval_result[1] + "%"
        all_tncp.append(round(eval_result[1], 2))
        print "Running time %0.2f" % eval_result[2] + " seconds"
        all_rtime.append(round(eval_result[2], 2))
    print "K range", k_range
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
def get_result_m(att_tree, data, type_alg, k=DEFAULT_K, threshold=DEFAULT_T):
    """
    Vary m over a fixed dataset (fixed k and threshold) and report
    RNCP/TNCP information loss and running time for each value.
    """
    print "K=%d" % k
    print "Threshold=%.2f" % threshold
    print "Size of Data", len(data)
    # rt_anon may mutate `data`; keep a pristine copy to restore between runs.
    data_back = copy.deepcopy(data)
    # for m in range(1, 100, 5):
    all_rncp = []
    all_tncp = []
    all_rtime = []
    m_range = [1, 2, 3, 4, 5, M_MAX]
    for m in m_range:
        print '#' * 30
        print "m=%d" % m
        result, eval_result = rt_anon(att_tree, data, type_alg, k, m, threshold)
        # save_to_file((att_tree, data, result, k, m))
        # restore the dataset for the next run
        data = copy.deepcopy(data_back)
        print "RNCP %0.2f" % eval_result[0] + "%"
        all_rncp.append(round(eval_result[0], 2))
        print "TNCP %0.2f" % eval_result[1] + "%"
        all_tncp.append(round(eval_result[1], 2))
        print "Running time %0.2f" % eval_result[2] + " seconds"
        all_rtime.append(round(eval_result[2], 2))
    print "m range", m_range
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
def get_result_t(att_tree, data, type_alg, k=DEFAULT_K, m=DEFAULT_M):
    """
    Vary the threshold over a fixed dataset (fixed k and m) and report
    RNCP/TNCP information loss and running time for each value.
    """
    print "K=%d" % k
    print "m=%d" % m
    print "Size of Data", len(data)
    # rt_anon may mutate `data`; keep a pristine copy to restore between runs.
    data_back = copy.deepcopy(data)
    # for m in range(1, 100, 5):
    all_rncp = []
    all_tncp = []
    all_rtime = []
    t_range = [0.15, 0.25, 0.4, 0.65]
    for t in t_range:
        print '#' * 30
        print "Threshold=%.2f" % t
        result, eval_result = rt_anon(att_tree, data, type_alg, k, m, t)
        # save_to_file((att_tree, data, result, k, m))
        # restore the dataset for the next run
        data = copy.deepcopy(data_back)
        print "RNCP %0.2f" % eval_result[0] + "%"
        all_rncp.append(round(eval_result[0], 2))
        print "TNCP %0.2f" % eval_result[1] + "%"
        all_tncp.append(round(eval_result[1], 2))
        print "Running time %0.2f" % eval_result[2] + " seconds"
        all_rtime.append(round(eval_result[2], 2))
    print "threshold range", t_range
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
def get_result_dataset(att_tree, data, type_alg='RMR',
                       k=DEFAULT_K, m=DEFAULT_M, threshold=DEFAULT_T, num_test=10):
    """
    Fix k/m/threshold while varying the size of the dataset in chunks of
    5000 records; each size is run `num_test` times on random samples and
    the averaged RNCP/TNCP and running time are reported.
    """
    print "K=%d" % k
    print "m=%d" % m
    print "Threshold=%.2f" % threshold
    # rt_anon may mutate its input; keep a pristine copy to restore from.
    data_back = copy.deepcopy(data)
    length = len(data_back)
    joint = 5000
    datasets = []
    # number of sample sizes: 5000, 10000, ... (excluding the full size)
    check_time = length / joint
    if length % joint == 0:
        check_time -= 1
    for i in range(check_time):
        datasets.append(joint * (i + 1))
    # datasets.append(length)
    all_rncp = []
    all_tncp = []
    all_rtime = []
    for pos in datasets:
        rncp = tncp = rtime = 0
        if pos > length:
            continue
        print '#' * 30
        print "size of dataset %d" % pos
        for j in range(num_test):
            # fresh random sample of `pos` records per repetition
            temp = random.sample(data, pos)
            result, eval_result = rt_anon(att_tree, temp, type_alg, k, m, threshold)
            # save_to_file((att_tree, temp, result, k, m), number=j)
            rncp += eval_result[0]
            tncp += eval_result[1]
            rtime += eval_result[2]
            data = copy.deepcopy(data_back)
        # average over the repetitions
        rncp /= num_test
        tncp /= num_test
        rtime /= num_test
        print "RNCP %0.2f" % rncp + "%"
        all_rncp.append(round(rncp, 2))
        print "TNCP %0.2f" % tncp + "%"
        all_tncp.append(round(tncp, 2))
        print "Running time %0.2f" % rtime + " seconds"
        all_rtime.append(round(rtime, 2))
    print "Size of datasets", datasets
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
if __name__ == '__main__':
    # CLI: python anonymizer.py [i|y] <TYPE_ALG> [k|m|t|data|<int k>]
    # set K=10 as default
    FLAG = ''
    DATA_SELECT = 'i'
    # gen_even_BMS_tree(5)
    try:
        DATA_SELECT = sys.argv[1]
        TYPE_ALG = sys.argv[2]
        FLAG = sys.argv[3]
    except IndexError:
        pass
    INPUT_K = 10
    print "*" * 30
    # Pick the dataset and its generalization hierarchies ('i' = INFORMS,
    # 'y' = Youtube; anything else falls back to INFORMS).
    if DATA_SELECT == 'i':
        print "INFORMS data"
        DATA = read_informs()
        # gen_gh_trees(DATA_SELECT)
        ATT_TREES = read_informs_tree()
    elif DATA_SELECT == 'y':
        print "Youtube data"
        DATA = read_youtube()
        # gen_gh_trees(DATA_SELECT)
        ATT_TREES = read_youtube_tree()
    else:
        print "INFORMS data"
        DATA = read_informs()
        # gen_gh_trees(DATA_SELECT)
        ATT_TREES = read_informs_tree()
    # read generalization hierarchy
    # read record
    # remove duplicate items
    # DATA = DATA[:1000]
    # for i in range(len(DATA)):
    # if len(DATA[i]) <= 40:
    # DATA[i] = list(set(DATA[i]))
    # else:
    # DATA[i] = list(set(DATA[i][:40]))
    # de-duplicate each record's transaction part (last element)
    for i in range(len(DATA)):
        DATA[i][-1] = list(set(DATA[i][-1]))
    print "Begin to run", TYPE_ALG
    print "*" * 10
    # print "Begin Apriori based Anon"
    # Dispatch on FLAG: vary k / m / threshold / dataset size, or run once.
    if FLAG == 'k':
        get_result_k(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'm':
        get_result_m(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 't':
        get_result_t(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'data':
        k = DEFAULT_K
        try:
            k = int(sys.argv[4])
        # NOTE(review): bare except also hides unrelated errors; an explicit
        # (IndexError, ValueError) would be safer.
        except:
            pass
        if k != DEFAULT_K:
            get_result_dataset(ATT_TREES, DATA, TYPE_ALG, k)
        else:
            get_result_dataset(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == '':
        # cProfile.run('get_result_one(ATT_TREES, DATA, TYPE_ALG)')
        get_result_one(ATT_TREES, DATA, TYPE_ALG)
    else:
        # A numeric FLAG is interpreted as a custom k for a single run.
        try:
            INPUT_K = int(FLAG)
            get_result_one(ATT_TREES, DATA, TYPE_ALG, INPUT_K)
        except ValueError:
            print "Usage: python anonymizer [k | m | data]"
            print "k: varying k"
            print "m: varying m"
            print "data: varying size of dataset"
            print "example: python anonymizer RMR 10"
            print "example: python anonymizer RMT k"
    # anonymized dataset is stored in result
    print "Finish RT_ANON!!"
| """
run DA and AA with given parameters
"""
#!/usr/bin/env python
# coding=utf-8
from RT_ANON import rt_anon
from utils.read_informs_data import read_data as read_informs
from utils.read_informs_data import read_tree as read_informs_tree
from utils.read_youtube_data import read_data as read_youtube
from utils.read_youtube_data import read_tree as read_youtube_tree
from models.gentree import GenTree
from utils.maketree import gen_gh_trees
from utils.save_result import save_to_file
import sys
import copy
import random
import cProfile
import pdb
sys.setrecursionlimit(50000)
TYPE_ALG = 'RMR'
DEFAULT_M = 2
M_MAX = 161
DEFAULT_K = 10
DEFAULT_T = 0.65
def get_result_one(att_tree, data, type_alg, k=DEFAULT_K, m=DEFAULT_M, threshold=DEFAULT_T):
    """
    Run RT_ANON once with the given k, m and threshold, printing the
    RNCP/TNCP information loss and the running time.
    """
    print "K=%d" % k
    print "Size of Data", len(data)
    print "m=%d" % m
    print "Threshold=%.2f" % threshold
    # eval_result = (RNCP %, TNCP %, running time in seconds)
    result, eval_result = rt_anon(att_tree, data, type_alg, k, m, threshold)
    # save_to_file((att_tree, data, result, k, m))
    print "RNCP %0.2f" % eval_result[0] + "%"
    print "TNCP %0.2f" % eval_result[1] + "%"
    print "Running time %0.2f" % eval_result[2] + " seconds"
def get_result_k(att_tree, data, type_alg, m=DEFAULT_M, threshold=DEFAULT_T):
    """
    Vary k over a fixed dataset and report RNCP/TNCP information loss
    and running time for each value.
    """
    # rt_anon may mutate `data`, so keep a pristine copy to restore
    # between runs.
    data_back = copy.deepcopy(data)
    # for k in range(5, 105, 5):
    print "m=%d" % m
    print "Threshold=%.2f" % threshold
    print "Size of Data", len(data)
    all_rncp = []
    all_tncp = []
    all_rtime = []
    # for k in range(5, 55, 5):
    # if k in [2, 5, 10, 25, 50, 100]:
    # continue
    k_range = [2, 5, 10, 25, 50, 100]
    for k in k_range:
        print '#' * 30
        print "K=%d" % k
        result, eval_result = rt_anon(att_tree, data, type_alg, k, m, threshold)
        # save_to_file((att_tree, data, result, k, m))
        # restore the dataset for the next run
        data = copy.deepcopy(data_back)
        print "RNCP %0.2f" % eval_result[0] + "%"
        all_rncp.append(round(eval_result[0], 2))
        print "TNCP %0.2f" % eval_result[1] + "%"
        all_tncp.append(round(eval_result[1], 2))
        print "Running time %0.2f" % eval_result[2] + " seconds"
        all_rtime.append(round(eval_result[2], 2))
    print "K range", k_range
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
def get_result_m(att_tree, data, type_alg, k=DEFAULT_K, threshold=DEFAULT_T):
    """
    Vary m over a fixed dataset (fixed k and threshold) and report
    RNCP/TNCP information loss and running time for each value.
    """
    print "K=%d" % k
    print "Threshold=%.2f" % threshold
    print "Size of Data", len(data)
    # rt_anon may mutate `data`; keep a pristine copy to restore between runs.
    data_back = copy.deepcopy(data)
    # for m in range(1, 100, 5):
    all_rncp = []
    all_tncp = []
    all_rtime = []
    m_range = [1, 2, 3, 4, 5, M_MAX]
    for m in m_range:
        print '#' * 30
        print "m=%d" % m
        result, eval_result = rt_anon(att_tree, data, type_alg, k, m, threshold)
        # save_to_file((att_tree, data, result, k, m))
        # restore the dataset for the next run
        data = copy.deepcopy(data_back)
        print "RNCP %0.2f" % eval_result[0] + "%"
        all_rncp.append(round(eval_result[0], 2))
        print "TNCP %0.2f" % eval_result[1] + "%"
        all_tncp.append(round(eval_result[1], 2))
        print "Running time %0.2f" % eval_result[2] + " seconds"
        all_rtime.append(round(eval_result[2], 2))
    print "m range", m_range
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
def get_result_t(att_tree, data, type_alg, k=DEFAULT_K, m=DEFAULT_M):
    """
    Vary the threshold over a fixed dataset (fixed k and m) and report
    RNCP/TNCP information loss and running time for each value.
    """
    print "K=%d" % k
    print "m=%d" % m
    print "Size of Data", len(data)
    # rt_anon may mutate `data`; keep a pristine copy to restore between runs.
    data_back = copy.deepcopy(data)
    # for m in range(1, 100, 5):
    all_rncp = []
    all_tncp = []
    all_rtime = []
    t_range = [0.15, 0.25, 0.4, 0.65]
    for t in t_range:
        print '#' * 30
        print "Threshold=%.2f" % t
        result, eval_result = rt_anon(att_tree, data, type_alg, k, m, t)
        # save_to_file((att_tree, data, result, k, m))
        # restore the dataset for the next run
        data = copy.deepcopy(data_back)
        print "RNCP %0.2f" % eval_result[0] + "%"
        all_rncp.append(round(eval_result[0], 2))
        print "TNCP %0.2f" % eval_result[1] + "%"
        all_tncp.append(round(eval_result[1], 2))
        print "Running time %0.2f" % eval_result[2] + " seconds"
        all_rtime.append(round(eval_result[2], 2))
    print "threshold range", t_range
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
def get_result_dataset(att_tree, data, type_alg='RMR',
                       k=DEFAULT_K, m=DEFAULT_M, threshold=DEFAULT_T, num_test=10):
    """
    Fix k/m/threshold while varying the size of the dataset in chunks of
    5000 records; each size is run `num_test` times on random samples and
    the averaged RNCP/TNCP and running time are reported.
    """
    print "K=%d" % k
    print "m=%d" % m
    print "Threshold=%.2f" % threshold
    # rt_anon may mutate its input; keep a pristine copy to restore from.
    data_back = copy.deepcopy(data)
    length = len(data_back)
    joint = 5000
    datasets = []
    # number of sample sizes: 5000, 10000, ... (excluding the full size)
    check_time = length / joint
    if length % joint == 0:
        check_time -= 1
    for i in range(check_time):
        datasets.append(joint * (i + 1))
    # datasets.append(length)
    all_rncp = []
    all_tncp = []
    all_rtime = []
    for pos in datasets:
        rncp = tncp = rtime = 0
        if pos > length:
            continue
        print '#' * 30
        print "size of dataset %d" % pos
        for j in range(num_test):
            # fresh random sample of `pos` records per repetition
            temp = random.sample(data, pos)
            result, eval_result = rt_anon(att_tree, temp, type_alg, k, m, threshold)
            # save_to_file((att_tree, temp, result, k, m), number=j)
            rncp += eval_result[0]
            tncp += eval_result[1]
            rtime += eval_result[2]
            data = copy.deepcopy(data_back)
        # average over the repetitions
        rncp /= num_test
        tncp /= num_test
        rtime /= num_test
        print "RNCP %0.2f" % rncp + "%"
        all_rncp.append(round(rncp, 2))
        print "TNCP %0.2f" % tncp + "%"
        all_tncp.append(round(tncp, 2))
        print "Running time %0.2f" % rtime + " seconds"
        all_rtime.append(round(rtime, 2))
    print "Size of datasets", datasets
    print "RNCP", all_rncp
    print "TNCP", all_tncp
    print "Running time", all_rtime
if __name__ == '__main__':
    # CLI: python anonymizer.py [i|y] <TYPE_ALG> [k|m|t|data|<int k>]
    # set K=10 as default
    FLAG = ''
    DATA_SELECT = 'i'
    # gen_even_BMS_tree(5)
    try:
        DATA_SELECT = sys.argv[1]
        TYPE_ALG = sys.argv[2]
        FLAG = sys.argv[3]
    except IndexError:
        pass
    INPUT_K = 10
    print "*" * 30
    # Pick the dataset and its generalization hierarchies ('i' = INFORMS,
    # 'y' = Youtube; anything else falls back to INFORMS).
    if DATA_SELECT == 'i':
        print "INFORMS data"
        DATA = read_informs()
        # gen_gh_trees(DATA_SELECT)
        ATT_TREES = read_informs_tree()
    elif DATA_SELECT == 'y':
        print "Youtube data"
        DATA = read_youtube()
        # gen_gh_trees(DATA_SELECT)
        ATT_TREES = read_youtube_tree()
    else:
        print "INFORMS data"
        DATA = read_informs()
        # gen_gh_trees(DATA_SELECT)
        ATT_TREES = read_informs_tree()
    # read generalization hierarchy
    # read record
    # remove duplicate items
    # DATA = DATA[:1000]
    # for i in range(len(DATA)):
    # if len(DATA[i]) <= 40:
    # DATA[i] = list(set(DATA[i]))
    # else:
    # DATA[i] = list(set(DATA[i][:40]))
    # de-duplicate each record's transaction part (last element)
    for i in range(len(DATA)):
        DATA[i][-1] = list(set(DATA[i][-1]))
    print "Begin to run", TYPE_ALG
    print "*" * 10
    # print "Begin Apriori based Anon"
    # Dispatch on FLAG: vary k / m / threshold / dataset size, or run once.
    if FLAG == 'k':
        get_result_k(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'm':
        get_result_m(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 't':
        get_result_t(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'data':
        k = DEFAULT_K
        try:
            k = int(sys.argv[4])
        # NOTE(review): bare except also hides unrelated errors; an explicit
        # (IndexError, ValueError) would be safer.
        except:
            pass
        if k != DEFAULT_K:
            get_result_dataset(ATT_TREES, DATA, TYPE_ALG, k)
        else:
            get_result_dataset(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == '':
        # cProfile.run('get_result_one(ATT_TREES, DATA, TYPE_ALG)')
        get_result_one(ATT_TREES, DATA, TYPE_ALG)
    else:
        # A numeric FLAG is interpreted as a custom k for a single run.
        try:
            INPUT_K = int(FLAG)
            get_result_one(ATT_TREES, DATA, TYPE_ALG, INPUT_K)
        except ValueError:
            print "Usage: python anonymizer [k | m | data]"
            print "k: varying k"
            print "m: varying m"
            print "data: varying size of dataset"
            print "example: python anonymizer RMR 10"
            print "example: python anonymizer RMT k"
    # anonymized dataset is stored in result
    print "Finish RT_ANON!!"
| en | 0.465559 | run DA and AA with given parameters #!/usr/bin/env python # coding=utf-8 run RT_ANON for one time, with k=10 # save_to_file((att_tree, data, result, k, m)) change k, whle fixing size of dataset # for k in range(5, 105, 5): # for k in range(5, 55, 5): # if k in [2, 5, 10, 25, 50, 100]: # continue # save_to_file((att_tree, data, result, k, m)) change k, whle fixing size of dataset # for m in range(1, 100, 5): # save_to_file((att_tree, data, result, k, m)) change k, whle fixing size of dataset # for m in range(1, 100, 5): # save_to_file((att_tree, data, result, k, m)) fix k, while changing size of dataset num_test is the test nubmber. # datasets.append(length) # save_to_file((att_tree, temp, result, k, m), number=j) # set K=10 as default # gen_even_BMS_tree(5) # gen_gh_trees(DATA_SELECT) # gen_gh_trees(DATA_SELECT) # gen_gh_trees(DATA_SELECT) # read generalization hierarchy # read record # remove duplicate items # DATA = DATA[:1000] # for i in range(len(DATA)): # if len(DATA[i]) <= 40: # DATA[i] = list(set(DATA[i])) # else: # DATA[i] = list(set(DATA[i][:40])) # print "Begin Apriori based Anon" # cProfile.run('get_result_one(ATT_TREES, DATA, TYPE_ALG)') # anonymized dataset is stored in result | 2.273303 | 2 |
src/iBeatles/step2/moving_average.py | ornlneutronimaging/iBeatles | 3 | 6619846 | class MovingAverage:
    def __init__(self, parent=None, o_norm=None):
        """Store references; ``parent`` is presumably the owning GUI object and ``o_norm`` a normalization helper -- TODO confirm against callers."""
        self.parent = parent
        self.o_norm = o_norm
def run(self):
pass | class MovingAverage:
    def __init__(self, parent=None, o_norm=None):
        """Store references; ``parent`` is presumably the owning GUI object and ``o_norm`` a normalization helper -- TODO confirm against callers."""
        self.parent = parent
        self.o_norm = o_norm
def run(self):
pass | none | 1 | 2.301248 | 2 | |
tests/delete_post_test.py | eszter10/conduit | 0 | 6619847 | # TC008 - Delete my existing blog post
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import time
opt = Options()
opt.headless = True  # run Chrome without a visible window (works on displayless CI)
# NOTE(review): the browser is started at import time, before any test runs.
driver = webdriver.Chrome(ChromeDriverManager().install(), options=opt)
def test_delete():
    """TC008 - Delete my existing blog post.

    Signs in via the UI, counts the posts listed under "My Articles",
    deletes the newest post, then asserts the article count dropped by
    exactly one.
    """
    driver.get("http://localhost:1667/")
    time.sleep(5)
    # Test data (placeholders substituted by the test environment)
    email = '<EMAIL>'
    username = 'testuser1'
    pwd = '<PASSWORD>$'
    # Collection of xpath locators
    email_x = '//*[@id="app"]/div/div/div/div/form/fieldset[1]/input'
    pw_x = '//*[@id="app"]/div/div/div/div/form/fieldset[2]/input'
    username_x = '//*[@id="app"]/nav/div/ul/li[4]/a'
    sign_btn_x = '//*[@id="app"]/nav/div/ul/li[2]/a'
    sign_in_btn_x = '//*[@id="app"]/div/div/div/div/form/button'
    my_articles_btn_x = '//*[@id="app"]/div/div[2]/div/div/div[1]/ul/li[1]/a'
    posttilte_x = '//*[@id="app"]/div/div[2]/div/div/div[2]/div/div/div[1]/a/h1'
    delete_btn_x = '//*[@id="app"]/div/div[1]/div/div/span/button'
    article_preview = '//*[@class="article-preview"]'
    try:
        # Sign in
        driver.find_element(By.XPATH, sign_btn_x).click()
        driver.find_element(By.XPATH, email_x).send_keys(email)
        driver.find_element(By.XPATH, pw_x).send_keys(pwd)
        driver.find_element(By.XPATH, sign_in_btn_x).click()
        time.sleep(2)
        # Check login succeeded (navbar shows the username)
        assert username == driver.find_element(By.XPATH, username_x).text
        time.sleep(2)
        # Open "My Articles" and count the existing posts
        driver.find_element(By.XPATH, username_x).click()
        time.sleep(2)
        driver.find_element(By.XPATH, my_articles_btn_x).click()
        time.sleep(2)
        original_num = len(driver.find_elements(By.XPATH, article_preview))
        # Delete the first post in the list
        driver.find_element(By.XPATH, posttilte_x).click()
        time.sleep(2)
        driver.find_element(By.XPATH, delete_btn_x).click()
        time.sleep(2)
        driver.find_element(By.XPATH, username_x).click()
        time.sleep(2)
        # Verify the delete was successful: exactly one post fewer
        new_num = len(driver.find_elements(By.XPATH, article_preview))
        assert new_num + 1 == original_num
    finally:
        # Close the browser window even when an assertion above fails --
        # previously a failing assert leaked the headless Chrome process.
        driver.close()
| # TC008 - Delete my existing blog post
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import time
opt = Options()
opt.headless = True  # run Chrome without a visible window (works on displayless CI)
# NOTE(review): the browser is started at import time, before any test runs.
driver = webdriver.Chrome(ChromeDriverManager().install(), options=opt)
def test_delete():
    """TC008 - Delete my existing blog post.

    Logs in via the UI, counts the posts under "My Articles", deletes the
    newest post and asserts the article count dropped by exactly one.
    NOTE(review): relies on fixed time.sleep() waits and the module-level
    driver created at import time; driver.close() is not reached when an
    assertion fails.
    """
    driver.get("http://localhost:1667/")
    time.sleep(5)
    # Enter testdata
    email = '<EMAIL>'
    username = 'testuser1'
    pwd = '<PASSWORD>$'
    # Collection of xpath
    email_x = '//*[@id="app"]/div/div/div/div/form/fieldset[1]/input'
    pw_x = '//*[@id="app"]/div/div/div/div/form/fieldset[2]/input'
    username_x = '//*[@id="app"]/nav/div/ul/li[4]/a'
    sign_btn_x = '//*[@id="app"]/nav/div/ul/li[2]/a'
    sign_in_btn_x = '//*[@id="app"]/div/div/div/div/form/button'
    my_articles_btn_x = '//*[@id="app"]/div/div[2]/div/div/div[1]/ul/li[1]/a'
    posttilte_x = '//*[@id="app"]/div/div[2]/div/div/div[2]/div/div/div[1]/a/h1'
    delete_btn_x = '//*[@id="app"]/div/div[1]/div/div/span/button'
    article_preview = '//*[@class="article-preview"]'
    # Sign in
    sign_button = driver.find_element(By.XPATH, sign_btn_x)
    sign_button.click()
    driver.find_element(By.XPATH, email_x).send_keys(email)
    driver.find_element(By.XPATH, pw_x).send_keys(pwd)
    sign_in_btn = driver.find_element(By.XPATH, sign_in_btn_x)
    sign_in_btn.click()
    time.sleep(2)
    # Check login is managed
    assert username == driver.find_element(By.XPATH, username_x).text
    time.sleep(2)
    # Find my post
    driver.find_element(By.XPATH, username_x).click()  # username click
    time.sleep(2)
    driver.find_element(By.XPATH, my_articles_btn_x).click()  # my articles click
    time.sleep(2)
    article_number = driver.find_elements(By.XPATH, article_preview)
    original_num = int(len(article_number))
    # Delete my post
    driver.find_element(By.XPATH, posttilte_x).click()  # post title click
    time.sleep(2)
    driver.find_element(By.XPATH, delete_btn_x).click()  # delete button click
    time.sleep(2)
    driver.find_element(By.XPATH, username_x).click()  # username click
    time.sleep(2)
    # Check delete was successful
    article_number = driver.find_elements(By.XPATH, article_preview)
    new_num = int(len(article_number))
    assert new_num + 1 == original_num
    driver.close()
| en | 0.898343 | # TC008 - Delete my existing blog post # Enter testdata # Collection of xpath # Sign in # Check login is managed # Find my post # username click # my articles click # Delete my post # post title click # delete button click # username click # Check delete was successful | 3.097231 | 3 |
dogfacenet-dev/losses.py | angellmethod/DogFaceNet | 79 | 6619848 | """
DogFaceNet
Losses for dog identification and embeddings optimization:
-arcface_loss: defined in https://arxiv.org/abs/1801.07698
implemented in https://github.com/auroua/InsightFace_TF
-triplet_loss: defined in https://arxiv.org/abs/1503.03832
implemented in https://github.com/davidsandberg/facenet
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import math
import numpy as np
import tensorflow as tf
def arcface(out_num):
    """Return an ArcFace loss closure pre-bound to *out_num* output classes.

    The returned callable has exactly the contract of
    :func:`arcface_loss`; it delegates to it so the margin/scaling logic
    lives in one place (the body previously copy-pasted ~35 identical
    lines from ``arcface_loss``).
    """
    def loss(embedding, labels, out_num=out_num, w_init=None, s=64., m=0.5):
        '''
        :param embedding: the input embedding vectors
        :param labels: the input labels, the shape should be eg: (batch_size, 1)
        :param out_num: output class num
        :param w_init: initializer for the 'embedding_weights' variable
        :param s: scaling factor applied to the logits, default is 64
        :param m: the margin value in radians, default is 0.5
        :return: the final calculated logits; feed this straight into tf.nn.softmax
        '''
        # Delegate to the single module-level implementation.
        return arcface_loss(embedding, labels, out_num, w_init=w_init, s=s, m=m)
    return loss
def arcface_loss(embedding, labels, out_num, w_init=None, s=64., m=0.5):
    '''
    ArcFace additive-angular-margin logits (https://arxiv.org/abs/1801.07698).

    Builds ``s * cos(theta + m)`` for the ground-truth class and
    ``s * cos(theta)`` for every other class, where ``theta`` is the angle
    between the L2-normalised embedding and the L2-normalised weight column.

    :param embedding: the input embedding vectors, shape (batch_size, embedding_dim)
    :param labels: the input labels, the shape should be eg: (batch_size, 1)
    :param out_num: output class num (columns of the weight matrix)
    :param w_init: initializer for the 'embedding_weights' variable
    :param s: scaling factor applied to the logits, default is 64
    :param m: the angular margin in radians, default is 0.5
    :return: the final calculated logits; feed this straight into tf.nn.softmax
    '''
    cos_m = math.cos(m)
    sin_m = math.sin(m)
    mm = sin_m * m  # sin(m)*m: slope offset of the linear fallback used when theta + m would exceed pi
    threshold = math.cos(math.pi - m)  # cos(pi - m); cos_t below this means theta + m > pi
    with tf.variable_scope('loss'):
        # L2-normalise inputs and weights so the matmul below yields cos(theta) directly
        embedding_norm = tf.norm(embedding, axis=1, keepdims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keepdims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos(theta + m) via the angle-addition identity: cos(t)cos(m) - sin(t)sin(m)
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t2 = tf.square(cos_t, name='cos_2')
        sin_t2 = tf.subtract(1., cos_t2, name='sin_2')
        sin_t = tf.sqrt(sin_t2, name='sin_t')  # NOTE(review): sqrt has an unbounded gradient at 0 -- an epsilon may be warranted
        cos_mt = s * tf.subtract(tf.multiply(cos_t, cos_m), tf.multiply(sin_t, sin_m), name='cos_mt')
        # this condition controls that theta+m stays in range [0, pi]:
        # 0 <= theta+m <= pi  <=>  -m <= theta <= pi-m
        cond_v = cos_t - threshold
        cond = tf.cast(tf.nn.relu(cond_v, name='if_else'), dtype=tf.bool)
        keep_val = s*(cos_t - mm)  # monotonic linear fallback outside the valid range
        cos_mt_temp = tf.where(cond, cos_mt, keep_val)
        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')  # 1 at the ground-truth class, else 0
        # mask = tf.squeeze(mask, 1)
        inv_mask = tf.subtract(1., mask, name='inverse_mask')
        s_cos_t = tf.multiply(s, cos_t, name='scalar_cos_t')
        # margin-adjusted logit for the true class, plain scaled cosine elsewhere
        output = tf.add(tf.multiply(s_cos_t, inv_mask), tf.multiply(cos_mt_temp, mask), name='arcface_loss_output')
        return output
def triplet_loss(anchor, positive, negative, alpha):
    """Compute the FaceNet triplet loss (https://arxiv.org/abs/1503.03832).

    Args:
        anchor: the embeddings for the anchor images.
        positive: the embeddings for the positive images.
        negative: the embeddings for the negative images.
        alpha: margin enforced between positive and negative distances.

    Returns:
        the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        # Squared Euclidean distance from the anchor to each partner.
        d_pos = tf.reduce_sum(tf.square(anchor - positive), axis=1)
        d_neg = tf.reduce_sum(tf.square(anchor - negative), axis=1)
        # Hinge on (d_pos - d_neg + alpha), averaged over the batch.
        hinge = tf.maximum(d_pos - d_neg + alpha, 0.0)
        return tf.reduce_mean(hinge, axis=0)
def deviation_loss(dict_pred):
    """Embedding-deviation loss computed with NumPy.

    ``dict_pred`` is an iterable of ``(key, (label, pred))`` pairs, where
    ``pred`` is an embedding vector and ``label`` its class id.  The loss
    is the sum of two terms:

      * attraction: the distance of every prediction to its own class
        center of mass, plus
      * repulsion: minus the summed log-distance of every class center to
        the global center of all class centers.

    Returns 0.0 for empty input.

    Fixes over the previous version: per-class accumulators are lists (the
    old tuples raised TypeError on any repeated label), the attraction
    term compares against the class *center* instead of the whole
    ``[center, count]`` pair, counts are no longer mixed into the global
    center, the input is materialised so generators are not exhausted
    between the two passes, and empty input no longer divides by zero.
    """
    pairs = list(dict_pred)  # materialise: we iterate twice
    if not pairs:
        return 0.0
    # First pass: per-class running sum and count (lists are mutable).
    class_pred = {}
    for _, (label, pred) in pairs:
        pred = np.asarray(pred, dtype=float)
        if label in class_pred:
            class_pred[label][0] = class_pred[label][0] + pred
            class_pred[label][1] += 1
        else:
            class_pred[label] = [pred, 1]
    # Turn the sums into per-class centers of mass.
    for label in class_pred:
        class_pred[label][0] = class_pred[label][0] / class_pred[label][1]
    # Repulsion: push class centers away from their common center.
    centers = np.array([v[0] for v in class_pred.values()])
    classes_center = np.sum(centers, axis=0) / len(class_pred)
    # Per-class distances are intended here; the old code collapsed all
    # centers into a single Frobenius norm.
    classes_loss = -np.sum(np.log(np.linalg.norm(centers - classes_center, axis=-1)))
    # Attraction: pull each prediction towards its class center.
    sum_class_loss = 0.0
    for _, (label, pred) in pairs:
        sum_class_loss += np.linalg.norm(np.asarray(pred, dtype=float) - class_pred[label][0])
    return classes_loss + sum_class_loss
return classes_loss + sum_class_loss | """
DogFaceNet
Losses for dog identification and embeddings optimization:
-arcface_loss: defined in https://arxiv.org/abs/1801.07698
implemented in https://github.com/auroua/InsightFace_TF
-triplet_loss: defined in https://arxiv.org/abs/1503.03832
implemented in https://github.com/davidsandberg/facenet
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import math
import numpy as np
import tensorflow as tf
def arcface(out_num):
    """Return an ArcFace loss closure pre-bound to *out_num* output classes.

    The returned callable has exactly the contract of
    :func:`arcface_loss`; it delegates to it so the margin/scaling logic
    lives in one place (the body previously copy-pasted ~35 identical
    lines from ``arcface_loss``).
    """
    def loss(embedding, labels, out_num=out_num, w_init=None, s=64., m=0.5):
        '''
        :param embedding: the input embedding vectors
        :param labels: the input labels, the shape should be eg: (batch_size, 1)
        :param out_num: output class num
        :param w_init: initializer for the 'embedding_weights' variable
        :param s: scaling factor applied to the logits, default is 64
        :param m: the margin value in radians, default is 0.5
        :return: the final calculated logits; feed this straight into tf.nn.softmax
        '''
        # Delegate to the single module-level implementation.
        return arcface_loss(embedding, labels, out_num, w_init=w_init, s=s, m=m)
    return loss
def arcface_loss(embedding, labels, out_num, w_init=None, s=64., m=0.5):
    '''
    ArcFace additive-angular-margin logits (https://arxiv.org/abs/1801.07698).

    Builds ``s * cos(theta + m)`` for the ground-truth class and
    ``s * cos(theta)`` for every other class, where ``theta`` is the angle
    between the L2-normalised embedding and the L2-normalised weight column.

    :param embedding: the input embedding vectors, shape (batch_size, embedding_dim)
    :param labels: the input labels, the shape should be eg: (batch_size, 1)
    :param out_num: output class num (columns of the weight matrix)
    :param w_init: initializer for the 'embedding_weights' variable
    :param s: scaling factor applied to the logits, default is 64
    :param m: the angular margin in radians, default is 0.5
    :return: the final calculated logits; feed this straight into tf.nn.softmax
    '''
    cos_m = math.cos(m)
    sin_m = math.sin(m)
    mm = sin_m * m  # sin(m)*m: slope offset of the linear fallback used when theta + m would exceed pi
    threshold = math.cos(math.pi - m)  # cos(pi - m); cos_t below this means theta + m > pi
    with tf.variable_scope('loss'):
        # L2-normalise inputs and weights so the matmul below yields cos(theta) directly
        embedding_norm = tf.norm(embedding, axis=1, keepdims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keepdims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos(theta + m) via the angle-addition identity: cos(t)cos(m) - sin(t)sin(m)
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t2 = tf.square(cos_t, name='cos_2')
        sin_t2 = tf.subtract(1., cos_t2, name='sin_2')
        sin_t = tf.sqrt(sin_t2, name='sin_t')  # NOTE(review): sqrt has an unbounded gradient at 0 -- an epsilon may be warranted
        cos_mt = s * tf.subtract(tf.multiply(cos_t, cos_m), tf.multiply(sin_t, sin_m), name='cos_mt')
        # this condition controls that theta+m stays in range [0, pi]:
        # 0 <= theta+m <= pi  <=>  -m <= theta <= pi-m
        cond_v = cos_t - threshold
        cond = tf.cast(tf.nn.relu(cond_v, name='if_else'), dtype=tf.bool)
        keep_val = s*(cos_t - mm)  # monotonic linear fallback outside the valid range
        cos_mt_temp = tf.where(cond, cos_mt, keep_val)
        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')  # 1 at the ground-truth class, else 0
        # mask = tf.squeeze(mask, 1)
        inv_mask = tf.subtract(1., mask, name='inverse_mask')
        s_cos_t = tf.multiply(s, cos_t, name='scalar_cos_t')
        # margin-adjusted logit for the true class, plain scaled cosine elsewhere
        output = tf.add(tf.multiply(s_cos_t, inv_mask), tf.multiply(cos_mt_temp, mask), name='arcface_loss_output')
        return output
def triplet_loss(anchor, positive, negative, alpha):
    """Compute the FaceNet triplet loss (https://arxiv.org/abs/1503.03832).

    Args:
        anchor: the embeddings for the anchor images.
        positive: the embeddings for the positive images.
        negative: the embeddings for the negative images.
        alpha: margin enforced between positive and negative distances.

    Returns:
        the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        # Squared Euclidean distance from the anchor to each partner.
        d_pos = tf.reduce_sum(tf.square(anchor - positive), axis=1)
        d_neg = tf.reduce_sum(tf.square(anchor - negative), axis=1)
        # Hinge on (d_pos - d_neg + alpha), averaged over the batch.
        hinge = tf.maximum(d_pos - d_neg + alpha, 0.0)
        return tf.reduce_mean(hinge, axis=0)
def deviation_loss(dict_pred):
    """Embedding-deviation loss computed with NumPy.

    ``dict_pred`` is an iterable of ``(key, (label, pred))`` pairs, where
    ``pred`` is an embedding vector and ``label`` its class id.  The loss
    is the sum of two terms:

      * attraction: the distance of every prediction to its own class
        center of mass, plus
      * repulsion: minus the summed log-distance of every class center to
        the global center of all class centers.

    Returns 0.0 for empty input.

    Fixes over the previous version: per-class accumulators are lists (the
    old tuples raised TypeError on any repeated label), the attraction
    term compares against the class *center* instead of the whole
    ``[center, count]`` pair, counts are no longer mixed into the global
    center, the input is materialised so generators are not exhausted
    between the two passes, and empty input no longer divides by zero.
    """
    pairs = list(dict_pred)  # materialise: we iterate twice
    if not pairs:
        return 0.0
    # First pass: per-class running sum and count (lists are mutable).
    class_pred = {}
    for _, (label, pred) in pairs:
        pred = np.asarray(pred, dtype=float)
        if label in class_pred:
            class_pred[label][0] = class_pred[label][0] + pred
            class_pred[label][1] += 1
        else:
            class_pred[label] = [pred, 1]
    # Turn the sums into per-class centers of mass.
    for label in class_pred:
        class_pred[label][0] = class_pred[label][0] / class_pred[label][1]
    # Repulsion: push class centers away from their common center.
    centers = np.array([v[0] for v in class_pred.values()])
    classes_center = np.sum(centers, axis=0) / len(class_pred)
    # Per-class distances are intended here; the old code collapsed all
    # centers into a single Frobenius norm.
    classes_loss = -np.sum(np.log(np.linalg.norm(centers - classes_center, axis=-1)))
    # Attraction: pull each prediction towards its class center.
    sum_class_loss = 0.0
    for _, (label, pred) in pairs:
        sum_class_loss += np.linalg.norm(np.asarray(pred, dtype=float) - class_pred[label][0])
    return classes_loss + sum_class_loss
return classes_loss + sum_class_loss | en | 0.673951 | DogFaceNet Losses for dog identification and embeddings optimization: -arcface_loss: defined in https://arxiv.org/abs/1801.07698 implemented in https://github.com/auroua/InsightFace_TF -triplet_loss: defined in https://arxiv.org/abs/1503.03832 implemented in https://github.com/davidsandberg/facenet Licensed under the MIT License (see LICENSE for details) Written by <NAME> :param embedding: the input embedding vectors :param labels: the input labels, the shape should be eg: (batch_size, 1) :param s: scalar value default is 64 :param out_num: output class num :param m: the margin value, default is 0.5 :return: the final cacualted output, this output is send into the tf.nn.softmax directly # issue 1 # inputs and weights norm # cos(theta+m) # this condition controls the theta+m should be in range [0, pi] # 0<=theta+m<=pi # -m<=theta<=pi-m # mask = tf.squeeze(mask, 1) :param embedding: the input embedding vectors :param labels: the input labels, the shape should be eg: (batch_size, 1) :param s: scalar value default is 64 :param out_num: output class num :param m: the margin value, default is 0.5 :return: the final cacualted output, this output is send into the tf.nn.softmax directly # issue 1 # inputs and weights norm # cos(theta+m) # this condition controls the theta+m should be in range [0, pi] # 0<=theta+m<=pi # -m<=theta<=pi-m # mask = tf.squeeze(mask, 1) Calculate the triplet loss according to the FaceNet paper Args: anchor: the embeddings for the anchor images. positive: the embeddings for the positive images. negative: the embeddings for the negative images. Returns: the triplet loss according to the FaceNet paper as a float tensor. # Compute all center of mass # Compute all classes center of mass # Compute | 2.425272 | 2 |
dbfm/client.py | xinhaoyuan/dbfm | 0 | 6619849 | # -*- coding: utf-8 -*-
import requests
from . import common
class DoubanClient(object):
    """Minimal client for the douban.fm playlist API.

    Keeps an internal FIFO queue of song dicts and refills it from the
    remote playlist endpoint on demand.
    """

    def __init__(self, login_data):
        """:param login_data: mapping holding at least a ``'cookies'`` entry."""
        self.login_data = login_data
        self._cookies = self.login_data['cookies']
        self._channel_id = 2  # default channel
        self._queue = []

    def _request_url(self, ptype, **data):
        """Query the playlist API.

        :param ptype: operation code sent as ``type``:
                      n  playlist empty, fetch a new list
                      e  finished playing the song
                      b  ban the song, fetch a new list
                      s  skip to the next song, fetch a new list
                      r  mark the song as liked
                      u  unmark the song as liked
        :param data: optional extras; only ``sid`` (song id) is forwarded.
        :return: the list of songs from the response, or ``None`` when the
                 API reports an error (``r != 0``) or returns no songs.
        """
        options = {
            'type': ptype,
            'pt': '3.1',
            'channel': self._channel_id,
            'pb': '320',
            'from': 'mainsite',
            'r': '',
            'kbps': '320',
            'app_name': 'radio_website',
            'client': 's:mainsite|y:3.0',
            'version': '100'
        }
        if 'sid' in data:
            options['sid'] = data['sid']
        url = 'https://douban.fm/j/v2/playlist'
        # Network/JSON errors propagate to the caller, as before.  The old
        # ``while True`` loop never actually retried (exceptions were
        # re-raised and the loop body always ended in break/return), so it
        # has been removed as dead scaffolding.
        resp = requests.get(url, params=options,
                            cookies=self._cookies, headers=common.HEADERS)
        req_json = resp.json()
        if req_json.get('r') == 0 and req_json.get('song'):
            return req_json['song']
        return None

    def append_songs(self, songs):
        """Append *songs* to the internal play queue."""
        self._queue.extend(songs)

    def refresh_playlist(self, data=None):
        """Replace the queue with *data*, or with a fresh list from the API."""
        self._queue = []
        if data is None:
            data = self._request_url("n")
        if data:
            # _request_url returns None on API errors/empty playlists.
            # Previously that None crashed append_songs with a TypeError;
            # skipping the append lets get_next_song's retry loop work.
            self.append_songs(data)

    def get_next_song(self, channel_id=None):
        """Pop and return the next song, optionally switching channel first.

        Retries the playlist fetch up to 3 times, then raises.
        """
        if channel_id is not None:
            self.set_channel_id(channel_id)
        retry = 3
        while not self._queue and retry > 0:
            self.refresh_playlist()
            retry -= 1
        if not self._queue:
            raise Exception("Cannot get new song")
        return self._queue.pop(0)

    def set_channel_id(self, id):
        """Switch to channel *id* and drop any queued songs."""
        self._channel_id = id
        self._queue = []

    def get_channel_id(self):
        """Return the current channel id."""
        return self._channel_id

    def rate(self, sid, rating):
        """Rate song *sid*: negative = ban, 0 = unlike, positive = like."""
        if rating < 0:
            # Ban: the API hands back a fresh playlist; use it right away.
            self.refresh_playlist(self._request_url("b", sid=sid))
        elif rating == 0:
            self._request_url("u", sid=sid)
        else:
            self._request_url("r", sid=sid)
| # -*- coding: utf-8 -*-
import requests
from . import common
class DoubanClient(object):
    """Minimal client for the douban.fm playlist API; keeps a FIFO queue of songs."""
    def __init__(self, login_data):
        # login_data: mapping holding at least a 'cookies' entry
        self.login_data = login_data
        self._cookies = self.login_data['cookies']
        self._channel_id = 2  # default channel
        self._queue = []
        pass
    def _request_url(self, ptype, **data):
        """
        Wrapper that sends the request payload to the playlist API.
        :param ptype: n  playlist is empty, return a new list
                      e  finished playing the song
                      b  do not play again (ban), return a new list
                      s  next song, return a new list
                      r  mark as liked
                      u  unmark as liked
        """
        options = {
            'type': ptype,
            'pt': '3.1',
            'channel': self._channel_id,
            'pb': '320',
            'from': 'mainsite',
            'r': '',
            'kbps': '320',
            'app_name': 'radio_website',
            'client': 's:mainsite|y:3.0',
            'version': '100'
        }
        if 'sid' in data:
            options['sid'] = data['sid']
            pass
        url = 'https://douban.fm/j/v2/playlist'
        # NOTE(review): this loop never retries -- exceptions re-raise and
        # every other path breaks or returns on the first iteration.
        while True:
            try:
                s = requests.get(url, params=options,
                                 cookies=self._cookies, headers = common.HEADERS)
                req_json = s.json()
                if req_json['r'] == 0:
                    if 'song' not in req_json or not req_json['song']:
                        break
                    return req_json['song']
            except Exception as err:
                raise err
            break
        # API error (r != 0) or empty playlist
        return None
    def append_songs(self, songs):
        # Append songs to the internal play queue.
        for s in songs:
            self._queue.append(s)
        pass
    def refresh_playlist(self, data = None):
        # Replace the queue with *data*, or with a fresh list from the API.
        # NOTE(review): _request_url may return None, which would make
        # append_songs raise TypeError -- confirm intended behaviour.
        self._queue = []
        if data is None:
            self.append_songs(self._request_url("n"))
        else:
            self.append_songs(data)
        pass
    def get_next_song(self, channel_id = None):
        # Pop and return the next song, optionally switching channel first;
        # retries the playlist fetch up to 3 times before raising.
        if channel_id is not None:
            self.set_channel_id(channel_id)
        retry = 3
        while len(self._queue) == 0 and retry > 0:
            self.refresh_playlist()
            retry -= 1
            pass
        if len(self._queue) == 0:
            raise Exception("Cannot get new song")
        ret = self._queue.pop(0)
        return ret
    def set_channel_id(self, id):
        # Switch to channel *id* and drop any queued songs.
        self._channel_id = id
        self._queue = []
        pass
    def get_channel_id(self):
        # Return the current channel id.
        return self._channel_id
    def rate(self, sid, rating):
        # Rate song *sid*: negative = ban, 0 = unlike, positive = like.
        if rating < 0:
            self.refresh_playlist(self._request_url("b", sid = sid))
        elif rating == 0:
            self._request_url("u", sid = sid)
        else:
            self._request_url("r", sid = sid)
| zh | 0.978604 | # -*- coding: utf-8 -*- 这里包装了一个函数,发送post_data :param ptype: n 列表无歌曲,返回新列表 e 发送歌曲完毕 b 不再播放,返回新列表 s 下一首,返回新的列表 r 标记喜欢 u 取消标记喜欢 | 2.537337 | 3 |
pricer/__init__.py | aurbano/elefund | 0 | 6619850 | <gh_stars>0
from .pricer import Pricer
from .ticker import MarketTicker | from .pricer import Pricer
from .ticker import MarketTicker | none | 1 | 1.065384 | 1 |