text stringlengths 0 1.05M | meta dict |
|---|---|
# analyze log files from the apache web server.
# log lines are in the 'combined' format.
from apachelogparser import ApacheLogParser
from useragent import UserAgent
import time
def ymd(epoch):
    """Render a unix epoch timestamp as a 'YYYYMMDD' string (UTC).

    ymd(1258308085) => '20091115'
    """
    return time.strftime("%Y%m%d", time.gmtime(epoch))
def map(key, logline):
    """Map one Apache 'combined'-format log line to a zohmg record.

    key     = mapper key (unused; kept for the mapper framework interface)
    logline = one raw log line

    Yields (timestamp, dimensions, measurements) where timestamp is a
    'yyyymmdd' string, dimensions are strings and measurements are ints.
    Malformed lines are silently skipped (nothing is yielded).

    NOTE: the name `map` shadows the builtin, but it is the entry point the
    mapper framework looks up by name, so it must stay.
    """
    try:
        parsed = ApacheLogParser().parse(logline)
    except Exception:
        # A bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception so only real parse failures are skipped.
        return
    # timestamp of format yyyymmdd.
    timestamp = ymd(parsed['epoch'])
    # dimension attributes are strings.
    dimensions = {
        'host': parsed['host'],
        'method': parsed['method'],
        'path': parsed['path'],
        'status': parsed['status'],
        'referrer': parsed['referrer'],
        'agent': UserAgent(parsed['agent']).classify(),
    }
    # measurements are integers.
    measurements = {
        'bytes': int(parsed['size']),
        'requests': 1,
    }
    yield timestamp, dimensions, measurements
| {
"repo_name": "zohmg/zohmg",
"path": "examples/apache/mappers/apache.py",
"copies": "1",
"size": "1093",
"license": "apache-2.0",
"hash": -1824033890397719600,
"line_mean": 29.3611111111,
"line_max": 66,
"alpha_frac": 0.6660567246,
"autogenerated": false,
"ratio": 4.003663003663004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008752766617298364,
"num_lines": 36
} |
# Analyze reflectance signal data in an index
import os
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import fatal_error
from plotnine import labs
from plantcv.plantcv.visualize import histogram
from plantcv.plantcv import deprecation_warning
def analyze_index(index_array, mask, bins=100, min_bin=0, max_bin=1, histplot=None, label="default"):
    """This extracts the hyperspectral index statistics and writes the values as observations out to
    the Outputs class.
    Inputs:
    index_array = Instance of the Spectral_data class, usually the output from pcv.hyperspectral.extract_index
    mask        = Binary mask made from selected contours
    histplot    = (to be deprecated) if True plots histogram of intensity values
    bins        = optional, number of classes to divide spectrum into
    min_bin     = optional, minimum bin value ("auto" or user input minimum value)
    max_bin     = optional, maximum bin value ("auto" or user input maximum value)
    label       = optional label parameter, modifies the variable name of observations recorded
    :param index_array: __main__.Spectral_data
    :param mask: numpy array
    :param histplot: bool
    :param bins: int
    :param max_bin: float, str
    :param min_bin: float, str
    :param label: str
    :return analysis_image: ggplot, None
    """
    if histplot is not None:
        deprecation_warning("'histplot' will be deprecated in a future version of PlantCV. "
                            "This function creates a histogram by default.")
    # Temporarily silence debug output while intermediate plots are built,
    # then restore it before the final _debug call below.
    debug = params.debug
    params.debug = None
    if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
        fatal_error("Mask should be a binary image of 0 and nonzero values.")
    if len(np.shape(index_array.array_data)) > 2:
        fatal_error("index_array data should be a grayscale image.")
    # Mask data and collect statistics about pixels within the masked image
    masked_array = index_array.array_data[np.where(mask > 0)]
    # Drop NaN/inf pixels so the statistics below are well-defined
    masked_array = masked_array[np.isfinite(masked_array)]
    index_mean = np.nanmean(masked_array)
    index_median = np.nanmedian(masked_array)
    index_std = np.nanstd(masked_array)
    # Set starting point and max bin values
    maxval = max_bin
    b = min_bin
    # Calculate observed min and max pixel values of the masked array
    observed_max = np.nanmax(masked_array)
    observed_min = np.nanmin(masked_array)
    # Auto calculate max_bin if set
    if type(max_bin) is str and (max_bin.upper() == "AUTO"):
        maxval = float(round(observed_max, 8))  # Auto bins will detect maxval to use for calculating labels/bins
    if type(min_bin) is str and (min_bin.upper() == "AUTO"):
        b = float(round(observed_min, 8))  # If bin_min is auto then overwrite starting value
    # Print a warning if observed min/max outside user defined range
    if observed_max > maxval or observed_min < b:
        print("WARNING!!! The observed range of pixel values in your masked index provided is [" + str(observed_min) +
              ", " + str(observed_max) + "] but the user defined range of bins for pixel frequencies is [" + str(b) +
              ", " + str(maxval) + "]. Adjust min_bin and max_bin in order to avoid cutting off data being collected.")
    # Calculate histogram
    hist_fig, hist_data = histogram(index_array.array_data, mask=mask, bins=bins, lower_bound=b, upper_bound=maxval,
                                    hist_data=True)
    bin_labels, hist_percent = hist_data['pixel intensity'].tolist(), hist_data['proportion of pixels (%)'].tolist()
    # Restore user debug setting
    params.debug = debug
    hist_fig = hist_fig + labs(x='Index Reflectance', y='Proportion of pixels (%)')
    # Print or plot histogram
    _debug(visual=hist_fig,
           filename=os.path.join(params.debug_outdir, str(params.device) + index_array.array_type + "_hist.png"))
    analysis_image = hist_fig
    # Record summary statistics as observations keyed by the index type
    outputs.add_observation(sample=label, variable='mean_' + index_array.array_type,
                            trait='Average ' + index_array.array_type + ' reflectance',
                            method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
                            value=float(index_mean), label='none')
    outputs.add_observation(sample=label, variable='med_' + index_array.array_type,
                            trait='Median ' + index_array.array_type + ' reflectance',
                            method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
                            value=float(index_median), label='none')
    outputs.add_observation(sample=label, variable='std_' + index_array.array_type,
                            trait='Standard deviation ' + index_array.array_type + ' reflectance',
                            method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
                            value=float(index_std), label='none')
    outputs.add_observation(sample=label, variable='index_frequencies_' + index_array.array_type,
                            trait='index frequencies', method='plantcv.plantcv.analyze_index', scale='frequency',
                            datatype=list, value=hist_percent, label=bin_labels)
    # Print or plot the masked image
    _debug(visual=masked_array,
           filename=os.path.join(params.debug_outdir, str(params.device) + index_array.array_type + ".png"))
    # Store images
    outputs.images.append(analysis_image)
    return analysis_image
| {
"repo_name": "stiphyMT/plantcv",
"path": "plantcv/plantcv/hyperspectral/analyze_index.py",
"copies": "2",
"size": "5653",
"license": "mit",
"hash": 787654139049483400,
"line_mean": 46.5042016807,
"line_max": 119,
"alpha_frac": 0.6562886963,
"autogenerated": false,
"ratio": 3.995053003533569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.565134169983357,
"avg_score": null,
"num_lines": null
} |
# Analyze reflectance signal hyperspectral images
import os
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plotnine import ggplot, aes, geom_line, scale_x_continuous
from plantcv.plantcv import deprecation_warning
from plantcv.plantcv._debug import _debug
def analyze_spectral(array, mask, histplot=None, label="default"):
    """Analyze the hyperspectral reflectance values of the masked pixels.

    Computes global reflectance statistics and per-band statistics over the
    masked region, records them as observations in the Outputs class, and
    returns a plot of the mean spectrum.

    Inputs:
    array    = Hyperspectral data instance
    mask     = Binary mask made from selected contours
    histplot = (to be deprecated) if True plots histogram of reflectance intensity values
    label    = optional label parameter, modifies the variable name of observations recorded
    Returns:
    analysis_img = output image (ggplot of mean spectrum per wavelength)
    :param array: __main__.Spectral_data
    :param mask: numpy array
    :param histplot: bool
    :param label: str
    :return analysis_img: ggplot
    """
    if histplot is not None:
        deprecation_warning("'histplot' will be deprecated in a future version of PlantCV. "
                            "Instead of a histogram this function plots the mean of spectra in the masked area.")
    array_data = array.array_data
    # Spectra of every pixel inside the mask; rows are pixels, columns are bands
    wavelength_data = array_data[np.where(mask > 0)]
    # Per-band statistics across the masked pixels
    wavelength_freq = wavelength_data.mean(axis=0)
    max_per_band = wavelength_data.max(axis=0)
    min_per_band = wavelength_data.min(axis=0)
    std_per_band = wavelength_data.std(axis=0)
    # Identify smallest and largest wavelengths available to scale the x-axis
    min_wavelength = array.min_wavelength
    max_wavelength = array.max_wavelength
    # Convert numpy scalars to plain Python floats so the observation values
    # serialize cleanly
    new_wavelengths = []
    new_freq = []
    new_std_per_band = []
    new_max_per_band = []
    new_min_per_band = []
    for i, wavelength in enumerate(array.wavelength_dict):
        new_wavelengths.append(wavelength)
        new_freq.append((wavelength_freq[i]).astype(float))
        new_std_per_band.append(std_per_band[i].astype(float))
        new_max_per_band.append(max_per_band[i].astype(float))
        new_min_per_band.append(min_per_band[i].astype(float))
    # Global reflectance statistics over all masked pixels and bands
    avg_reflectance = np.average(wavelength_data)
    std_reflectance = np.std(wavelength_data)
    median_reflectance = np.median(wavelength_data)
    wavelength_labels = list(array.wavelength_dict.keys())
    # Store data into outputs class
    outputs.add_observation(sample=label, variable='global_mean_reflectance', trait='global mean reflectance',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='reflectance',
                            datatype=float, value=float(avg_reflectance), label='reflectance')
    outputs.add_observation(sample=label, variable='global_median_reflectance', trait='global median reflectance',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='reflectance',
                            datatype=float, value=float(median_reflectance), label='reflectance')
    # BUG FIX: 'global_spectral_std' was previously recorded twice with the
    # same value; the second call overwrote the first, so only the surviving
    # (second) observation is kept here.
    outputs.add_observation(sample=label, variable='global_spectral_std', trait='pixel-wise standard deviation ',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='None', datatype=float,
                            value=float(std_reflectance), label='reflectance')
    outputs.add_observation(sample=label, variable='max_reflectance', trait='maximum reflectance per band',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='reflectance', datatype=list,
                            value=new_max_per_band, label=wavelength_labels)
    outputs.add_observation(sample=label, variable='min_reflectance', trait='minimum reflectance per band',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='reflectance', datatype=list,
                            value=new_min_per_band, label=wavelength_labels)
    outputs.add_observation(sample=label, variable='spectral_std', trait='pixel-wise standard deviation per band',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='None', datatype=list,
                            value=new_std_per_band, label=wavelength_labels)
    outputs.add_observation(sample=label, variable='spectral_frequencies', trait='spectral frequencies',
                            method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='frequency', datatype=list,
                            value=new_freq, label=wavelength_labels)
    # Plot mean reflectance as a function of wavelength
    dataset = pd.DataFrame({'Wavelength (' + array.wavelength_units + ')': new_wavelengths,
                            'Reflectance': wavelength_freq})
    mean_spectra = (ggplot(data=dataset,
                           mapping=aes(x='Wavelength (' + array.wavelength_units + ')', y='Reflectance'))
                    + geom_line(color='purple')
                    + scale_x_continuous(breaks=list(range(int(np.floor(min_wavelength)),
                                                           int(np.ceil(max_wavelength)), 50)))
                    )
    analysis_img = mean_spectra
    _debug(visual=mean_spectra, filename=os.path.join(params.debug_outdir, str(params.device) + "_mean_spectra.png"))
    return analysis_img
| {
"repo_name": "danforthcenter/plantcv",
"path": "plantcv/plantcv/hyperspectral/analyze_spectral.py",
"copies": "2",
"size": "6135",
"license": "mit",
"hash": 8260687495565371000,
"line_mean": 51.8879310345,
"line_max": 120,
"alpha_frac": 0.6588427058,
"autogenerated": false,
"ratio": 4.153689911983751,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002284756246813111,
"num_lines": 116
} |
"""Analyzer for audio feature extraction using Essentia"""
from __future__ import print_function
import os
import mimetypes
import subprocess
import json
from damn_at import logger
from damn_at import AssetId, FileId, FileDescription, AssetDescription
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzers.audio import metadata
import tempfile
def get_supported_formats():
    """Return the audio/* mime types for every format ffmpeg reports.

    Runs `ffmpeg -formats`, maps each listed extension to a mime type and
    keeps only the audio ones. Returns [] when ffmpeg fails or is missing.
    """
    try:
        proc = subprocess.Popen(['ffmpeg', '-formats'], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            logger.debug(
                "GetFeatureExtractorTypes failed with error code %d! "
                % proc.returncode,
                out,
                err
            )
            return []
    except OSError as oserror:
        logger.debug("GetFeatureExtractorTypes failed! %s", oserror)
        return []
    mimes = []
    # The first 4 lines of ffmpeg's output are headers; each remaining line
    # lists flags followed by the format extension.
    for line in out.decode("utf-8").split("\n")[4:]:
        fields = line.split()
        if len(fields) > 1:
            guessed = mimetypes.guess_type('file.' + fields[1], False)[0]
            if guessed and guessed.startswith('audio/'):
                mimes.append(guessed)
    return mimes
def get_extracted_ll_features(ofile):
    """Return the extracted low-level features from the Essentia JSON file
    `ofile` as a flat dictionary.

    Picks out bpm, beats_count, chord, key, average_loudness and lossless
    when the corresponding sections are present; missing sections are
    simply skipped.
    """
    with open(ofile, 'r') as handle:
        content = json.load(handle)
    features = {}
    rhythm = content.get('rhythm', {})
    if 'bpm' in rhythm:
        features['bpm'] = int(rhythm['bpm'])
    if 'beats_count' in rhythm:
        features['beats_count'] = rhythm['beats_count']
    tonal = content.get('tonal', {})
    if 'chords_key' in tonal and 'chords_scale' in tonal:
        features['chord'] = \
            str(tonal['chords_key']) + ' ' + str(tonal['chords_scale'])
    if 'key_key' in tonal and 'key_scale' in tonal:
        features['key'] = \
            str(tonal['key_key']) + ' ' + str(tonal['key_scale'])
    lowlevel = content.get('lowlevel', {})
    if 'average_loudness' in lowlevel:
        features['average_loudness'] = lowlevel['average_loudness']
    audio_props = content.get('metadata', {}).get('audio_properties', {})
    if 'lossless' in audio_props:
        features['lossless'] = audio_props['lossless']
    return features
def get_extracted_hl_features(ofile):
    """Return the extracted high-level features from the Essentia JSON file
    `ofile` as a dictionary mapping each descriptor name to its value."""
    with open(ofile, 'r') as handle:
        content = json.load(handle)
    return {name: descriptor['value']
            for name, descriptor in content['highlevel'].items()}
def extract_feature(ex, in_file, out_file, conf_file=''):
    """Extract features using the `ex` extractor and store them in `out_file`.

    ex        = extractor executable name or path
    in_file   = input file (audio, or low-level JSON for the SVM extractor)
    out_file  = destination file for the extracted features
    conf_file = optional extractor profile/configuration file

    Errors are reported (printed/logged) rather than raised; always
    returns None.
    """
    try:
        pro = subprocess.Popen([ex, in_file, out_file, conf_file],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        # BUG FIX: communicate() returns (stdout, stderr); the original
        # unpacked them as `err, out`, swapping the two streams in all
        # messages below.
        out, err = pro.communicate()
        if pro.returncode != 0:
            print("FeatureExtractor failed with error code %d! "
                  % pro.returncode,
                  out,
                  err)
        else:
            logger.debug("Extracting audio features: \n%s",
                         out.decode("utf-8"))
    except OSError as e:
        print(('E: Feature Extraction failed %s with error %s'
               % (in_file, e)))
class SoundAnalyzer(IAnalyzer):
    """Class for sound analyzer called in the analyzer"""
    # Mime types this plugin handles, derived from ffmpeg's format list.
    handled_types = get_supported_formats()

    def __init__(self):
        IAnalyzer.__init__(self)
        # Essentia extractor executables: low-level, then high-level (SVM).
        self.ll_ex = 'streaming_extractor_music'
        self.hl_ex = 'essentia_streaming_extractor_music_svm'
        # Profile shipped next to this module, passed to the SVM extractor.
        self.conf = os.path.join(os.path.dirname(__file__), 'profile.conf')

    def activate(self):
        pass

    def analyze(self, anURI):
        """Extract low- and high-level audio features from the file at anURI
        and return a FileDescription with one asset carrying the metadata."""
        fileid = FileId(filename=os.path.abspath(anURI))
        file_descr = FileDescription(file=fileid)
        file_descr.assets = []
        asset_descr = AssetDescription(asset=AssetId(
            subname=os.path.basename(anURI),
            mimetype=mimetypes.guess_type(anURI, False)[0],
            file=fileid)
        )
        # Temporary JSON outputs; /dev/shm keeps them in memory and
        # delete=True removes them when closed below.
        output_file_ll = tempfile.NamedTemporaryFile(
            suffix='.json',
            prefix=os.path.basename(anURI).split(".")[0] + '_ll',
            dir='/dev/shm',
            delete=True
        )
        output_file_hl = tempfile.NamedTemporaryFile(
            suffix='.json',
            prefix=os.path.basename(anURI).split(".")[0] + '_hl',
            dir='/dev/shm',
            delete=True
        )
        meta = {}
        # low-level features
        extract_feature(self.ll_ex, anURI, output_file_ll.name)
        ll_meta = get_extracted_ll_features(output_file_ll.name)
        meta.update(ll_meta)
        # high-level features (the SVM extractor consumes the low-level JSON)
        extract_feature(self.hl_ex, output_file_ll.name,
                        output_file_hl.name, self.conf)
        hl_meta = get_extracted_hl_features(output_file_hl.name)
        meta.update(hl_meta)
        asset_descr.metadata = metadata.MetaDataFeatureExtraction.extract(meta)
        file_descr.assets.append(asset_descr)
        # Closing the NamedTemporaryFiles also deletes them (delete=True).
        output_file_ll.close()
        output_file_hl.close()
        return file_descr
| {
"repo_name": "peragro/peragro-at",
"path": "src/damn_at/analyzers/audio/feature_extraction.py",
"copies": "1",
"size": "5784",
"license": "bsd-3-clause",
"hash": 6673061830718431000,
"line_mean": 32.4335260116,
"line_max": 79,
"alpha_frac": 0.555670816,
"autogenerated": false,
"ratio": 4.011095700416089,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 173
} |
"""Analyzer for audio files using AcoustID"""
from __future__ import print_function
import os
import mimetypes
import subprocess
import uuid
from damn_at import logger
from damn_at import AssetId, FileId, FileDescription, AssetDescription
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzers.audio import metadata
from acoustid import fingerprint_file
def get_supported_formats():
    """Return the audio/* mime types for every format ffmpeg reports.

    Runs `ffmpeg -formats`, maps each listed extension to a mime type and
    keeps only the audio ones. Returns [] when ffmpeg fails or is missing.
    """
    try:
        proc = subprocess.Popen(['ffmpeg', '-formats'], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            logger.debug(
                'GetAcoustIDTypes failed with error code %d! '
                % proc.returncode,
                out,
                err
            )
            return []
    except OSError as oserror:
        logger.debug('GetAcoustIDTypes failed! %s', oserror)
        return []
    mimes = []
    # Skip ffmpeg's 4 header lines; each remaining line lists flags then
    # the format extension.
    for line in out.decode('utf-8').split('\n')[4:]:
        fields = line.split()
        if len(fields) > 1:
            guessed = mimetypes.guess_type('file.' + fields[1], False)[0]
            if guessed and guessed.startswith('audio/'):
                mimes.append(guessed)
    return mimes
class SoundAnalyzer(IAnalyzer):
    """Class for sound analyzer called in the analyzer"""
    # Mime types this plugin handles, derived from ffmpeg's format list.
    handled_types = get_supported_formats()

    def __init__(self):
        IAnalyzer.__init__(self)

    def activate(self):
        pass

    def analyze(self, anURI):
        """Fingerprint the audio file at anURI with AcoustID and return a
        FileDescription whose asset metadata carries the duration,
        fingerprint and a derived UUID; returns False on failure."""
        fileid = FileId(filename=os.path.abspath(anURI))
        file_descr = FileDescription(file=fileid)
        file_descr.assets = []
        asset_descr = AssetDescription(asset=AssetId(
            subname=os.path.basename(anURI),
            mimetype=mimetypes.guess_type(anURI, False)[0],
            file=fileid))
        try:
            duration, fingerprint = fingerprint_file(anURI)
            # Deterministic UUID derived from the duration+fingerprint pair,
            # so the same audio always maps to the same id.
            fingerprint_uuid = uuid.uuid5(uuid.NAMESPACE_DNS,
                                          str(duration) + str(fingerprint))
        except Exception as e:
            print(('E: AcoustID analyzer failed %s with error %s'
                   % (anURI, e)))
            return False
        meta = {
            'duration': str(duration) + 's',
            'fingerprint': fingerprint,
            'fingerprint_uuid': fingerprint_uuid
        }
        asset_descr.metadata = metadata.MetaDataAcoustID.extract(meta)
        file_descr.assets.append(asset_descr)
        return file_descr
| {
"repo_name": "peragro/peragro-at",
"path": "src/damn_at/analyzers/audio/acoustid_analyzer.py",
"copies": "1",
"size": "2506",
"license": "bsd-3-clause",
"hash": 5108976354710713000,
"line_mean": 29.9382716049,
"line_max": 78,
"alpha_frac": 0.5865921788,
"autogenerated": false,
"ratio": 4.108196721311476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5194788900111476,
"avg_score": null,
"num_lines": null
} |
"""Analyzer for audio files using sox"""
from __future__ import print_function
import os
import re
import logging
import mimetypes
import subprocess
from damn_at import logger
from damn_at import AssetId, FileId, FileDescription, AssetDescription
from damn_at import MetaDataValue, MetaDataType
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzers.audio import metadata
def get_sox_types():
    """Return the audio/* mime types for every file format sox supports.

    Parses the 'AUDIO FILE FORMATS' section of `sox -h`. Returns [] when
    sox fails, is missing, or its help output cannot be parsed.
    """
    try:
        proc = subprocess.Popen(['sox', '-h'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            logger.debug(
                "GetSoxTypes failed with error code %d! " % (proc.returncode),
                out,
                err
            )
            return []
    except OSError as oserror:
        logger.debug("GetSoxTypes failed! %s", oserror)
        return []
    # The supported extensions live between these two section headers.
    match = re.search(r'AUDIO FILE FORMATS:(.*)PLAYLIST FORMATS',
                      out.decode("utf-8"), re.DOTALL)
    if not match:
        logger.debug("GetSoxTypes failed to parse output! %s %s", out, err)
        return []
    guesses = (mimetypes.guess_type('file.' + ext, False)[0]
               for ext in match.group(1).strip().split(' '))
    return [mime for mime in guesses if mime and mime.startswith('audio/')]
class SoundAnalyzer(IAnalyzer):
    """Sound analyzer that extracts audio metadata via `sox --i`."""
    # Mime types this plugin handles, derived from sox's format list.
    handled_types = get_sox_types()

    def __init__(self):
        IAnalyzer.__init__(self)

    def activate(self):
        pass

    def analyze(self, anURI):
        """Run `sox --i` on the file at anURI and return a FileDescription
        whose asset metadata holds the parsed key/value output.

        Returns False when sox fails or cannot be executed.
        """
        fileid = FileId(filename=os.path.abspath(anURI))
        file_descr = FileDescription(file=fileid)
        file_descr.assets = []
        asset_descr = AssetDescription(asset=AssetId(
            subname=os.path.basename(anURI),
            mimetype=mimetypes.guess_type(anURI, False)[0],
            file=fileid))
        try:
            pro = subprocess.Popen(
                ['sox', '--i', anURI],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            out, err = pro.communicate()
            if pro.returncode != 0:
                print(("E: SoundAnalyzer failed %s with error code %d! "
                       % (anURI, pro.returncode), out, err))
                return False
        except OSError as oserror:
            # BUG FIX: the original printed `out`/`err` here, but they are
            # unbound when Popen itself raises OSError (e.g. sox missing),
            # turning the error report into a NameError.
            print("E: SoundAnalyzer failed %s! %s" % (anURI, oserror))
            return False
        meta = {}
        lines = out.decode("utf-8").strip().split('\n')
        for line in lines:
            # Lines are either 'Key : Value' or 'Key=Value'.
            line = line.split(':', 1)
            if len(line) == 1:
                line = line[0].split('=')
            line = [l.strip() for l in line]
            if line[0] in ['Input File', 'Comment']:
                continue
            if len(line) < 2:
                # Robustness: skip lines with no value part instead of
                # raising IndexError below.
                continue
            meta[line[0].lower().replace(' ', '_')] = line[1]
        asset_descr.metadata = metadata.MetaDataSox.extract(meta)
        for key, value in meta.items():
            # Add non-default metadata.
            if key not in asset_descr.metadata:
                asset_descr.metadata['Sox-'+key] = MetaDataValue(
                    type=MetaDataType.STRING,
                    string_value=value)
        file_descr.assets.append(asset_descr)
        return file_descr
| {
"repo_name": "peragro/peragro-at",
"path": "src/damn_at/analyzers/audio/soxanalyzer.py",
"copies": "1",
"size": "3438",
"license": "bsd-3-clause",
"hash": -2983946654614998000,
"line_mean": 31.1308411215,
"line_max": 77,
"alpha_frac": 0.5526468877,
"autogenerated": false,
"ratio": 4.102625298329356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155272186029355,
"avg_score": null,
"num_lines": null
} |
"""Analyzer for Videos """
# Standard
import os
import logging
import subprocess
# Damn
import mimetypes
from damn_at import (
MetaDataType,
MetaDataValue,
FileId,
FileDescription,
AssetDescription,
AssetId
)
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzers.video import metadata
LOG = logging.getLogger(__name__)
class GenericVideoAnalyzer(IAnalyzer):
    """Generic video analyzer that extracts metadata via exiftool."""
    handled_types = ["video/mp4", "video/x-msvideo", "video/x-matroska",
                     "video/quicktime", "video/mpeg", "video/x-flv"]

    def __init__(self):
        IAnalyzer.__init__(self)

    def activate(self):
        pass

    def analyze(self, an_uri):
        """Run exiftool on the video at an_uri and return a FileDescription
        whose asset metadata holds the parsed key/value output.

        Returns False when exiftool fails or cannot be executed.
        """
        fileid = FileId(filename=os.path.abspath(an_uri))
        file_descr = FileDescription(file=fileid)
        file_descr.assets = []
        video_mimetype = mimetypes.guess_type(an_uri)[0]
        asset_descr = AssetDescription(asset=AssetId(
            subname=os.path.basename(an_uri),
            mimetype=video_mimetype,
            file=fileid
        ))
        try:
            pro = subprocess.Popen(
                ['exiftool', an_uri],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            out, err = pro.communicate()
            if pro.returncode != 0:
                # BUG FIX: the original %-formatted a message with no
                # placeholders for out/err and then passed them as extra
                # lazy args, which broke log formatting. Use lazy args only.
                LOG.debug("VideoAnalyzer failed %s with error code %d: %s %s",
                          an_uri, pro.returncode, out, err)
                return False
        except OSError as oserror:
            # BUG FIX: `out`/`err` are unbound when Popen itself raises
            # OSError (e.g. exiftool not installed); log the exception.
            LOG.debug("VideoAnalyzer failed %s: %s", an_uri, oserror)
            return False
        meta = {}
        flag = False
        lines = out.decode('utf-8').strip().split('\n')
        for line in lines:
            # Lines are either 'Key : Value' or 'Key=Value'.
            line = line.split(':', 1)
            if len(line) == 1:
                # BUG FIX: `line` is already a list at this point; the
                # original called line.split('=') on the list, which raises
                # AttributeError (the sox analyzer does this correctly).
                line = line[0].split('=')
            line = [l.strip() for l in line]
            # Only keep entries appearing after the 'MIME Type' line.
            if line[0] == 'MIME Type':
                flag = True
                continue
            if flag:
                if len(line) < 2:
                    # Robustness: skip lines with no value part.
                    continue
                meta[line[0].lower().replace(' ', '_')] = line[1]
                # Disambiguate the video frame rate key.
                if line[0] == 'Frame Rate':
                    meta['video_frame_rate'] = meta.pop('frame_rate')
        asset_descr.metadata = metadata.MetaDataExif.extract(meta)
        for key, value in meta.items():
            if key not in asset_descr.metadata:
                asset_descr.metadata['Exif-'+key] = MetaDataValue(
                    type=MetaDataType.STRING,
                    string_value=value
                )
        file_descr.assets.append(asset_descr)
        return file_descr
| {
"repo_name": "peragro/peragro-at",
"path": "src/damn_at/analyzers/video/videoanalyzer.py",
"copies": "1",
"size": "2654",
"license": "bsd-3-clause",
"hash": -5204380268657263000,
"line_mean": 28.4888888889,
"line_max": 72,
"alpha_frac": 0.5222305953,
"autogenerated": false,
"ratio": 4.058103975535168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5080334570835168,
"avg_score": null,
"num_lines": null
} |
# analyzer / generator for historical models
import sys
import os
import pickle
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib')
sys.path.append(libdir)
from msp_isa import isa
import smt
import historical_models
def is_valid_mode(ins, rsname, rdname):
    """Return True when the source/destination addressing modes of `ins`
    are compatible with the given source (rsname) and destination (rdname)
    register names.

    NOTE(review): register indices come from smt.smt_rnames; indices
    0/2/3 appear to denote PC and the constant-generator registers --
    confirm against the smt module.
    """
    rn = smt.smt_rnames
    source_valid = True
    dest_valid = True
    # Source operand: only fmt1/fmt2 instructions have one.
    if ins.fmt in ('fmt1', 'fmt2'):
        smode = ins.smode
        if smode == 'Rn':
            source_valid = True
        elif smode == 'X(Rn)':
            source_valid = rsname not in {rn[0], rn[2], rn[3]}
        elif smode == 'ADDR':
            source_valid = rsname == rn[0]
        elif smode == '&ADDR':
            source_valid = rsname == rn[2]
        elif smode == '#1':
            source_valid = rsname == rn[3]
        elif smode == '@Rn':
            source_valid = rsname != rn[0]
        elif smode == '#@N':
            source_valid = rsname == rn[0]
        elif smode == '@Rn+':
            source_valid = rsname != rn[0]
        elif smode == '#N':
            source_valid = rsname == rn[0]
        else:
            raise ValueError('smode??? {:s}'.format(ins.smode))
    # Destination operand: only fmt1 instructions have one.
    if ins.fmt == 'fmt1':
        dmode = ins.dmode
        if dmode == 'Rn':
            dest_valid = True
        elif dmode == 'X(Rn)':
            dest_valid = rdname not in {rn[0], rn[2], rn[3]}
        elif dmode == 'ADDR':
            dest_valid = rdname == rn[0]
        elif dmode == '&ADDR':
            dest_valid = rdname == rn[2]
        elif dmode == '#1':
            dest_valid = rdname == rn[3]
        else:
            raise ValueError('dmode??? {:s}'.format(ins.dmode))
    return source_valid and dest_valid
def is_supported_instruction(ins, rsname, rdname):
    """Return True when the (instruction, source register, destination
    register) combination is one the measured hardware model supports;
    raise ValueError for combinations that should never be generated.

    NOTE(review): register semantics come from smt.smt_rnames; the inline
    comments below suggest index 0 is PC and 2/3 are the constant-generator
    registers, with -1 meaning "no register" -- confirm against smt.
    """
    if ins.fmt == 'fmt1':
        if ins.name in {'DADD'}:
            raise ValueError('DADD - bad')
        elif ins.dmode in {'#1'} and rdname in {smt.smt_rnames[3]}:
            raise ValueError('bad destination for fmt1: {:s} {:s} {:s}'
                             .format(ins.name, ins.dmode, rdname))
        elif rsname == smt.smt_rnames[-1] or rdname == smt.smt_rnames[-1]:
            raise ValueError('bad registers for fmt1: {:s} {:s}'.format(rsname, rdname))
        elif ins.dmode in {'Rn'} and rdname in {smt.smt_rnames[0], smt.smt_rnames[2]}:
            # constant generator
            if ins.smode in {'@Rn', '@Rn+'} and rsname in {smt.smt_rnames[2], smt.smt_rnames[3]}:
                return ins.name in {'CMP', 'BIT'} or (rdname in {smt.smt_rnames[2]} and ins.name in {'MOV', 'BIC', 'BIS'})
            elif ins.smode in {'#1'} and rsname in {smt.smt_rnames[3]}:
                return ins.name in {'CMP', 'BIT'} or (rdname in {smt.smt_rnames[2]} and ins.name in {'MOV', 'BIC', 'BIS'})
            # PCSR
            elif ins.smode in {'Rn'} and rsname in {smt.smt_rnames[0]}:
                return ins.name in {'CMP', 'BIT'}
            # elif ins.smode in {'Rn'} and rsname in {smt.smt_rnames[0], smt.smt_rnames[2]}:
            #     return ins.name in {'CMP', 'BIT'}
            # this disagrees with the tables I have of supported fmt1...
            # what's happening is that we can measure some of these if we manage to set R2 to 0,
            # because we then pick that up as a usable identity
            elif ins.smode in {'Rn'} and rsname in {smt.smt_rnames[2]}:
                if rdname in {smt.smt_rnames[0]}:
                    return ins.name in {'CMP', 'BIT'}
                elif rdname in {smt.smt_rnames[2]}:
                    return ins.name not in {'AND', 'SUBC'}
            elif ins.smode in {'Rn'} and rsname in {smt.smt_rnames[3]}:
                if rdname in {smt.smt_rnames[0]}:
                    return ins.name in {'CMP', 'BIT'}
                else:
                    return ins.name not in {'AND', 'SUBC'}
        # remainder of PC
        elif rdname in {smt.smt_rnames[0]}:
            return ins.name in {'CMP', 'BIT', 'MOV'}
        # Anything that fell through the special cases above is supported.
        return True
    elif ins.fmt == 'fmt2':
        if rsname == smt.smt_rnames[-1] or rdname != smt.smt_rnames[-1]:
            raise ValueError('bad registers for fmt2: {:s} {:s}'.format(rsname, rdname))
        elif ins.name in {'RETI'}:
            return ins.smode == 'Rn' and rsname == smt.smt_rnames[0]
        elif ins.name in {'CALL', 'PUSH'}:
            if rsname == smt.smt_rnames[1] and ins.smode in {'X(Rn)', '@Rn', '@Rn+'}:
                return False
            elif ins.name in {'CALL'}:
                # CALL only supports this specific set of (mode, register)
                # pairings; everything else is unsupported.
                if (ins.smode, rsname) not in {
                        ('Rn', smt.smt_rnames[4]),
                        ('ADDR', smt.smt_rnames[0]),
                        ('&ADDR', smt.smt_rnames[2]),
                        ('X(Rn)', smt.smt_rnames[4]),
                        ('#@N', smt.smt_rnames[0]),
                        ('@Rn', smt.smt_rnames[4]),
                        ('#N', smt.smt_rnames[0]),
                        ('@Rn+', smt.smt_rnames[4])
                }:
                    return False
            return True
        elif ins.name in {'SWPB', 'SXT', 'RRC', 'RRA'}:
            if ins.smode in {'Rn'} and rsname in {smt.smt_rnames[0], smt.smt_rnames[2]}:
                return False
            elif ins.smode in {'#1'} and rsname in {smt.smt_rnames[3]}:
                return False
            elif ins.smode in {'@Rn', '#@N', '@Rn+', '#N'} and rsname in {smt.smt_rnames[0], smt.smt_rnames[2], smt.smt_rnames[3]}:
                return False
            return True
        else:
            raise ValueError('what fmt2 is this? {:s}'.format(ins.name))
    elif ins.fmt == 'jump':
        # Jumps take no register operands at all.
        if rsname != smt.smt_rnames[-1] or rdname != smt.smt_rnames[-1]:
            raise ValueError('bad registers for jump: {:s} {:s}'.format(rsname, rdname))
        return True
    else:
        raise ValueError('what is this? {:s} {:s}'.format(ins.fmt, ins.name))
def create_model_table_10(record, fname):
    """Build timing and state-transition lookup tables from a model record
    and pickle them to fname.

    Args:
        record: dict with solver-produced function strings under the keys
            'time_fn_rsrc_rdst', 'state_fn_init' and 'state_fn_default'.
        fname: path the resulting pickle file is written to.

    The pickle holds a dict with:
        'state_default': id of the initial state,
        'ttab': (state, iname, rsname, rdname) -> cycle count, or None for
            excluded/unknown combinations,
        'stab': (state, iname, rsname, rdname) -> next state id, or None.
    """
    rsrc_rdst_strings = record['time_fn_rsrc_rdst']
    state0_strings = record['state_fn_init']
    state_strings = record['state_fn_default']
    # Map smt-level instruction names back to their instruction objects.
    inames = {smt.smt_iname(ins) : ins for ins in isa.ids_ins}
    states = [0, 1]
    #states = [0, 1, 2, 3]
    state_default = smt.get_state_id(state0_strings)
    # Every (state, instruction, rsrc, rdst) combination we expect to cover;
    # entries are removed as the function string accounts for them, so what
    # remains afterwards is the set of combinations that must be classified
    # by hand below.
    rsrc_rdst_pool = set([(s, x, rs, rd)
                          for s in states
                          for x in inames
                          for rs in smt.smt_rnames.values()
                          for rd in smt.smt_rnames.values()])
    ttab = {}
    rsrc_rdst_else = None
    for arg, res in smt.split_function_string(rsrc_rdst_strings):
        if arg == ('else',):
            rsrc_rdst_else = int(res)
        else:
            (statestr, iname, rsname, rdname) = arg
            state = smt.get_state_id(statestr)
            if (state, iname, rsname, rdname) in rsrc_rdst_pool:
                rsrc_rdst_pool.remove((state, iname, rsname, rdname))
                ttab[(state, iname, rsname, rdname)] = int(res)
            else:
                print('not in pool: {:s}'.format(repr((state, iname, rsname, rdname))))
    print('rsrc_rdst_pool has {:d} remaining entries'.format(len(rsrc_rdst_pool)))
    # Classify leftover combinations: exclusions (None) vs. timings we can
    # infer from already-known entries. Counters track why each entry was
    # excluded, for the summary print below.
    dadds = 0
    exts = 0
    xr3s = 0
    invr = 0
    invm = 0
    unsupported = 0
    inf = 0
    other = 0
    for x in rsrc_rdst_pool:
        state, iname, rsname, rdname = x
        ins = inames[iname]
        if ins.name == 'DADD':
            dadds += 1
            ttab[x] = None
        elif ins.fmt == 'EXT':
            exts += 1
            ttab[x] = None
        elif ins.fmt == 'fmt1' and ins.dmode == 'X(Rn)' and rdname == smt.smt_rnames[3]:
            xr3s += 1
            ttab[x] = None
        elif ins.fmt == 'jump' and (rsname != smt.smt_rnames[-1] or rdname != smt.smt_rnames[-1]):
            invr += 1
            ttab[x] = None
        elif ins.fmt == 'fmt2' and (rsname == smt.smt_rnames[-1] or rdname != smt.smt_rnames[-1]):
            invr += 1
            ttab[x] = None
        elif ins.fmt == 'fmt1' and (rsname == smt.smt_rnames[-1] or rdname == smt.smt_rnames[-1]):
            invr += 1
            ttab[x] = None
        elif not is_valid_mode(ins, rsname, rdname):
            invm += 1
            ttab[x] = None
        elif not is_supported_instruction(ins, rsname, rdname):
            unsupported += 1
            ttab[x] = None
        else:
            # print(state, ins.name, ins.smode, rsname, ins.dmode, rdname)
            # for s in states:
            #     if (s, iname, rsname, rdname) in ttab:
            #         print('  ', s, iname, rsname, rdname, ' : ', ttab[(s, iname, rsname, rdname)])
            # Prefer the timing recorded for the same combination in the
            # initial state; failing that, assume an R2 source behaves like
            # an R4 source in the same state.
            if (state_default, iname, rsname, rdname) in ttab:
                inf += 1
                ttab[x] = ttab[state_default, iname, rsname, rdname]
            elif rsname in {smt.smt_rnames[2]} and (state, iname, smt.smt_rnames[4], rdname) in ttab:
                print('assuming R4 behavior for R2 source:')
                print('  ', state, ins.name, ins.smode, rsname, ins.dmode, rdname)
                ttab[x] = ttab[(state, iname, smt.smt_rnames[4], rdname)]
            else:
                print(state, ins.name, ins.smode, rsname, ins.dmode, rdname)
                other += 1
                ttab[x] = None
    print('excluded {:d} dadd, {:d} ext, {:d} X(R3), {:d} invalid register, {:d} invalid mode, {:d} unsupported, {:d} inferred, {:d} other'
          .format(dadds, exts, xr3s, invr, invm, unsupported, inf, other))
    # Same procedure for the state-transition table (stab): consume the
    # function string, then classify whatever it did not cover.
    state_pool = set([(s, x, rs, rd)
                      for s in states
                      for x in inames
                      for rs in smt.smt_rnames.values()
                      for rd in smt.smt_rnames.values()])
    stab = {}
    state_else = None
    for arg, res in smt.split_function_string(state_strings):
        if arg == ('else',):
            state_else = smt.get_state_id(res)
        else:
            (statestr, iname, rsname, rdname) = arg
            state = smt.get_state_id(statestr)
            if (state, iname, rsname, rdname) in state_pool:
                state_pool.remove((state, iname, rsname, rdname))
                stab[(state, iname, rsname, rdname)] = smt.get_state_id(res)
            else:
                print('not in pool: {:s}'.format(repr((state, iname, rsname, rdname))))
    print('state_pool has {:d} remaining entries'.format(len(state_pool)))
    dadds = 0
    exts = 0
    xr3s = 0
    invr = 0
    invm = 0
    unsupported = 0
    inf = 0
    other = 0
    for x in state_pool:
        state, iname, rsname, rdname = x
        ins = inames[iname]
        if ins.name == 'DADD':
            dadds += 1
            stab[x] = None
        elif ins.fmt == 'EXT':
            exts += 1
            stab[x] = None
        elif ins.fmt == 'fmt1' and ins.dmode == 'X(Rn)' and rdname == smt.smt_rnames[3]:
            xr3s += 1
            stab[x] = None
        elif ins.fmt == 'jump' and (rsname != smt.smt_rnames[-1] or rdname != smt.smt_rnames[-1]):
            invr += 1
            stab[x] = None
        elif ins.fmt == 'fmt2' and (rsname == smt.smt_rnames[-1] or rdname != smt.smt_rnames[-1]):
            invr += 1
            stab[x] = None
        elif ins.fmt == 'fmt1' and (rsname == smt.smt_rnames[-1] or rdname == smt.smt_rnames[-1]):
            invr += 1
            stab[x] = None
        elif not is_valid_mode(ins, rsname, rdname):
            invm += 1
            stab[x] = None
        elif not is_supported_instruction(ins, rsname, rdname):
            unsupported += 1
            stab[x] = None
        else:
            # print(state, ins.name, ins.smode, rsname, ins.dmode, rdname)
            # for s in states:
            #     if (s, iname, rsname, rdname) in stab:
            #         print('  ', s, iname, rsname, rdname, ' : ', stab[(s, iname, rsname, rdname)])
            # If every state we do know about transitions to the initial
            # state, infer the same here; otherwise fall back to the
            # function string's 'else' transition.
            known_transitions = set()
            for s in states:
                if (s, iname, rsname, rdname) in stab:
                    known_transitions.add(stab[s, iname, rsname, rdname])
            if known_transitions == {state_default}:
                inf += 1
                stab[x] = state_default
            else:
                other += 1
                stab[x] = state_else
    print('excluded {:d} dadd, {:d} ext, {:d} X(R3), {:d} invalid register, {:d} invalid mode, {:d} unsupported, {:d} inferred to initial state, {:d} other'
          .format(dadds, exts, xr3s, invr, invm, unsupported, inf, other))
    print('DONE: ttab has {:d} entries, stab has {:d} entries'
          .format(len(ttab), len(stab)))
    with open(fname, 'wb') as f:
        print('writing to {:s}'.format(fname))
        pickle.dump({'state_default':state_default, 'ttab':ttab, 'stab':stab}, f)
# Command-line entry point: writes the pickled model tables for the chosen
# historical model to the path given as the first argument.
if __name__ == '__main__':
    fname = sys.argv[1]
    #create_model_table_10(historical_models.model_m9_s10, fname)
    create_model_table_10(historical_models.model_m10_simple, fname)
    #create_model_table_10(historical_models.model_m10_full, fname)
| {
"repo_name": "billzorn/msp-pymodel",
"path": "getmodel.py",
"copies": "1",
"size": "13164",
"license": "mit",
"hash": 4605615075140933600,
"line_mean": 42.4455445545,
"line_max": 156,
"alpha_frac": 0.5116226071,
"autogenerated": false,
"ratio": 3.1172152498224013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9117347227528919,
"avg_score": 0.0022981258786964895,
"num_lines": 303
} |
"""Analyzer
Analyzer module for setting menu bar setup for OSX
"""
__author__ = "ales lerch"
import os
import cv2
import numpy
from PIL import Image
def check_image_color(image):
    """Classify a wallpaper image as 'dark', 'evening' or 'light'.

    The image is reduced to its average BGR colour, which is converted to a
    single luma value Y (ITU-R BT.601 weights). Y then decides which
    menu-bar theme the wallpaper suits.

    Args:
        image: path to the image file on disk.

    Returns:
        One of the strings 'dark', 'evening' or 'light', or an error
        message string when the file is missing or cannot be parsed.
    """
    if not os.path.isfile(image):
        return "Image not found"
    def calculate_bgr(data):
        # Average over rows, then over the resulting row of pixels, giving
        # one mean (B, G, R) triple for the whole image.
        average_color_per_row = numpy.average(data, axis=0)
        average_color = numpy.average(average_color_per_row, axis=0)
        return tuple(average_color)
    def calculate_y(r, g, b):
        # ITU-R BT.601 luma: Y = 0.299*R + 0.587*G + 0.114*B
        alpha = 0.299
        betta = 0.587
        gamma = 0.114
        return alpha * r + betta * g + gamma * b
    # split the image for four squares calucate averate pixel for them and take higest value
    # blure image and save to /Library/Caches as com.apple.desktop.admin.png
    # in case using blur tool --> blur = cv2.blur(img,(5,5))
    try:
        img_cv_data = cv2.imread(image)
        B, G, R = calculate_bgr(img_cv_data)
        # BUG FIX: calculate_y expects (r, g, b); the B and R channels were
        # previously passed swapped, skewing the luma for coloured images.
        Y = calculate_y(R, G, B)
        height, width = img_cv_data.shape[:2]
    except Exception as err:
        print(f"[ERROR] {err} with image: {image}")
        return "Error parsing image"
    # Luma thresholds for the three themes. BUG FIX: the original bands
    # (Y < 72.0, then Y >= 73.0) silently classified 72.0 <= Y < 73.0 as
    # 'light'; the bands are now contiguous.
    if Y < 72.0:
        _type = "dark"
    elif Y <= 108.0:
        _type = "evening"
    else:
        _type = "light"
    return _type
| {
"repo_name": "L3rchal/WallpDesk",
"path": "wall-desk/analyzer.py",
"copies": "1",
"size": "1734",
"license": "mit",
"hash": -5923397346266415000,
"line_mean": 28.3898305085,
"line_max": 92,
"alpha_frac": 0.5888119954,
"autogenerated": false,
"ratio": 3.32183908045977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9408808558981714,
"avg_score": 0.0003685033756110847,
"num_lines": 59
} |
"""Analyzers decorate AgileTickets with contextual information.
Analyzers look at tickets through the lens of "this is my start state", or
"this is what defects look like" and modify AgileTickets to contain information
based on that context like "ended_at", "commited_at", "started_at", etc.
"""
from .models import AnalyzedAgileTicket
class MissingPhaseInformation(Exception):
    """Signal that a ticket has no flow-log data for a required phase.

    Attributes:
        message (unicode): Human readable description of the problem.
        phase (unicode): The phase for which no state could be found.
        state_list (list[unicode]): The states that make up that phase.
    """

    def __init__(self, message, phase, state_list):
        """Record the phase context and initialize the base exception."""
        super(Exception, self).__init__(message)
        self.phase = phase
        self.state_list = state_list
        self.message = message
class PartialDateAnalyzer(object):
    """Analyze tickets that may be missing commit/start/end information.

    Tickets without data for a phase are still returned, with ``None`` in
    the missing slots.

    Attributes:
        commit_states (list[str]): State names that mark commitment to work.
        start_states (list[str]): State names that mark work starting.
        end_states (list[str]): State names that mark completion.
    """

    NEWEST_DATE = 'newest'
    OLDEST_DATE = 'oldest'

    def __init__(self, commit_states, start_states, end_states):
        """Store the state lists that define each phase."""
        super().__init__()
        self.commit_states = commit_states
        self.start_states = start_states
        self.end_states = end_states

    @property
    def states_context(self):
        """Map each analyzed phase name to its list of matching states."""
        return {
            u'committed': self.commit_states,
            u'started': self.start_states,
            u'ended': self.end_states,
        }

    def _find_entered_at(self, state_list, ticket, strategy):
        # Walk candidate states in priority order; the first state with any
        # flow-log entries wins, and the strategy decides which of its
        # entries is reported.
        matches = []
        for candidate in state_list:
            matches = [log for log in ticket.flow_log if log['state'] == candidate]
            if matches:
                break
        if not matches:
            return None, None
        chosen = matches[-1] if strategy == self.NEWEST_DATE else matches[0]
        return chosen['state'], chosen['entered_at']

    def analyze(self, tickets, strategy=None):
        """Return a list of AnalyzedAgileTicket.

        Arguments:
            tickets (list[AgileTicket]): The list of tickets to be analyzed
            strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times

        Returns:
            list[AnalyzedAgileTicket]: The list of tickets
        """
        strategy = self.OLDEST_DATE if strategy is None else strategy
        analyzed = [self.analyze_ticket(ticket, strategy) for ticket in tickets]
        # This analyzer never rejects a ticket, so the ignored list is
        # always empty; it is returned for interface parity with subclasses.
        return analyzed, []

    def analyze_ticket(self, ticket, strategy):
        """Convert a single AgileTicket into an AnalyzedAgileTicket.

        Arguments:
            ticket (AgileTicket): The AgileTicket under consideration
            strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times

        Returns:
            AnalyzedAgileTicket
        """
        kwargs = dict(key=ticket.key, ttype=ticket.type, title=ticket.title)
        for phase, state_list in self.states_context.items():
            state, entered = self._find_entered_at(state_list, ticket, strategy)
            kwargs[phase] = dict(state=state, entered_at=entered)
        return AnalyzedAgileTicket(**kwargs)
class DateAnalyzer(PartialDateAnalyzer):
    """Analyze tickets for cycle data, rejecting incomplete ones.

    Unlike PartialDateAnalyzer, a ticket missing any phase information is
    not analyzed; it is reported back in the ignored list instead.

    Attributes:
        commit_states (list[str]): State names that mark commitment to work.
        start_states (list[str]): State names that mark work starting.
        end_states (list[str]): State names that mark completion.
    """

    def analyze(self, tickets, strategy=None):
        """Return a list of AnalyzedAgileTicket.

        Arguments:
            tickets (list[AgileTicket]): The list of tickets to be analyzed
            strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times

        Returns:
            list[AnalyzedAgileTicket]: The list of tickets
        """
        strategy = self.OLDEST_DATE if strategy is None else strategy
        analyzed, ignored = [], []
        for ticket in tickets:
            try:
                analyzed.append(self.analyze_ticket(ticket, strategy))
            except MissingPhaseInformation as exc:
                # Keep enough context for the caller to report why the
                # ticket was skipped.
                ignored.append(dict(ticket=ticket, phase=exc.phase, state_list=exc.state_list))
        return analyzed, ignored

    def analyze_ticket(self, ticket, strategy):
        """Convert a single AgileTicket into an AnalyzedAgileTicket.

        Arguments:
            ticket (AgileTicket): The AgileTicket under consideration
            strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times

        Returns:
            AnalyzedAgileTicket

        Raises:
            MissingPhaseInformation: if any phase has no matching flow-log entry.
        """
        kwargs = dict(key=ticket.key, ttype=ticket.type, title=ticket.title)
        for phase, state_list in self.states_context.items():
            state, entered = self._find_entered_at(state_list, ticket, strategy)
            if state is None or entered is None:
                raise MissingPhaseInformation(
                    "{key} is missing flow_log information for {state_list}".format(
                        key=ticket.key, state_list=state_list),
                    phase,
                    state_list,
                )
            kwargs[phase] = dict(state=state, entered_at=entered)
        return AnalyzedAgileTicket(**kwargs)
| {
"repo_name": "cmheisel/agile-analytics",
"path": "agile_analytics/analyzers.py",
"copies": "2",
"size": "6558",
"license": "mit",
"hash": 4240071034936810000,
"line_mean": 36.0508474576,
"line_max": 131,
"alpha_frac": 0.6152790485,
"autogenerated": false,
"ratio": 4.27509778357236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.589037683207236,
"avg_score": null,
"num_lines": null
} |
# Elasticsearch analysis configuration: a synonym token filter that maps lay
# phrases (e.g. "dog catcher") to internal idea identifiers, wired into a
# custom "legalise" analyzer on top of the standard tokenizer + lowercase.
analyzers = {
    "filter" : {
        "lay_to_legalise" : {
            "type" : "synonym",
            "synonyms" : [
                "dog catcher => idea_1234, idea_25",
                "animal control => idea_1234, idea_25",
                "spca officer => idea_1234, idea_25",
                "rabies shot => idea_5215, idea_25",
                "lyssaviruses => idea_5215, idea_25",
                "rabid dog => idea_5215, idea_25",
                "rabies vaccine => idea_5215, idea_25",
                "rabies vaccination => idea_5215, idea_25",
                "rabies vaccinations => idea_5215, idea_25",
                "rabies inoculation => idea_5215, idea_25",
                "health code => idea_936, idea_72"
            ]
        }
    },
    "analyzer": {
        "legalise": {
            "tokenizer": "standard",
            "filter": ["lowercase", "lay_to_legalise"]
        }
    }
}
# Index definition: both the law text and its catch line use the "legalise"
# analyzer, storing term vectors with positions, offsets and payloads.
# Single shard, no replicas (demo/local setup).
settings = {
    "mappings": {
        "law": {
            "properties": {
                "text": {
                    "type": "string",
                    "analyzer": "legalise",
                    "term_vector": "with_positions_offsets_payloads",
                },
                "catch_line": {
                    "type": "string",
                    "analyzer": "legalise",
                    "term_vector": "with_positions_offsets_payloads",
                }
            }
        }
    },
    "settings": {
        "index" : {
            "number_of_shards" : 1,
            "number_of_replicas" : 0,
            "analysis": analyzers
        },
    }
}
from elasticsearch import Elasticsearch, TransportError
try:
    # Recreate the index from scratch against a local node: delete any
    # existing copy (ignoring 404 "missing index" and 400 responses), then
    # create it with the mappings/settings defined above.
    es = Elasticsearch("http://localhost:9200")
    es.indices.delete(index='statedecoded', ignore=[400,404])
    es.indices.create(index='statedecoded', body=settings)
except TransportError as e:
    print(repr(e))
| {
"repo_name": "o19s/semantic-search-course",
"path": "taxonomyDemo.py",
"copies": "1",
"size": "1783",
"license": "apache-2.0",
"hash": 6624088900943525000,
"line_mean": 27.3015873016,
"line_max": 63,
"alpha_frac": 0.4699943915,
"autogenerated": false,
"ratio": 3.691511387163561,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9636851223492229,
"avg_score": 0.004930911034266354,
"num_lines": 63
} |
"""Analyzes an MP3 file, gathering statistics and looking for errors."""
import cStringIO
import hashlib
import os
from chirp.common import mp3_frame
# Files with fewer than this many MPEG frames will be rejected as
# invalid. 100 frames is about 2.6s of audio.
_MINIMUM_FRAMES = 100
# Sanity bounds on on-disk size, enforced by sample_and_analyze().
_MINIMUM_REASONABLE_FILE_SIZE = 100<<10 # Files should be larger than 100k...
_MAXIMUM_REASONABLE_FILE_SIZE = 20<<20 # ...and smaller than 20MB.
class InvalidFileError(Exception):
    """Signals a file that looks corrupted or otherwise invalid."""
# TODO(trow): Some of the validity checks in this function might be
# too strict.
def analyze(file_obj, au_file, compute_fingerprint=True, get_payload=True):
    """Populate an AudioFile object with information extracted from a file.

    Args:
      file_obj: A file-like object.
      au_file: An AudioFile object to store the results of the analysis in.
      compute_fingerprint: If False, do not compute a SHA1 fingerprint
        of the MPEG frame data.
      get_payload: If False, do not store the concatenated MPEG frame
        data on au_file.payload.

    Returns:
      The same AudioFile object that was passed in as au_file, which
      should now have several fields set.

    Raises:
      InvalidFileError: if the file appears to be corrupted.
    """
    au_file.frame_count = 0
    au_file.frame_size = 0
    au_file.duration_ms = 0
    sha1_calc = hashlib.sha1() # unused if compute_fingerprint is False.
    payload = cStringIO.StringIO() # unused if get_payload is False.
    bit_rate_kbps_sum = 0
    expected_hdr = None
    first_bit_rate_kbps = None
    is_vbr = False
    # Walk every MPEG frame; mp3_frame.split yields a None header for
    # non-frame junk, which is skipped.
    for hdr, data_buffer in mp3_frame.split(file_obj):
        if hdr is None:
            continue
        au_file.frame_count += 1
        au_file.frame_size += len(data_buffer)
        au_file.duration_ms += hdr.duration_ms
        if compute_fingerprint:
            sha1_calc.update(data_buffer)
        if get_payload:
            payload.write(data_buffer)
        # If we've seen a valid header previously, make sure that all of the
        # fields that should match do actually match.
        if expected_hdr:
            if not hdr.match(expected_hdr):
                raise InvalidFileError(
                    "Bad header: found %s, expected %s (path=%s)" % (
                        hdr, expected_hdr, au_file.path))
            # Keep track of if this is a variable bit-rate file.
            if hdr.bit_rate_kbps != first_bit_rate_kbps:
                is_vbr = True
        # Add this frame's bit rate to our sum; we will use this to compute
        # the average bit rate.
        bit_rate_kbps_sum += hdr.bit_rate_kbps
        # If this is the first header we've seen, make a copy and then blank
        # out the fields that can vary. All future headers are expected to
        # match this template.
        if expected_hdr is None:
            expected_hdr = hdr
            first_bit_rate_kbps = expected_hdr.bit_rate_kbps
            expected_hdr.bit_rate_kbps = None # Might be a VBR file.
            expected_hdr.padding = None # Not all frames are padded.
            expected_hdr.frame_size = None
            # You'd think that this would be constant, but MP3s
            # encountered in the wild prove otherwise.
            expected_hdr.protected = None
    if au_file.frame_count < _MINIMUM_FRAMES:
        raise InvalidFileError("Found only %d MPEG frames"
                               % au_file.frame_count)
    # Add the bit rate back into the template header, then return it.
    # If this is a VBR file, use the average bit rate instead.
    if is_vbr:
        expected_hdr.bit_rate_kbps = (
            float(bit_rate_kbps_sum) / au_file.frame_count)
    else:
        expected_hdr.bit_rate_kbps = first_bit_rate_kbps
    # Finishing populating and then return the AudioFile object.
    au_file.mp3_header = expected_hdr
    # Round the duration down to an integral number of milliseconds.
    au_file.duration_ms = int(au_file.duration_ms)
    if compute_fingerprint:
        au_file.fingerprint = sha1_calc.hexdigest()
    if get_payload:
        au_file.payload = payload.getvalue()
    return au_file
def sample_and_analyze(au_file, mp3_path_list):
    """Pick a representative file from a list of filenames and analyze it.

    The representative file is the one whose on-disk size is the median of
    the list; it is analyzed without fingerprinting and its MP3 header is
    returned.

    Args:
        au_file: An AudioFile object used to hold the analysis results.
        mp3_path_list: A list of paths to MP3 files.

    Returns:
        A representative MP3 header from a file whose size
        is approximately equal to the the median of those in the list,
        or None if mp3_path_list is empty.

    Raises:
        InvalidFileError: if the sample file's size is unreasonable or the
            file appears corrupted.
    """
    if not mp3_path_list:
        return None
    sizes_and_paths = sorted((os.stat(path).st_size, path)
                             for path in mp3_path_list)
    # Find the median element. BUG FIX: use floor division so the index
    # stays an int under Python 3 (plain / yields a float there).
    size, sample_path = sizes_and_paths[len(sizes_and_paths) // 2]
    # Complain if file is < 100k or > 20M
    if (size < _MINIMUM_REASONABLE_FILE_SIZE
        or size > _MAXIMUM_REASONABLE_FILE_SIZE):
        raise InvalidFileError("Sample file has bad size: %s %d" % (
            sample_path, size))
    # BUG FIX: open in binary mode (MP3 data is not text) and let the
    # context manager close the file even if analyze() raises.
    with open(sample_path, 'rb') as f_in:
        analyze(f_in, au_file, compute_fingerprint=False)
    # We return only the MP3 header, since the rest of the au_file
    # information is tied to that specific file.
    return au_file.mp3_header
| {
"repo_name": "chirpradio/chirpradio-machine",
"path": "chirp/library/analyzer.py",
"copies": "1",
"size": "5178",
"license": "apache-2.0",
"hash": 3501255408051768300,
"line_mean": 35.7234042553,
"line_max": 78,
"alpha_frac": 0.6361529548,
"autogenerated": false,
"ratio": 3.735930735930736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4872083690730736,
"avg_score": null,
"num_lines": null
} |
# Analyzes an object and outputs numeric properties
import cv2
import numpy as np
from . import fatal_error
from . import print_image
from . import plot_image
from . import rgb2gray_hsv
from . import find_objects
from . import binary_threshold
from . import define_roi
from . import roi_objects
from . import object_composition
def report_size_marker_area(img, shape, device, debug, marker='define', x_adj=0, y_adj=0, w_adj=0, h_adj=0,
                            base='white', objcolor='dark', thresh_channel=None, thresh=None, filename=False):
    """Outputs numeric properties for an input object (contour or grouped contours).

    Inputs:
    img = image object (most likely the original), color(RGB)
    shape = 'rectangle', 'circle', 'ellipse'
    device = device number. Used to count steps in the pipeline
    debug = None, print, or plot. Print = save to file, Plot = print to screen.
    marker = define or detect, if define it means you set an area, if detect it means you want to
    detect within an area
    x_adj = x position of shape, integer
    y_adj = y position of shape, integer
    w_adj = width
    h_adj = height
    base = background color 'white' is default
    objcolor = object color is 'dark' or 'light'
    thresh_channel = 'h', 's','v'
    thresh = integer value
    filename = name of file

    Returns:
    device = device number
    marker_header = shape data table headers
    marker_data = shape data table values
    analysis_images = list of output images

    :param img: numpy array
    :param shape: str
    :param device: int
    :param debug: str
    :param marker: str
    :param x_adj:int
    :param y_adj:int
    :param w_adj:int
    :param h_adj:int
    :param base:str
    :param objcolor: str
    :param thresh_channel:str
    :param thresh:int
    :param filename: str
    :return: device: int
    :return: marker_header: str
    :return: marker_data: int
    :return: analysis_images: list
    """
    device += 1
    ori_img = np.copy(img)
    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy
    roi_background = np.zeros(size, dtype=np.uint8)
    roi_size = (ix - 5), (iy - 5)
    roi = np.zeros(roi_size, dtype=np.uint8)
    roi1 = roi + 1
    roi_contour, roi_heirarchy = cv2.findContours(roi1, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(roi_background, roi_contour[0], -1, (255, 0, 0), 5)
    # NOTE(review): this guard fires whenever both offsets on an axis are
    # positive, which looks stricter than a true bounds check against the
    # image size -- confirm intended semantics before changing it.
    if (x_adj > 0 and w_adj > 0) or (y_adj > 0 and h_adj > 0):
        fatal_error('Adjusted ROI position is out of frame, this will cause problems in detecting objects')
    for cnt in roi_contour:
        size1 = ix, iy, 3
        background = np.zeros(size1, dtype=np.uint8)
        if shape == 'rectangle' and (x_adj >= 0 and y_adj >= 0):
            x, y, w, h = cv2.boundingRect(cnt)
            x1 = x + x_adj
            y1 = y + y_adj
            w1 = w + w_adj
            h1 = h + h_adj
            cv2.rectangle(background, (x1, y1), (x + w1, y + h1), (1, 1, 1), -1)
        elif shape == 'circle':
            x, y, w, h = cv2.boundingRect(cnt)
            x1 = x + x_adj
            y1 = y + y_adj
            w1 = w + w_adj
            h1 = h + h_adj
            center = (int((w + x1) / 2), int((h + y1) / 2))
            # Radius follows the shorter bounding-box side so the circle
            # stays inside the box.
            if h > w:
                radius = int(w1 / 2)
                cv2.circle(background, center, radius, (1, 1, 1), -1)
            else:
                radius = int(h1 / 2)
                cv2.circle(background, center, radius, (1, 1, 1), -1)
        elif shape == 'ellipse':
            x, y, w, h = cv2.boundingRect(cnt)
            x1 = x + x_adj
            y1 = y + y_adj
            w1 = w + w_adj
            h1 = h + h_adj
            center = (int((w + x1) / 2), int((h + y1) / 2))
            # BUG FIX: use floor division so the axes stay integers under
            # Python 3 (cv2.ellipse rejects float axis lengths); identical
            # to the original integer division under Python 2.
            if w > h:
                cv2.ellipse(background, center, (w1 // 2, h1 // 2), 0, 0, 360, (1, 1, 1), -1)
            else:
                cv2.ellipse(background, center, (h1 // 2, w1 // 2), 0, 0, 360, (1, 1, 1), -1)
        else:
            fatal_error('Shape' + str(shape) + ' is not "rectangle", "circle", or "ellipse"!')
        markerback = cv2.cvtColor(background, cv2.COLOR_RGB2GRAY)
        shape_contour, hierarchy = cv2.findContours(markerback, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(ori_img, shape_contour, -1, (255, 255, 0), 5)
        # BUG FIX (here and below): compare strings with == rather than the
        # identity operator "is", which only worked through CPython's
        # string interning.
        if debug == 'print':
            print_image(ori_img, (str(device) + '_marker_roi.png'))
        elif debug == 'plot':
            plot_image(ori_img)
        if marker == 'define':
            # The ROI itself is the marker: measure it directly.
            m = cv2.moments(markerback, binaryImage=True)
            area = m['m00']
            device, id_objects, obj_hierarchy = find_objects(img, markerback, device, debug)
            device, obj, mask = object_composition(img, id_objects, obj_hierarchy, device, debug)
            center, axes, angle = cv2.fitEllipse(obj)
            major_axis = np.argmax(axes)
            minor_axis = 1 - major_axis
            major_axis_length = axes[major_axis]
            minor_axis_length = axes[minor_axis]
            eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)
        elif marker == 'detect':
            # Threshold within the ROI to find the marker object itself.
            if thresh_channel is not None and thresh is not None:
                if base == 'white':
                    masked = cv2.multiply(img, background)
                    marker1 = markerback * 255
                    mask1 = cv2.bitwise_not(marker1)
                    markstack = np.dstack((mask1, mask1, mask1))
                    added = cv2.add(masked, markstack)
                else:
                    added = cv2.multiply(img, background)
                device, maskedhsv = rgb2gray_hsv(added, thresh_channel, device, debug)
                device, masked2a_thresh = binary_threshold(maskedhsv, thresh, 255, objcolor, device, debug)
                device, id_objects, obj_hierarchy = find_objects(added, masked2a_thresh, device, debug)
                device, roi1, roi_hierarchy = define_roi(added, shape, device, None, 'default', debug, True, x_adj, y_adj,
                                                         w_adj, h_adj)
                device, roi_o, hierarchy3, kept_mask, obj_area = roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                             id_objects, obj_hierarchy, device, debug)
                device, obj, mask = object_composition(img, roi_o, hierarchy3, device, debug)
                cv2.drawContours(ori_img, roi_o, -1, (0, 255, 0), -1, lineType=8, hierarchy=hierarchy3)
                m = cv2.moments(mask, binaryImage=True)
                area = m['m00']
                center, axes, angle = cv2.fitEllipse(obj)
                major_axis = np.argmax(axes)
                minor_axis = 1 - major_axis
                major_axis_length = axes[major_axis]
                minor_axis_length = axes[minor_axis]
                eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)
            else:
                fatal_error('thresh_channel and thresh must be defined in detect mode')
        else:
            fatal_error("marker must be either in 'detect' or 'define' mode")
    analysis_images = []
    if filename:
        out_file = str(filename[0:-4]) + '_sizemarker.jpg'
        print_image(ori_img, out_file)
        analysis_images.append(['IMAGE', 'marker', out_file])
    if debug == 'print':
        print_image(ori_img, (str(device) + '_marker_shape.png'))
    elif debug == 'plot':
        plot_image(ori_img)
    marker_header = (
        'HEADER_MARKER',
        'marker_area',
        'marker_major_axis_length',
        'marker_minor_axis_length',
        'marker_eccentricity'
    )
    marker_data = (
        'MARKER_DATA',
        area,
        major_axis_length,
        minor_axis_length,
        eccentricity
    )
    return device, marker_header, marker_data, analysis_images
| {
"repo_name": "AntonSax/plantcv",
"path": "plantcv/report_size_marker_area.py",
"copies": "2",
"size": "7900",
"license": "mit",
"hash": 8553613279588709000,
"line_mean": 38.3034825871,
"line_max": 118,
"alpha_frac": 0.5548101266,
"autogenerated": false,
"ratio": 3.4154777345438823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9954116807351636,
"avg_score": 0.003234210758449338,
"num_lines": 201
} |
# Analyzes an object and outputs numeric properties
import cv2
import numpy as np
import os
from plantcv.plantcv import fatal_error
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import rgb2gray_hsv
from plantcv.plantcv import find_objects
from plantcv.plantcv.threshold import binary as binary_threshold
from plantcv.plantcv import roi_objects
from plantcv.plantcv import object_composition
from plantcv.plantcv import apply_mask
from plantcv.plantcv import params
from plantcv.plantcv import outputs
def report_size_marker_area(img, roi_contour, roi_hierarchy, marker='define', objcolor='dark', thresh_channel=None,
                            thresh=None, label="default"):
    """Detects a size marker in a specified region and reports its size and eccentricity

    Inputs:
    img = An RGB or grayscale image to plot the marker object on
    roi_contour = A region of interest contour (e.g. output from pcv.roi.rectangle or other methods)
    roi_hierarchy = A region of interest contour hierarchy (e.g. output from pcv.roi.rectangle or other methods)
    marker = 'define' or 'detect'. If define it means you set an area, if detect it means you want to
    detect within an area
    objcolor = Object color is 'dark' or 'light' (is the marker darker or lighter than the background)
    thresh_channel = 'h', 's', or 'v' for hue, saturation or value
    thresh = Binary threshold value (integer)
    label = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_images = List of output images

    :param img: numpy.ndarray
    :param roi_contour: list
    :param roi_hierarchy: numpy.ndarray
    :param marker: str
    :param objcolor: str
    :param thresh_channel: str
    :param thresh: int
    :param label: str
    :return: analysis_images: list
    """
    # Store debug
    # Debug output is suppressed while the intermediate plantcv steps run,
    # and restored before the final image is printed/plotted below.
    debug = params.debug
    params.debug = None
    params.device += 1
    # Make a copy of the reference image
    ref_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ref_img)) == 2:
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_GRAY2BGR)
    # Marker components
    # If the marker type is "defined" then the marker_mask and marker_contours are equal to the input ROI
    # Initialize a binary image
    roi_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # Draw the filled ROI on the mask
    cv2.drawContours(roi_mask, roi_contour, -1, (255), -1)
    marker_mask = []
    marker_contour = []
    # If the marker type is "detect" then we will use the ROI to isolate marker contours from the input image
    if marker.upper() == 'DETECT':
        # We need to convert the input image into an one of the HSV channels and then threshold it
        if thresh_channel is not None and thresh is not None:
            # Mask the input image
            masked = apply_mask(img=ref_img, mask=roi_mask, mask_color="black")
            # Convert the masked image to hue, saturation, or value
            marker_hsv = rgb2gray_hsv(rgb_img=masked, channel=thresh_channel)
            # Threshold the HSV image
            marker_bin = binary_threshold(gray_img=marker_hsv, threshold=thresh, max_value=255, object_type=objcolor)
            # Identify contours in the masked image
            contours, hierarchy = find_objects(img=ref_img, mask=marker_bin)
            # Filter marker contours using the input ROI
            kept_contours, kept_hierarchy, kept_mask, obj_area = roi_objects(img=ref_img, object_contour=contours,
                                                                             obj_hierarchy=hierarchy,
                                                                             roi_contour=roi_contour,
                                                                             roi_hierarchy=roi_hierarchy,
                                                                             roi_type="partial")
            # If there are more than one contour detected, combine them into one
            # These become the marker contour and mask
            marker_contour, marker_mask = object_composition(img=ref_img, contours=kept_contours,
                                                             hierarchy=kept_hierarchy)
        else:
            # Reset debug mode
            params.debug = debug
            fatal_error('thresh_channel and thresh must be defined in detect mode')
    elif marker.upper() == "DEFINE":
        # Identify contours in the masked image
        contours, hierarchy = find_objects(img=ref_img, mask=roi_mask)
        # If there are more than one contour detected, combine them into one
        # These become the marker contour and mask
        marker_contour, marker_mask = object_composition(img=ref_img, contours=contours, hierarchy=hierarchy)
    else:
        # Reset debug mode
        params.debug = debug
        fatal_error("marker must be either 'define' or 'detect' but {0} was provided.".format(marker))
    # Calculate the moments of the defined marker region
    m = cv2.moments(marker_mask, binaryImage=True)
    # Calculate the marker area
    marker_area = m['m00']
    # Fit a bounding ellipse to the marker
    center, axes, angle = cv2.fitEllipse(marker_contour)
    major_axis = np.argmax(axes)
    minor_axis = 1 - major_axis
    major_axis_length = axes[major_axis]
    minor_axis_length = axes[minor_axis]
    # Calculate the bounding ellipse eccentricity
    eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)
    # Draw the marker contour on the reference image for the debug output.
    cv2.drawContours(ref_img, marker_contour, -1, (255, 0, 0), 5)
    analysis_image = ref_img
    # Reset debug mode
    params.debug = debug
    if params.debug == 'print':
        print_image(ref_img, os.path.join(params.debug_outdir, str(params.device) + '_marker_shape.png'))
    elif params.debug == 'plot':
        plot_image(ref_img)
    # Record the marker measurements as observations under the given label.
    outputs.add_observation(sample=label, variable='marker_area', trait='marker area',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=marker_area, label='pixels')
    outputs.add_observation(sample=label, variable='marker_ellipse_major_axis',
                            trait='marker ellipse major axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(sample=label, variable='marker_ellipse_minor_axis',
                            trait='marker ellipse minor axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(sample=label, variable='marker_ellipse_eccentricity', trait='marker ellipse eccentricity',
                            method='plantcv.plantcv.report_size_marker_area', scale='none', datatype=float,
                            value=eccentricity, label='none')
    # Store images
    outputs.images.append(analysis_image)
    return analysis_image
| {
"repo_name": "danforthcenter/plantcv",
"path": "plantcv/plantcv/report_size_marker_area.py",
"copies": "2",
"size": "7276",
"license": "mit",
"hash": -5033075891600188000,
"line_mean": 48.4965986395,
"line_max": 118,
"alpha_frac": 0.6293293018,
"autogenerated": false,
"ratio": 4.071628427532177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5700957729332178,
"avg_score": null,
"num_lines": null
} |
# Analyzes an object and outputs numeric properties
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import within_frame
from plantcv.plantcv._debug import _debug
def analyze_object(img, obj, mask, label="default"):
    """Outputs numeric properties for an input object (contour or grouped contours).

    Inputs:
    img   = RGB or grayscale image data for plotting
    obj   = single or grouped contour object
    mask  = Binary image to use as mask
    label = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param label: str
    :return analysis_images: list
    """
    # Valid objects can only be analyzed if they have >= 5 vertices
    # (cv2.fitEllipse below requires at least 5 points)
    if len(obj) < 5:
        return None
    # Work on a copy so the caller's image is not modified in place
    ori_img = np.copy(img)
    # Convert grayscale images to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    # Image dimensions: ix = rows (height), iy = columns (width)
    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    # Blank canvases for intermediate drawing: background is 3-channel
    # (centroid dot), background1/background2 are single-channel
    size = ix, iy, 3
    size1 = ix, iy
    background = np.zeros(size, dtype=np.uint8)
    background1 = np.zeros(size1, dtype=np.uint8)
    background2 = np.zeros(size1, dtype=np.uint8)
    # Check is object is touching image boundaries (QC)
    in_bounds = within_frame(mask=mask, label=label)
    # Convex Hull
    hull = cv2.convexHull(obj)
    hull_vertices = len(hull)
    # Moments
    # m = cv2.moments(obj)
    # Moments are computed from the mask (not the contour) so filled pixel
    # area is measured
    m = cv2.moments(mask, binaryImage=True)
    # Properties
    # Area (zeroth moment m00 = count of nonzero mask pixels)
    area = m['m00']
    # NOTE(review): if area is 0 the block below is skipped, yet the
    # outputs.add_observation calls further down still reference hull_area,
    # solidity, etc., which would raise NameError — confirm whether a
    # zero-area mask can reach this point.
    if area:
        # Convex Hull area
        hull_area = cv2.contourArea(hull)
        # Solidity = object area / convex hull area (defaults to 1 when the
        # hull area truncates to 0)
        solidity = 1
        if int(hull_area) != 0:
            solidity = area / hull_area
        # Perimeter
        perimeter = cv2.arcLength(obj, closed=True)
        # x and y position (bottom left?) and extent x (width) and extent y (height)
        x, y, width, height = cv2.boundingRect(obj)
        # Centroid (center of mass x, center of mass y)
        cmx, cmy = (float(m['m10'] / m['m00']), float(m['m01'] / m['m00']))
        # Ellipse fit of the contour
        center, axes, angle = cv2.fitEllipse(obj)
        major_axis = np.argmax(axes)
        minor_axis = 1 - major_axis
        major_axis_length = float(axes[major_axis])
        minor_axis_length = float(axes[minor_axis])
        eccentricity = float(np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2))
        # Longest Axis: line through center of mass and point on the convex hull that is furthest away
        # Draw the centroid as a small filled circle and convert it to a contour
        cv2.circle(background, (int(cmx), int(cmy)), 4, (255, 255, 255), -1)
        center_p = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
        ret, centerp_binary = cv2.threshold(center_p, 0, 255, cv2.THRESH_BINARY)
        # [-2:] keeps compatibility across OpenCV versions (3.x returns 3
        # values from findContours, 4.x returns 2)
        centerpoint, cpoint_h = cv2.findContours(centerp_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
        # Signed distance from each hull vertex to the centroid contour; the
        # vertex with the largest absolute distance is the far caliper point
        dist = []
        vhull = np.vstack(hull)
        for i, c in enumerate(vhull):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(centerpoint[0], xy, measureDist=True)
            dist.append(pptest)
        abs_dist = np.absolute(dist)
        max_i = np.argmax(abs_dist)
        caliper_max_x, caliper_max_y = list(tuple(vhull[max_i]))
        caliper_mid_x, caliper_mid_y = [int(cmx), int(cmy)]
        # Slope/intercept of the line through the centroid and the far point
        xdiff = float(caliper_max_x - caliper_mid_x)
        ydiff = float(caliper_max_y - caliper_mid_y)
        # Set default values
        slope = 1
        if xdiff != 0:
            slope = (float(ydiff / xdiff))
        b_line = caliper_mid_y - (slope * caliper_mid_x)
        # Extend the line across the whole canvas, clipping at whichever
        # image edges (x- or y-intercepts) it crosses
        if slope != 0:
            xintercept = int(-b_line / slope)
            xintercept1 = int((ix - b_line) / slope)
            if 0 <= xintercept <= iy and 0 <= xintercept1 <= iy:
                cv2.line(background1, (xintercept1, ix), (xintercept, 0), (255), params.line_thickness)
            elif xintercept < 0 or xintercept > iy or xintercept1 < 0 or xintercept1 > iy:
                yintercept = int(b_line)
                yintercept1 = int((slope * iy) + b_line)
                cv2.line(background1, (0, yintercept), (iy, yintercept1), (255), 5)
        else:
            # Zero slope: horizontal line through the centroid
            cv2.line(background1, (iy, caliper_mid_y), (0, caliper_mid_y), (255), params.line_thickness)
        # Intersect the drawn line with the filled convex hull to obtain the
        # caliper (longest-path) segment
        ret1, line_binary = cv2.threshold(background1, 0, 255, cv2.THRESH_BINARY)
        cv2.drawContours(background2, [hull], -1, (255), -1)
        ret2, hullp_binary = cv2.threshold(background2, 0, 255, cv2.THRESH_BINARY)
        caliper = cv2.multiply(line_binary, hullp_binary)
        # Pixel coordinates of the caliper segment, sorted by (x, y)
        caliper_y, caliper_x = np.array(caliper.nonzero())
        caliper_matrix = np.vstack((caliper_x, caliper_y))
        caliper_transpose = np.transpose(caliper_matrix)
        # Pixel count of the caliper segment = the "longest path" measurement
        caliper_length = len(caliper_transpose)
        caliper_transpose1 = np.lexsort((caliper_y, caliper_x))
        caliper_transpose2 = [(caliper_x[i], caliper_y[i]) for i in caliper_transpose1]
        caliper_transpose = np.array(caliper_transpose2)
    analysis_images = []
    # Draw properties
    if area:
        # Object contour (blue), hull + bounding lines + caliper + centroid (magenta)
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])), (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255), params.line_thickness)
        analysis_images.append(ori_img)
        analysis_images.append(mask)
    else:
        pass
    # Record every measurement as an observation
    outputs.add_observation(sample=label, variable='area', trait='area',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=area, label='pixels')
    outputs.add_observation(sample=label, variable='convex_hull_area', trait='convex hull area',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=hull_area, label='pixels')
    outputs.add_observation(sample=label, variable='solidity', trait='solidity',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=float,
                            value=solidity, label='none')
    outputs.add_observation(sample=label, variable='perimeter', trait='perimeter',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=perimeter, label='pixels')
    outputs.add_observation(sample=label, variable='width', trait='width',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=width, label='pixels')
    outputs.add_observation(sample=label, variable='height', trait='height',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=height, label='pixels')
    outputs.add_observation(sample=label, variable='longest_path', trait='longest path',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=caliper_length, label='pixels')
    outputs.add_observation(sample=label, variable='center_of_mass', trait='center of mass',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=tuple,
                            value=(cmx, cmy), label=("x", "y"))
    outputs.add_observation(sample=label, variable='convex_hull_vertices', trait='convex hull vertices',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=int,
                            value=hull_vertices, label='none')
    outputs.add_observation(sample=label, variable='object_in_frame', trait='object in frame',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=bool,
                            value=in_bounds, label='none')
    outputs.add_observation(sample=label, variable='ellipse_center', trait='ellipse center',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=tuple,
                            value=(center[0], center[1]), label=("x", "y"))
    outputs.add_observation(sample=label, variable='ellipse_major_axis', trait='ellipse major axis length',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(sample=label, variable='ellipse_minor_axis', trait='ellipse minor axis length',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(sample=label, variable='ellipse_angle', trait='ellipse major axis angle',
                            method='plantcv.plantcv.analyze_object', scale='degrees', datatype=float,
                            value=float(angle), label='degrees')
    outputs.add_observation(sample=label, variable='ellipse_eccentricity', trait='ellipse eccentricity',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=float,
                            value=float(eccentricity), label='none')
    # Debugging output
    params.device += 1
    # NOTE(review): these drawing calls duplicate the "Draw properties"
    # section above — the same shapes are drawn onto ori_img a second time.
    cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
    cv2.drawContours(ori_img, [hull], -1, (255, 0, 255), params.line_thickness)
    cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255), params.line_thickness)
    cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255), params.line_thickness)
    cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255), params.line_thickness)
    cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])), (tuple(caliper_transpose[0])), (255, 0, 255),
             params.line_thickness)
    _debug(visual=ori_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_shapes.png'))
    # Store images
    outputs.images.append(analysis_images)
    return ori_img
| {
"repo_name": "stiphyMT/plantcv",
"path": "plantcv/plantcv/analyze_object.py",
"copies": "1",
"size": "10586",
"license": "mit",
"hash": -5408985623731181000,
"line_mean": 47.3378995434,
"line_max": 119,
"alpha_frac": 0.6012658228,
"autogenerated": false,
"ratio": 3.4459635416666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9535026234345234,
"avg_score": 0.0024406260242863555,
"num_lines": 219
} |
"""Analyzes backwards-optimization experiment."""
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from generalexam.machine_learning import evaluation_utils
from gewittergefahr.deep_learning import backwards_optimization as backwards_opt
from gewittergefahr.plotting import plotting_utils
# Hyperparameter grids searched in the experiment (log-spaced values).
L2_WEIGHTS = numpy.concatenate((
    numpy.logspace(-5, 0, num=11), numpy.logspace(0, 2, num=11)[1:]
))
LEARNING_RATES = numpy.logspace(-5, -2, num=10)

# Directory containing one backwards-optimization result file per
# (L2 weight, learning rate) pair.
TOP_EXPERIMENT_DIR_NAME = (
    '/glade/work/ryanlage/prediction_paper_2019/gridrad_experiment/'
    'dropout=0.500_l2=0.010000_num-dense-layers=2_data-aug=1/testing/'
    'extreme_examples/unique_storm_cells/bwo_experiment')

# Plotting settings.
COLOUR_MAP_OBJECT = pyplot.get_cmap('viridis')
FIGURE_RESOLUTION_DPI = 300
FONT_SIZE = 30

# Apply the single font size to every matplotlib text element.
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
def _run():
    """Analyzes backwards-optimization experiment.

    Reads the mean final activation for every (L2 weight, learning rate)
    pair, plots the grid, and saves the figure.  This is effectively the
    main method.
    """
    activation_matrix = numpy.full(
        (len(L2_WEIGHTS), len(LEARNING_RATES)), numpy.nan
    )

    # Fill one matrix cell per hyperparameter pair.
    for row, l2_weight in enumerate(L2_WEIGHTS):
        for col, learning_rate in enumerate(LEARNING_RATES):
            bwo_file_name = (
                '{0:s}/bwo_l2-weight={1:014.10f}_learning-rate={2:014.10f}_'
                'pmm.p'
            ).format(TOP_EXPERIMENT_DIR_NAME, l2_weight, learning_rate)

            print('Reading data from: "{0:s}"...'.format(bwo_file_name))
            bwo_dict = backwards_opt.read_file(bwo_file_name)[0]
            activation_matrix[row, col] = bwo_dict[
                backwards_opt.MEAN_FINAL_ACTIVATION_KEY
            ]

    # Tick labels are log10 of the hyperparameter values.
    x_tick_labels = ['{0:.1f}'.format(lr) for lr in numpy.log10(LEARNING_RATES)]
    y_tick_labels = ['{0:.1f}'.format(l2) for l2 in numpy.log10(L2_WEIGHTS)]

    axes_object = evaluation_utils.plot_scores_2d(
        score_matrix=activation_matrix,
        x_tick_label_strings=x_tick_labels, y_tick_label_strings=y_tick_labels,
        colour_map_object=COLOUR_MAP_OBJECT,
        min_colour_value=0., max_colour_value=1.)

    axes_object.set_xlabel(r'Learning rate (log$_{10}$)')
    axes_object.set_ylabel(r'L$_2$ weight (log$_{10}$)')

    plotting_utils.plot_linear_colour_bar(
        axes_object_or_matrix=axes_object, data_matrix=activation_matrix,
        colour_map_object=COLOUR_MAP_OBJECT, min_value=0., max_value=1.,
        orientation_string='vertical', extend_min=False, extend_max=False,
        font_size=FONT_SIZE)

    output_file_name = '{0:s}/mean_final_activations.jpg'.format(
        TOP_EXPERIMENT_DIR_NAME)

    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    pyplot.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight')
    pyplot.close()


if __name__ == '__main__':
    _run()
| {
"repo_name": "thunderhoser/GewitterGefahr",
"path": "gewittergefahr/interpretation_paper_2019/analyze_bwo_experiment.py",
"copies": "1",
"size": "3186",
"license": "mit",
"hash": 3920574737115734500,
"line_mean": 33.6304347826,
"line_max": 80,
"alpha_frac": 0.6572504708,
"autogenerated": false,
"ratio": 3.0256410256410255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9177735305473329,
"avg_score": 0.001031238193539396,
"num_lines": 92
} |
"""Analyzes backwards-optimization experiment."""
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from gewittergefahr.gg_utils import model_evaluation
from gewittergefahr.deep_learning import backwards_optimization as backwards_opt
from gewittergefahr.plotting import plotting_utils
# Hyperparameter grids searched in the experiment (log-spaced values).
L2_WEIGHTS = numpy.logspace(-4, 1, num=11)
MINMAX_WEIGHTS = numpy.logspace(-4, 1, num=11)

# Directory containing one backwards-optimization result file per
# (L2 weight, min-max weight) pair.
TOP_EXPERIMENT_DIR_NAME = (
    '/glade/p/univ/uokl0026/myrorss_experiment/conus_only/'
    'dropout=0.750_l2=0.003162_num-dense-layers=2_data-aug=1/testing/'
    'extreme_examples/unique_storm_cells/bwo_experiment_best_hits'
)

# Plotting settings.
COLOUR_MAP_OBJECT = pyplot.get_cmap('viridis')
FIGURE_RESOLUTION_DPI = 300
FONT_SIZE = 30

# Apply the single font size to every matplotlib text element.
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
def _run():
    """Analyzes backwards-optimization experiment.

    Reads the mean final activation for every (L2 weight, min-max weight)
    pair, plots the grid, and saves the figure.  This is effectively the
    main method.
    """
    activation_matrix = numpy.full(
        (len(L2_WEIGHTS), len(MINMAX_WEIGHTS)), numpy.nan
    )

    # Fill one matrix cell per hyperparameter pair.
    for row, l2_weight in enumerate(L2_WEIGHTS):
        for col, minmax_weight in enumerate(MINMAX_WEIGHTS):
            bwo_file_name = (
                '{0:s}/bwo_pmm_l2-weight={1:.10f}_minmax-weight={2:.10f}.p'
            ).format(TOP_EXPERIMENT_DIR_NAME, l2_weight, minmax_weight)

            print('Reading data from: "{0:s}"...'.format(bwo_file_name))
            bwo_dict = backwards_opt.read_file(bwo_file_name)[0]
            activation_matrix[row, col] = bwo_dict[
                backwards_opt.MEAN_FINAL_ACTIVATION_KEY
            ]

    # Tick labels are log10 of the hyperparameter values.
    x_tick_labels = ['{0:.1f}'.format(mw) for mw in numpy.log10(MINMAX_WEIGHTS)]
    y_tick_labels = ['{0:.1f}'.format(l2) for l2 in numpy.log10(L2_WEIGHTS)]

    axes_object = model_evaluation.plot_hyperparam_grid(
        score_matrix=activation_matrix,
        colour_map_object=COLOUR_MAP_OBJECT,
        min_colour_value=0., max_colour_value=1.
    )

    axes_object.set_xticklabels(x_tick_labels, rotation=90.)
    axes_object.set_yticklabels(y_tick_labels)
    axes_object.set_xlabel(r'Min-max weight (log$_{10}$)')
    axes_object.set_ylabel(r'L$_2$ weight (log$_{10}$)')

    plotting_utils.plot_linear_colour_bar(
        axes_object_or_matrix=axes_object, data_matrix=activation_matrix,
        colour_map_object=COLOUR_MAP_OBJECT, min_value=0., max_value=1.,
        orientation_string='vertical', extend_min=False, extend_max=False,
        font_size=FONT_SIZE
    )

    output_file_name = '{0:s}/mean_final_activations.jpg'.format(
        TOP_EXPERIMENT_DIR_NAME
    )

    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    pyplot.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI,
        pad_inches=0, bbox_inches='tight'
    )
    pyplot.close()


if __name__ == '__main__':
    _run()
| {
"repo_name": "thunderhoser/GewitterGefahr",
"path": "gewittergefahr/dissertation/myrorss/analyze_bwo_experiment.py",
"copies": "1",
"size": "3152",
"license": "mit",
"hash": 6157690709022115000,
"line_mean": 31.8333333333,
"line_max": 80,
"alpha_frac": 0.6583121827,
"autogenerated": false,
"ratio": 3.0076335877862594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4165945770486259,
"avg_score": null,
"num_lines": null
} |
"""Analyzes executables to find out which other executables anywhere
in the loaded libraries call that one. This is repeated recursively
to generate possible trees that end up in calling the one at the end.
"""
#NOTE: this was originally intended to augment the setting of breakpoints
#in the interactive debugging; I realized that only the explicitly
#specified pre-reqs ever get called first, so that we only need list those.
def _exec_callers(xinst, result):
    """Adds the dependency calls from the specified executable instance
    to the results dictionary.

    :param xinst: executable instance whose dependencies are scanned.
    :param result: dict mapping a target executable's name to the list of
      executable instances that call it (mutated in place).
    """
    # Each dependency with a resolved target records xinst as a caller of
    # that target, de-duplicated per target name.
    for depkey, depval in xinst.dependencies.items():
        if depval.target is not None:
            if depval.target.name in result:
                if xinst not in result[depval.target.name]:
                    result[depval.target.name].append(xinst)
            else:
                result[depval.target.name] = [xinst]
    # Recurse into executables nested inside this one.
    # NOTE(review): this iterates `xinst.executables` directly and unpacks
    # pairs, whereas _module_callers calls `module.executables()` as a
    # method — confirm against the fortpy API that an executable's
    # `executables` attribute yields (name, value) pairs when iterated.
    for xname, xvalue in xinst.executables:
        _exec_callers(xvalue, result)
def _module_callers(parser, modname, result):
    """Adds any calls to executables contained in the specified module.

    :param parser: code parser used to look up modules by name.
    :param modname: name of the module to process.
    :param result: dict mapping module name -> {target name: [callers]}
      (mutated in place); also serves as the visited-set for recursion.
    """
    if modname in result:
        #We have already processed this module.
        return

    module = parser.get(modname)
    mresult = {}
    if module is not None:
        # Collect caller information from every executable in the module.
        for xname, xinst in module.executables():
            _exec_callers(xinst, mresult)
        result[modname] = mresult
        # Recurse into the modules this one depends on; depkey may be of the
        # form 'module.executable', so keep only the module part.
        for depkey in module.dependencies:
            depmod = depkey.split('.')[0].lower()
            _module_callers(parser, depmod, result)
def tree(parser, startmod):
    """Build the call tree for every module in the library that is
    linked (directly or transitively) to the given starting module.

    :param parser: code parser used to look up modules by name.
    :param startmod: name of the module to start the traversal from.
    :return: dict mapping module name -> {target name: [callers]}.
    """
    calltree = {}
    _module_callers(parser, startmod, calltree)
    return calltree
def _call_fan(branch, calls, executable):
"""Appends a list of callees to the branch for each parent
in the call list that calls this executable.
"""
#Since we don't keep track of the specific logic in the executables
#it is possible that we could get a infinite recursion of executables
#that keep calling each other.
if executable in branch:
return
branch.append(executable)
if executable.name in calls:
for caller in calls[executable.name]:
twig = []
_call_fan(twig, calls, caller)
branch
def callers(parser, executable):
    """Returns the full call tree for the library containing the given
    executable's module.

    NOTE(review): the docstring originally promised "a list of
    'module.executable' that call the specified executable", but the
    function currently returns the whole tree() dict; the filtering logic
    is still commented out below.

    :param parser: code parser used to look up modules by name.
    :param executable: the executable whose callers are of interest.
    :return: dict mapping module name -> {target name: [callers]}.
    """
    calls = tree(parser, executable.module)
    return calls
    # if executable.name in calls:
    #     stack = calls[executable.name]
    #     result = []
    #     while len(stack) > 0:
    #         branch = [executable]
    #         caller = stack.pop()
    #         branch.append(caller)
    #         if caller.name in calls:
| {
"repo_name": "rosenbrockc/fortpy",
"path": "fortpy/stats/calltree.py",
"copies": "1",
"size": "2900",
"license": "mit",
"hash": -8595396519207858000,
"line_mean": 35.7088607595,
"line_max": 75,
"alpha_frac": 0.6420689655,
"autogenerated": false,
"ratio": 4.2212518195050945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009983222910788386,
"num_lines": 79
} |
# Analyze signal data in NIR image
import os
import cv2
import numpy as np
from . import print_image
from . import plot_image
from . import plot_colorbar
from . import binary_threshold
from . import apply_mask
def analyze_NIR_intensity(img, rgbimg, mask, bins, device, histplot=False, debug=None, filename=False):
    """This function calculates the intensity of each pixel associated with the plant and writes the values out to
       a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant.

    Inputs:
    img      = input image original NIR image
    rgbimg   = RGB NIR image
    mask     = mask made from selected contours
    bins     = number of classes to divide spectrum into
    device   = device number. Used to count steps in the pipeline
    histplot = if True plots histogram of intensity values
    debug    = None, print, or plot. Print = save to file, Plot = print to screen.
    filename = False or image name. If defined print image

    Returns:
    device       = device number
    hist_header  = NIR histogram data table headers
    hist_data    = NIR histogram data table values
    analysis_img = output image

    :param img: numpy array
    :param rgbimg: numpy array
    :param mask: numpy array
    :param bins: int
    :param device: int
    :param histplot: bool
    :param debug: str
    :param filename: str
    :return device: int
    :return hist_header: list
    :return hist_data: list
    :return analysis_img: list
    """
    device += 1

    # apply plant shaped mask to image (mask scaled to 0/1 then multiplied in)
    device, mask1 = binary_threshold(mask, 0, 255, 'light', device, None)
    mask1 = (mask1 / 255)
    masked = np.multiply(img, mask1)

    # calculate histogram over the intensity range of the image's dtype
    if img.dtype == 'uint16':
        maxval = 65536
    else:
        maxval = 256
    hist_nir, hist_bins = np.histogram(masked, bins, (1, maxval), False, None, None)

    # drop the last bin edge so bins and counts align one-to-one
    hist_bins1 = hist_bins[:-1]
    hist_bins2 = list(hist_bins1)
    hist_nir1 = list(hist_nir)

    # make hist percentage for plotting (relative to plant pixel count)
    pixels = cv2.countNonZero(mask1)
    hist_percent = (hist_nir / float(pixels)) * 100

    # report histogram data
    hist_header = [
        'HEADER_HISTOGRAM',
        'bin-number',
        'bin-values',
        'nir'
    ]

    hist_data = [
        'HISTOGRAM_DATA',
        bins,
        hist_bins2,
        hist_nir1
    ]

    analysis_img = []

    # make mask to select the background
    mask_inv = cv2.bitwise_not(mask)
    img_back = cv2.bitwise_and(rgbimg, rgbimg, mask=mask_inv)
    img_back1 = cv2.applyColorMap(img_back, colormap=1)

    # mask the background and color the plant with color scheme 'jet'
    cplant = cv2.applyColorMap(rgbimg, colormap=2)
    device, masked1 = apply_mask(cplant, mask, 'black', device, debug=None)
    cplant_back = cv2.add(masked1, img_back1)

    if filename:
        path = os.path.dirname(filename)
        fig_name = 'NIR_pseudocolor_colorbar.svg'
        # only create the shared colorbar once per output directory
        if not os.path.isfile(path + '/' + fig_name):
            plot_colorbar(path, fig_name, bins)

        fig_name_pseudo = (str(filename[0:-4]) + '_nir_pseudo_col.jpg')
        print_image(cplant_back, fig_name_pseudo)
        analysis_img.append(['IMAGE', 'pseudo', fig_name_pseudo])

    if debug is not None:
        if debug == "print":
            print_image(masked1, (str(device) + "_nir_pseudo_plant.jpg"))
            print_image(cplant_back, (str(device) + "_nir_pseudo_plant_back.jpg"))
        if debug == "plot":
            plot_image(masked1)
            plot_image(cplant_back)

    if histplot is True:
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt

        # plot hist percent
        plt.plot(hist_percent, color='green', label='Signal Intensity')
        plt.xlim([0, (bins - 1)])
        plt.xlabel(('Grayscale pixel intensity (0-' + str(bins) + ")"))
        plt.ylabel('Proportion of pixels (%)')

        if filename:
            fig_name_hist = (str(filename[0:-4]) + '_nir_hist.svg')
            plt.savefig(fig_name_hist)
            # BUG FIX: this append previously ran unconditionally, raising
            # NameError when histplot=True but filename=False, because
            # fig_name_hist is only defined in this branch.
            analysis_img.append(['IMAGE', 'hist', fig_name_hist])
        if debug == "print":
            plt.savefig((str(device) + "_nir_histogram.jpg"))
        if debug == "plot":
            plt.figure()
            plt.clf()

    return device, hist_header, hist_data, analysis_img
| {
"repo_name": "AntonSax/plantcv",
"path": "plantcv/analyze_NIR_intensity.py",
"copies": "1",
"size": "4387",
"license": "mit",
"hash": 92303856101619540,
"line_mean": 30.3357142857,
"line_max": 114,
"alpha_frac": 0.6156826989,
"autogenerated": false,
"ratio": 3.6376451077943615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9747213864781555,
"avg_score": 0.0012227883825612375,
"num_lines": 140
} |
# Analyze signal data in Thermal image
import os
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plotnine import labs
from plantcv.plantcv.visualize import histogram
from plantcv.plantcv import deprecation_warning
from plantcv.plantcv._debug import _debug
def analyze_thermal_values(thermal_array, mask, histplot=None, label="default"):
    """Extract per-pixel thermal statistics and a temperature histogram.

    Records max/min/mean/median temperature plus histogram frequencies as
    observations, and returns the histogram figure.

    Inputs:
    thermal_array = numpy array of thermal values
    mask          = Binary mask made from selected contours
    histplot      = deprecated; a histogram is always created
    label         = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_image = output image

    :param thermal_array: numpy.ndarray
    :param mask: numpy.ndarray
    :param histplot: bool
    :param label: str
    :return analysis_image: ggplot
    """
    if histplot is not None:
        deprecation_warning("'histplot' will be deprecated in a future version of PlantCV. "
                            "This function creates a histogram by default.")

    # Remember the caller's debug setting; histogram() below runs silently
    saved_debug = params.debug

    # Keep only the temperature values that fall under the mask
    plant_temps = thermal_array[np.where(mask > 0)]

    # Summary statistics over the masked pixels, in reporting order
    temperature_stats = [
        ('max_temp', 'maximum temperature', np.amax(plant_temps)),
        ('min_temp', 'minimum temperature', np.amin(plant_temps)),
        ('mean_temp', 'mean temperature', np.average(plant_temps)),
        ('median_temp', 'median temperature', np.median(plant_temps)),
    ]

    # Build the histogram without emitting debug output
    params.debug = None
    hist_fig, hist_data = histogram(thermal_array, mask=mask, hist_data=True)
    bin_labels = hist_data['pixel intensity'].tolist()
    hist_percent = hist_data['proportion of pixels (%)'].tolist()

    # Record every statistic as an observation
    for variable, trait, value in temperature_stats:
        outputs.add_observation(sample=label, variable=variable, trait=trait,
                                method='plantcv.plantcv.analyze_thermal_values', scale='degrees',
                                datatype=float, value=value, label='degrees')
    outputs.add_observation(sample=label, variable='thermal_frequencies', trait='thermal frequencies',
                            method='plantcv.plantcv.analyze_thermal_values', scale='frequency', datatype=list,
                            value=hist_percent, label=bin_labels)

    # Restore user debug setting
    params.debug = saved_debug

    # Relabel the histogram axes for temperature data
    hist_fig = hist_fig + labs(x="Temperature C", y="Proportion of pixels (%)")

    # Print or plot histogram
    _debug(visual=hist_fig, filename=os.path.join(params.debug_outdir, str(params.device) + "_therm_histogram.png"))

    analysis_image = hist_fig
    # Store images
    outputs.images.append(analysis_image)
    return analysis_image
| {
"repo_name": "danforthcenter/plantcv",
"path": "plantcv/plantcv/analyze_thermal_values.py",
"copies": "2",
"size": "3700",
"license": "mit",
"hash": -8001538498496774000,
"line_mean": 44.1219512195,
"line_max": 116,
"alpha_frac": 0.6697297297,
"autogenerated": false,
"ratio": 4.190260475651189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001953447517012068,
"num_lines": 82
} |
"""Analyzes part 2 of backwards-optimization experiment."""
import numpy
from gewittergefahr.deep_learning import backwards_optimization as backwards_opt
# Min-max weights tested in part 2 of the experiment (log-spaced values).
MINMAX_WEIGHTS = numpy.concatenate((
    numpy.logspace(-4, 1, num=26), numpy.logspace(1, 3, num=21)[1:]
))

# Directory containing one backwards-optimization result file per weight.
TOP_EXPERIMENT_DIR_NAME = (
    '/glade/work/ryanlage/prediction_paper_2019/gridrad_experiment/'
    'dropout=0.500_l2=0.010000_num-dense-layers=2_data-aug=1/testing/'
    'extreme_examples/unique_storm_cells/bwo_experiment_part2')
def _run():
    """Analyzes backwards-optimization experiment.

    Prints the mean final activation for each min-max weight.  This is
    effectively the main method.
    """
    for minmax_weight in MINMAX_WEIGHTS:
        bwo_file_name = '{0:s}/bwo_minmax-weight={1:014.10f}_pmm.p'.format(
            TOP_EXPERIMENT_DIR_NAME, minmax_weight
        )
        bwo_dict = backwards_opt.read_file(bwo_file_name)[0]
        mean_final_activation = bwo_dict[
            backwards_opt.MEAN_FINAL_ACTIVATION_KEY
        ]

        print((
            'Min-max weight = 10^{0:.1f} ... mean final activation = {1:.4f}'
        ).format(
            numpy.log10(minmax_weight), mean_final_activation
        ))


if __name__ == '__main__':
    _run()
| {
"repo_name": "thunderhoser/GewitterGefahr",
"path": "gewittergefahr/interpretation_paper_2019/analyze_bwo_experiment_part2.py",
"copies": "1",
"size": "1244",
"license": "mit",
"hash": 2791240383458750500,
"line_mean": 28.619047619,
"line_max": 80,
"alpha_frac": 0.6382636656,
"autogenerated": false,
"ratio": 3.1653944020356235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9302188343943678,
"avg_score": 0.00029394473838918284,
"num_lines": 42
} |
"""Analyzes posts to determine whether they are considered recipes."""
from time import time, sleep
from datetime import datetime
import praw
from src.Recipe import Recipe, PostInfo, RefinedPost
from src.RecipeHandler import RecipeHandler
from src import Analyzer
# Number of seconds in one day; used as both the scan interval and the
# minimum age for a submission to be analyzed.
DAY_SECONDS = 24 * 3600
class RedditAPI:
"""The Reddit API."""
def __init__(self):
self.reddit = praw.Reddit('ECAH Scraper by /u/ECAH_Scraper')
self.subreddit = self.reddit.get_subreddit('EatCheapAndHealthy')
self.old_submission_ids = []
self.fill_old_submissions()
self.recipe_handler = RecipeHandler()
while True:
try:
self.run_bot()
if datetime.now().isoweekday() == 1: # Monday
self.recipe_handler.post_weekly()
self.go_sleep(DAY_SECONDS)
except praw.errors.HTTPException:
print("Server is down! Gonna sleep for 30 mins til things are "
"fixed up!")
self.go_sleep(1800)
def fill_old_submissions(self):
"""Retrieves old submissions stored.
Used in case of Reddit's servers going down.
"""
with open('../misc/submission_IDs.txt') as submissions_list:
for line in submissions_list.readlines():
line = line[:-1]
self.old_submission_ids.append(line)
def run_bot(self):
"""Runs the script endlessly."""
print("Working...")
submissions = self.get_submissions()
for submission in submissions:
if submission.id not in self.old_submission_ids and \
int(time()) - submission.created > DAY_SECONDS:
try:
self.check_post(submission)
except AttributeError:
pass
comments = self.get_comments(submission)
for comment in comments:
try:
self.check_post(comment)
except AttributeError:
pass
self.add_checked_submission(submission.id)
def get_submissions(self):
"""Retrieves a set of new submissions that are older than 1 day."""
return self.subreddit.get_hot(limit=None)
def get_comments(self, submission):
"""Retrieves a set of comments pertaining to a submission.
:param submission: The submission to get the comments from.
"""
submission.replace_more_comments(limit=None, threshold=0)
all_comments = praw.helpers.flatten_tree(submission.comments)
return all_comments
def go_sleep(self, length_time):
"""Puts the program to sleep for a set amount of time.
:param length_time: The amount of time the program should sleep.
"""
print("Sleeping for", length_time, "seconds")
sleep(length_time)
def is_recipe(self, content):
"""
Calls Analyzer method to check if a body of text has the qualities of a
recipe.
:param content: The body of text to analyze.
"""
return Analyzer.determine_if_recipe(content)
def check_post(self, post):
"""Checks a post for qualities of a recipe.
:param post: The post to get the comments from.
"""
if isinstance(post, praw.objects.Submission):
content = post.selftext
url = post.permalink
else: # Comment
content = post.body
submission_id = post.link_id[3:]
parent_post = self.reddit.get_submission(
submission_id=submission_id)
url = parent_post.permalink + post.id
clean_content = Analyzer.clean_up(content)
if self.is_recipe(clean_content):
print("Got a recipe!! Mama mia! " + str(datetime.now()))
all_text = self.get_all_text(post)
author = post.author.name
karma = post.score
date_posted = post.created
post_id = post.id
title = Analyzer.determine_title(post)
ingredients = Analyzer.get_ingredients(content)
instructions = Analyzer.get_instructions(content)
recipe_type = Analyzer.determine_type(all_text)
post_info = PostInfo(author, karma, date_posted, post_id, url)
refined_post = RefinedPost(title, ingredients, instructions,
recipe_type)
recipe = Recipe(post_info, refined_post)
self.recipe_handler.add(recipe)
def add_checked_submission(self, submission_id):
"""
Records a submission id when it was already scanned to prevent the
bot from scanning it again.
:param submission_id: The submission to record.
"""
self.old_submission_ids.append(submission_id)
with open('../misc/submission_IDs.txt', 'a') as submissions:
submissions.write(submission_id + '\n')
def get_all_text(self, post):
    """Collect all the text of a submission thread, including comments.

    If a comment is passed, its parent submission is looked up first.

    :param post: The comment or submission whose thread text is wanted.
    :return: A cleaned-up version of all the text within the thread,
        ready for further analysis.
    """
    # Resolve a comment to its parent submission.
    if isinstance(post, praw.objects.Comment):
        submission_id = post.link_id[3:]
        post = self.reddit.get_submission(submission_id=submission_id)
    # BUG FIX: the old code discarded the result of every ''.join(...)
    # call, so the accumulated text was always the empty string.
    # Collect the pieces in a list and join them once at the end.
    pieces = [post.title + '\n', post.selftext + '\n']
    for comment in self.get_comments(post):
        pieces.append(comment.body + '\n')
    all_text = ''.join(pieces)
    return Analyzer.clean_up(all_text)
| {
"repo_name": "IgorGee/Eat-Cheap-And-Healthy-Recipe-Centralizer",
"path": "src/Scraper.py",
"copies": "1",
"size": "6041",
"license": "mit",
"hash": -2729081771586332000,
"line_mean": 33.9190751445,
"line_max": 79,
"alpha_frac": 0.5868233736,
"autogenerated": false,
"ratio": 4.290482954545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 173
} |
# Analyze stem characteristics
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
def analyze_stem(rgb_img, stem_objects, label="default"):
    """ Calculate angle of segments (in degrees) by fitting a linear regression line to segments.

    Records stem height, angle (as a slope value) and path length as
    observations via plantcv's outputs object.

    Inputs:
    rgb_img       = RGB image to plot debug image
    stem_objects  = List of stem segments (output from segment_sort function)
    label         = optional label parameter, modifies the variable name of observations recorded

    Returns:
    labeled_img   = Stem analysis debugging image

    :param rgb_img: numpy.ndarray
    :param stem_objects: list
    :param label: str
    :return labeled_img: numpy.ndarray
    """
    params.device += 1
    labeled_img = np.copy(rgb_img)
    img_x, img_y, _ = np.shape(labeled_img)
    # Stack all stem segments into one point array so they are analyzed
    # as a single object.
    grouped_stem = np.vstack(stem_objects)
    # Find vertical height of the stem by measuring bounding box
    stem_x, stem_y, width, height = cv2.boundingRect(grouped_stem)
    # Calculate stem angle
    # cv2.fitLine yields a unit direction (vx, vy) and a point (x, y);
    # the sign flip presumably converts from image coordinates (y grows
    # downward) to a conventional slope -- TODO confirm.
    [vx, vy, x, y] = cv2.fitLine(grouped_stem, cv2.DIST_L2, 0, 0.01, 0.01)
    slope = -vy / vx
    # Calculate stem path length
    # NOTE(review): arcLength of the combined points is halved here,
    # presumably because the closed traversal covers the path twice --
    # confirm against segment ordering.
    stem_length = cv2.arcLength(grouped_stem, False) / 2
    outputs.add_observation(sample=label, variable='stem_height', trait='vertical length of stem segments',
                            method='plantcv.plantcv.morphology.analyze_stem', scale='pixels', datatype=float,
                            value=height, label=None)
    outputs.add_observation(sample=label, variable='stem_angle', trait='angle of combined stem object',
                            method='plantcv.plantcv.morphology.analyze_stem', scale='degrees', datatype=float,
                            value=float(slope), label=None)
    outputs.add_observation(sample=label, variable='stem_length', trait='path length of combined stem object',
                            method='plantcv.plantcv.morphology.analyze_stem', scale='None', datatype=float,
                            value=stem_length, label=None)
    if params.debug is not None:
        # Draw culm_height
        cv2.line(labeled_img, (int(stem_x), stem_y), (int(stem_x), stem_y + height), (0, 255, 0), params.line_thickness)
        # Draw combined stem angle
        x_min = 0  # Set bounds for regression lines to get drawn
        x_max = img_x
        intercept1 = int(((x - x_min) * slope) + y)
        intercept2 = int(((x - x_max) * slope) + y)
        # Near-vertical fits produce huge slopes that cannot be drawn.
        if slope > 1000000 or slope < -1000000:
            print("Slope is ", slope, " and cannot be plotted.")
        else:
            cv2.line(labeled_img, (x_max - 1, intercept2), (x_min, intercept1), (0, 0, 255), 1)
    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + 'stem_analze.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)
    return labeled_img
| {
"repo_name": "stiphyMT/plantcv",
"path": "plantcv/plantcv/morphology/analyze_stem.py",
"copies": "2",
"size": "3055",
"license": "mit",
"hash": 4460195969054543400,
"line_mean": 42.0281690141,
"line_max": 120,
"alpha_frac": 0.6271685761,
"autogenerated": false,
"ratio": 3.7576875768757687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5384856152975769,
"avg_score": null,
"num_lines": null
} |
'''Analyzes the visualize.py output'''
from numpy import log
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt
import tkinter as tk
from tkinter import filedialog

# Ask the user to pick the saved-results file via a native file dialog;
# the empty Tk root window is hidden so only the dialog appears.
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
root.destroy()
# The .npy file wraps a dict in a 0-d object array; .item() unwraps it.
saved_results = dict(np.load(file_path).item())
to_remove = []
# Plot the raw, unfiltered data on log-log axes first.
plt.scatter(log(list(saved_results.keys())), log(
    list(saved_results.values())), s=1, label="Original data", color='red')
# Drop keys outside [10, 10**4] before fitting (collected first because
# a dict cannot be mutated while iterating its keys).
for key in saved_results.keys():
    if key > 10 ** 4 or key < 10:
        to_remove.append(key)
for key in to_remove:
    saved_results.pop(key)
x_data = list(saved_results.keys())
y_data = list(saved_results.values())
x_data = log(x_data)
y_data = log(y_data)
# Initial guess (a, b) and a tiny uniform sigma for the curve fit.
x0 = np.array([0, 1])
sigma = np.array([0.00000000001 for i in range(len(x_data))])
print(len(x_data))
def func(x, a, b):
    """Linear model for the log-log fit: returns a + b * x."""
    return b * x + a
# Fit the linear model to the log-log data; result[0] holds (a, b).
result = optimize.curve_fit(func, x_data, y_data, x0, sigma)
print(result)
# Overlay the fitted line on the scatter plot made above.
plt.plot(x_data, result[0][0] + result[0][1] * x_data,
         label="a= " + str(result[0][0]) + " b= " + str(result[0][1]))
plt.legend()
plt.show()
| {
"repo_name": "BehzadE/Sandpile",
"path": "analyze.py",
"copies": "1",
"size": "1110",
"license": "mit",
"hash": -3308559506646186000,
"line_mean": 24.8139534884,
"line_max": 75,
"alpha_frac": 0.6630630631,
"autogenerated": false,
"ratio": 2.817258883248731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3980321946348731,
"avg_score": null,
"num_lines": null
} |
""" Analyzes the word frequencies in a book downloaded from
Project Gutenberg """
import string as s
import re
def get_word_list(file_name):
    ''' Reads the specified project Gutenberg book. Header comments,
    punctuation, and whitespace are stripped away. The function
    returns a list of the words used in the book as a list.
    All words are converted to lower case.
    '''
    f = open(file_name, 'r')
    lines = f.readlines()
    curr_line = 0
    fin_line = 0
    # find start and end line of actual text
    while (lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1):
        curr_line += 1
    while (lines[fin_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1):
        fin_line += 1
    # Keep only the body between the Gutenberg start/end markers.
    lines = lines[curr_line+1:fin_line]
    words = []
    for line in lines:
        # s.split / s.lower are the legacy Python 2 string-module
        # helpers (the module is imported as `s`).
        line_list = s.split(line)
        words.append(line_list)
    # Flatten the per-line word lists, lower-case, then strip a fixed
    # set of punctuation characters from each word.
    flattened = [ele for sublist in words for ele in sublist]
    lowered = [s.lower(ele) for ele in flattened]
    stripped = [re.sub("[()/\,';.?!-]", '', ele) for ele in lowered]
    return stripped
def get_top_n_words(word_list, n):
    ''' Takes a list of words as input and returns a list of the n most
    frequently occurring words ordered from most to least frequently
    occurring.

    word_list: a list of words (assumed to all be in lower case with no
               punctuation)
    n: the number of words to return
    returns: a list of the n most frequent words, most frequent first
    '''
    counts = {}
    for entry in word_list:
        counts[entry] = counts.get(entry, 0) + 1
    ranked = sorted(counts, key=counts.get, reverse=True)
    return ranked[:n]
def unique_words(word_list):
    ''' Takes a list of words as input and returns the number of unique
    words and the ratio of unique words to total words.

    word_list: a list of words (assumed to all be in lower case with no
               punctuation)
    returns: [unique_word_count, unique_to_total_ratio]; an empty input
             yields [0, 0.0] instead of dividing by zero
    '''
    # Guard: the original raised ZeroDivisionError on an empty list.
    if not word_list:
        return [0, 0.0]
    # A set gives the unique count directly; the original built a full
    # frequency dict and sorted it just to take its length.
    unique_count = len(set(word_list))
    total = len(word_list)
    return [unique_count, float(unique_count) / total]
def print_func(text, most):
    ''' Print various results of this script

    text: file name of the book to analyze (the ".txt" suffix is
          dropped in the printed output)
    most: when True, print the first five entries of the frequency
          ranking; otherwise print freq[-5:-1]
    NOTE(review): reads the module-level variable n (set in the
    __main__ block) for the ranking size; freq[-5:-1] yields only four
    words -- confirm whether the last word was meant to be excluded.
    '''
    words = get_word_list(text)
    freq = get_top_n_words(words, n)
    unique = unique_words(words)
    if most:
        print text[:-4], freq[:5], unique
    else:
        print text[:-4], freq[-5:-1], unique
if __name__ == "__main__":
    # n is read as a global by print_func; it is set large so the
    # ranking effectively covers every word in the book.
    n = 100000
    print_func('Ulysses.txt', False)
    print_func('Portrait.txt', False)
    print_func('Proust.txt', True)
    print_func('Voyage.txt', True)
    print_func('NightDay.txt', True)
print_func('Oz.txt', False) | {
"repo_name": "nshlapo/SoftwareDesignFall15",
"path": "toolbox/word_frequency_analysis/frequency.py",
"copies": "1",
"size": "3043",
"license": "mit",
"hash": 70296990955436800,
"line_mean": 29.7474747475,
"line_max": 82,
"alpha_frac": 0.6201117318,
"autogenerated": false,
"ratio": 3.6097271648873073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47298388966873073,
"avg_score": null,
"num_lines": null
} |
""" Analyzes the word frequencies in a book downloaded from
Project Gutenberg """
import string
file_name = "pg32325.txt"
#print lines
punctuation = string.punctuation
whitespace = ['\t','\n','\x0b','\x0c','\r',' ']
def get_word_list(file_name):
    """ Reads the specified project Gutenberg book and returns its words.

    Only the 50 lines starting at the 'CONTENT' marker line are used
    (preserving the original window).  Punctuation is stripped and all
    words are converted to lower case.

    file_name: path of the book's plain-text file
    returns: list of lower-cased, punctuation-free words
    """
    with open(file_name, 'r') as f:
        lines = f.readlines()
    first_line = 0
    last_line = 0
    # Locate the content marker; the 'THE END' scan is kept for parity
    # with the original (it raises IndexError if the marker is absent).
    while lines[first_line].find('CONTENT') == -1:
        first_line += 1
    while lines[last_line].find('THE END') == -1:
        last_line += 1
    lines = lines[first_line:first_line+50]
    # FIX: the original called str.replace with a single argument,
    # used append[...] indexing instead of a call, referenced the
    # undefined name lineslist2, and never returned anything.  Strip
    # punctuation character by character and collect the words instead.
    words = []
    for line in lines:
        for ch in string.punctuation:
            line = line.replace(ch, '')
        words.extend(line.lower().split())
    return words
get_word_list(file_name)
def get_top_n_words(word_list, n):
    """ Takes a list of words as input and returns a list of the n most frequently
    occurring words ordered from most to least frequently occurring.

    word_list: a list of words (assumed to all be in lower case with no
               punctuation)
    n: the number of words to return
    returns: a list of n most frequently occurring words ordered from most
             frequently to least frequently occurring
    """
    # FIX: the original body was an unimplemented `pass` stub; this
    # implements the documented contract.
    counts = {}
    for word in word_list:
        counts[word] = counts.get(word, 0) + 1
    return sorted(counts, key=counts.get, reverse=True)[:n]
| {
"repo_name": "SKim4/SoftwareDesignFall15",
"path": "word_frequency_analysis/example.py",
"copies": "1",
"size": "2266",
"license": "mit",
"hash": 7546329251216296000,
"line_mean": 25.6588235294,
"line_max": 82,
"alpha_frac": 0.5962047661,
"autogenerated": false,
"ratio": 3.6666666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47628714327666666,
"avg_score": null,
"num_lines": null
} |
""" Analyzes the word frequencies in a book downloaded from
Project Gutenberg """
import string
from collections import Counter
def get_word_list(file_name):
    """ Reads the specified project Gutenberg book. Header comments,
    punctuation, and whitespace are stripped away. The function
    returns a list of the words used in the book as a list.
    All words are converted to lower case.

    file_name: path of the book's plain-text file
    returns: list of lower-cased, punctuation-free words
    """
    # FIX: the original opened the module-level global `filename`
    # instead of the `file_name` parameter, and never closed the file.
    with open(file_name, 'r') as td:
        lines = td.readlines()
    # Strip the Gutenberg header (everything up to the START marker)...
    curr_line = 0
    while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:
        curr_line += 1
    lines = lines[curr_line+1:]
    # ...and the footer (everything from the END marker on), scanning
    # backwards from the end of the file.
    curr_line2 = -1
    while lines[curr_line2].find("END OF THIS PROJECT GUTENBERG EBOOK") == -1:
        curr_line2 -= 1
    lines = lines[:curr_line2]
    stripped = []
    words = []
    for i in range(len(lines)):
        ls = lines[i].rstrip()
        lslower = ls.lower()
        # Remove every punctuation character (portable replacement for
        # the Python-2-only string.maketrans/translate deletechars form).
        output = lslower
        for ch in string.punctuation:
            output = output.replace(ch, '')
        stripped.append(output)
    for i in range(len(stripped)):
        words.extend(stripped[i].split())
    return words
def get_top_n_words(word_list, n):
    """ Takes a list of words as input and returns a list of the n most
    frequently occurring words ordered from most to least frequently
    occurring.

    word_list: a list of words (assumed to all be in lower case with no
               punctuation)
    n: the number of words to return
    returns: the n most frequent words, most frequent first
    """
    ranked_pairs = Counter(word_list).most_common(n)
    return [word for word, _count in ranked_pairs]
# Driver: build the word list for the configured book and print the
# 100 most common words.
filename = 'the_defenders.txt'
DefendersList = get_word_list(filename)
top100 = get_top_n_words(DefendersList, 100)
print top100
| {
"repo_name": "bozzellaj/SoftwareDesignFall15",
"path": "WordFreqToolbox/myfrequency.py",
"copies": "1",
"size": "1752",
"license": "mit",
"hash": -3558886447493559300,
"line_mean": 25.9538461538,
"line_max": 79,
"alpha_frac": 0.7100456621,
"autogenerated": false,
"ratio": 3.1624548736462095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43725005357462093,
"avg_score": null,
"num_lines": null
} |
""" Analyzes the word frequencies in a book downloaded from
Project Gutenberg """
import string
def get_word_list(file_name):
    """ Reads the specified project Gutenberg book. Header comments,
    punctuation, and whitespace are stripped away. The function
    returns a list of the words used in the book as a list.
    All words are converted to lower case.
    """
    word_list =[]
    # loading the file and stripping away the header and bottom comment
    f = open(file_name,'r')
    lines = f.readlines()
    header_line = 0
    while lines[header_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:
        header_line += 1
    end_line = header_line
    while lines[end_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:
        end_line += 1
    # Keep only the body between the Gutenberg start/end marker lines.
    lines = lines[header_line+1:end_line-1]
    # Get rid of all the non-text stuff
    for line in lines:
        # replace non-text with spaces before splitting
        for val in string.punctuation:
            line = line.replace(str(val), ' ')
        line = line.lower()
        line = line.split()
        for word in line:
            word_list.append(word)
    print "Total number of words: " + str(len(word_list))
    return word_list
def get_top_n_words(word_list, n):
    """ Takes a list of words as input and returns a list of the n most frequently
    occurring words ordered from most to least frequently occurring.

    word_list: a list of words (assumed to all be in lower case with no
    punctuation
    n: the number of words to return
    returns: a list of n most frequently occurring words ordered from most
    frequently to least frequentlyoccurring
    """
    word_counts = {}
    for word in word_list:
        # NOTE(review): dict.has_key() is Python 2 only; `word in
        # word_counts` is the portable spelling.
        if word_counts.has_key(word):
            word_counts[word] +=1
        else:
            word_counts.update({word:1})
    print 'Number of different words: ' + str(len(word_counts))
    # Return a sorted list of the word frequency from the most to the least
    ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)
    return ordered_by_frequency[0:n]
if __name__ == '__main__':
word_list = get_word_list('sherlock.txt')
print get_top_n_words(word_list,100) | {
"repo_name": "SelinaWang/SoftwareDesignFall15",
"path": "toolbox/word_frequency_analysis/frequency.py",
"copies": "1",
"size": "2034",
"license": "mit",
"hash": 1181789207801207300,
"line_mean": 31.8225806452,
"line_max": 79,
"alpha_frac": 0.709439528,
"autogenerated": false,
"ratio": 3.183098591549296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4392538119549296,
"avg_score": null,
"num_lines": null
} |
""" Analyzes the word frequencies in a book downloaded from
Project Gutenberg """
import string
punctuation = string.punctuation
def get_word_list(file_name):
    """ Reads the specified project Gutenberg book and returns its words.

    The 200 lines starting at the 'CONTENT' marker line are used
    (preserving the original window).  Punctuation is stripped from
    every word and the result is lower-cased.

    file_name: path of the book's plain-text file
    returns: list of lower-cased, punctuation-free words
    """
    # FIX: close the file when done (the original leaked the handle).
    with open(file_name, 'r') as f:
        lines = f.readlines()
    first_line = 0
    last_line = 0
    while lines[first_line].find('CONTENT') == -1:
        first_line += 1
    # Kept for parity with the original: scans for the closing marker
    # (and, like the original, raises IndexError when it is missing)
    # even though the position is not used afterwards.
    while lines[last_line].find('THE END') == -1:
        last_line += 1
    lines = lines[first_line:first_line+200]
    # Split the window into raw words.
    raw_words = []
    for line in lines:
        raw_words.extend(line.split())
    # Strip every punctuation character, then lower-case each word.
    # (The original removed the characters two at a time through three
    # temporary lists; one character-by-character pass is equivalent.)
    cleaned = []
    for word in raw_words:
        for ch in string.punctuation:
            word = word.replace(ch, '')
        cleaned.append(word.lower())
    return cleaned
def get_top_n_words(word_list,n):
    """ Takes a list of words as input and returns a list of the n most frequently
    occurring words ordered from most to least frequently occurring.

    word_list: a list of words (assumed to all be in lower case with no
    punctuation
    n: the number of words to return
    returns: a list of n most frequently occurring words ordered from most
    frequently to least frequentlyoccurring
    """
    word_counts = dict()
    for c in word_list:
        if c not in word_counts:
            word_counts[c] = 1
        else:
            word_counts[c] += 1
    ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)
    # NOTE(review): contrary to the docstring, the top-n words are
    # printed (Python 2 print statements) rather than returned; an
    # IndexError occurs when n exceeds the number of distinct words.
    for i in range(n):
        print ordered_by_frequency[i]
| {
"repo_name": "SKim4/SoftwareDesignFall15",
"path": "word_frequency_analysis/frequency2.py",
"copies": "2",
"size": "1921",
"license": "mit",
"hash": 3521579035444359700,
"line_mean": 21.869047619,
"line_max": 79,
"alpha_frac": 0.6876626757,
"autogenerated": false,
"ratio": 3.0736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47612626756999993,
"avg_score": null,
"num_lines": null
} |
"""Analyze stock market data."""
import math
import click
import matplotlib.pyplot as plt
import pandas as pd
@click.command()
@click.option("-f", "--file", "filepath", required=True)
def main(filepath):
    """
    Analyze a historic-quotes CSV export and plot per-weekday behavior.

    Download data via
    https://www.fondscheck.de/quotes/historic?boerse_id=5&secu=123822833&page=23
    """
    # German CSV export: ';' separator, '.' thousands, ',' decimal comma.
    df = pd.read_csv(filepath, sep=";", thousands=".", decimal=",")
    df["Datum"] = pd.to_datetime(df["Datum"], format="%Y-%m-%d")
    df = df.sort_values(by="Datum").reset_index()
    print(df)
    print(f"Cummulative Volume: {df.Volumen.sum()}")
    # Index by date and derive calendar columns for grouping.
    # NOTE(review): DatetimeIndex.week is deprecated in newer pandas
    # (use .isocalendar().week) -- confirm the pinned pandas version.
    data = df.set_index("Datum")
    data["year"] = data.index.year
    data["month"] = data.index.month
    data["week"] = data.index.week
    data["weekday"] = data.index.weekday
    data = data[["year", "month", "week", "weekday", "Schlusskurs"]].to_records(
        index=False
    )
    # Group into weeks:
    # week_data: "year-month-week" -> {weekday: closing price}
    week_data = {}
    for el in data:
        year_month_week = "{}-{}-{}".format(el[0], el[1], el[2])
        weekday = el[3]
        if year_month_week not in week_data:
            week_data[year_month_week] = {}
        week_data[year_month_week][weekday] = el[4]
    # Normalize
    for year_month_week, weekday_value_dict in week_data.items():
        # Fill missing weekdays with NaN so every week has entries 0..5.
        for i in range(6):
            if i not in weekday_value_dict:
                # print("Could not find weekday {}".format(i))
                weekday_value_dict[i] = float("nan")
        weekday_values = sorted(weekday_value_dict.items())
        # Take the first non-nan value
        i = 0
        v0 = weekday_values[i][1]
        while math.isnan(v0) and i < len(weekday_values):
            v0 = weekday_values[i][1]
            i += 1
        # Express each weekday (Mon-Fri only) relative to that baseline.
        weekday_values = [value - v0 for key, value in weekday_values if key < 5]
        week_data[year_month_week] = weekday_values
    # Convert it back to a dataframe
    data = []
    for year_month_week, weekday_values in week_data.items():
        assert len(weekday_values) == 5, len(weekday_values)
        row = [str(year_month_week)] + weekday_values
        data.append(row)
    df = pd.DataFrame(data)
    df.columns = ["week", 0, 1, 2, 3, 4]
    # df = df.set_index('week')
    # df = df.T.reset_index()
    print(df)
    # data_by_day = data.resample('d').mean() #.set_index(['year', 'week', 'day']).unstack(['year', 'week'])
    # data_by_day['hash_rate'].plot()
    # data_by_day['Schlusskurs'].plot()
    # multiple line plot
    # Draw one line per week; weeks with non-numeric values are skipped.
    for data in df.to_records(index=False).tolist():
        key = data[0]
        values = data[1:]
        try:
            plt.plot(values, marker="", linewidth=1, alpha=0.9)
        except TypeError:
            pass
    # Add legend
    plt.legend(loc=2, ncol=2)
    # Add titles
    plt.title(
        "A (bad) Spaghetti plot", loc="left", fontsize=12, fontweight=0, color="orange"
    )
    plt.xlabel("Weekday")
    plt.ylabel("Value")
    plt.show()
    # Per-weekday histograms of the normalized values, plus their means.
    for i in range(5):
        df.hist(column=i)
        print(df[i].mean())
    plt.show()
# Invoke the click command; click reads the arguments from sys.argv.
main()
| {
"repo_name": "MartinThoma/algorithms",
"path": "ML/stock-quotes/quote_analysis.py",
"copies": "1",
"size": "3009",
"license": "mit",
"hash": -6400631657040882000,
"line_mean": 28.7920792079,
"line_max": 109,
"alpha_frac": 0.5689597873,
"autogenerated": false,
"ratio": 3.242456896551724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4311416683851724,
"avg_score": null,
"num_lines": null
} |
# Analyze the POS and NER results, look for correlations between them
import numpy as np
import matplotlib.pyplot as plt
import operator
class Token:
    """One token with its predicted and gold POS/NER tags."""

    def __init__(self, word, postag, gold_postag, nertag, gold_nertag):
        self.word = word
        self.postag, self.gold_postag = postag, gold_postag
        self.nertag, self.gold_nertag = nertag, gold_nertag
class Sentence:
    """An ordered list of Token objects forming one sentence."""
    def __init__(self, tokens):
        self.tokens = tokens
class Document:
    """A parsed tagger-output file: sentences plus error buckets.

    The *_incorrect lists start empty and are filled by collate_errors().
    """
    def __init__(self, sentences):
        self.sentences = sentences
        # Error buckets (lists of Token), populated by collate_errors():
        self.ner_incorrect = []
        self.pos_incorrect = []
        self.ner_incorrect_pos_correct = []
        self.ner_correct_pos_incorrect = []
        self.both_incorrect = []
def read_output_file(filename):
    """Parse a tagger output file into a Document.

    Each non-blank line holds five '~*~'-separated fields:
    word, predicted POS, gold POS, predicted NER, gold NER.
    Blank lines separate sentences.

    :param filename: Path of the tagger output file.
    :return: Document containing one Sentence per block of lines.
    """
    sentences = []
    tokens = []
    # FIX: use a context manager so the file handle is closed.
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            if line == "":
                sentences.append(Sentence(tokens))
                tokens = []
            else:
                spline = line.split('~*~')
                tokens.append(Token(spline[0], spline[1], spline[2],
                                    spline[3], spline[4]))
    # FIX: keep the final sentence even when the file lacks a trailing
    # blank line (the old code silently dropped it).
    if tokens:
        sentences.append(Sentence(tokens))
    return Document(sentences)
def collate_errors(doc):
    """Walk every token in *doc* and file it into the error buckets.

    A token goes into doc.ner_incorrect / doc.pos_incorrect when its
    predicted tag differs from the gold tag, and additionally into the
    combined buckets for the correct/incorrect cross cases.
    """
    for sentence in doc.sentences:
        for tok in sentence.tokens:
            pos_ok = tok.postag == tok.gold_postag
            ner_ok = tok.nertag == tok.gold_nertag
            if not ner_ok:
                doc.ner_incorrect.append(tok)
            if not pos_ok:
                doc.pos_incorrect.append(tok)
            if ner_ok and not pos_ok:
                doc.ner_correct_pos_incorrect.append(tok)
            if pos_ok and not ner_ok:
                doc.ner_incorrect_pos_correct.append(tok)
            if not (ner_ok or pos_ok):
                doc.both_incorrect.append(tok)
def report_errors_4key(errors):
    """Tally errors keyed by the full POS/NER tag combination.

    :param errors: Iterable of tokens to count.
    :return: List of ("postag gold_postag nertag gold_nertag", count)
        pairs, most frequent first.
    """
    report = {}
    for e in errors:
        key = "{0} {1} {2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
        if key not in report:
            report[key] = 0
        report[key] += 1
    # FIX: dict.iteritems() is Python 2 only; items() works everywhere.
    # The sort-then-reverse pattern is kept so tie ordering is unchanged.
    sorted_report = sorted(report.items(), key=operator.itemgetter(1))
    sorted_report.reverse()
    return sorted_report
def report_errors_nerkey(errors):
    """Tally errors keyed by the NER tag pair only.

    :param errors: Iterable of tokens to count.
    :return: List of ("nertag gold_nertag", count) pairs, most frequent
        first.
    """
    report = {}
    for e in errors:
        key = "{2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
        if key not in report:
            report[key] = 0
        report[key] += 1
    # FIX: dict.iteritems() is Python 2 only; items() works everywhere.
    # The sort-then-reverse pattern is kept so tie ordering is unchanged.
    sorted_report = sorted(report.items(), key=operator.itemgetter(1))
    sorted_report.reverse()
    return sorted_report
def report_errors_poskey(errors):
    """Tally errors keyed by the POS tag pair only.

    :param errors: Iterable of tokens to count.
    :return: List of ("postag gold_postag", count) pairs, most frequent
        first.
    """
    report = {}
    for e in errors:
        key = "{0} {1}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
        if key not in report:
            report[key] = 0
        report[key] += 1
    # FIX: dict.iteritems() is Python 2 only; items() works everywhere.
    # The sort-then-reverse pattern is kept so tie ordering is unchanged.
    sorted_report = sorted(report.items(), key=operator.itemgetter(1))
    sorted_report.reverse()
    return sorted_report
def write_report(filename, report):
    """Write one tab-separated (key, count) line per report entry.

    :param filename: Destination path (overwritten if it exists).
    :param report: Iterable of (key, count) tuples.
    """
    # FIX: use a context manager so the file is flushed and closed.
    with open(filename, 'w') as f:
        for tok in report:
            f.write("\t".join(map(str, tok)) + "\n")
# Driver: parse the tagged output, bucket the errors, and write one
# report file per bucket, keyed three ways (full 4-tuple, NER-only,
# POS-only).
doc = read_output_file("ner-tag-output.txt")
collate_errors(doc)
write_report("both_incorrect.txt", report_errors_4key(doc.both_incorrect))
write_report("ner_incorrect.txt", report_errors_4key(doc.ner_incorrect))
write_report("pos_incorrect.txt", report_errors_4key(doc.pos_incorrect))
write_report("ner_correct_pos_incorrect.txt", report_errors_4key(doc.ner_correct_pos_incorrect))
write_report("ner_incorrect_pos_correct.txt", report_errors_4key(doc.ner_incorrect_pos_correct))
write_report("both_incorrect_nerkey.txt", report_errors_nerkey(doc.both_incorrect))
write_report("ner_incorrect_nerkey.txt", report_errors_nerkey(doc.ner_incorrect))
write_report("pos_incorrect_nerkey.txt", report_errors_nerkey(doc.pos_incorrect))
write_report("ner_correct_pos_incorrect_nerkey.txt", report_errors_nerkey(doc.ner_correct_pos_incorrect))
write_report("ner_incorrect_pos_correct_nerkey.txt", report_errors_nerkey(doc.ner_incorrect_pos_correct))
write_report("both_incorrect_poskey.txt", report_errors_poskey(doc.both_incorrect))
write_report("ner_incorrect_poskey.txt", report_errors_poskey(doc.ner_incorrect))
write_report("pos_incorrect_poskey.txt", report_errors_poskey(doc.pos_incorrect))
write_report("ner_correct_pos_incorrect_poskey.txt", report_errors_poskey(doc.ner_correct_pos_incorrect))
write_report("ner_incorrect_pos_correct_poskey.txt", report_errors_poskey(doc.ner_incorrect_pos_correct))
| {
"repo_name": "strubell/nlp-class-proj",
"path": "analysis.py",
"copies": "1",
"size": "4594",
"license": "apache-2.0",
"hash": 8647015144497817000,
"line_mean": 33.0296296296,
"line_max": 105,
"alpha_frac": 0.6963430562,
"autogenerated": false,
"ratio": 2.7247924080664294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8777868324378513,
"avg_score": 0.028653427977583454,
"num_lines": 135
} |
"""Analyze the tdc data from simulation. Usefull for tdc data comparison with real measurement.
"""
import tables as tb
import numpy as np
import progressbar
import os
import math
import sys
import glob
from pybar.analysis.analyze_raw_data import AnalyzeRawData
import pybar.scans.analyze_source_scan_tdc_data as tdc_analysis
def analyze_hits(input_file_hits):
    """Interpret a hit table and create the standard source-scan output.

    Writes the analyzed data next to the input as '<input>_analyzed.h5'
    together with a PDF containing the plotted histograms.

    :param input_file_hits: Path to the hit data file (HDF5).
    """
    with AnalyzeRawData(raw_data_file=None, analyzed_data_file=input_file_hits) as analyze_raw_data:
        # Enable the tables/histograms needed for later TDC analysis.
        analyze_raw_data.create_source_scan_hist = True
        analyze_raw_data.create_cluster_hit_table = True
        analyze_raw_data.create_cluster_table = True
        analyze_raw_data.create_cluster_size_hist = True
        analyze_raw_data.create_cluster_tot_hist = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.analyze_hit_table(analyzed_data_out_file=input_file_hits[:-3] + '_analyzed.h5')
        analyze_raw_data.plot_histograms(pdf_filename=input_file_hits[:-3], analyzed_data_file=input_file_hits[:-3] + '_analyzed.h5')
def analyze_tdc(hit_file, calibation_file, col_span=(5, 75), row_span=(10, 320)):
    """Histogram the TDC hits of an analyzed hit file.

    :param hit_file: Path to the analyzed hit file (HDF5).
    :param calibation_file: Path to the TDC calibration file (HDF5).
    :param col_span: (first, last) column range of the region of interest.
    :param row_span: (first, last) row range of the region of interest.
    """
    # Note: mutable list defaults were replaced by tuples (callers may
    # still pass lists), and two unused hit_cut_* path variables were
    # removed.
    # Selection criterions
    hit_selection = '(column > %d) & (column < %d) & (row > %d) & (row < %d)' % (col_span[0] + 1, col_span[1] - 1, row_span[0] + 5, row_span[1] - 5)  # deselect edge pixels for better cluster size cut
    hit_selection_conditions = ['(n_cluster==1)', '(n_cluster==1) & (cluster_size == 1)', '(n_cluster==1) & (cluster_size == 1) & ((tot > 12) | ((TDC * 1.5625 - tot * 25 < 100) & (tot * 25 - TDC * 1.5625 < 100))) & %s' % hit_selection]
    event_status_select_mask = 0b0000111111111111
    event_status_condition = 0b0000000100000000  # trigger, tdc word and perfect event structure required
    tdc_analysis.histogram_tdc_hits(input_file_hits=hit_file,
                                    hit_selection_conditions=hit_selection_conditions,
                                    event_status_select_mask=event_status_select_mask,
                                    event_status_condition=event_status_condition,
                                    calibration_file=calibation_file,
                                    max_tdc=1500,
                                    n_bins=1000)
if __name__ == "__main__":
# arguments = sys.argv
# if len(arguments) < 2:
# print 'Please provide the base file name of the root data files (e.g. threshold_ for threshold_2000.root)'
# raise SystemExit
#
# base_file_name = arguments[1]
base_file_name = '/media/documents/GEANT4/SourceSim-build/cc_'
calibation_file = r'/home/davidlp/git/Thesis/Analysis/Simulation/Landau/GEANT4/ChargeCloud/data/18_proto_7_hit_or_calibration_calibration.h5'
file_names = glob.glob(base_file_name + '*_interpreted.h5')
file_names.sort()
for file_name in file_names:
analyze_hits(file_name)
file_names = glob.glob(base_file_name + '*_interpreted_analyzed.h5')
file_names.sort()
for file_name in file_names:
analyze_tdc(file_name, calibation_file) | {
"repo_name": "DavidLP/SourceSim",
"path": "tools/fei4_tdc_analysis.py",
"copies": "1",
"size": "3186",
"license": "bsd-2-clause",
"hash": 1073614007138025000,
"line_mean": 49.5873015873,
"line_max": 235,
"alpha_frac": 0.6362209667,
"autogenerated": false,
"ratio": 3.271047227926078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9276606828686258,
"avg_score": 0.02613227318796404,
"num_lines": 63
} |
# Analyze treatment regimens by patient
# Note: The treatment_resp dataset seems to be more complete, and the treatment_regimen dataset seems to have info
# on dosage (which we're tentatively ignoring for now)
import os
from load_patient_data import load_treatment_regimen, load_treatment_resp
import numpy as np
import scipy.io as sio
import cmsgpack
def merge_intervals(intervals):
    """Merge list of possibly overlapping (a, b) intervals. Requires a < b already for each pair.

    https://codereview.stackexchange.com/questions/69242/merging-overlapping-intervals
    """
    merged = []
    for iv in sorted(intervals, key=lambda pair: pair[0]):
        if merged and iv[0] <= merged[-1][1]:
            # Overlaps the previous interval: extend its upper bound.
            lo, hi = merged[-1]
            merged[-1] = (lo, max(hi, iv[1]))
        else:
            merged.append(iv)
    return merged
def build_treatment_regimens():
    """Use STAND_ALONE_TREATMENT_REGIMEN data.

    Returns (patient_therapies, therapies): patient_therapies maps
    patient id -> {therapy name: list of (startday, stopday) tuples};
    therapies is the array of all distinct therapy names seen.
    """
    treatment_data, treatment_dict, treatment_fields = load_treatment_regimen()
    # First build list of all the therapies
    therapies = np.unique(treatment_data['MMTX_THERAPY'])
    print('Therapies: {therapies}'.format(therapies=therapies))
    patients = np.unique(treatment_data['PUBLIC_ID'])
    # Create a condensed listing of patient therapies:
    # dict of key = patient ID, val = dict of key = therapy name, val = list of tuples of (start,end) times
    # What do negative values for start and stop of therapy mean?
    patient_therapies = {}
    for patient in patients:
        patient_data = treatment_data[treatment_data['PUBLIC_ID'] == patient]
        patient_therapy = {}
        for i, row in patient_data.iterrows():
            therapy = row['MMTX_THERAPY']
            start = row['startday']
            stop = row['stopday']  # this can be NaN (blank in the original dataset) indicating not known
            # NOTE(review): when stop is NaN this comparison is False,
            # so NaN intervals pass through unswapped -- confirm intended.
            if start > stop:  # just in case?
                start, stop = stop, start
            # Skip invalid (blank therapy)
            if len(therapy) == 0:
                continue
            if therapy not in patient_therapy:
                patient_therapy[therapy] = [(start, stop)]
            else:
                patient_therapy[therapy].append((start,stop))
        patient_therapies[patient] = patient_therapy
    return patient_therapies, therapies
def build_treatment_resps():
    """Build per-patient therapy intervals from the treatment-response data.

    Combination treatments ('A/B/C') are split into individual
    therapies.  Returns (patient_therapies, therapies) where
    patient_therapies maps patient id -> {therapy name: merged list of
    (start, stop) day tuples} and therapies is the set of all names.
    """
    treatment_resp_data, treatment_resp_dict, treatment_resp_fields = load_treatment_resp()
    # Collect all the treatments, separating out multiple treatments into the individual therapies (drugs)
    mixed_therapies = treatment_resp_data['trtname']
    therapies = set()
    for i, entry in mixed_therapies.iteritems():
        entries = entry.split('/')
        for therapy in entries:
            therapies.add(therapy)
    print('{n} different therapies found'.format(n=len(therapies)))
    # Get each patient's treatment intervals with each therapy
    patients = np.unique(treatment_resp_data['PUBLIC_ID'])
    patient_therapies = {}
    for patient in patients:
        patient_data = treatment_resp_data[treatment_resp_data['PUBLIC_ID'] == patient]
        patient_therapy = {}
        for i, row in patient_data.iterrows():
            therapy = row['trtname']
            start = row['trtstdy']
            stop = row['trtendy']
            entries = therapy.split('/')
            for entry in entries:
                # Skip invalid (blank therapy)
                # NOTE(review): this checks the whole combination string,
                # not the individual entry, so empty entries from strings
                # like 'a//b' are still recorded -- confirm intended.
                if len(therapy) == 0:
                    continue
                if entry not in patient_therapy:
                    patient_therapy[entry] = [(start, stop)]
                else:
                    patient_therapy[entry].append((start, stop))
        # Clean up overlapping therapies
        merged_patient_therapies = {}
        for therapy, times in patient_therapy.items():
            merged_patient_therapies[therapy] = merge_intervals(times)
        patient_therapies[patient] = merged_patient_therapies
    return patient_therapies, therapies
def cleanup_treatments(patient_timeseries, therapies):
    """Convert somewhat messy list of therapies to better-defined therapies and replace in patient profiles.

    Some therapies have different names (pick a definitive one).
    Combine uncommon therapies into an 'Other' category.

    Returns (patient_therapies_, main_therapies) where each patient's dict has
    keys drawn from main_therapies only.

    Generalization: the common timeseries length used to be hard-coded to
    ``patient_timeseries['MMRF_1011']['Bortezomib']``; it is now sampled from
    any available series (all series share the same length).
    """
    # First get the number of patients on each therapy and look at the list to decide below.
    # (Kept for exploratory use; also raises KeyError if a key is missing from `therapies`.)
    t = dict.fromkeys(therapies, 0)
    for patient, tis in patient_timeseries.items():
        for ti in tis:
            t[ti] += 1
    # Look at the main ones and decide by eye
    # All other therapies are put into a catch-all called 'Other'
    main_therapies = ['Bortezomib', 'Carfilzomib', 'Cyclophosphamide', 'Dexamethasone', 'Lenalidomide', 'Melphalan']
    # Map misspellings of main therapies to above
    map_misspell = {
        'Melfalan': 'Melphalan'
    }
    # Map all thalidomide derivatives to Lenalidomide - there's a lot less of them but they're probably significant for the same reason
    # And just OR the treatment statuses
    map_lidomides = {
        'Pomalidomide': 'Lenalidomide',
        'Thalidomide': 'Lenalidomide'
    }
    # Remap all patients. Sample any one series for the shared length.
    n_times = next(vals.size
                   for tis in patient_timeseries.values()
                   for vals in tis.values())
    patient_therapies_ = {}
    for patient, tis in patient_timeseries.items():
        tis_ = {}
        val_lidomides = np.zeros((n_times,), dtype=np.int8)
        val_others = np.zeros((n_times,), dtype=np.int8)
        for ti, vals in tis.items():
            if ti in map_misspell:
                tis_[map_misspell[ti]] = vals
            elif ti in map_lidomides or ti == 'Lenalidomide':
                val_lidomides = np.logical_or(val_lidomides, vals)
            elif ti in main_therapies:
                tis_[ti] = vals
            else:
                val_others = np.logical_or(val_others, vals)
        tis_['Lenalidomide'] = val_lidomides.astype(np.int8)
        tis_['Other'] = val_others.astype(np.int8)
        patient_therapies_[patient] = tis_
    main_therapies.append('Other')
    return patient_therapies_, main_therapies
def build_treatment_features(patient_therapies, time_interval=90, time_final=3650):
    """Convert treatment data in the form of intervals to timeseries features. Inputs are in days.

    For each patient and therapy, produces a 0/1 vector with one entry per
    time bin [intervals[i-1], intervals[i]); the entry is 1 if the patient was
    on the therapy at all during that bin.

    Bug fix: the bin loop previously ran ``range(1, n_intervals)``, which left
    the final bin (x[-1]) permanently 0; it now covers all n_intervals
    consecutive interval pairs.
    """
    # If the patient was on a therapy at all in the interval [0, n), mark as 1, else 0.
    # This is inefficient, but ultimately only takes a few sec and only has to be run once.
    intervals = np.arange(0, time_final, time_interval, dtype=np.float32)
    n_intervals = intervals.size - 1
    patient_timeseries = {}
    for patient, therapies in patient_therapies.items():
        patient_timeserie = {}
        for therapy, times in therapies.items():
            x = np.zeros((n_intervals,), dtype=np.int8)
            # Iterate over every pair (intervals[i-1], intervals[i]); i runs to
            # n_intervals inclusive so the last bin is filled too.
            for i in range(1, n_intervals + 1):
                i_start = intervals[i - 1]
                i_stop = intervals[i]
                for time in times:
                    start = time[0]
                    stop = time[1]
                    # Overlap test: bin start or end falls in the therapy span,
                    # or the therapy span sits entirely inside the bin.
                    if (start <= i_start < stop) or (start <= i_stop < stop) or (i_start <= start and stop < i_stop):
                        x[i - 1] = 1
                        break
            patient_timeserie[therapy] = x
        patient_timeseries[patient] = patient_timeserie
    # Toss last timepoint, which represents an interval starting at the end time
    intervals = intervals[:-1]
    return patient_timeseries, intervals
if __name__ == '__main__':
    # Pipeline: raw treatment intervals -> per-bin binary timeseries
    # -> normalized therapy names -> dense patients x therapies x time matrix.
    patient_therapies, therapies = build_treatment_resps()
    patient_timeseries, intervals = build_treatment_features(patient_therapies)
    patient_timeseries, therapies = cleanup_treatments(patient_timeseries, therapies)
    # Build patients x therapy x time matrix
    n_patients = len(patient_timeseries)
    n_therapies = len(therapies)
    n_times = len(intervals)
    # Sort patient ids so the matrix row order is reproducible across runs.
    patients = sorted(list(patient_timeseries.keys()))
    data = np.zeros((n_patients, n_therapies, n_times), dtype=np.int8)
    for i in range(n_patients):
        patient = patients[i]
        for j in range(n_therapies):
            therapy = therapies[j]
            # Missing therapy for a patient leaves that row of zeros.
            if therapy in patient_timeseries[patient]:
                data[i, j, :] = patient_timeseries[patient][therapy]
    data_dir = 'data/processed'
    treatment_features_file = 'patient_treatment_timeseries'
    # NOTE(review): cmsgpack and sio are presumably imported earlier in this
    # file (msgpack variant and scipy.io) — confirm before refactoring.
    with open(os.path.join(data_dir, treatment_features_file), 'wb') as f:
        f.write(cmsgpack.packb((patient_timeseries, intervals)))
    treatment_matrix_file = os.path.join(data_dir, 'patient_treatment_matrix.mat')
    sio.savemat(treatment_matrix_file, {'data': data, 'patients': patients, 'therapies': therapies, 'intervals': intervals})
| {
"repo_name": "xpspectre/multiple-myeloma",
"path": "analyze_treatments.py",
"copies": "1",
"size": "9130",
"license": "mit",
"hash": -2716888472312346000,
"line_mean": 39.3982300885,
"line_max": 135,
"alpha_frac": 0.6308871851,
"autogenerated": false,
"ratio": 3.5973207249802996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47282079100803,
"avg_score": null,
"num_lines": null
} |
"""Analyze unpacked OP-1 firmware directories."""
import os
import re
import time
UNKNOWN_VALUE = 'UNKNOWN'
def analyze_boot_ldr(target):
    """Extract the bootloader version from te-boot.ldr in *target*.

    Returns {'bootloader_version': <version string or UNKNOWN_VALUE>}.
    """
    path = os.path.join(target, 'te-boot.ldr')
    # Context manager guarantees the handle is closed even if read() raises
    # (the original open/read/close could leak the handle on error).
    with open(path, 'rb') as f:
        data = f.read()
    # Version follows a 'TE-BOOT ' marker, e.g. 'TE-BOOT v1.23'.
    version_arr = re.findall(br'TE-BOOT .+?(\d*\.?\d+)', data)
    bootloader_version = version_arr[0].decode('utf-8').strip() if version_arr else UNKNOWN_VALUE
    return {
        'bootloader_version': bootloader_version,
    }
def analyze_main_ldr(target):
    """Extract firmware/build metadata from the main loader OP1_vdk.ldr.

    Returns a dict with 'firmware_version', 'build_version', 'build_date' and
    'build_time'; any field that cannot be parsed is UNKNOWN_VALUE.
    """
    path = os.path.join(target, 'OP1_vdk.ldr')
    # Context manager guarantees the handle is closed even if read() raises
    # (the original open/read/close could leak the handle on error).
    with open(path, 'rb') as f:
        data = f.read()
    # Build info lives on one line starting at 'Rev.'.
    # NOTE(review): if 'Rev.' is absent, find() returns -1 and the slice below
    # degrades to the last byte — inherited behavior, left unchanged.
    start_pos = data.find(b'Rev.')
    chunk = data[start_pos:]
    end_pos = chunk.find(b'\n')
    chunk = chunk[:end_pos].decode('utf-8')
    build_version_arr = re.findall(r'Rev.+?(.*?);', chunk)
    build_version = build_version_arr[0].strip() if build_version_arr else UNKNOWN_VALUE
    date_arr = re.findall(r'\d\d\d\d/\d\d/\d\d', chunk)
    time_arr = re.findall(r'\d\d:\d\d:\d\d', chunk)
    # Firmware version marker looks like 'R. 0218' anywhere in the image.
    fw_version = re.findall(br'R\..\d\d\d\d?\d?', data)
    fw_version = UNKNOWN_VALUE if not fw_version else fw_version[0].decode('utf-8')
    return {
        'firmware_version': str(fw_version),
        'build_version': build_version,
        'build_date': date_arr[0] if date_arr else UNKNOWN_VALUE,
        'build_time': time_arr[0] if time_arr else UNKNOWN_VALUE,
    }
def analyze_fs(target):
    """Report the oldest and newest file modification times under *target* (UTC)."""
    # Collect every file's mtime in one pass over the tree.
    mtimes = [
        os.path.getmtime(os.path.join(root, name))
        for root, _dirs, names in os.walk(target)
        for name in names
    ]
    oldest = min(mtimes) if mtimes else None
    newest = max(mtimes) if mtimes else None

    def fmt(stamp):
        return time.strftime('%Y/%m/%d %H:%M', time.gmtime(stamp))

    return {
        'oldest_file': fmt(oldest),
        'newest_file': fmt(newest),
    }
def analyze_unpacked_fw(target):
    """Aggregate loader and filesystem info for an unpacked firmware directory."""
    # Successive update() calls mirror the original {**a, **b, **c} merge,
    # with later sources overriding earlier keys.
    info = {}
    info.update(analyze_main_ldr(target))
    info.update(analyze_boot_ldr(target))
    info.update(analyze_fs(target))
    return info
| {
"repo_name": "op1hacks/op1-fw-repacker",
"path": "op1repacker/op1_analyze.py",
"copies": "1",
"size": "2208",
"license": "mit",
"hash": -5855937890467880000,
"line_mean": 26.9493670886,
"line_max": 97,
"alpha_frac": 0.5865036232,
"autogenerated": false,
"ratio": 3.079497907949791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9163891100859057,
"avg_score": 0.0004220860581467688,
"num_lines": 79
} |
"""Analyzing classes"""
from functools import reduce, partial
from collections import defaultdict
import inspect
import re
from warnings import warn
import pandas as pd
# Matches a "super().<method>(" call and captures the method name.
# Fix: use a raw string; the original relied on invalid escape sequences
# (\( \.) in a plain string, which is deprecated.
super_methods_p = re.compile(r'super\(\)\.(?P<super_method>\w+)\(')
# Fold an iterable of lists into one list of first-seen-order unique elements.
ordered_unik_elements = partial(reduce,
                                lambda unik, new_items: unik + [x for x in new_items if x not in unik])
class MethodNotFoundInMro(NotImplementedError):
    """Raised when a method is implemented nowhere in a class's MRO."""
    pass
def find_super_methods_in_code_line(code_line):
    """Return the method name of a super().<name>( call in *code_line*, else None."""
    match = super_methods_p.search(code_line)
    if match is None:
        return None
    return match.groupdict().get('super_method', None)
def find_super_methods_in_code_object(code_obj):
    """List the names of super().<name>() calls in *code_obj*'s source lines."""
    code_lines, _ = inspect.getsourcelines(code_obj)
    hits = []
    for raw_line in code_lines:
        stripped = raw_line.strip()
        # Whole-line comments are ignored, matching the original filter.
        if stripped.startswith('#'):
            continue
        hit = find_super_methods_in_code_line(stripped)
        if hit:
            hits.append(hit)
    return hits
def find_super_methods_in_func(func):
    """Like find_super_methods_in_code_object, but safe for non-functions."""
    has_code = callable(func) and hasattr(func, '__code__')
    return find_super_methods_in_code_object(func.__code__) if has_code else []
def method_calls_super_method_of_same_name(method):
    """True when *method*'s body contains a super().<same-name>(...) call."""
    return method.__name__ in find_super_methods_in_func(method)
def mro_for_method(cls, method, mro=None, method_not_found_error=True, include_overridden_methods=False):
    """Return the classes of cls's MRO that provide *method*, in resolution order.

    Walks the MRO; whenever the class's implementation calls
    super().<method>(), the walk continues to the next provider.
    Raises MethodNotFoundInMro (if method_not_found_error) when no class
    implements the method; warns when a super() call has no resolution.
    """
    if not isinstance(method, str):
        method = method.__name__
    if mro is None:
        mro = cls.mro()
    mro_length = len(mro)
    super_methods = []
    _super_method_expected = False
    super_cls = None
    for i, super_cls in enumerate(mro, 1):
        method_func = super_cls.__dict__.get(method, None)
        if method_func is not None:  # we found the class where the method that will be called is
            super_methods.append(super_cls)  # add this to the list (it's the first element)
            # does this implementation delegate with super().<method>()?
            if not method_calls_super_method_of_same_name(method_func):
                _super_method_expected = False
                if not include_overridden_methods:
                    break
            else:
                _super_method_expected = True  # flag to indicate that a super method is expected
            # if our target method is called (with super) in that code,
            # keep walking the mro to find the next provider.
            # NOTE(review): this raise also triggers when
            # include_overridden_methods is True and the LAST provider does
            # not call super — confirm that is intended.
            if i >= mro_length:
                raise ValueError("There was a super method call, but no more classes in the mro!")
    if super_cls is not None and _super_method_expected:
        warn("Method {} defined in {}, but call to super method has no resolution.".format(method, super_cls))
    if method_not_found_error and len(super_methods) == 0:
        raise MethodNotFoundInMro("Method {} isn't implemented in the mro of class {}".format(method, cls))
    return super_methods
def _no_dunder(x):
return not (x.startswith('__') and x.endswith('__'))
def mk_cls_identifier(cls_identifier='name'):
    """Return a function mapping a class to its identifier.

    'module_path' -> dotted module + qualname, 'name' -> qualname,
    anything else -> the class object itself.
    """
    if cls_identifier == 'module_path':
        return lambda cls: cls.__module__ + '.' + cls.__qualname__
    if cls_identifier == 'name':
        return lambda cls: cls.__qualname__
    return lambda cls: cls
def method_resolutions(cls, methods=None, cls_identifier=None, method_not_found_error=True,
                       include_overridden_methods=False):
    """
    Map each method name to the (identified) classes that resolve it.

    :param cls: class whose MRO is analyzed
    :param methods: method names to analyze; defaults to '__init__' plus
        cls's own non-dunder attributes
    :param cls_identifier: how classes are represented (see mk_cls_identifier)
    :param method_not_found_error: raise MethodNotFoundInMro for missing methods
    :return: {method name: [class identifiers in resolution order]}

    >>> class A:
    ...     def foo(self):
    ...         return 42
    ...     def hello(self):
    ...         pass
    >>> class B(A):
    ...     def hello(self):
    ...         super().hello()
    >>> class C(A):
    ...     def foo(self):
    ...         super().foo()
    ...     def bar(self):
    ...         super().bar()  # if A is the next in line in the mro, this should fail, since A has no bar
    ...     def hello(self):
    ...         super().hello()
    >>> class BC(B, C):
    ...     def foo(self):
    ...         print('hello BC')
    ...         super().bar()  # a call to bar within foo. Should be ignored by the search for super().foo()
    ...         super().foo()
    ...     def hello(self):
    ...         super().hello()
    >>> class CB(C, B):
    ...     def foo(self):
    ...         # super().foo(), this comment is there just to test that the super().foo() is actually ignored
    ...         pass
    ...     def hello(self):
    ...         super().hello()
    >>> for cls in [A, B, C, BC, CB]:
    ...     print("---- {} mro ----".format(cls.__name__))
    ...     print(", ".join(map(lambda x: x.__name__, cls.__mro__)))
    ---- A mro ----
    A, object
    ---- B mro ----
    B, A, object
    ---- C mro ----
    C, A, object
    ---- BC mro ----
    BC, B, C, A, object
    ---- CB mro ----
    CB, C, B, A, object
    >>>
    >>> method_resolutions(BC, ['__init__', 'hello', 'foo'], cls_identifier='name')
    {'__init__': ['object'], 'hello': ['BC', 'B', 'C', 'A'], 'foo': ['BC', 'C', 'A']}
    >>> method_resolutions(CB, ['__init__', 'hello', 'foo'], cls_identifier='name')
    {'__init__': ['object'], 'hello': ['CB', 'C', 'B', 'A'], 'foo': ['CB']}
    >>>
    >>> import warnings; warnings.filterwarnings('error')
    >>> try:
    ...     res = method_resolutions(CB, ['bar'], cls_identifier='name')
    ... except UserWarning:
    ...     print("Expected this UserWarning: C.bar calls super().bar but there's not bar further in mro")
    Expected this UserWarning: C.bar calls super().bar but there's not bar further in mro
    """
    # Default to __init__ plus all non-dunder names the class itself defines.
    if methods is None:
        methods = ['__init__'] + list(filter(_no_dunder, cls.__dict__.keys()))
    id_of_cls = mk_cls_identifier(cls_identifier)
    # For each method, resolve its provider chain and map classes to ids.
    _mro_for_method = {
        method: list(map(id_of_cls,
                         mro_for_method(cls, method, None, method_not_found_error, include_overridden_methods)))
        for method in methods}
    return _mro_for_method
def df_of_method_resolutions(cls, methods=None, cls_identifier='name', method_not_found_error=True,
                             include_overridden_methods=False):
    """Tabulate method_resolutions(...) as a methods x classes DataFrame.

    Cell values are the class's rank in the method's resolution order
    (0 = called first); classes not involved in a method are blank.
    """
    _mro_for_method = method_resolutions(
        cls, methods, cls_identifier, method_not_found_error, include_overridden_methods)
    # rank of each class within each method's resolution chain
    d = {method: {k: i for i, k in enumerate(resolutions)} for method, resolutions in _mro_for_method.items()}
    d = pd.DataFrame(d).T
    methods = list(_mro_for_method.keys())
    id_of_cls = mk_cls_identifier(cls_identifier)
    d_cols = set(d.columns)
    # order columns by the class's own MRO, keeping only classes that appear
    cols = [c for c in map(id_of_cls, cls.mro()) if c in d_cols]
    # -1 is a temporary fill so astype(int) works; it is blanked right after,
    # so the order of these two statements matters.
    d = d.loc[methods, cols].fillna(-1).astype(int)
    d[d == -1] = ""
    return d
def assert_cls(cls):
    """Return *cls* if it is already a class, else the instance's class."""
    return cls if isinstance(cls, type) else cls.__class__
def class_path_str(cls):
    """Dotted 'module.ClassName' path of a class (or an instance's class).

    Bug fix: __name__ was previously read off the raw argument, which raised
    AttributeError when an instance was passed; both attributes are now read
    from the resolved class.
    """
    c = assert_cls(cls)
    return c.__module__ + '.' + c.__name__
def mro_str_with_indents(cls, indent=4):
    """Render cls's MRO, one class per line, indenting each class under a
    previously printed class it is a direct base of."""
    cls = assert_cls(cls)
    # Classes already placed, mapped to their indent level; relies on dict
    # insertion order so reversed() visits the most recent classes first.
    _indent_of_cls = dict()
    def indent_of_cls(_cls):
        for __cls in reversed(list(_indent_of_cls.keys())):
            # _cls is a direct base of an already-placed class: nest under it.
            if _cls in __cls.__bases__:
                _indent_of_cls[_cls] = _indent_of_cls[__cls] + indent
                return _indent_of_cls[_cls]
        # if got so far, no placed class has _cls as a base: top level.
        _indent_of_cls[_cls] = 0
        return _indent_of_cls[_cls]
    s = ''
    for _cls in cls.mro():
        _indent = indent_of_cls(_cls)
        s += ' ' * _indent + class_path_str(_cls) + '\n'
    return s
def print_mro(cls, indent=4):
    """Print the indented MRO rendering of *cls* (see mro_str_with_indents)."""
    print(mro_str_with_indents(cls, indent))
def all_methods_in_the_order_they_were_encountered_in_mro(cls):
    """Collect callable attribute names along cls.mro(), first-seen order."""
    seen = []
    for klass in cls.mro():
        # __dict__ preserves definition order; only record unseen names.
        for attr_name, attr in klass.__dict__.items():
            if callable(attr) and attr_name not in seen:
                seen.append(attr_name)
    return seen
def mro_class_methods(cls, include=None, exclude=None, cls_identifier='name', include_overridden_methods=False):
    """
    Get the methods that each class of the mro contains, organized by class.
    :param cls: class to analyze
    :param include: method (names) to include
    :param exclude: method (names) to exclude
    :param cls_identifier: how to represent classes (the class object itself, the module path, just the name (default))
    :param include_overridden_methods: If False (default), will not include methods that have been overridden
    :return: A {class: methods, ...} dict

    >>> class A:
    ...     def foo(self):
    ...         return 42
    ...     def hello(self):
    ...         pass
    >>> class B(A):
    ...     def hello(self):
    ...         super().hello()
    >>> class C(A):
    ...     def foo(self):
    ...         super().foo()
    ...     def bar(self):
    ...         super().bar()  # if A is the next in line in the mro, this should fail, since A has no bar
    ...     def hello(self):
    ...         super().hello()
    >>> class BC(B, C):
    ...     def foo(self):
    ...         print('hello BC')
    ...         super().bar()  # a call to bar within foo. Should be ignored by the search for super().foo()
    ...         super().foo()
    ...     def hello(self):
    ...         super().hello()
    >>> class CB(C, B):
    ...     def foo(self):
    ...         # super().foo(), this comment is there just to test that the super().foo() is actually ignored
    ...         pass
    ...     def hello(self):
    ...         super().hello()
    >>> mro_class_methods(CB)
    {'CB': ['foo', 'hello'], 'C': ['hello', 'bar'], 'B': ['hello'], 'A': ['hello'], 'object': ['__init__']}
    >>> mro_class_methods(BC)
    {'BC': ['foo', 'hello'], 'C': ['foo', 'hello', 'bar'], 'A': ['foo', 'hello'], 'B': ['hello'], 'object': ['__init__']}
    """
    all_methods = all_methods_in_the_order_they_were_encountered_in_mro(cls)
    # resolve inclusion/exclusion
    if include:
        all_methods = [method for method in all_methods if method in include]
    else:
        if not exclude:
            # if not include or exclude, exclude all base object methods except __init__
            exclude = set([k for k in object.__dict__.keys() if k not in {'__init__'}])
        all_methods = [method for method in all_methods if method not in exclude]
    # handle methods calling super methods of the same name (and warn if supers without resolution)
    cls_resolution_for_method = {
        method: mro_for_method(cls, method, include_overridden_methods=include_overridden_methods)
        for method in all_methods
    }
    # invert: {class: [methods it resolves]}, preserving method encounter order
    methods_of_cls = defaultdict(list)
    for method, _classes in cls_resolution_for_method.items():
        for _cls in _classes:
            methods_of_cls[_cls].append(method)
    id_of_cls = mk_cls_identifier(cls_identifier)
    return {id_of_cls(_cls): methods for _cls, methods in methods_of_cls.items()}
def df_of_mro_class_methods(cls, include=None, exclude=None, cls_identifier='name', include_overridden_methods=False):
    """Tabulate mro_class_methods(...) as a methods x classes DataFrame.

    A cell contains the method name when the class provides it, else ''.

    Bug fix: ``include_overridden_methods`` was accepted but silently dropped;
    it is now forwarded to mro_class_methods.
    """
    methods_of_cls = mro_class_methods(cls, include, exclude, cls_identifier, include_overridden_methods)
    # Rows: all method names in first-seen order; columns: one per class.
    df = pd.DataFrame(index=ordered_unik_elements(methods_of_cls.values()), columns=methods_of_cls.keys())
    df = df.fillna('')
    for cls_name, methods in methods_of_cls.items():
        for method in methods:
            df.loc[method, cls_name] = method
    return df
| {
"repo_name": "thorwhalen/ut",
"path": "util/class_analysis.py",
"copies": "1",
"size": "11522",
"license": "mit",
"hash": -8082292552589861000,
"line_mean": 37.0264026403,
"line_max": 121,
"alpha_frac": 0.5727304287,
"autogenerated": false,
"ratio": 3.62441019188424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46971406205842403,
"avg_score": null,
"num_lines": null
} |
# # Analyzing results from notebooks
#
# The `.ipynb` format is capable of storing tables and charts in a standalone file. This makes it a great choice for model evaluation reports. `NotebookCollection` allows you to retrieve results from previously executed notebooks to compare them.
# +
import papermill as pm
import jupytext
from sklearn_evaluation import NotebookCollection
# -
# Let's first generate a few notebooks, we have a `train.py` script that trains a single model, let's convert it to a jupyter notebook:
# Convert the training script into an executable notebook.
nb = jupytext.read('train.py')
jupytext.write(nb, 'train.ipynb')
# We use papermill to execute the notebook with different parameters, we'll train 4 models: 2 random forest, a linear regression and a support vector regression:
# +
# models with their corresponding parameters
params = [{
    'model': 'sklearn.ensemble.RandomForestRegressor',
    'params': {
        'n_estimators': 50
    }
}, {
    'model': 'sklearn.ensemble.RandomForestRegressor',
    'params': {
        'n_estimators': 100
    }
}, {
    'model': 'sklearn.linear_model.LinearRegression',
    'params': {
        'normalize': True
    }
}, {
    'model': 'sklearn.svm.LinearSVR',
    'params': {}
}]
# ids to identify each experiment
ids = [
    'random_forest_1', 'random_forest_2', 'linear_regression',
    'support_vector_regression'
]
# output files (one executed notebook per experiment)
files = [f'{i}.ipynb' for i in ids]
# execute notebooks using papermill
for f, p in zip(files, params):
    pm.execute_notebook('train.ipynb', output_path=f, parameters=p)
# -
# To use `NotebookCollection`, we pass a list of paths, and optionally, ids for each notebook (uses paths by default).
#
# The only requirement is that cells whose output we want to extract must have tags, each tag then becomes a key in the notebook collection. For instructions on adding tags, [see this](https://jupyterbook.org/advanced/advanced.html#how-should-i-add-cell-tags-and-metadata-to-my-notebooks).
#
# Extracted tables add colors to certain cells to identify the best and worst metrics. By default, it assumes that metrics are errors (smaller is better). If you are using scores (larger is better), pass `scores=True`, if you have both, pass a list of scores:
nbs = NotebookCollection(paths=files, ids=ids, scores=['r2'])
# To get a list of tags available:
list(nbs)
# `model_params` contains a dictionary with model parameters, let's get them (click on the tabs to switch):
# pro-tip: when typing the tag, press the "Tab" key for autocompletion!
nbs['model_params']
# `plot` has a `y_true` vs `y_pred` chart:
nbs['plot']
# On each notebook, `metrics` outputs a data frame with a single row with mean absolute error (mae) and mean squared error (mse) as columns.
#
# For single-row tables, a "Compare" tab shows all results at once:
nbs['metrics']
# We can see that the second random forest is performing the best in both metrics.
#
# `river` contains a multi-row table with error metrics broken down by the `CHAS` indicator feature. Multi-row tables *do not* display the "Compare" tab:
nbs['river']
# If we only compare two notebooks, the output is a bit different:
# only compare two notebooks
nbs_two = NotebookCollection(paths=files[:2], ids=ids[:2], scores=['r2'])
# Comparing single-row tables includes a diff column with the error difference between experiments. Error reductions are shown in green, increments in red:
nbs_two['metrics']
# When comparing multi-row tables, the "Compare" tab appears, showing the difference between the tables:
nbs_two['river']
# When displaying dictionaries, a "Compare" tab shows with a diff view:
nbs_two['model_params']
# Lists (and sets) are compared based on elements existence:
nbs_two['feature_names']
# ## Using the mapping interface
#
# `NotebookCollection` has a dict-like interface, you can retrieve data from individual notebooks:
nbs['model_params']['random_forest_1']
nbs['plot']['random_forest_2']
| {
"repo_name": "edublancas/sklearn-evaluation",
"path": "docs/source/nbs/NotebookCollection.py",
"copies": "1",
"size": "3930",
"license": "mit",
"hash": 5401046204740199000,
"line_mean": 33.1739130435,
"line_max": 289,
"alpha_frac": 0.7264631043,
"autogenerated": false,
"ratio": 3.546931407942238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9769317244712454,
"avg_score": 0.0008154535059569641,
"num_lines": 115
} |
""" An Ambry bundle for building geofile bundles
How to Create a New Geofile Bundle
==================================
Step 1: Setup source.
It's easiest to copy from a similar bundle and edit. You'll need these
sources:
* A dnlpage<year><release> for each year/release. These are URLs to the
download page for the release
* geofile_schema, a ref to a geofile schema partition, such as
census.gov-acs_geofile-schemas-2009e-geofile_schema-2013
* sumlevels, a partition ref to the list of summary levels, such as:
census.gov-acs_geofile-schemas-2009e-sumlevels-none
Then there should be two or three 'split' sources, both at stage 2, one for
each release / year:
* geofile-split-20141
* geofile-split-20141
Step 2: build the source and dest schemas
$ bambry exec meta_mkschema
Run 'bambry dump -T' to verify that a source schema was generated, and
'bambry dump -C' for the dest schema.
Step 3: Add source links
$ bambry exec meta_add_sources
Run 'bambry dump -s' to verify that a list of new sources was generated.
Step 4: Update the datatype based on a single ingestion
$ bambry exec meta_update_source_types
This will run for a while, because the California file is big. A big file should have the full
range of values for each column, so it is better for determining column datatypes
Run 'bambry dump -C' and verify that not all of the colums have the same datatype.
Step 5: Now, sync out and build the bundle
$ bambry sync -o
$ bambry -m build
This build will fail, because stage 2 expects the tables that are created in step 6. The error is:
CRITICAL: No partition for ref: 'census.gov-acs-geofile-2013-geofile-20135'
Step 6: Construct the reduced schemas
After running, the 'meta_build_reduced_schemas' function can analyze the geofiles for each summary level
to determine which columns are used and which are not. Then it will create new schemas for each summary level
$ bambry exec meta_build_reduced_schemas
$ bambry sync -o
Run 'bambry dump -t' to verify there are many more tables
Step 7: Build again with stage 2, reduced schemas.
bambry -m build
"""
import ambry.bundle
class GeofileBundle(ambry.bundle.Bundle):
    """Ambry bundle base class for building ACS geofile bundles.

    Subclasses set ``year``; see the module docstring for the build steps.
    """
    # ACS release year; set by concrete subclasses (e.g. 2013).
    year = None
    def init(self):
        # Lazily-built summary-level map cache.
        self._sl_map = None
    @staticmethod
    def non_int_is_null(v):
        """Return int(v), or None when v is not a valid integer."""
        try:
            return int(v)
        except ValueError:
            return None
    def geobundle_doc(self):
        """Print the docstring"""
        from censuslib.geofile import __doc__ as geobundle_docstring
        print geobundle_docstring
    ##
    ## Meta, Step 2: build the source and dest schemas
    ##
    def meta_mkschema(self):
        """Create the geofile schema from the configuration in the
        upstream bundle. """
        from ambry.orm.file import File
        t = self.dataset.new_table('geofile')
        st = self.dataset.new_source_table('geofile')
        p = self.dep('geofile_schema')
        i = 1
        for row in p:
            if row['year'] == self.year :
                i += 1
                name = row['name'].lower().strip()
                # The upstream schema uses 'blank' for filler columns; make
                # each one unique so column names don't collide.
                name = name if name != 'blank' else 'blank{}'.format(i)
                self.logger.info(name)
                t.add_column(name, datatype = 'str', description = row['description'])
                st.add_column( source_header = name, position = row['seq'],
                               datatype = str,
                               start = row['start'], width = row['width'],
                               description = row['description'])
        self.commit()
        self.build_source_files.sourceschema.objects_to_record()
        self.build_source_files.schema.objects_to_record()
        self.commit()
    ##
    ## Meta Step 3: Add source links
    ##
    def meta_add_sources(self):
        """Add download sources for the 1-, 3- (through 2013) and 5-year releases."""
        self._meta_add_13yr_sources(span=1)
        if self.year <= 2013:
            self._meta_add_13yr_sources(span=3)
        self._meta_add_5yr_sources()
    def _meta_add_13yr_sources(self, span):
        """Run once to create the sources.csv file. Scrapes the web page with the links to the
        files. """
        from ambry.orm import DataSource, File
        from ambry.util import scrape_urls_from_web_page
        from ambry.orm.exc import NotFoundError
        source = self.source('dnlpage{}{}'.format(self.year,span))
        entries = scrape_urls_from_web_page(source.url)['sources']
        for k,v in entries.items():
            d = {
                'name': k.lower()+"_{}{}".format(self.year,span),
                'source_table_name': 'geofile',
                'dest_table_name': 'geofile',
                'filetype': 'csv',
                'file': 'g{}.*\.csv'.format(self.year),
                'encoding': 'latin1',
                'time': str(self.year)+str(span),
                'start_line': 0,
                'url': v['url']
            }
            # Update the source if it already exists, otherwise create it.
            try:
                s = self._dataset.source_file(d['name'])
                s.update(**d)
            except NotFoundError:
                s = self.dataset.new_source(**d)
            self.session.merge(s)
        self.commit()
        self.build_source_files.sources.objects_to_record()
        self.commit()
    def _meta_add_5yr_sources(self):
        """The 5 year release has a different structure because the files are bigger. """
        from ambry.orm import DataSource, File
        from ambry.util import scrape_urls_from_web_page
        from ambry.orm.exc import NotFoundError
        import os
        year = self.year
        span = 5
        source = self.source('dnlpage{}{}'.format(year,span))
        self.log("Loading from {}".format(source.url))
        # Map the download link suffix to a single-letter size code:
        # L = all geographies except tracts/block groups, S = tracts/block groups.
        name_map={
            'All_Geographies_Not_Tracts_Block_Groups': 'L',
            'Tracts_Block_Groups_Only': 'S'
        }
        def parse_name(inp):
            # Split '<State>_<suffix>' into (state name, size code).
            for suffix, code in name_map.items():
                if inp.endswith(suffix):
                    return inp.replace('_'+suffix, ''), code
            return (None, None)
        for link_name, parts in scrape_urls_from_web_page(source.url)['sources'].items():
            url=parts['url']
            state_name, size_code = parse_name(link_name)
            d = {
                'name': "{}{}_{}{}".format(state_name,size_code,self.year, span),
                'source_table_name': 'geofile',
                'dest_table_name': 'geofile',
                'filetype': 'csv',
                'file': 'g{}.*\.csv'.format(self.year),
                'encoding': 'latin1',
                'time': str(self.year)+str(span),
                'start_line': 0,
                'url':url
            }
            # Update the source if it already exists, otherwise create it.
            try:
                s = self._dataset.source_file(d['name'])
                s.update(**d)
            except NotFoundError:
                s = self.dataset.new_source(**d)
            self.session.merge(s)
            self.log(s.name)
        self.commit()
        self.build_source_files.sources.objects_to_record()
        self.commit()
    ##
    ## Meta Step 4: Update the datatype based on a single ingestion
    ##
    def meta_update_source_types(self):
        """Ingest one large state file and copy the intuited column types onto
        both the source and destination geofile schemas."""
        from ambry_sources.intuit import TypeIntuiter
        # California is large, so it should exercise the full value range of
        # each column, giving better datatype intuition.
        source_name = 'CaliforniaS_{}5'.format(self.year)
        s = self.source(source_name)
        s.start_line = 0
        s.header_lines = []
        self.commit()
        self.ingest(sources=[s.name], force=True)
        s = self.source(s.name)
        st = self.source_table('geofile')
        dt = self.table('geofile')
        def col_by_pos(pos):
            # Linear lookup of a source column by its 1-based position.
            for c in st.columns:
                if c.position == pos:
                    return c
        with s.datafile.reader as r:
            for col in r.columns:
                c = col_by_pos(col.position+1)
                c.datatype = col['resolved_type'] if col['resolved_type'] != 'unknown' else 'str'
                dc = dt.column(c.name)
                dc.datatype = c.datatype
        self.commit()
        self.build_source_files.sourceschema.objects_to_record()
        self.build_source_files.schema.objects_to_record()
        self.commit()
    ##
    ## Meta Step 6, After Build: Create per-summary level tables
    ##
    def meta_build_reduced_schemas(self):
        """
        After running once, it is clear that not all columns are used in all
        summary levels. This routine builds new tables for all of the summary
        levels that have only the columns that are used.
        """
        from collections import defaultdict
        from itertools import islice, izip
        table_titles = { int(r['sumlevel']): r['description'] if r['description'] else r['sumlevel']
                         for r in self.dep('sumlevels')}
        p = self.partition(table='geofile', time='{}5'.format(self.year))
        # Create a dict of sets, where each set holds the non-empty columns for rows of
        # a summary level
        gf = defaultdict(set)
        for r in p:
            gf[r.sumlevel] |= set(k for k,v in r.items() if v)
        for sumlevel, fields in gf.items():
            t = self.dataset.new_table('geofile'+str(sumlevel))
            t.columns = []
            self.commit()
            t.description = 'Geofile for: ' + str(table_titles.get(int(sumlevel), sumlevel))
            self.log('New table {}: {}'.format(t.name, t.description))
            # Copy only the columns this summary level actually populates.
            for c in self.table('geofile').columns:
                if c.name in fields:
                    t.add_column(name=c.name, datatype=c.datatype, description=c.description, transform=c.transform)
        self.commit()
        self.build_source_files.schema.objects_to_record()
        self.commit()
| {
"repo_name": "CivicKnowledge/censuslib",
"path": "censuslib/geofile.py",
"copies": "1",
"size": "9686",
"license": "mit",
"hash": 5970163995614014000,
"line_mean": 28.3515151515,
"line_max": 116,
"alpha_frac": 0.5768118935,
"autogenerated": false,
"ratio": 3.839080459770115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4915892353270115,
"avg_score": null,
"num_lines": null
} |
# Three pegs; each 'stack' lists disk sizes bottom-to-top (bigger number = bigger disk).
A = {'name':'A','stack':[2,1]}
B = {'name':'B','stack':[]}
C = {'name':'C','stack':[]}
def moveDisk(fromPile,toPile):
    """Move the top disk of fromPile onto toPile, enforcing the Hanoi rule.

    Raises Exception for an illegal move (larger disk onto smaller). Bug fix:
    the original popped the moving disk before validating, so a rejected move
    lost the disk; we now validate by peeking before mutating either stack.
    The print call is parenthesized (single argument), so output is identical
    under Python 2 and the code is also valid Python 3.
    """
    fromStack = fromPile['stack']
    toStack = toPile['stack']
    # Validate first so a rejected move leaves both piles intact.
    if toStack and fromStack[-1] > toStack[-1]:
        raise Exception("Cannot move a larger disk onto a smaller one.")
    disk = fromStack.pop()
    toStack.append(disk)
    print("Moved {} from {} to {}".format(disk, fromPile['name'], toPile['name']))
    printStacks()
def moveTower(height,fromPile,toPile,withPile):
    """Recursively move a tower of `height` disks from fromPile to toPile."""
    # Guard clause instead of wrapping the whole body in `if height > 0`.
    if height <= 0:
        return
    # Park height-1 disks on the spare peg, move the base disk, re-stack.
    moveTower(height - 1, fromPile, withPile, toPile)
    moveDisk(fromPile, toPile)
    moveTower(height - 1, withPile, toPile, fromPile)
def printStacks():
    """Print the current contents of all three pegs."""
    # Python 2 print statements: the comma-separated args print the label
    # and the raw stack list on one line.
    print "A: ",A['stack']
    print "B: ",B['stack']
    print "C: ",C['stack']
    print ""
def testMoves():
    """Manually exercise moveDisk with a sequence of legal moves.

    Not called by default; run it as a smoke test of the move rules.
    Note moveDisk already prints the stacks, so these extra printStacks
    calls are redundant but harmless.
    """
    printStacks()
    moveDisk(A,C)
    printStacks()
    moveDisk(C,B)
    printStacks()
    moveDisk(A,C)
    printStacks()
    moveDisk(B,C)
    printStacks()
# Solve the puzzle: move the full tower from peg A to peg C, using B as spare.
printStacks()
moveTower(len(A['stack']),A,C,B)
| {
"repo_name": "willettk/insight",
"path": "python/towers_of_hanoi.py",
"copies": "1",
"size": "1166",
"license": "apache-2.0",
"hash": 6636893625123877000,
"line_mean": 20.2,
"line_max": 79,
"alpha_frac": 0.5677530017,
"autogenerated": false,
"ratio": 3.0207253886010363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40884783903010363,
"avg_score": null,
"num_lines": null
} |
# An amendment id should be a unique string (a valid filename) built from a
# range of new taxon ids, in the form '{first_new_ottid}-{last_new_ottid}'.
# EXAMPLES: 'additions-8783730-8783738'
# 'additions-4999718-5003245'
# 'additions-9998974-10000005'
# N.B. We will someday bump to 8 digits, so sorting logic should manage this.
from peyotl.utility import get_logger
from peyotl.utility.str_util import (slugify, increment_slug)
import json
# Prefer the third-party ``anyjson`` when available; otherwise fall back to a
# minimal shim exposing the one entry point used here (``loads``).
try:
    import anyjson
except ImportError:  # narrowed: a bare except also swallowed SystemExit etc.
    class Wrapper(object):
        pass
    anyjson = Wrapper()
    anyjson.loads = json.loads
from peyotl.git_storage import ShardedDocStore, \
TypeAwareDocStore
from peyotl.amendments.amendments_shard import (TaxonomicAmendmentsShardProxy, TaxonomicAmendmentsShard)
from peyotl.amendments.validation import validate_amendment
from peyotl.amendments.git_actions import TaxonomicAmendmentsGitAction
import re
# Allow simple slug-ified string with '{known-prefix}-{7-or-8-digit-id}-{7-or-8-digit-id}'
# (8-digit ottids are probably years away, but allow them to be safe.)
# N.B. currently only the 'additions' prefix is supported!
AMENDMENT_ID_PATTERN = re.compile(r'^(additions|changes|deletions)-[0-9]{7,8}-[0-9]{7,8}$')
# Module-level logger shared by the helpers below.
_LOG = get_logger(__name__)
def prefix_from_amendment_path(amendment_id):
    """Return the sharding prefix (the subtype) for an amendment id.

    Amendment ids look like '{subtype}-{first ottid}-{last ottid}', e.g.
    'additions-0000000-0000005'; the subtype groups matching amendments
    into a single shard. Ids without a '-' yield 'unknown_subtype'.
    """
    _LOG.debug('> prefix_from_amendment_path(), testing this id: {i}'.format(i=amendment_id))
    id_parts = amendment_id.split('-')
    _LOG.debug('> prefix_from_amendment_path(), found {} parts'.format(len(id_parts)))
    return id_parts[0] if len(id_parts) > 1 else 'unknown_subtype'
class TaxonomicAmendmentStoreProxy(ShardedDocStore):
    """Proxy for interacting with external resources if given the configuration of a remote TaxonomicAmendmentStore
    """

    def __init__(self, config):
        ShardedDocStore.__init__(self,
                                 prefix_from_doc_id=prefix_from_amendment_path)
        # One shard proxy per configured shard.
        for shard_config in config.get('shards', []):
            self._shards.append(TaxonomicAmendmentsShardProxy(shard_config))
        # Build the doc-id -> shard lookup, refusing duplicate ids.
        doc_map = {}
        for shard in self._shards:
            for doc_id in shard.doc_index.keys():
                if doc_id in doc_map:
                    raise KeyError('Amendment "{i}" found in multiple repos'.format(i=doc_id))
                doc_map[doc_id] = shard
        self._doc2shard_map = doc_map
class _TaxonomicAmendmentStore(TypeAwareDocStore):
    """Wrapper around a set of sharded git repos.
    """

    def __init__(self,
                 repos_dict=None,
                 repos_par=None,
                 with_caching=True,
                 assumed_doc_version=None,
                 git_ssh=None,
                 pkey=None,
                 git_action_class=TaxonomicAmendmentsGitAction,
                 mirror_info=None,
                 infrastructure_commit_author='OpenTree API <api@opentreeoflife.org>',
                 **kwargs):
        """
        Repos can be found by passing in a `repos_par` (a directory that is the parent of the repos)
        or by trusting the `repos_dict` mapping of name to repo filepath.
        `with_caching` should be True for non-debugging uses.
        `assumed_doc_version` is optional. If specified all TaxonomicAmendmentsShard repos are assumed to store
        files of this version of nexson syntax.
        `git_ssh` is the path of an executable for git-ssh operations.
        `pkey` is the PKEY that has to be in the env for remote, authenticated operations to work
        `git_action_class` is a subclass of GitActionBase to use. the __init__ syntax must be compatible
        with PhylesystemGitAction
        If you want to use a mirrors of the repo for pushes or pulls, send in a `mirror_info` dict:
        mirror_info['push'] and mirror_info['pull'] should be dicts with the following keys:
        'parent_dir' - the parent directory of the mirrored repos
        'remote_map' - a dictionary of remote name to prefix (the repo name + '.git' will be
        appended to create the URL for pushing).
        """
        # BUG FIX: `git_action_class` and `infrastructure_commit_author` were
        # previously ignored (hard-coded values were passed through); pass the
        # actual parameters so callers can customize them.
        TypeAwareDocStore.__init__(self,
                                   prefix_from_doc_id=prefix_from_amendment_path,
                                   repos_dict=repos_dict,
                                   repos_par=repos_par,
                                   with_caching=with_caching,
                                   assumed_doc_version=assumed_doc_version,
                                   git_ssh=git_ssh,
                                   pkey=pkey,
                                   git_action_class=git_action_class,
                                   git_shard_class=TaxonomicAmendmentsShard,
                                   mirror_info=mirror_info,
                                   new_doc_prefix=None,
                                   infrastructure_commit_author=infrastructure_commit_author,
                                   **kwargs)
        # Prime the growing shard's next-ott-id counter so minting can proceed.
        self._growing_shard._determine_next_ott_id()

    # rename some generic members in the base class, for clarity and backward compatibility
    @property
    def get_amendment_ids(self):
        """Alias for the base class's generic ``get_doc_ids``."""
        return self.get_doc_ids

    @property
    def delete_amendment(self):
        """Alias for the base class's generic ``delete_doc``."""
        return self.delete_doc

    def create_git_action_for_new_amendment(self, new_amendment_id=None):
        """Checks out master branch of the shard as a side effect"""
        return self._growing_shard.create_git_action_for_new_amendment(new_amendment_id=new_amendment_id)

    def add_new_amendment(self,
                          json_repr,
                          auth_info,
                          commit_msg=''):
        """Validate and save this JSON. Ensure (and return) a unique amendment id.

        Returns an ``(amendment_id, commit_result)`` pair; the commit result
        gains a ``'tag_to_ottid'`` mapping of taxon tags to newly minted ids.
        Raises ValueError for unparseable/invalid JSON or id-count mismatches
        and KeyError for a missing taxon "tag" or a duplicate amendment id.
        """
        amendment = self._coerce_json_to_amendment(json_repr)
        if amendment is None:
            msg = "File failed to parse as JSON:\n{j}".format(j=json_repr)
            raise ValueError(msg)
        if not self._is_valid_amendment_json(amendment):
            msg = "JSON is not a valid amendment:\n{j}".format(j=json_repr)
            raise ValueError(msg)
        # Mint any needed ottids, update the document accordingly, and
        # prepare a response with
        #  - per-taxon mapping of tag to ottid
        #  - resulting id (or URL) to the stored amendment
        # To ensure synchronization of ottids and amendments, this should be an
        # atomic operation!
        # check for tags and confirm count of new ottids required (if provided)
        num_taxa_eligible_for_ids = 0
        for taxon in amendment.get("taxa"):
            # N.B. We don't require 'tag' in amendment validation; check for it now!
            if "tag" not in taxon:
                raise KeyError('Requested Taxon is missing "tag" property!')
            # allow for taxa that have already been assigned (use cases?)
            if "ott_id" not in taxon:
                num_taxa_eligible_for_ids += 1
        if 'new_ottids_required' in amendment:
            requested_ids = amendment['new_ottids_required']
            # explicit `if` (not try/assert) so the check survives `python -O`
            if requested_ids != num_taxa_eligible_for_ids:
                m = 'Number of OTT ids requested ({r}) does not match eligible taxa ({t})'
                m = m.format(r=requested_ids, t=num_taxa_eligible_for_ids)
                raise ValueError(m)
        else:
            # BUG FIX: `requested_ids` was previously unbound here, causing a
            # NameError in the sanity-check error path below.
            requested_ids = num_taxa_eligible_for_ids
        # mint new ids and assign each to an eligible taxon
        with self._growing_shard._doc_counter_lock:
            # build a map of tags to ottids, to return to the caller
            tag_to_id = {}
            first_new_id = self._growing_shard.next_ott_id
            last_new_id = first_new_id + num_taxa_eligible_for_ids - 1
            if last_new_id < first_new_id:
                # This can happen if there are no eligible taxa! In this case,
                # repeat and "burn" the next ottid (ie, it will be used to
                # identify this amendment, but it won't be assigned)
                last_new_id = first_new_id
            new_id = first_new_id
            for taxon in amendment.get("taxa"):
                if "ott_id" not in taxon:
                    taxon["ott_id"] = new_id
                    ttag = taxon["tag"]
                    tag_to_id[ttag] = new_id
                    new_id += 1
                # resolve a parent reference by tag, if present
                ptag = taxon.get("parent_tag")
                if ptag is not None:
                    taxon["parent"] = tag_to_id[ptag]
            if num_taxa_eligible_for_ids > 0:
                if new_id != last_new_id + 1:
                    # BUG FIX: report the number of ids actually applied
                    # (previously this recomputed the requested count).
                    applied = new_id - first_new_id
                    raise ValueError(
                        'Number of OTT ids requested ({r}) does not match ids actually applied ({a})'.format(
                            r=requested_ids, a=applied))
            # Build a proper amendment id, in the format '{subtype}-{first ottid}-{last-ottid}'
            amendment_subtype = 'additions'
            # TODO: Handle other subtypes (beyond additions) by examining JSON?
            amendment_id = "{s}-{f}-{l}".format(s=amendment_subtype, f=first_new_id, l=last_new_id)
            # Check the proposed id for uniqueness (just to be safe), then
            # "reserve" it using a placeholder value.
            with self._index_lock:
                if amendment_id in self._doc2shard_map:
                    # this should never happen!
                    raise KeyError('Amendment "{i}" already exists!'.format(i=amendment_id))
                self._doc2shard_map[amendment_id] = None
            # Set the amendment's top-level "id" property to match
            amendment["id"] = amendment_id
            # pass the id and amendment JSON to a proper git action
            new_amendment_id = None
            r = None
            try:
                # assign the new id to a shard (important prep for commit_and_try_merge2master)
                gd_id_pair = self.create_git_action_for_new_amendment(new_amendment_id=amendment_id)
                new_amendment_id = gd_id_pair[1]
                # For amendments, the id should not have changed!
                if new_amendment_id != amendment_id:
                    raise KeyError('Amendment id unexpectedly changed from "{o}" to "{n}"!'.format(
                        o=amendment_id, n=new_amendment_id))
                try:
                    # it's already been validated, so keep it simple
                    r = self.commit_and_try_merge2master(file_content=amendment,
                                                         doc_id=new_amendment_id,
                                                         auth_info=auth_info,
                                                         parent_sha=None,
                                                         commit_msg=commit_msg,
                                                         merged_sha=None)
                except:
                    # intentionally broad: undo the index entry, then re-raise
                    self._growing_shard.delete_doc_from_index(new_amendment_id)
                    raise
                # amendment is now in the repo, so we can safely reserve the ottids
                first_minted_id, last_minted_id = self._growing_shard._mint_new_ott_ids(
                    how_many=max(num_taxa_eligible_for_ids, 1))
                # do a final check for errors!
                if first_minted_id != first_new_id:
                    raise ValueError('First minted ottid is "{m}", expected "{e}"!'.format(
                        m=first_minted_id, e=first_new_id))
                if last_minted_id != last_new_id:
                    raise ValueError('Last minted ottid is "{m}", expected "{e}"!'.format(
                        m=last_minted_id, e=last_new_id))
                # Add the tag-to-ottid mapping to the response, so a caller
                # (e.g. the curation webapp) can provisionally assign them
                r['tag_to_ottid'] = tag_to_id
            except:
                # intentionally broad: release the reserved id on any failure
                with self._index_lock:
                    if new_amendment_id in self._doc2shard_map:
                        del self._doc2shard_map[new_amendment_id]
                raise
        with self._index_lock:
            self._doc2shard_map[new_amendment_id] = self._growing_shard
        return new_amendment_id, r

    def update_existing_amendment(self,
                                  amendment_id=None,
                                  json_repr=None,
                                  auth_info=None,
                                  parent_sha=None,
                                  merged_sha=None,
                                  commit_msg=''):
        """Validate this JSON and commit it under an existing amendment id.

        Raises ValueError for unparseable/invalid JSON, a missing id, or an
        id that is not already stored.
        """
        amendment = self._coerce_json_to_amendment(json_repr)
        if amendment is None:
            msg = "File failed to parse as JSON:\n{j}".format(j=json_repr)
            raise ValueError(msg)
        if not self._is_valid_amendment_json(amendment):
            msg = "JSON is not a valid amendment:\n{j}".format(j=json_repr)
            raise ValueError(msg)
        if not amendment_id:
            raise ValueError("Amendment id not provided (or invalid)")
        if not self.has_doc(amendment_id):
            msg = "Unexpected amendment id '{}' (expected an existing id!)".format(amendment_id)
            raise ValueError(msg)
        # pass the id and amendment JSON to a proper git action;
        # it's already been validated, so keep it simple.
        # (the previous `try: ... except: raise` wrapper was a no-op)
        # TODO: identify shard for this id!?
        return self.commit_and_try_merge2master(file_content=amendment,
                                                doc_id=amendment_id,
                                                auth_info=auth_info,
                                                parent_sha=parent_sha,
                                                commit_msg=commit_msg,
                                                merged_sha=merged_sha)

    def _build_amendment_id(self, json_repr):
        """Parse the JSON, return a slug in the form '{subtype}-{first ottid}-{last-ottid}'."""
        amendment = self._coerce_json_to_amendment(json_repr)
        if amendment is None:
            return None
        amendment_subtype = 'additions'
        # TODO: Look more deeply once we have other subtypes!
        first_ottid = amendment['TODO']
        last_ottid = amendment['TODO']
        return slugify('{s}-{f}-{l}'.format(s=amendment_subtype, f=first_ottid, l=last_ottid))

    def _is_valid_amendment_id(self, test_id):
        """Test for the expected format '{subtype}-{first ottid}-{last-ottid}', return T/F
        N.B. This does not test for a working GitHub username!"""
        return bool(AMENDMENT_ID_PATTERN.match(test_id))

    def _is_existing_id(self, test_id):
        """Test to see if this id is non-unique (already exists in a shard)"""
        return test_id in self.get_amendment_ids()

    def _is_valid_amendment_json(self, json_repr):
        """Call the primary validator for a quick test"""
        amendment = self._coerce_json_to_amendment(json_repr)
        if amendment is None:
            # invalid JSON, definitely broken
            return False
        aa = validate_amendment(amendment)
        errors = aa[0]
        for e in errors:
            _LOG.debug('> invalid JSON: {m}'.format(m=e.encode('utf-8')))
        if len(errors) > 0:
            return False
        return True

    def _coerce_json_to_amendment(self, json_repr):
        """Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python.
        If the incoming value is already parsed, do nothing. If a string fails to parse, return None."""
        if isinstance(json_repr, dict):
            return json_repr
        try:
            return anyjson.loads(json_repr)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt is not swallowed
            _LOG.warn('> invalid JSON (failed anyjson parsing)')
            return None
# Module-level singleton, created lazily by TaxonomicAmendmentStore().
_THE_TAXONOMIC_AMENDMENT_STORE = None


# noinspection PyPep8Naming
def TaxonomicAmendmentStore(repos_dict=None,
                            repos_par=None,
                            with_caching=True,
                            assumed_doc_version=None,
                            git_ssh=None,
                            pkey=None,
                            git_action_class=TaxonomicAmendmentsGitAction,
                            mirror_info=None,
                            infrastructure_commit_author='OpenTree API <api@opentreeoflife.org>'):
    """Return the singleton _TaxonomicAmendmentStore, creating it on first use.

    Subsequent calls return the cached instance (their arguments are ignored).
    Instantiate _TaxonomicAmendmentStore directly if distinct stores are
    required.
    """
    global _THE_TAXONOMIC_AMENDMENT_STORE
    if _THE_TAXONOMIC_AMENDMENT_STORE is None:
        _THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(
            repos_dict=repos_dict,
            repos_par=repos_par,
            with_caching=with_caching,
            assumed_doc_version=assumed_doc_version,
            git_ssh=git_ssh,
            pkey=pkey,
            git_action_class=git_action_class,
            mirror_info=mirror_info,
            infrastructure_commit_author=infrastructure_commit_author)
    return _THE_TAXONOMIC_AMENDMENT_STORE
| {
"repo_name": "mtholder/peyotl",
"path": "peyotl/amendments/amendments_umbrella.py",
"copies": "2",
"size": "18442",
"license": "bsd-2-clause",
"hash": 4062668217054940700,
"line_mean": 47.6596306069,
"line_max": 124,
"alpha_frac": 0.5431081228,
"autogenerated": false,
"ratio": 4.053186813186813,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003063137239124561,
"num_lines": 379
} |
"""An analysis of tic-tac-toe.
The goal here is to simply iterate over all possible valid states in the
game of tic-tac-toe and perform some basic analyses such the number of
unique valid states and optimal moves for each state.
This implementation uses no classes, which would structure the code more,
although Python lists are flexible enough to make this imperative version
quite terse and easy to understand.
A state is represented by a tuple with nine integer values -- 0, 1 or 2,
where zero means a square is unoccupied and 1/2 denotes a player. We also
pass around which player's turn it is for convenience, but this could be
derived from the state (sum, modulo 2, plus one).
There is no player code here, although it would be easy to add it.
"""
def get_child_states(state, turn):
    """Yield every state reachable when player *turn* fills one empty square."""
    for index, square in enumerate(state):
        if square != 0:
            continue
        yield tuple(state[:index] + (turn,) + state[index + 1:])
def check_won(state, turn):
    """Return True when player *turn* owns a full row, column, or diagonal."""
    triple = (turn,) * 3
    # Collect all eight winning lines as tuples of three squares.
    lines = [state[3 * r:3 * r + 3] for r in range(3)]      # rows
    lines.extend(state[c::3] for c in range(3))             # columns
    lines.append(state[0::4])                               # main diagonal
    lines.append(state[2:7:2])                              # anti-diagonal
    return triple in lines
# The root node is all zeros, and the tree is implemented by lists
# containing states and indices for children and parents.
root = (0,) * 9
states = [root]
children = [[]]
parents = [[]]
# Keep track of which player's turn it is for each state.
turn = [1]
# Keep track of whether a state is finished (no more possible moves),
# is a draw or won by either player (zero means undetermined). Later,
# we will propagate these values up the tree to determine them for
# each state given optimal moves for each state.
finished = [False]
draw = [False]
won = [0]
# Iterate over all possible states using a FIFO queue.
queue = [0]
while len(queue) > 0:
    i = queue.pop(0)
    for child in get_child_states(states[i], turn[i]):
        if not child in states:
            # Unseen state: append its bookkeeping entries in lockstep
            # so index `ic` is consistent across all the parallel lists.
            ic = len(states)
            states.append(child)
            children.append([])
            parents.append([i])
            # (turn - 1) or 2 toggles the player between 1 and 2.
            turn.append((turn[i] - 1) or 2)
            won.append(turn[i] * check_won(child, turn[i]))
            finished.append(won[ic] != 0 or child.count(0) == 0)
            draw.append(finished[ic] and won[ic] == 0)
            # Only non-terminal states need further expansion.
            if not finished[ic] and won[ic] == 0:
                queue.append(ic)
        else:
            # Transposition: the same position reached via another move order;
            # just record the extra parent link.
            ic = states.index(child)
            parents[ic].append(i)
        children[i].append(ic)
# This list will hold all optimal moves for a given state.
optimal = [None] * len(states)
# Propagate draws/wins from the bottom of the tree, which means we
# need to iterate backwards over the list of states, whose order
# will guarantee there is never an undetermined state.
for istate in range(len(states))[::-1]:
    if finished[istate] or won[istate] != 0:
        continue
    # Children that are (eventually) won by the player to move.
    winning = [ic for ic in children[istate] if won[ic] == turn[istate]]
    if len(winning) > 0:
        won[istate] = turn[istate]
        optimal[istate] = winning
    elif all([won[ic] == ((turn[istate] - 1) or 2) for ic in children[istate]]):
        # Every child is won by the opponent, so this state is lost.
        won[istate] = (turn[istate] - 1) or 2
    else:
        # Not winnable and not forced-lost: the best outcome is a draw.
        draws = [ic for ic in children[istate] if draw[ic]]
        if len(draws) > 0:
            draw[istate] = True
            optimal[istate] = draws
        else:
            print "Found a node with undetermined state"
            print "That should not happend"
            print "State:", states[istate]
# Some simple testing.
assert won[states.index((1,1,1,2,0,0,2,0,0))] == 1
assert won[states.index((1,2,0,1,0,2,1,0,0))] == 1
assert won[states.index((1,2,0,2,1,0,0,0,1))] == 1
print "Total number of states:", len(states)
print "Total number of drawn states:", sum(draw)
print "Total number of won states for player 1:", len([w for w in won if w == 1])
print "Total number of won states for player 2:", len([w for w in won if w == 2])
print "Root state is drawn:", draw[0]
print "Number of optimal moves in root state:", len(optimal[0])
print "Avg. number of optimal in second state: %.2f" % (1.0 * sum([len(optimal[i]) for i in optimal[0]]) / len(optimal[0]))
| {
"repo_name": "langner/tictactoe",
"path": "tictactoe.py",
"copies": "1",
"size": "4411",
"license": "mit",
"hash": -7564383586187512000,
"line_mean": 35.1557377049,
"line_max": 123,
"alpha_frac": 0.6311493992,
"autogenerated": false,
"ratio": 3.304119850187266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.940706718910134,
"avg_score": 0.005640412057185165,
"num_lines": 122
} |
"""A nano HTTP server."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#from future.builtins import *
#from future.utils import native
#from future import standard_library
#standard_library.install_hooks()
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
try:
    # Python 2 path: urllib's quote/unquote operate on byte strings, so
    # wrap them to accept and return unicode text.
    from urllib import quote as _quote_str
    from urllib import unquote as _unquote_str

    def quote(part):
        return _quote_str(part.encode("utf-8"))

    def unquote(part):
        if not isinstance(part, bytes): part = part.decode("utf-8")
        return _unquote_str(part).decode("utf-8")
except ImportError:
    # Python 3 path: urllib.parse handles text natively.
    from urllib.parse import quote, unquote
#-- Response --#
class Response(object):
    """HTTP response status, headers, and body content.

    `content` may be text (UTF-8 encoded automatically) or bytes.
    Extra keyword arguments become headers: snake_case names are converted
    to canonical HTTP form (content_type -> Content-Type). Content-Type
    defaults to text/plain; Content-Length and a permissive CORS header
    are filled in automatically.
    """

    def __init__(self, content, status=200, **headers):
        if not isinstance(content, bytes):
            content = content.encode("utf-8")
        self.content = bytes(content)
        self.status = status
        defaults = dict(
            content_type="text/plain",
            content_length=str(len(self.content)),
            access_control_allow_origin="*",
        )
        # Caller-supplied headers override the defaults.
        defaults.update(headers)
        # BUG FIX: removed a dead fallback that mutated the local `headers`
        # dict *after* it had already been merged into `defaults` -- it could
        # never affect the result (and content_type already has a default).
        self.headers = dict((k.title().replace("_", "-"), v)
                            for (k, v) in defaults.items())
def Download(content, content_type="application/octet-stream"):
    """Build a Response that makes the browser download *content* as a file."""
    response = Response(
        content,
        content_type=content_type,
        content_disposition="attachment",
    )
    return response
def Redirect(location):
    """Build a 302 response that temporarily redirects to *location*."""
    extra_headers = {"location": location}
    return Response(location.encode("utf-8"), 302, **extra_headers)
def NotFound():
    """Build a plain-text 404 response."""
    return Response("ERROR: Not found", status=404)
#-- Server --#
def GetRequestHandlerFactory(app):
    """Return a GetRequestHandler subclass with its `app` attribute bound."""
    class DerivedGetRequestHandler(GetRequestHandler):
        pass
    # Assign after creation; the class body cannot see the enclosing `app`
    # name directly in Python 2.
    DerivedGetRequestHandler.app = app
    return DerivedGetRequestHandler
class GetRequestHandler(BaseHTTPRequestHandler):
    """Serve GET requests by routing URL path components to the app.

    Expects a class attribute ``app`` (bound by GetRequestHandlerFactory)
    exposing ``get_response(*path_parts) -> Response``.
    """

    def log_message(self, format_, *args):
        # Silence logging for the high-frequency /poll endpoint.
        # BUG FIX: the literal was previously wrapped in `native(...)`, but
        # the `future.utils` import providing `native` is commented out at
        # module top, so every log call raised NameError.
        if args[0].startswith("GET /poll"):
            return
        return BaseHTTPRequestHandler.log_message(self, format_, *args)

    def do_OPTIONS(self):
        # CORS preflight: allow any origin plus the custom request headers.
        self.send_response(200)
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Headers",
                         "X-Requested-With, X-Application")
        self.end_headers()

    def do_GET(self):
        # Split the URL path into percent-decoded components for the app.
        args = self.path.split("/")
        args = list(map(unquote, args))
        assert args.pop(0) == ""  # since path starts with a slash
        response = self.app.get_response(*args)
        self.send_response(response.status)
        for k, v in response.headers.items():
            self.send_header(k, str(v))
        self.end_headers()
        # BUG FIX: `native(...)` removed here as well; Response.content is
        # already bytes, which is what wfile.write expects.
        self.wfile.write(response.content)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer variant that handles each request in its own thread."""
def Server(app, host, port):
    """Create a threaded HTTP server that serves *app* on (host, port)."""
    return ThreadedHTTPServer((host, port), GetRequestHandlerFactory(app))
| {
"repo_name": "ilmanzo/scratch_extensions",
"path": "venv/lib/python3.4/site-packages/blockext/server.py",
"copies": "1",
"size": "3262",
"license": "mit",
"hash": -3891178590684320300,
"line_mean": 30.9803921569,
"line_max": 76,
"alpha_frac": 0.6397915389,
"autogenerated": false,
"ratio": 4.1186868686868685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016007534298142296,
"num_lines": 102
} |
"""An Apache Beam DoFn for lazily collecting GCS paths into a PCollection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import apache_beam as beam
from google.cloud import storage
# Captures the bucket name and the (possibly empty) /blob suffix of a GCS URI.
_SPLIT_INTO_BUCKET_AND_BLOB_REGEX = re.compile(r'^gs://([a-z0-9\-_.]+)(|/.*)$')


def split_into_bucket_and_blob(path):
    """Split a GCS URI into a bucket name and a blob name.

    Args:
      path: A GCS path starting with gs:// and the full name of a bucket.

    Returns:
      Tuple[str, str]: The name of the bucket and the blob within the bucket
      (empty string when the URI names only a bucket).

    Raises:
      ValueError: if *path* is not a valid GCS URI.
    """
    match = _SPLIT_INTO_BUCKET_AND_BLOB_REGEX.match(path)
    if not match:
        raise ValueError('Invalid GCS path or path prefix' + path)
    bucket_name, blob_name = match.groups()
    # Drop the single leading slash separating bucket from blob, if present.
    if blob_name.startswith('/'):
        blob_name = blob_name[1:]
    return bucket_name, blob_name
class GetPaths(beam.DoFn):
    """Collect all the paths in a GCS bucket with a given prefix.

    Notes:
      This is a "fan-out" operation; follow it with a beam.Reshuffle
      transform to avoid spurious fusion.

    Args:
      max_results (Union[int, None], default: None): maximum number of paths
        to yield per prefix.
      validation_regex (Union[str, None], default: None): regular expression
        that a full gs:// URI must match to be emitted.
    """

    def __init__(self, max_results=None, validation_regex=None):
        super(GetPaths, self).__init__()
        self.max_results = max_results
        # The GCS client is not serializable; create it lazily per worker.
        self._client = None
        if validation_regex:
            self.validation_regex = re.compile(validation_regex)
        else:
            self.validation_regex = None

    def process(self, prefix):
        """Overrides beam.DoFn.process.

        Args:
          prefix (str): shared prefix of the generated paths; begins with
            gs:// and a complete GCS bucket name.

        Yields:
          unicode: every matching blob URI under the prefix (directory
          placeholders ending in '/' are skipped).
        """
        if self._client is None:
            self._client = storage.Client()
        bucket_name, blob_prefix = split_into_bucket_and_blob(prefix)
        bucket = self._client.bucket(bucket_name)
        blobs = bucket.list_blobs(prefix=blob_prefix,
                                  max_results=self.max_results)
        for blob in blobs:
            if blob.name.endswith('/'):
                continue
            uri = u'gs://{}/{}'.format(bucket_name, blob.name)
            if self.validation_regex is None or self.validation_regex.search(uri):
                yield uri
| {
"repo_name": "GoogleCloudPlatform/healthcare",
"path": "datathon/datathon_etl_pipelines/dofns/get_paths.py",
"copies": "1",
"size": "2606",
"license": "apache-2.0",
"hash": -3346455344118621000,
"line_mean": 33.2894736842,
"line_max": 80,
"alpha_frac": 0.6765157329,
"autogenerated": false,
"ratio": 3.7335243553008595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9876957234601891,
"avg_score": 0.006616570719793713,
"num_lines": 76
} |
"""An Apache Beam DoFn for resizing images."""
import apache_beam as beam
import tensorflow as tf
class ResizeImage(beam.DoFn):
    """Resize an encoded (JPEG or PNG) image byte-string to a fixed size.

    Args:
      image_format (Union['jpg', 'jpeg', 'png']): format of the input and
        output images ('jpeg' is normalized to 'jpg').
      height (int): target image height.
      width (int): target image width.
      color_channels (int, default=None): number of color channels in the
        output image; None keeps the input image's channel count.
    """

    def __init__(self, image_format, height, width, color_channels=None):
        super(ResizeImage, self).__init__()
        fmt = 'jpg' if image_format == 'jpeg' else image_format
        if fmt not in ('jpg', 'png'):
            raise ValueError('Unrecognized image format ' + fmt)
        self.image_format = fmt
        self.image_shape = (height, width)
        # 0 tells tf.image.decode_* to match the input's channel count.
        self.image_channels = 0 if color_channels is None else color_channels
        self.initialized = False
        # TF objects are not serializable; built lazily per worker.
        self._input_bytes_tensor = None
        self._output_bytes_tensor = None
        self._session = None

    def initialize(self):
        """Build the TensorFlow graph and session for this worker."""
        self._input_bytes_tensor = tf.placeholder(tf.string, [])
        if self.image_format == 'jpg':
            decode_fn, encode_fn = tf.image.decode_jpeg, tf.image.encode_jpeg
        elif self.image_format == 'png':
            decode_fn, encode_fn = tf.image.decode_png, tf.image.encode_png
        else:
            raise ValueError('Unrecognized image format ' + self.image_format)
        raw_image = decode_fn(self._input_bytes_tensor,
                              channels=self.image_channels)
        resized = tf.cast(tf.image.resize_images(raw_image, self.image_shape),
                          tf.uint8)
        self._output_bytes_tensor = encode_fn(resized)
        self._session = tf.Session()
        self.initialized = True

    def process(self, element):
        """Overrides beam.DoFn.process.

        Args:
          element (Tuple[Any, bytes]): a key with encoded image bytes; the
            key passes through unmodified.

        Yields:
          Tuple[Any, bytes]: the key and the resized, re-encoded image.
        """
        key, image_bytes = element
        if not self.initialized:
            # Initialize non-serializable state once per worker.
            self.initialize()
        resized_bytes = self._session.run(
            self._output_bytes_tensor,
            {self._input_bytes_tensor: image_bytes})
        yield key, resized_bytes
| {
"repo_name": "GoogleCloudPlatform/healthcare",
"path": "datathon/datathon_etl_pipelines/dofns/resize_image.py",
"copies": "1",
"size": "2657",
"license": "apache-2.0",
"hash": -5931455663368549000,
"line_mean": 33.5064935065,
"line_max": 79,
"alpha_frac": 0.6616484757,
"autogenerated": false,
"ratio": 3.8395953757225434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5001243851422543,
"avg_score": null,
"num_lines": null
} |
"""An API for accessing images."""
import atexit
import pathlib
import tkinter
from typing import Dict, List
# __path__[0] is the directory where this __init__.py is
__path__: List[str]
images_dir = pathlib.Path(__path__[0]).absolute()

# tkinter images destroy themselves on __del__. here's how cpython exits:
#
# 1) atexit callbacks run
# 2) module globals are set to None (lol)
# 3) all objects are destroyed and __del__ methods run
#
# tkinter.Image.__del__ destroys the image, and that uses
# "except TclError". this means two things:
#
# - it's necessary to hold references to the images to avoid calling
#   __del__ while they're being used somewhere
# - the images must be destroyed before step 2 above
#
# tldr: the cache is not just a performance or memory optimization
_image_cache: Dict[str, tkinter.PhotoImage] = {}
atexit.register(_image_cache.clear)
def get(name: str) -> tkinter.PhotoImage:
    """Load a ``tkinter.PhotoImage`` from an image file shipped with Porcupine.

    *name* is the stem of a file in :source:`porcupine/images`, e.g.
    ``'triangle'``. Repeated calls with the same name return the same
    cached image object.
    """
    try:
        return _image_cache[name]
    except KeyError:
        pass
    matches = [path for path in images_dir.iterdir() if path.stem == name]
    if not matches:
        raise FileNotFoundError(f"no image file named {name!r}")
    assert len(matches) == 1, f"there are multiple {name!r} files"
    image = tkinter.PhotoImage(file=matches[0])
    _image_cache[name] = image
    return image
| {
"repo_name": "Akuli/editor",
"path": "porcupine/images/__init__.py",
"copies": "2",
"size": "1641",
"license": "mit",
"hash": 7586439203305984000,
"line_mean": 31.82,
"line_max": 79,
"alpha_frac": 0.6879951249,
"autogenerated": false,
"ratio": 3.638580931263858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5326576056163859,
"avg_score": null,
"num_lines": null
} |
"""An API for feeding operations asynchronously to Chopsticks."""
from __future__ import print_function
import sys
import traceback
from functools import partial
from collections import deque
from .tunnel import loop, PY2, BaseTunnel
from .group import Group, GroupOp
__metaclass__ = type
class NotCompleted(Exception):
    """No value has been received by an AsyncResult."""


class AsyncResult:
    """The deferred result of a queued operation."""

    def __init__(self):
        self._callback = None
        # The NotCompleted class itself doubles as the "no value yet" sentinel.
        self._value = NotCompleted

    def with_callback(self, callback):
        """Attach *callback*, to be invoked once a value arrives."""
        # Chopsticks is not currently multithreaded, so in the intended usage
        # a value cannot be set before the callback is registered; these
        # asserts just validate that usage.
        assert self._callback is None, "Callback already set."
        assert self._value is NotCompleted, "Value already set."
        self._callback = callback
        return self

    @property
    def value(self):
        """The result value; raises NotCompleted until one has been set."""
        result = self._value
        if result is NotCompleted:
            raise NotCompleted('The operation has not completed.')
        return result

    def _set(self, obj):
        """Record *obj* as the value and fire the callback, if any."""
        self._value = obj
        if self._callback is None:
            return
        try:
            self._callback(obj)
        except Exception:
            print('Error dispatching async callback', file=sys.stderr)
            traceback.print_exc()
def iteritems(d):
    """Compatibility shim for dict iteration."""
    return d.iteritems() if PY2 else d.items()
class Queue:
"""A queue of tasks to be performed.
Queues build on Groups and Tunnels in order to feed tasks as quickly as
possible to all connected hosts.
All methods accept a parameter `target`, which specifies which tunnels the
operation should be performed with. This can be specified as a
:class:`Tunnel` or a :class:`Group`.
Each one returns an :class:`AsyncResult` which can be used to receive the
result of the operation.
"""
def __init__(self):
self.queued = {}
self.running = False
def _enqueue_group(self, methname, group, args, kwargs):
"""Enqueue an operation on a Group of tunnels."""
async_result = AsyncResult()
op = GroupOp(async_result._set)
for tunnel in group.tunnels:
r = self._enqueue_tunnel(methname, tunnel, args, kwargs)
r.with_callback(op.make_callback(tunnel.host))
return async_result
def _enqueue_tunnel(self, methname, tunnel, args, kwargs):
"""Enqueue an operation on a Tunnel."""
async_funcname = '_%s_async' % methname
async_func = getattr(tunnel, async_funcname)
async_result = AsyncResult()
try:
queue = self.queued[tunnel]
except KeyError:
queue = self.queued[tunnel] = deque()
self.connect(tunnel)
if self.running:
queue[0]() # start the connect
def callback(result):
async_result._set(result)
assert queue[0] is bound
queue.popleft()
if queue:
queue[0]()
else:
del self.queued[tunnel]
if not self.queued:
loop.stop()
bound = partial(async_func, callback, *args, **kwargs)
queue.append(bound)
return async_result
def mkhandler(methname):
"""Create a wrapper for queueing the 'methname' operation."""
def enqueue(self, target, *args, **kwargs):
if not isinstance(target, (BaseTunnel, Group)):
raise TypeError(
'Invalid target; expected Tunnel or Group'
)
if isinstance(target, Group):
m = self._enqueue_group
else:
m = self._enqueue_tunnel
return m(methname, target, args, kwargs)
if PY2:
enqueue.func_name == methname
else:
enqueue.__name__ = methname
enqueue.__doc__ = (
"Queue a :meth:`~chopsticks.tunnel.BaseTunnel.{meth}()` operation "
"to be run on the target.".format(meth=methname).lstrip()
)
return enqueue
connect = mkhandler('connect')
call = mkhandler('call')
fetch = mkhandler('fetch')
put = mkhandler('put')
del mkhandler
# fetch is slightly different because it constructs different local paths
# for each host:
def fetch(self, target, remote_path, local_path=None):
"""Queue a :meth:`~chopsticks.tunnel.BaseTunnel.fetch()` operation to be run on the target. """ # noqa
if isinstance(target, BaseTunnel):
return self._enqueue_tunnel(
'fetch', target,
(),
{'remote_path': remote_path, 'local_path': local_path}
)
async_result = AsyncResult()
op = GroupOp(async_result._set)
for tun, local_path in Group._local_paths(target.tunnels, local_path):
r = self._enqueue_tunnel(
'fetch', tun, (),
{'remote_path': remote_path, 'local_path': local_path}
)
r.with_callback(op.make_callback(tun.host))
return async_result
def run(self):
    """Run all items in the queue.

    This method does not return until the queue is empty.
    """
    self.running = True
    try:
        # Kick off the first pending operation on each connection; the
        # completion callbacks chain the rest.
        for host, pending in iteritems(self.queued):
            if pending:
                pending[0]()
        loop.run()
    finally:
        self.running = False
| {
"repo_name": "lordmauve/chopsticks",
"path": "chopsticks/queue.py",
"copies": "1",
"size": "5971",
"license": "apache-2.0",
"hash": 4071369481175342000,
"line_mean": 30.4263157895,
"line_max": 112,
"alpha_frac": 0.5757829509,
"autogenerated": false,
"ratio": 4.436106983655275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 190
} |
"""An api for generating documentation from the codebase
"""
from os.path import dirname, join
from os import sep
from re import compile
import subprocess
def generate_documentation(dirs, output_dir):
    """Use doxygen to generate the documentation

    Positional arguments:
    dirs - the directories that doxygen should scan for documentation
    output_dir - location of the documentation after the return of this function
    """
    # Fix: use the function-call form of print (single argument), which
    # behaves identically on Python 2 and is valid on Python 3, instead of
    # the Python-2-only `print dirs` statement.
    print(dirs)
    with open(join(dirname(__file__), "Doxyfile")) as doxyfile:
        # Feed the base Doxyfile to doxygen on stdin, appending overrides
        # for the output directory and input paths.
        proc = subprocess.Popen(["doxygen", "-"], stdin=subprocess.PIPE)
        proc.stdin.write(doxyfile.read())
        proc.stdin.write("OUTPUT_DIRECTORY={}\n".format(output_dir))
        proc.stdin.write("INPUT={}".format(" ".join(dirs)))
        proc.stdin.close()
        proc.wait()
# Path fragments whose headers should not be documented.
EXCLUDES = ["targets", "features/FEATURE", "features/mbedtls",
            "features/nanostack", "features/storage"]


def is_not_excluded(src):
    """Return True when `src` contains none of the excluded path fragments."""
    return not any(exclude in src for exclude in EXCLUDES)
if __name__ == "__main__":
    import sys
    from os.path import abspath, dirname, join
    # Be sure that the tools directory is in the search path
    ROOT = abspath(join(dirname(__file__), "..", ".."))
    sys.path.insert(0, ROOT)
    from tools.toolchains.gcc import GCC_ARM
    from tools.targets import TARGET_MAP
    # Scan resources with a GCC_ARM toolchain for the "Super_Target" target,
    # then document every discovered header that is not excluded, writing
    # the output next to this tools tree in "mbed-docs".
    toolchain = GCC_ARM(TARGET_MAP["Super_Target"])
    resources = toolchain.scan_resources(".")
    generate_documentation(filter(is_not_excluded,
                                  sum(map(lambda x: x.headers,
                                          resources.features.values()),
                                      resources.headers)),
                           join(dirname(dirname(__file__)), "mbed-docs"))
| {
"repo_name": "radhika-raghavendran/mbed-os5.1-onsemi",
"path": "tools/misc/docs_gen.py",
"copies": "16",
"size": "1751",
"license": "apache-2.0",
"hash": 1222827911062095000,
"line_mean": 36.2553191489,
"line_max": 80,
"alpha_frac": 0.6259280411,
"autogenerated": false,
"ratio": 4.229468599033816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An API for managing OAuth2 tokens."""
from __future__ import unicode_literals
from django.db.models.query import Q
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields,
webapi_response_errors)
from djblets.webapi.oauth2_scopes import get_scope_dictionary
from djblets.webapi.errors import DOES_NOT_EXIST, INVALID_FORM_DATA
from oauth2_provider.models import AccessToken
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
class OAuthTokenResource(WebAPIResource):
    """An API resource for managing OAuth2 tokens.

    This resource allows callers to list, update, or delete their existing
    tokens.
    """

    model = AccessToken
    name = 'oauth_token'
    verbose_name = _('OAuth2 Tokens')
    uri_object_key = 'oauth_token_id'
    item_result_key = 'oauth_token'
    required_features = [oauth2_service_feature]
    allowed_methods = ('GET', 'PUT', 'DELETE')

    # Tokens are themselves credentials, so don't allow authenticating to
    # this resource with an API token or another OAuth2 token.
    api_token_access_allowed = False
    oauth2_token_access_allowed = False

    added_in = '3.0'

    fields = {
        'application': {
            'type': six.text_type,
            'description': 'The name of the application this token is for.',
        },
        'expires': {
            'type': six.text_type,
            'description': 'When this token is set to expire.',
        },
        'scope': {
            'type': [six.text_type],
            'description': 'The scopes this token has access to.',
        },
        'token': {
            'type': six.text_type,
            'description': 'The access token.',
        },
    }

    def serialize_application_field(self, obj, *args, **kwargs):
        """Serialize the application field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            unicode:
            The name of the application the access token has access to.
        """
        return obj.application.name

    def serialize_expires_field(self, obj, *args, **kwargs):
        """Serialize the expires field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            unicode:
            The expiry date of the token, in ISO-8601 format.
        """
        return obj.expires.isoformat()

    def serialize_scope_field(self, obj, *args, **kwargs):
        """Serialize the scope field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            list of unicode:
            The list of scopes the token has.
        """
        # Scopes are stored as a single space-separated string.
        return obj.scope.split()

    def get_queryset(self, request, *args, **kwargs):
        """Return the queryset for the request.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            local_site (reviewboard.site.models.LocalSite, optional):
                The current LocalSite, if any.

        Returns:
            django.db.models.query.QuerySet:
            The tokens the user has access to.
        """
        if not request.user.is_authenticated():
            return AccessToken.objects.none()

        # Superusers may see all tokens on this LocalSite; everyone else
        # only sees their own.
        q = Q(application__local_site=request.local_site)

        if not request.user.is_superuser:
            q &= Q(user=request.user)

        return (
            AccessToken.objects
            .filter(q)
            .select_related('application')
        )

    def has_access_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has access permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return (request.user.is_authenticated() and
                (obj.user == request.user or
                 request.user.is_superuser))

    def has_modify_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has modification permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return self.has_access_permissions(request, obj, *args, **kwargs)

    def has_delete_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has deletion permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return self.has_access_permissions(request, obj, *args, **kwargs)

    @webapi_login_required
    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Retrieves information on a particular OAuth2 token.

        This can only be accessed by the owner of the tokens or superusers
        """
        pass

    @webapi_login_required
    @augment_method_from(WebAPIResource)
    def get_list(self, *args, **kwargs):
        """Retrieve a list of information about an OAuth2 token.

        If accessing this API on a Local Site, the results will be limited
        to those associated with that site. Otherwise, it will be limited to
        those associated with no Local Site.

        This can only be accessed by the owner of the tokens or superusers.
        """
        pass

    @augment_method_from(WebAPIResource)
    def delete(self, *args, **kwargs):
        """Delete the OAuth2 token, invalidating all clients using it.

        The OAuth token will be removed from the user's account, and will no
        longer be usable for authentication.

        After deletion, this will return a :http:`204`.
        """
        pass

    @webapi_login_required
    @webapi_check_local_site
    @webapi_response_errors(DOES_NOT_EXIST)
    @webapi_request_fields(
        optional={
            'add_scopes': {
                'type': six.text_type,
                'description': 'A comma-separated list of scopes to add.',
            },
            'remove_scopes': {
                'type': six.text_type,
                'description': 'A comma-separated list of scopes to remove.',
            },
            'scopes': {
                'type': six.text_type,
                'description': 'A comma-separated list of scopes to override '
                               'the current set with.\n\n'
                               'This field cannot be provided if either '
                               'add_scopes or remove_scopes is provided.',
            },
        },
    )
    def update(self, request, local_site=None, add_scopes=None,
               remove_scopes=None, scopes=None, *args, **kwargs):
        """Update the scope of an OAuth2 token.

        This resource allows a user to either (1) add and remove scopes or (2)
        replace the set of scopes with a new set.
        """
        try:
            access_token = self.get_object(request, *args, **kwargs)
        except AccessToken.DoesNotExist:
            return DOES_NOT_EXIST

        if not self.has_modify_permissions(request, access_token, *args,
                                           **kwargs):
            return self.get_no_access_error(request)

        # `scopes` is mutually exclusive with `add_scopes`/`remove_scopes`.
        if ((add_scopes is not None or remove_scopes is not None) and
            scopes is not None):
            return INVALID_FORM_DATA, {
                'fields': {
                    'scopes': [
                        'This field cannot be provided if either add_scopes '
                        'or remove_scopes is provided.',
                    ],
                },
            }

        field_errors = {}
        valid_scopes = get_scope_dictionary()

        if scopes is not None:
            scopes = self._validate_scopes(valid_scopes, scopes, 'scopes',
                                           field_errors)
        elif add_scopes is not None or remove_scopes is not None:
            add_scopes = self._validate_scopes(valid_scopes,
                                               add_scopes,
                                               'add_scopes',
                                               field_errors)
            remove_scopes = self._validate_scopes(valid_scopes,
                                                  remove_scopes,
                                                  'remove_scopes',
                                                  field_errors)

        if field_errors:
            return INVALID_FORM_DATA, {
                'fields': field_errors,
            }

        if scopes is not None:
            # Replace the whole scope set.
            access_token.scope = ' '.join(scopes)
            access_token.save(update_fields=('scope',))
        elif add_scopes is not None or remove_scopes is not None:
            # Apply additions first, then removals, to the current set.
            current_scopes = set(access_token.scope.split(' '))

            if add_scopes:
                current_scopes.update(add_scopes)

            if remove_scopes:
                current_scopes.difference_update(remove_scopes)

            access_token.scope = ' '.join(current_scopes)
            access_token.save(update_fields=('scope',))

        return 200, {
            self.item_result_key: access_token,
        }

    def _validate_scopes(self, valid_scopes, scopes, field, field_errors):
        """Validate the given set of scopes against known valid scopes.

        Args:
            valid_scopes (dict):
                The scope dictionary.

            scopes (unicode):
                The comma-separated list of scopes to validate.

            field (unicode):
                The name of the field that is being validated.

            field_errors (dict):
                A mapping of field names to errors.

                An error message will be added to ``field_errors[field]`` for
                each invalid scope.

        Returns:
            list:
            The list of scopes, if they are all valid, or ``None`` otherwise.
        """
        if scopes is None:
            return None

        scopes = scopes.split(',')
        invalid_scopes = {
            scope
            for scope in scopes
            if scope not in valid_scopes
        }

        if invalid_scopes:
            field_errors[field] = [
                'The scope "%s" is invalid.' % scope
                for scope in invalid_scopes
            ]

            return None

        return scopes
# Singleton instance registered with the Web API resource tree.
oauth_token_resource = OAuthTokenResource()
| {
"repo_name": "brennie/reviewboard",
"path": "reviewboard/webapi/resources/oauth_token.py",
"copies": "1",
"size": "11905",
"license": "mit",
"hash": -3903974834576161300,
"line_mean": 31.4386920981,
"line_max": 78,
"alpha_frac": 0.549601008,
"autogenerated": false,
"ratio": 4.711119905025722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5760720913025722,
"avg_score": null,
"num_lines": null
} |
"""An API for managing OAuth2 tokens."""
from __future__ import unicode_literals
from django.db.models.query import Q
from django.utils.translation import ugettext_lazy as _
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields,
webapi_response_errors)
from djblets.webapi.oauth2_scopes import get_scope_dictionary
from djblets.webapi.errors import DOES_NOT_EXIST, INVALID_FORM_DATA
from djblets.webapi.fields import ListFieldType, StringFieldType
from oauth2_provider.models import AccessToken
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
class OAuthTokenResource(WebAPIResource):
    """An API resource for managing OAuth2 tokens.

    This resource allows callers to list, update, or delete their existing
    tokens.
    """

    model = AccessToken
    name = 'oauth_token'
    verbose_name = _('OAuth2 Tokens')
    uri_object_key = 'oauth_token_id'
    item_result_key = 'oauth_token'
    required_features = [oauth2_service_feature]
    allowed_methods = ('GET', 'PUT', 'DELETE')

    # Tokens are themselves credentials, so don't allow authenticating to
    # this resource with an API token or another OAuth2 token.
    api_token_access_allowed = False
    oauth2_token_access_allowed = False

    added_in = '3.0'

    fields = {
        'application': {
            'type': StringFieldType,
            'description': 'The name of the application this token is for.',
        },
        'expires': {
            'type': StringFieldType,
            'description': 'When this token is set to expire.',
        },
        'scope': {
            'type': ListFieldType,
            'items': {
                'type': StringFieldType,
            },
            'description': 'The scopes this token has access to.',
        },
        'token': {
            'type': StringFieldType,
            'description': 'The access token.',
        },
    }

    def serialize_application_field(self, obj, *args, **kwargs):
        """Serialize the application field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            unicode:
            The name of the application the access token has access to.
        """
        return obj.application.name

    def serialize_expires_field(self, obj, *args, **kwargs):
        """Serialize the expires field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            unicode:
            The expiry date of the token, in ISO-8601 format.
        """
        return obj.expires.isoformat()

    def serialize_scope_field(self, obj, *args, **kwargs):
        """Serialize the scope field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            list of unicode:
            The list of scopes the token has.
        """
        # Scopes are stored as a single space-separated string.
        return obj.scope.split()

    def get_queryset(self, request, *args, **kwargs):
        """Return the queryset for the request.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            local_site (reviewboard.site.models.LocalSite, optional):
                The current LocalSite, if any.

        Returns:
            django.db.models.query.QuerySet:
            The tokens the user has access to.
        """
        if not request.user.is_authenticated():
            return AccessToken.objects.none()

        # Superusers may see all tokens on this LocalSite; everyone else
        # only sees their own.
        q = Q(application__local_site=request.local_site)

        if not request.user.is_superuser:
            q &= Q(user=request.user)

        return (
            AccessToken.objects
            .filter(q)
            .select_related('application')
        )

    def has_access_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has access permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        # Compare by primary key to avoid fetching the related user object.
        return (request.user.is_authenticated() and
                (obj.user_id == request.user.pk or
                 request.user.is_superuser))

    def has_modify_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has modification permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return self.has_access_permissions(request, obj, *args, **kwargs)

    def has_delete_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has deletion permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return self.has_access_permissions(request, obj, *args, **kwargs)

    @webapi_login_required
    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Retrieves information on a particular OAuth2 token.

        This can only be accessed by the owner of the tokens or superusers
        """
        pass

    @webapi_login_required
    @augment_method_from(WebAPIResource)
    def get_list(self, *args, **kwargs):
        """Retrieve a list of information about an OAuth2 token.

        If accessing this API on a Local Site, the results will be limited
        to those associated with that site. Otherwise, it will be limited to
        those associated with no Local Site.

        This can only be accessed by the owner of the tokens or superusers.
        """
        pass

    @augment_method_from(WebAPIResource)
    def delete(self, *args, **kwargs):
        """Delete the OAuth2 token, invalidating all clients using it.

        The OAuth token will be removed from the user's account, and will no
        longer be usable for authentication.

        After deletion, this will return a :http:`204`.
        """
        pass

    @webapi_login_required
    @webapi_check_local_site
    @webapi_response_errors(DOES_NOT_EXIST)
    @webapi_request_fields(
        optional={
            'add_scopes': {
                'type': StringFieldType,
                'description': 'A comma-separated list of scopes to add.',
            },
            'remove_scopes': {
                'type': StringFieldType,
                'description': 'A comma-separated list of scopes to remove.',
            },
            'scopes': {
                'type': StringFieldType,
                'description': 'A comma-separated list of scopes to override '
                               'the current set with.\n\n'
                               'This field cannot be provided if either '
                               'add_scopes or remove_scopes is provided.',
            },
        },
    )
    def update(self, request, local_site=None, add_scopes=None,
               remove_scopes=None, scopes=None, *args, **kwargs):
        """Update the scope of an OAuth2 token.

        This resource allows a user to either (1) add and remove scopes or (2)
        replace the set of scopes with a new set.
        """
        try:
            access_token = self.get_object(request, *args, **kwargs)
        except AccessToken.DoesNotExist:
            return DOES_NOT_EXIST

        if not self.has_modify_permissions(request, access_token, *args,
                                           **kwargs):
            return self.get_no_access_error(request)

        # `scopes` is mutually exclusive with `add_scopes`/`remove_scopes`.
        if ((add_scopes is not None or remove_scopes is not None) and
            scopes is not None):
            return INVALID_FORM_DATA, {
                'fields': {
                    'scopes': [
                        'This field cannot be provided if either add_scopes '
                        'or remove_scopes is provided.',
                    ],
                },
            }

        field_errors = {}
        valid_scopes = get_scope_dictionary()

        if scopes is not None:
            scopes = self._validate_scopes(valid_scopes, scopes, 'scopes',
                                           field_errors)
        elif add_scopes is not None or remove_scopes is not None:
            add_scopes = self._validate_scopes(valid_scopes,
                                               add_scopes,
                                               'add_scopes',
                                               field_errors)
            remove_scopes = self._validate_scopes(valid_scopes,
                                                  remove_scopes,
                                                  'remove_scopes',
                                                  field_errors)

        if field_errors:
            return INVALID_FORM_DATA, {
                'fields': field_errors,
            }

        if scopes is not None:
            # Replace the whole scope set.
            access_token.scope = ' '.join(scopes)
            access_token.save(update_fields=('scope',))
        elif add_scopes is not None or remove_scopes is not None:
            # Apply additions first, then removals, to the current set.
            current_scopes = set(access_token.scope.split(' '))

            if add_scopes:
                current_scopes.update(add_scopes)

            if remove_scopes:
                current_scopes.difference_update(remove_scopes)

            access_token.scope = ' '.join(current_scopes)
            access_token.save(update_fields=('scope',))

        return 200, {
            self.item_result_key: access_token,
        }

    def _validate_scopes(self, valid_scopes, scopes, field, field_errors):
        """Validate the given set of scopes against known valid scopes.

        Args:
            valid_scopes (dict):
                The scope dictionary.

            scopes (unicode):
                The comma-separated list of scopes to validate.

            field (unicode):
                The name of the field that is being validated.

            field_errors (dict):
                A mapping of field names to errors.

                An error message will be added to ``field_errors[field]`` for
                each invalid scope.

        Returns:
            list:
            The list of scopes, if they are all valid, or ``None`` otherwise.
        """
        if scopes is None:
            return None

        scopes = scopes.split(',')
        invalid_scopes = {
            scope
            for scope in scopes
            if scope not in valid_scopes
        }

        if invalid_scopes:
            field_errors[field] = [
                'The scope "%s" is invalid.' % scope
                for scope in invalid_scopes
            ]

            return None

        return scopes
# Singleton instance registered with the Web API resource tree.
oauth_token_resource = OAuthTokenResource()
| {
"repo_name": "reviewboard/reviewboard",
"path": "reviewboard/webapi/resources/oauth_token.py",
"copies": "2",
"size": "12036",
"license": "mit",
"hash": -6183143365883414000,
"line_mean": 31.5297297297,
"line_max": 78,
"alpha_frac": 0.5508474576,
"autogenerated": false,
"ratio": 4.74792899408284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.629877645168284,
"avg_score": null,
"num_lines": null
} |
"""An API for recording stats about your Python application.
The goal of this library is to make it as simple as possible to record stats
about your running application.
The stats are written to a named pipe. By default, the named pipes are stored
in /tmp/stats-pipe. They are named "<PID>.stats". Integer and floating point
values are supported.
The API is simple. You can `incr` or `set` values. Use the standard `cat`
command or whichever tool you prefer to read out the data.
"""
import atexit
import errno
import math
import os
import threading

from collections import defaultdict, deque
# Developer API
# =============
def set(name, value):
    """Set the stat `name` to `value`, under the module lock."""
    with _stats_lock:
        _stats[name] = value
def incr(name, value=1):
    """Increment the stat `name` by `value` (default 1), under the module lock."""
    with _stats_lock:
        _stats[name] += value
def record(name, value, format_func=str.format):
    """Record an instance of `value` for the stat `name`.

    The `name` must have a "{0}" replacement token in it so aggregate
    calculations have a chance to label the aggregate value.
    """
    with _stats_lock:
        _deques[name].append(value)
        # Remember the first formatter supplied for this stat name.
        _formatters.setdefault(name, format_func)
def get_all():
    """Return a dictionary of the recorded stats.

    Returns a plain-dict snapshot of the current stat values (not the
    live defaultdict).
    """
    return dict(_stats)
# Deployer API
# ============

# Runtime configuration; deployers may override 'pipe_dir' (where the
# per-process named pipes are created) before calling start_recorder().
config = {
    'pipe_dir': '/tmp/stats-pipe',
}
def start_recorder():
    """Starts a dedicated thread for handling the stats named pipe.

    Ensures that only a single instance of the thread starts. Creates the
    directory for holding the named pipes, if needed.
    """
    global _recorder
    try:
        os.mkdir(config['pipe_dir'])
    except OSError as e:
        # Use the symbolic errno instead of the magic number 17; the
        # directory already existing is the one expected failure.
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
    if not _recorder:
        _recorder = _StatRecorder()
        # Daemonize so the recorder thread never blocks interpreter exit.
        _recorder.setDaemon(True)
        _recorder.start()
# Private Code
# ============

# Guards all mutation of the module-level stat structures below.
_stats_lock = threading.Lock()

# name -> current value; missing names default to 0 so incr() works blind.
_stats = defaultdict(int)

# name -> bounded deque of recent recorded samples (last 100 by default).
_deques = defaultdict(lambda: deque(list(), 100))

# The singleton _StatRecorder thread, once start_recorder() has run.
_recorder = None

# name -> format function used to label aggregate metrics.
_formatters = {}
def basic_percentiles(name, vals):
    """Yield (label, value) aggregates for the sorted samples `vals`.

    Emits the median, 95th, 99th and 100th percentiles, plus the mean when
    any samples exist. Labels are produced by the stat's format function.
    """
    count = len(vals)
    fmt = _formatters[name]
    for pct, label in [(50, "median"), (95, "95th"), (99, "99th"), (100, "100th")]:
        idx = max(int(math.floor(count * (pct * 0.01))) - 1, 0)
        yield (fmt(name, label), vals[idx] if vals else 0.0)
    if count:
        yield (fmt(name, "mean"), sum(vals) / count)
class _StatRecorder(threading.Thread):
    """Thread that serves the recorded stats through a named pipe.

    Each cycle creates the fifo, blocks until a reader opens it, writes a
    snapshot of the stats, then removes the fifo.
    """

    def __init__(self, calculator=basic_percentiles, deque_size=100):
        super(_StatRecorder, self).__init__()
        # One pipe per process: <pipe_dir>/<PID>.stats
        default_filename = "%s.stats" % (os.getpid())
        self.statpath = os.path.join(config['pipe_dir'], default_filename)
        self.calculator = calculator
        # Resize future sample deques to the requested bound.
        _deques.default_factory = (lambda: deque(list(), deque_size))

    def set_deques(self):
        # Does aggregate calculations on the values recorded in _deques and
        # calls our `set` function to get them in the _stats dictionary for
        # pipe output.
        for name, vals in _deques.iteritems():
            vals = sorted(vals)
            for (metric, val) in self.calculator(name, vals):
                # NOTE This call is *not* the builtin set type; its our own
                # little API set function. An unfortunate collision that we run
                # into on our way to a nice statvent API.
                set(metric, val)

    def run(self):
        # Try and cleanup the named pipe after we exit. Not guaranteed to be
        # called (in the case of a SIGKILL or machine crash or ...).
        @atexit.register
        def cleanup():
            try:
                os.unlink(self.statpath)
            except OSError:
                pass
        while True:
            os.mkfifo(self.statpath)
            # NOTE: The thread blocks here until a proc opens the pipe to read.
            f = open(self.statpath, 'w')
            self.set_deques()
            # Only numeric stats are written; each line is "name: value".
            for name, value in get_all().iteritems():
                if isinstance(value, float):
                    f.write('%s: %f\n' % (name, value))
                elif isinstance(value, (int, long)):
                    f.write('%s: %d\n' % (name, value))
            f.close()
            # Recreate the fifo next iteration so each reader gets a
            # fresh snapshot.
            os.unlink(self.statpath)
| {
"repo_name": "dowski/statvent",
"path": "statvent/stats.py",
"copies": "1",
"size": "4636",
"license": "bsd-2-clause",
"hash": 5051000954746692000,
"line_mean": 27.2682926829,
"line_max": 79,
"alpha_frac": 0.5933994823,
"autogenerated": false,
"ratio": 3.866555462885738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4959954945185738,
"avg_score": null,
"num_lines": null
} |
"""An API to load config from a readthedocs.yml file."""
from os import path
from readthedocs.config import BuildConfigV1, ConfigFileNotFound
from readthedocs.config import load as load_config
from readthedocs.projects.models import ProjectConfigurationError
from .constants import DOCKER_IMAGE, DOCKER_IMAGE_SETTINGS
def load_yaml_config(version):
    """
    Load a configuration from `readthedocs.yml` file.

    This uses the configuration logic from `readthedocs-build`, which will
    keep parsing consistent between projects.
    """
    project = version.project
    checkout_path = project.checkout_path(version.slug)

    # The build image and interpreter choice feed the python-version
    # validation, so unsupported versions can be rejected while parsing.
    img_name = project.container_image or DOCKER_IMAGE
    python_version = 3 if project.python_interpreter == 'python3' else 2

    try:
        sphinx_configuration = path.join(
            version.get_conf_py_path(),
            'conf.py',
        )
    except ProjectConfigurationError:
        sphinx_configuration = None

    defaults = {
        'install_project': project.install_project,
        'formats': get_default_formats(project),
        'use_system_packages': project.use_system_packages,
        'requirements_file': project.requirements_file,
        'python_version': python_version,
        'sphinx_configuration': sphinx_configuration,
        'build_image': project.container_image,
        'doctype': project.documentation_type,
    }
    env_config = {
        'build': {
            'image': img_name,
        },
        'defaults': defaults,
    }

    img_settings = DOCKER_IMAGE_SETTINGS.get(img_name, None)
    if img_settings:
        env_config.update(img_settings)

    try:
        config = load_config(
            path=checkout_path,
            env_config=env_config,
        )
    except ConfigFileNotFound:
        # Default to a v1 config with the web-interface defaults when the
        # repository has no configuration file.
        config = BuildConfigV1(
            env_config=env_config,
            raw_config={},
            source_file=checkout_path,
        )
        config.validate()
    return config
def get_default_formats(project):
    """Get a list of the default formats for ``project``."""
    # htmlzip is always produced; epub/pdf depend on project settings.
    formats = ['htmlzip']
    if project.enable_epub_build:
        formats.append('epub')
    if project.enable_pdf_build:
        formats.append('pdf')
    return formats
| {
"repo_name": "rtfd/readthedocs.org",
"path": "readthedocs/doc_builder/config.py",
"copies": "1",
"size": "2558",
"license": "mit",
"hash": 8067712825715650000,
"line_mean": 31.3797468354,
"line_max": 79,
"alpha_frac": 0.6395621579,
"autogenerated": false,
"ratio": 4.284757118927973,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 79
} |
"""An Application for launching a kernel
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import atexit
import json
import os
import sys
import signal
# System library imports
import zmq
from zmq.eventloop import ioloop
# IPython imports
from IPython.core.ultratb import FormattedTB
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.utils import io
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils.path import filefind
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (Any, Instance, Dict, Unicode, Integer, Bool,
DottedObjectName)
from IPython.utils.importstring import import_item
# local imports
from IPython.zmq.entry_point import write_connection_file
from IPython.zmq.heartbeat import Heartbeat
from IPython.zmq.parentpoller import ParentPollerUnix, ParentPollerWindows
from IPython.zmq.session import (
Session, session_flags, session_aliases, default_secure,
)
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
# Command-line aliases/flags for the kernel app, extending the base
# application's options with the kernel's connection settings.
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
    'ip' : 'KernelApp.ip',
    'hb' : 'KernelApp.hb_port',
    'shell' : 'KernelApp.shell_port',
    'iopub' : 'KernelApp.iopub_port',
    'stdin' : 'KernelApp.stdin_port',
    'f' : 'KernelApp.connection_file',
    'parent': 'KernelApp.parent',
})
# The interrupt handle is only meaningful on Windows.
if sys.platform.startswith('win'):
    kernel_aliases['interrupt'] = 'KernelApp.interrupt'

kernel_flags = dict(base_flags)
kernel_flags.update({
    'no-stdout' : (
        {'KernelApp' : {'no_stdout' : True}},
        "redirect stdout to the null device"),
    'no-stderr' : (
        {'KernelApp' : {'no_stderr' : True}},
        "redirect stderr to the null device"),
})

# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
#-----------------------------------------------------------------------------
# Application class for starting a Kernel
#-----------------------------------------------------------------------------
class KernelApp(BaseIPythonApplication):
    """Application for launching a kernel over zmq."""

    name='ipkernel'
    aliases = Dict(kernel_aliases)
    flags = Dict(kernel_flags)
    classes = [Session]

    # the kernel class, as an importstring
    kernel_class = DottedObjectName('IPython.zmq.ipkernel.Kernel')
    kernel = Any()
    poller = Any() # don't restrict this even though current pollers are all Threads
    heartbeat = Instance(Heartbeat)
    session = Instance('IPython.zmq.session.Session')
    ports = Dict()
    _full_connection_file = Unicode()

    # inherit config file name from parent:
    parent_appname = Unicode(config=True)
def _parent_appname_changed(self, name, old, new):
    # Trait-change handler: derive this app's config file name from the
    # parent application's name, replacing '-' with '_'.
    if self.config_file_specified:
        # it was manually specified, ignore
        return
    self.config_file_name = new.replace('-','_') + u'_config.py'
    # don't let this count as specifying the config file
    self.config_file_specified = False
# connection info:
ip = Unicode(LOCALHOST, config=True,
help="Set the IP or interface on which the kernel will listen.")
hb_port = Integer(0, config=True, help="set the heartbeat port [default: random]")
shell_port = Integer(0, config=True, help="set the shell (ROUTER) port [default: random]")
iopub_port = Integer(0, config=True, help="set the iopub (PUB) port [default: random]")
stdin_port = Integer(0, config=True, help="set the stdin (DEALER) port [default: random]")
connection_file = Unicode('', config=True,
help="""JSON file in which to store connection info [default: kernel-<pid>.json]
This file will contain the IP, ports, and authentication key needed to connect
clients to this kernel. By default, this file will be created in the security-dir
of the current profile, but can be specified by absolute path.
""")
# streams, etc.
no_stdout = Bool(False, config=True, help="redirect stdout to the null device")
no_stderr = Bool(False, config=True, help="redirect stderr to the null device")
outstream_class = DottedObjectName('IPython.zmq.iostream.OutStream',
config=True, help="The importstring for the OutStream factory")
displayhook_class = DottedObjectName('IPython.zmq.displayhook.ZMQDisplayHook',
config=True, help="The importstring for the DisplayHook factory")
# polling
parent = Integer(0, config=True,
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""")
interrupt = Integer(0, config=True,
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signalled.
""")
def init_crash_handler(self):
# Install minimal exception handling
sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor',
ostream=sys.__stdout__)
def init_poller(self):
    """Install a poller that watches this kernel's parent process.

    On Windows, a ParentPollerWindows is created when either an
    interrupt event or a parent HANDLE was supplied; on other platforms
    a ParentPollerUnix is used when parent-watching was requested, so
    the kernel dies with its parent instead of being orphaned.
    """
    if sys.platform == 'win32':
        if self.interrupt or self.parent:
            self.poller = ParentPollerWindows(self.interrupt, self.parent)
    elif self.parent:
        # NOTE(review): on POSIX `self.parent` acts as a boolean flag here --
        # ParentPollerUnix takes no arguments and watches our own parent pid.
        self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
iface = 'tcp://%s' % self.ip
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind(iface + ':%i'%port)
return port
def load_connection_file(self):
    """Load ip/port/hmac config from the JSON connection file.

    Values from the file are applied only where they have not already
    been set by config or command-line arguments. If the file does not
    exist yet, this process owns it and registers its removal at exit.
    """
    try:
        fname = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
    except IOError:
        self.log.debug("Connection file not found: %s", self.connection_file)
        # This means I own it, so I will clean it up:
        atexit.register(self.cleanup_connection_file)
        return
    self.log.debug(u"Loading connection file %s", fname)
    with open(fname) as f:
        s = f.read()
    cfg = json.loads(s)
    if self.ip == LOCALHOST and 'ip' in cfg:
        # not overridden by config or cl_args
        self.ip = cfg['ip']
    for channel in ('hb', 'shell', 'iopub', 'stdin'):
        name = channel + '_port'
        if getattr(self, name) == 0 and name in cfg:
            # not overridden by config or cl_args
            setattr(self, name, cfg[name])
    if 'key' in cfg:
        # shared key enables HMAC signing of session messages
        self.config.Session.key = str_to_bytes(cfg['key'])
def write_connection_file(self):
"""write connection info to JSON file"""
if os.path.basename(self.connection_file) == self.connection_file:
cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
cf = self.connection_file
write_connection_file(cf, ip=self.ip, key=self.session.key,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port)
self._full_connection_file = cf
def cleanup_connection_file(self):
    """Best-effort removal of the connection file written by this kernel."""
    path = self._full_connection_file
    self.log.debug("cleaning up connection file: %r", path)
    try:
        os.remove(path)
    except (IOError, OSError):
        # already gone or not removable -- nothing more we can do at exit
        pass
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i"%self.shell_port)
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i"%self.iopub_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i"%self.stdin_port)
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i"%self.hb_port)
self.heartbeat.start()
# Helper to make it easier to connect to an existing kernel.
# set log-level to critical, to make sure it is output
self.log.critical("To connect another client to this kernel, use:")
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
# use shortname
tail = basename
if self.profile != 'default':
tail += " --profile %s" % self.profile
else:
tail = self.connection_file
self.log.critical("--existing %s", tail)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port)
def init_session(self):
"""create our session object"""
default_secure(self.config)
self.session = Session(config=self.config, username=u'kernel')
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
kernel_factory = import_item(str(self.kernel_class))
self.kernel = kernel_factory(config=self.config, session=self.session,
shell_socket=self.shell_socket,
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log
)
self.kernel.record_ports(self.ports)
@catch_config_error
def initialize(self, argv=None):
    """Parse the command line and bring up the kernel machinery in order.

    The sequence matters: the connection file is loaded before the
    session (key) and sockets (ports) are created, and the connection
    info can only be written/logged once sockets and heartbeat exist.
    """
    super(KernelApp, self).initialize(argv)
    self.init_blackhole()
    self.init_connection_file()
    self.init_session()
    self.init_poller()
    self.init_sockets()
    self.init_heartbeat()
    # writing/displaying connection info must be *after* init_sockets/heartbeat
    self.log_connection_info()
    self.write_connection_file()
    self.init_io()
    self.init_signal()
    self.init_kernel()
    # flush stdout/stderr, so that anything written to these streams during
    # initialization do not get associated with the first execution request
    sys.stdout.flush()
    sys.stderr.flush()
def start(self):
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
| {
"repo_name": "sodafree/backend",
"path": "build/ipython/IPython/zmq/kernelapp.py",
"copies": "3",
"size": "13305",
"license": "bsd-3-clause",
"hash": 8922095795768594000,
"line_mean": 38.954954955,
"line_max": 101,
"alpha_frac": 0.5998496806,
"autogenerated": false,
"ratio": 4.072543617998163,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6172393298598163,
"avg_score": null,
"num_lines": null
} |
"""An Application for launching a kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
import os
import sys
import signal
import traceback
import logging
from tornado import ioloop
import zmq
from zmq.eventloop import ioloop as zmq_ioloop
from zmq.eventloop.zmqstream import ZMQStream
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.utils import io
from ipython_genutils.path import filefind, ensure_dir_exists
from traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default
)
from ipython_genutils.importstring import import_item
from jupyter_core.paths import jupyter_runtime_dir
from jupyter_client import write_connection_file
from jupyter_client.connect import ConnectionFileMixin
# local imports
from .iostream import IOPubThread
from .heartbeat import Heartbeat
from .ipkernel import IPythonKernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from jupyter_client.session import (
Session, session_flags, session_aliases,
)
from .zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'transport': 'IPKernelApp.transport',
})
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp,
ConnectionFileMixin):
name='ipython-kernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = Type('ipykernel.ipkernel.IPythonKernel',
klass='ipykernel.kernelbase.Kernel',
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""").tag(config=True)
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat, allow_none=True)
ports = Dict()
subcommands = {
'install': (
'ipykernel.kernelspec.InstallIPythonKernelSpecApp',
'Install the IPython kernel'
),
}
# connection info:
connection_dir = Unicode()
@default('connection_dir')
def _default_connection_dir(self):
return jupyter_runtime_dir()
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.connection_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
outstream_class = DottedObjectName('ipykernel.iostream.OutStream',
help="The importstring for the OutStream factory").tag(config=True)
displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook',
help="The importstring for the DisplayHook factory").tag(config=True)
# polling
parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0),
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""").tag(config=True)
interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0),
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""").tag(config=True)
def init_crash_handler(self):
sys.excepthook = self.excepthook
def excepthook(self, etype, evalue, tb):
# write uncaught traceback to 'real' stderr, not zmq-forwarder
traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle:
self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
    """Bind zmq socket ``s`` using the configured transport.

    For tcp, a non-positive ``port`` requests a random port. For ipc,
    ``self.ip`` is a filesystem path prefix and the "port" is a suffix;
    a non-positive value probes ``<ip>-1``, ``<ip>-2``, ... for the
    first unused path. Returns the port number actually used.
    """
    iface = '%s://%s' % (self.transport, self.ip)
    if self.transport == 'tcp':
        if port <= 0:
            port = s.bind_to_random_port(iface)
        else:
            s.bind("tcp://%s:%i" % (self.ip, port))
    elif self.transport == 'ipc':
        if port <= 0:
            # find the first free <ip>-<n> socket path
            port = 1
            path = "%s-%i" % (self.ip, port)
            while os.path.exists(path):
                port = port + 1
                path = "%s-%i" % (self.ip, port)
        else:
            path = "%s-%i" % (self.ip, port)
        s.bind("ipc://%s" % path)
    return port
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.connection_file = filefind(self.connection_file, ['.', self.connection_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, and I'll create it in this directory:
ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700)
# Also, I will clean it up:
atexit.register(self.cleanup_connection_file)
return
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_socket.linger = 1000
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_socket.linger = 1000
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_socket.linger = 1000
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
self.init_iopub(context)
def init_iopub(self, context):
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_socket.linger = 1000
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.configure_tornado_logger()
self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
self.iopub_thread.start()
# backward-compat: wrap iopub socket API in background thread
self.iopub_socket = self.iopub_thread.background_socket
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.connection_dir:
# use shortname
tail = basename
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
# unless log-level is CRITICAL (--quiet)
if not self.parent_handle and self.log_level < logging.CRITICAL:
io.rprint(_ctrl_c_message)
for line in lines:
io.rprint(line)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_thread, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_thread, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
self.displayhook = displayhook_factory(self.session, self.iopub_socket)
sys.displayhook = self.displayhook
self.patch_io()
def patch_io(self):
"""Patch important libraries that can't handle sys.stdout forwarding"""
try:
import faulthandler
except ImportError:
pass
else:
# Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
# updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
# https://docs.python.org/3/library/faulthandler.html#faulthandler.enable
# change default file to __stderr__ from forwarded stderr
faulthandler_enable = faulthandler.enable
def enable(file=sys.__stderr__, all_threads=True, **kwargs):
return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)
faulthandler.enable = enable
if hasattr(faulthandler, 'register'):
faulthandler_register = faulthandler.register
def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
return faulthandler_register(signum, file=file, all_threads=all_threads,
chain=chain, **kwargs)
faulthandler.register = register
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = self.kernel_class.instance
kernel = kernel_factory(parent=self, session=self.session,
shell_streams=[shell_stream, control_stream],
iopub_thread=self.iopub_thread,
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports({
name + '_port': port for name, port in self.ports.items()
})
self.kernel = kernel
# Allow the displayhook to get the execution count
self.displayhook.get_execution_count = lambda: kernel.execution_count
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Register inline backend as default
# this is higher priority than matplotlibrc,
# but lower priority than anything else (mpl.use() for instance).
# This only affects matplotlib >= 1.5
if not os.environ.get('MPLBACKEND'):
os.environ['MPLBACKEND'] = 'module://ipykernel.pylab.backend_inline'
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace error-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=sys.stderr)
print (shell.InteractiveTB.stb2text(stb), file=sys.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = getattr(self.kernel, 'shell', None)
if self.shell:
self.shell.configurables.append(self)
def init_extensions(self):
    """Load IPython extensions, force-loading ipywidgets when available."""
    super(IPKernelApp, self).init_extensions()
    # BEGIN HARDCODED WIDGETS HACK
    # Ensure ipywidgets extension is loaded if available
    extension_man = self.shell.extension_manager
    if 'ipywidgets' not in extension_man.loaded:
        try:
            extension_man.load_extension('ipywidgets')
        except ImportError:
            # ipywidgets is strictly optional; absence is not an error.
            # (Fixed: the exception was previously bound to an unused name.)
            self.log.debug('ipywidgets package not installed. Widgets will not be available.')
    # END HARDCODED WIDGETS HACK
def configure_tornado_logger(self):
    """Attach a basicConfig-style handler to tornado's logger.

    Must set up the tornado logger or else tornado will call
    basicConfig on the root logger, which makes the root logger write
    to the real sys.stderr instead of the capture streams. Mimics the
    setup performed by logging.basicConfig.
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    logging.getLogger('tornado').addHandler(stream_handler)
@catch_config_error
def initialize(self, argv=None):
super(IPKernelApp, self).initialize(argv)
if self.subapp is not None:
return
# register zmq IOLoop with tornado
zmq_ioloop.install()
self.init_blackhole()
self.init_connection_file()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.write_connection_file()
# Log connection info after writing connection file, so that the connection
# file is definitely available at the time someone reads the log.
self.log_connection_info()
self.init_io()
self.init_signal()
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
if self.shell:
self.init_gui_pylab()
self.init_extensions()
self.init_code()
# flush stdout/stderr, so that anything written to these streams during
# initialization do not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.subapp is not None:
return self.subapp.start()
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()
| {
"repo_name": "lancezlin/ml_template_py",
"path": "lib/python2.7/site-packages/ipykernel/kernelapp.py",
"copies": "5",
"size": "19344",
"license": "mit",
"hash": -1241053613651070200,
"line_mean": 38.6393442623,
"line_max": 101,
"alpha_frac": 0.6180727874,
"autogenerated": false,
"ratio": 4.035043804755945,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7153116592155946,
"avg_score": null,
"num_lines": null
} |
"""An application searcher"""
# -----------------------------
# - Imports
# -----------------------------
# Standard Library
import sys
from argparse import ArgumentParser
from pprint import pprint
from importlib import import_module
from warnings import warn
# Import from third party libraries
from xdg import BaseDirectory, DesktopEntry
import xdg.Exceptions
# Import 'local' modules
from bad import * # Warnings and exceptions
from utilities import isItemDict
# -----------------------------
# - Main Classes
# -----------------------------
class Indelve:
"""Indelve: an application searcher.
The main class of indelve. This is where all the action happens.
"""
def __init__(self, providers=None):
    """Initialise indelve, loading the given search providers.

    `providers` is a list of provider-module names, or None (default)
    to load every available provider.

    Issues a `bad.ProviderLoadWarning` for each provider that could not
    be loaded; raises `bad.NoProvidersError` if none loaded at all.
    All warnings derive from `bad.IndelveInitWarning`; all exceptions
    derive from `bad.IndelveInitError`.
    """
    # Make sure `providers` is a list or None (identity check with `is`,
    # fixed from the previous `!= None` equality comparison).
    if providers is not None and not isinstance(providers, list):
        raise TypeError("`providers` must be a list or None.")
    # If `providers` is not specified, load all the provider modules
    if providers is None:
        providers = self.listProviders()
    # The dictionary of `Provider` class instances, keyed by name
    self.providerInstances = {}
    for provider in providers:
        # Attempt to import the provider, sending a warning if that fails
        try:
            providerModule = import_module("indelve.providers." + provider)
        except (ImportError, KeyError):
            warn(provider, ProviderLoadWarning)
            continue
        # An exception here means a real bug in the provider's code,
        # so let it propagate.
        self.providerInstances[provider] = providerModule.Provider()
    # Make sure we've actually loaded some providers
    if len(self.providerInstances) == 0:
        raise NoProvidersError()
def listProviders(self, descriptions=False):
    """List the available provider modules.

    When `descriptions` is False (default) return a plain list of
    provider names; otherwise return a dict with entries:

        "provider_name" : {
            "short" : "<short description>",
            "long"  : "<long description>",
        }

    See indelve.providers.abstract.Provider.description for details.
    """
    # The list of providers is the `providers` package's __all__ list.
    # (Fixed: import the package itself rather than the explicit
    # "indelve.providers.__init__" spelling, which registers a duplicate
    # module entry in sys.modules.)
    initModule = import_module("indelve.providers")
    providerList = initModule.__all__
    if not descriptions:
        # If we don't need the descriptions, then this is all we need
        return providerList
    # Otherwise, load each provider module for its short/long descriptions
    providerDict = {}
    for provider in providerList:
        providerDict[provider] = self.getProviderDescription(provider)
    return providerDict
def getProviderDescription(self,provider):
    """Return the description dict for a single provider.

    The returned dict has the form:
    {
        "short" : "<short description>",
        "long" : "<long description>",
    }

    Raises ValueError if `provider` is not a string, ProviderLoadError
    if the provider module cannot be imported, and NotImplementedError
    if the provider's description dict lacks the required keys.
    """
    # Make sure `provider` is a string (basestring: this is Python 2 code,
    # so both str and unicode are accepted)
    if not isinstance(provider,basestring):
        raise ValueError("`provider` must be a string.")
    # Try to load the provider module
    try:
        providerModule = import_module("indelve.providers."+provider)
    except ImportError:
        raise ProviderLoadError(provider)
    # Get the class-level description dictionary from the Provider class
    descriptionDict = providerModule.Provider.description
    # Make sure the dictionary has the necessary keys
    if "short" not in descriptionDict or "long" not in descriptionDict:
        raise NotImplementedError("Provider '"+provider+"' does not have a proper description dictionary.")
    return descriptionDict
def refresh(self, force=False):
    """Ask every loaded provider to refresh its database.

    Providers without a database simply ignore the call. The `force`
    argument indicates that providers should completely reload their
    databases, not just check for new items.
    """
    for provider in self.providerInstances.values():
        provider.refresh(force)
def search(self, query):
    """Search for `query` using all the loaded providers.

    Returns a list of <item-dict>'s sorted by relevance. (See
    providers.abstract.Provider.search for the <item-dict>
    specification.)

    Raises TypeError for a non-string query, ValueError for an empty one.
    """
    # Do some checking
    if not isinstance(query, str):
        raise TypeError("Parameter 'query' should be a string.")
    if len(query) == 0:
        raise ValueError("Parameter 'query' shouldn't be empty.")
    # The accumulated list of item dicts
    items = []
    for name in self.providerInstances:
        # A provider may reject `query` as inapplicable; just skip it then.
        try:
            results = self.providerInstances[name].search(query)
        except ValueError:
            continue
        # Verify that each new result is indeed an <item-dict>.
        # (Bug fix: previously this iterated the already-accumulated
        # `items` list, so fresh provider results were never checked.)
        for item in results:
            assert isItemDict(item)
        # Add the results to our list
        items.extend(results)
    # Sort the items by relevance
    items.sort(key=lambda entry: entry["relevance"])
    return items
"repo_name": "SparklePigBang/indelve",
"path": "indelve/main.py",
"copies": "1",
"size": "5389",
"license": "mit",
"hash": -3854931554017206300,
"line_mean": 29.8,
"line_max": 147,
"alpha_frac": 0.7153460753,
"autogenerated": false,
"ratio": 3.9595885378398235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03131528149699039,
"num_lines": 175
} |
# an application to generate the .xinfo file for data
# reduction from a directory full of images, optionally with scan and
# sequence files which will be used to add matadata.
import collections
import logging
import os
import sys
import traceback
import h5py
from libtbx import easy_mp
from xia2.Applications.xia2setup_helpers import get_sweep
from xia2.Experts.FindImages import image2template_directory
from xia2.Handlers.CommandLine import CommandLine
from xia2.Handlers.Phil import PhilIndex
from xia2.Schema import imageset_cache
from xia2.Wrappers.XDS.XDSFiles import XDSFiles
logger = logging.getLogger("xia2.Applications.xia2setup")
image_extensions = [
"img",
"mccd",
"mar2300",
"mar1200",
"mar1600",
"mar3450",
"osc",
"cbf",
"mar2000",
"sfrm",
"",
]
compression = ["", ".bz2", ".gz"]
known_image_extensions = []
for c in compression:
for ie in image_extensions:
ext = f"{ie}{c}"
if ext:
known_image_extensions.append(ext)
xds_file_names = [
"ABS",
"ABSORP",
"BKGINIT",
"BKGPIX",
"BLANK",
"DECAY",
"X-CORRECTIONS",
"Y-CORRECTIONS",
"MODPIX",
"FRAME",
"GX-CORRECTIONS",
"GY-CORRECTIONS",
"DX-CORRECTIONS",
"DY-CORRECTIONS",
"GAIN",
]
known_sequence_extensions = ["seq"]
known_hdf5_extensions = [".h5", ".nxs"]
latest_sequence = None
target_template = None
def is_sequence_name(file):
    """Return True if *file* is an existing file with a known sequence extension."""
    if not os.path.isfile(file):
        return False
    extension = file.split(".")[-1]
    return extension in known_sequence_extensions
def is_image_name(filename):
    """Return True if *filename* appears to be a diffraction image file.

    Excludes XDS auxiliary output and scale-directory correction files;
    accepts known image extensions and HDF5 files, plus a permissive
    fallback for other multi-character extensions.
    """
    if os.path.isfile(filename):
        # never treat XDS's own output files as images
        if os.path.split(filename)[-1] in XDSFiles:
            return False
        for xds_file in "ABSORP", "DECAY", "MODPIX":
            if os.path.join("scale", xds_file) in filename:
                return False
        for exten in known_image_extensions:
            if filename.endswith(exten):
                return True
        end = filename.split(".")[-1]
        try:
            # heuristic fallback: any multi-character "extension" that is not
            # part of a rotated log (".log.1" etc.) is accepted as an image.
            # NOTE(review): this accepts many non-image files -- confirm intent.
            if ".log." not in filename and len(end) > 1:
                return True
        except Exception:
            pass
        if is_hdf5_name(filename):
            return True
    return False
def is_hdf5_name(filename):
    """Return True if *filename* is an existing file with an HDF5 extension (.h5/.nxs)."""
    if not os.path.isfile(filename):
        return False
    _, extension = os.path.splitext(filename)
    return extension in known_hdf5_extensions
def is_xds_file(f):
    """Return True if *f* is one of XDS's intermediate output files.

    XDS writes files such as ABSORP.cbf, GAIN.cbf or X-CORRECTIONS.cbf
    (possibly with an _NNN suffix before the extension); these must not
    be mistaken for diffraction images.
    """
    filename = os.path.split(f)[1]
    xds_files = [
        "ABS",
        "ABSORP",
        "BKGINIT",
        "BKGPIX",
        "BLANK",
        "DECAY",
        "DX-CORRECTIONS",
        "DY-CORRECTIONS",
        "FRAME",
        "GAIN",
        "GX-CORRECTIONS",
        "GY-CORRECTIONS",
        "MODPIX",
        "X-CORRECTIONS",
        "Y-CORRECTIONS",
    ]
    # Bug fix: compare the first "_"-separated token (a string) against the
    # known names. The old code tested `split("_") in xds_files`, i.e. a
    # *list* against a list of strings, which is always False -- so XDS
    # files were never recognised.
    return filename.split(".")[0].split("_")[0] in xds_files
def get_template(f):
    """Return the image-template path for image file *f*, or None.

    Skips non-image and XDS files, and honours the module-level
    `target_template` filter when set. Raises RuntimeError when a file
    that looks like an image cannot be matched to a template.
    """
    # `target_template` is only read here; the global statement is
    # redundant but kept for clarity of intent.
    global target_template
    if not is_image_name(f):
        return
    if is_xds_file(f):
        return
    # in here, check the permissions on the file...
    template = None
    directory = None
    if not os.access(f, os.R_OK):
        # unreadable files are only logged; template matching is still tried
        logger.debug("No read permission for %s" % f)
    try:
        template, directory = image2template_directory(f)
        template = os.path.join(directory, template)
        if target_template:
            # a template filter is active: drop anything not requested
            if template not in target_template:
                return
    except Exception as e:
        logger.debug("Exception A: %s (%s)" % (str(e), f))
        logger.debug(traceback.format_exc())
    if template is None or directory is None:
        raise RuntimeError("template not recognised for %s" % f)
    return template
def parse_sequence(sequence_file):
    """Read a one-letter-code sequence file into the module global `latest_sequence`.

    Lines are kept only when their first character (upper-cased) is a
    letter or a space; the concatenated, upper-cased result is stored.
    """
    global latest_sequence
    sequence = ""
    # Context manager closes the handle promptly (the old code leaked the
    # file object returned by open() until garbage collection).
    with open(sequence_file) as fh:
        for record in fh:
            if record[0].upper() in "ABCDEFGHIJKLMNOPQRSTUVWXYZ ":
                sequence += record.strip().upper()
    latest_sequence = sequence
def visit(directory, files):
    """Scan *files* within *directory* and return the set of image templates.

    HDF5 master files are added verbatim (when dxtbx recognises them),
    conventional image files are reduced to their template, and sequence
    files are parsed as a side effect (stored in the global latest_sequence).
    """
    files.sort()
    templates = set()
    for f in files:
        full_path = os.path.join(directory, f)
        if is_hdf5_name(full_path):
            from dxtbx.format import Registry

            format_class = Registry.get_format_class_for_file(full_path)
            if format_class is None:
                logger.debug(
                    "Ignoring %s (Registry can not find format class)" % full_path
                )
                continue
            elif format_class.is_abstract():
                continue
            templates.add(full_path)
        elif is_image_name(full_path):
            try:
                template = get_template(full_path)
            except Exception as e:
                logger.debug("Exception B: %s" % str(e))
                logger.debug(traceback.format_exc())
                continue
            if template is not None:
                templates.add(template)
        elif is_sequence_name(full_path):
            # side effect: records the sequence in the module-global latest_sequence
            parse_sequence(full_path)
    return templates
def _list_hdf5_data_files(h5_file):
    """Return filenames of the external data files linked from /entry/data.

    Only links named data_* are considered, following the NeXus convention
    used by Eiger-style master files.
    """
    # context manager guarantees the HDF5 handle is closed even when an
    # exception is raised (the original left the file open on error)
    with h5py.File(h5_file, "r") as f:
        return [
            f["/entry/data"][k].file.filename
            for k in f["/entry/data"]
            if k.startswith("data_")
        ]
def _filter_aliased_hdf5_sweeps(sweeps):
    """Collapse HDF5 sweeps that alias the same underlying data files.

    Non-HDF5 sweeps are kept (de-duplicated, in order).  For HDF5 sweeps
    that point at the same set of data files, only one representative is
    kept, preferring a _master.h5 file over an equivalent .nxs file.
    """
    by_datafiles = {}
    plain = []
    for sweep in sweeps:
        if not is_hdf5_name(sweep):
            if sweep not in plain:
                plain.append(sweep)
            continue
        key = tuple(_list_hdf5_data_files(sweep))
        previous = by_datafiles.get(key)
        if previous is None:
            by_datafiles[key] = sweep
        elif previous.endswith(".nxs") and sweep.endswith("_master.h5"):
            # impose slight bias in favour of using _master.h5 in place of
            # .nxs because XDS
            by_datafiles[key] = sweep
    return plain + [by_datafiles[key] for key in sorted(by_datafiles)]
def _write_sweeps(sweeps, out):
    """Analyse the discovered sweeps and write an .xinfo description to *out*.

    sweeps -- mapping of template -> list of sweep objects
    out -- writable text stream

    Sweeps are ordered by the epoch of their first image, grouped into
    wavelengths (within the settings wavelength tolerance), filtered by the
    min_images / min_oscillation_range settings, and emitted as a
    PROJECT / CRYSTAL / WAVELENGTH / SWEEP hierarchy.
    """
    global latest_sequence
    _known_sweeps = sweeps

    sweeplist = sorted(_known_sweeps)
    sweeplist = _filter_aliased_hdf5_sweeps(sweeplist)
    assert sweeplist, "no sweeps found"

    # sort sweeplist based on epoch of first image of each sweep
    epochs = [
        _known_sweeps[sweep][0].get_imageset().get_scan().get_epochs()[0]
        for sweep in sweeplist
    ]
    if len(epochs) != len(set(epochs)):
        # some detectors record identical epochs; synthesise distinct ones
        # by accumulating exposure times sweep-by-sweep
        logger.debug("Duplicate epochs found. Trying to correct epoch information.")
        cumulativedelta = 0.0
        for sweep in sweeplist:
            _known_sweeps[sweep][0].get_imageset().get_scan().set_epochs(
                _known_sweeps[sweep][0].get_imageset().get_scan().get_epochs()
                + cumulativedelta
            )
            # could change the image epoch information individually, but only
            # the information from the first image is used at this time.
            cumulativedelta += sum(
                _known_sweeps[sweep][0].get_imageset().get_scan().get_exposure_times()
            )
        epochs = [
            _known_sweeps[sweep][0].get_imageset().get_scan().get_epochs()[0]
            for sweep in sweeplist
        ]
        if len(epochs) != len(set(epochs)):
            logger.debug("Duplicate epoch information remains.")
            # This should only happen with incorrect exposure time information.
    sweeplist = [s for _, s in sorted(zip(epochs, sweeplist))]

    # analysis pass: collect the distinct wavelengths, merging values that
    # agree within wavelength_tolerance
    wavelengths = []
    settings = PhilIndex.get_python_object().xia2.settings
    wavelength_tolerance = settings.wavelength_tolerance
    min_images = settings.input.min_images
    min_oscillation_range = settings.input.min_oscillation_range
    for sweep in sweeplist:
        sweeps = _known_sweeps[sweep]
        # sort on exposure epoch followed by first image number
        sweeps = sorted(
            sweeps,
            key=lambda s: (
                s.get_imageset().get_scan().get_epochs()[0],
                s.get_images()[0],
            ),
        )
        for s in sweeps:
            if len(s.get_images()) < min_images:
                logger.debug("Rejecting sweep %s:" % s.get_template())
                logger.debug(
                    " Not enough images (found %i, require at least %i)"
                    % (len(s.get_images()), min_images)
                )
                continue
            oscillation_range = s.get_imageset().get_scan().get_oscillation_range()
            width = oscillation_range[1] - oscillation_range[0]
            if min_oscillation_range is not None and width < min_oscillation_range:
                logger.debug("Rejecting sweep %s:" % s.get_template())
                logger.debug(
                    " Too narrow oscillation range (found %i, require at least %i)"
                    % (width, min_oscillation_range)
                )
                continue
            wavelength = s.get_wavelength()
            if wavelength not in wavelengths:
                have_wavelength = False
                for w in wavelengths:
                    if abs(w - wavelength) < wavelength_tolerance:
                        have_wavelength = True
                        s.set_wavelength(w)
                if not have_wavelength:
                    wavelengths.append(wavelength)
    assert wavelengths, "No sweeps found matching criteria"

    wavelength_map = {}
    project = settings.project
    crystal = settings.crystal
    out.write("BEGIN PROJECT %s\n" % project)
    out.write("BEGIN CRYSTAL %s\n" % crystal)
    out.write("\n")
    # check to see if a user spacegroup has been assigned - if it has,
    # copy it in...
    if settings.space_group is not None:
        out.write("USER_SPACEGROUP %s\n" % settings.space_group.type().lookup_symbol())
        out.write("\n")
    if settings.unit_cell is not None:
        out.write(
            "USER_CELL %.2f %.2f %.2f %.2f %.2f %.2f\n"
            % settings.unit_cell.parameters()
        )
        out.write("\n")
    freer_file = PhilIndex.params.xia2.settings.scale.freer_file
    if freer_file is not None:
        out.write("FREER_FILE %s\n" % PhilIndex.params.xia2.settings.scale.freer_file)
        out.write("\n")
    # emit the amino-acid sequence (wrapped to 60 characters per line)
    if latest_sequence:
        out.write("BEGIN AA_SEQUENCE\n")
        out.write("\n")
        for sequence_chunk in [
            latest_sequence[i : i + 60] for i in range(0, len(latest_sequence), 60)
        ]:
            out.write("%s\n" % sequence_chunk)
        out.write("\n")
        out.write("END AA_SEQUENCE\n")
        out.write("\n")
    # heavy-atom information: an explicit atom, or a placeholder "X" when
    # anomalous scattering was requested without naming the atom
    if settings.input.atom:
        out.write("BEGIN HA_INFO\n")
        out.write("ATOM %s\n" % settings.input.atom.lower())
        out.write("END HA_INFO\n")
        out.write("\n")
    elif settings.input.anomalous:
        out.write("BEGIN HA_INFO\n")
        out.write("ATOM X\n")
        out.write("END HA_INFO\n")
        out.write("\n")
    # one WAVELENGTH block per distinct wavelength; naming follows the
    # common conventions SAD / NATIVE / WAVEn
    for j, wavelength in enumerate(wavelengths):
        anomalous = settings.input.anomalous
        if settings.input.atom is not None:
            anomalous = True
        if len(wavelengths) == 1 and anomalous:
            name = "SAD"
        elif len(wavelengths) == 1:
            name = "NATIVE"
        else:
            name = "WAVE%d" % (j + 1)
        wavelength_map[wavelength] = name
        out.write("BEGIN WAVELENGTH %s\n" % name)
        dmin = PhilIndex.params.xia2.settings.resolution.d_min
        dmax = PhilIndex.params.xia2.settings.resolution.d_max
        if dmin and dmax:
            out.write(f"RESOLUTION {dmin:f} {dmax:f}\n")
        elif dmin:
            out.write("RESOLUTION %f\n" % dmin)
        out.write("WAVELENGTH %f\n" % wavelengths[j])
        out.write("END WAVELENGTH %s\n" % name)
        out.write("\n")
    # second pass: emit one SWEEP block per (sweep, start/end range),
    # applying the same min_images / min_oscillation_range filters
    j = 0
    for sweep in sweeplist:
        sweeps = _known_sweeps[sweep]
        # sort on exposure epoch followed by first image number
        sweeps = sorted(
            sweeps,
            key=lambda s: (
                s.get_imageset().get_scan().get_epochs()[0],
                s.get_images()[0],
            ),
        )
        for s in sweeps:
            # require at least n images to represent a sweep...
            if len(s.get_images()) < min_images:
                logger.debug("Rejecting sweep %s:" % s.get_template())
                logger.debug(
                    " Not enough images (found %i, require at least %i)"
                    % (len(s.get_images()), min_images)
                )
                continue
            oscillation_range = s.get_imageset().get_scan().get_oscillation_range()
            width = oscillation_range[1] - oscillation_range[0]
            if min_oscillation_range is not None and width < min_oscillation_range:
                logger.debug("Rejecting sweep %s:" % s.get_template())
                logger.debug(
                    " Too narrow oscillation range (found %i, require at least %i)"
                    % (width, min_oscillation_range)
                )
                continue
            key = os.path.join(s.get_directory(), s.get_template())
            if CommandLine.get_start_ends(key):
                # user-specified start/end points must lie within the sweep
                start_ends = CommandLine.get_start_ends(key)
                start_good = (
                    min(s.get_images()) <= start_ends[0][0] <= max(s.get_images())
                )
                end_good = (
                    min(s.get_images()) <= start_ends[0][1] <= max(s.get_images())
                )
                if not all((start_good, end_good)):
                    logger.debug("Rejecting sweep %s:" % s.get_template())
                    if not start_good:
                        logger.debug(
                            " Your specified start-point image lies outside the bounds of this sweep."
                        )
                    if not end_good:
                        logger.debug(
                            " Your specified end-point image lies outside the bounds of this sweep."
                        )
                    logger.debug(
                        " Your specified start and end points were %d & %d,"
                        % start_ends[0]
                    )
                    logger.debug(
                        " this sweep consists of images from %d to %d."
                        % (min(s.get_images()), max(s.get_images()))
                    )
                    logger.debug(
                        """ If there are missing images in your sweep, but you have selected valid
start and end points within a contiguous range of images, you will see this
message, even though all is well with your selection, because xia2 treats
each contiguous image range as a separate sweep."""
                    )
                    continue
            else:
                start_ends = [(min(s.get_images()), max(s.get_images()))]
            for start_end in start_ends:
                j += 1
                name = "SWEEP%d" % j
                out.write("BEGIN SWEEP %s\n" % name)
                if PhilIndex.params.xia2.settings.input.reverse_phi:
                    out.write("REVERSEPHI\n")
                out.write("WAVELENGTH %s\n" % wavelength_map[s.get_wavelength()])
                out.write("DIRECTORY %s\n" % s.get_directory())
                imgset = s.get_imageset()
                out.write("IMAGE %s\n" % os.path.split(imgset.get_path(0))[-1])
                out.write("START_END %d %d\n" % start_end)
                # really don't need to store the epoch in the xinfo file
                # out.write('EPOCH %d\n' % int(s.get_collect()[0]))
                if not settings.trust_beam_centre:
                    PhilIndex.params.xia2.settings.interactive = False
                    PhilIndex.get_python_object()
                if settings.detector_distance is not None:
                    out.write("DISTANCE %.2f\n" % settings.detector_distance)
                out.write("END SWEEP %s\n" % name)
                out.write("\n")
    out.write("END CRYSTAL %s\n" % crystal)
    out.write("END PROJECT %s\n" % project)
def _get_sweeps(templates):
    """Build a mapping template -> list of sweeps, populating imageset_cache.

    When read_all_image_headers is set and more than one processor is
    available, the per-template work is fanned out via easy_mp; otherwise
    the templates are processed serially.
    """
    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing
    nproc = mp_params.nproc
    if params.xia2.settings.read_all_image_headers and nproc > 1:
        method = "multiprocessing"
        # If xia2 was a proper cctbx module, then we wouldn't have to do this
        # FIXME xia2 is now a proper cctbx module ;o)
        python_path = 'PYTHONPATH="%s"' % ":".join(sys.path)
        # qsub_command is only used when easy_mp dispatches via a queue
        qsub_command = "qsub -v %s -V" % python_path
        args = [(template,) for template in templates]
        results_list = easy_mp.parallel_map(
            get_sweep,
            args,
            processes=nproc,
            method=method,
            qsub_command=qsub_command,
            asynchronous=True,
            preserve_order=True,
            preserve_exception_message=True,
        )
    else:
        results_list = [get_sweep((template,)) for template in templates]
    known_sweeps = {}
    for template, sweeplist in zip(templates, results_list):
        if sweeplist is not None:
            known_sweeps[template] = sweeplist
            # cache each imageset keyed by (template, first image number)
            for sweep in sweeplist:
                imageset = sweep.get_imageset()
                if template not in imageset_cache:
                    imageset_cache[template] = collections.OrderedDict()
                imageset_cache[template][
                    imageset.get_scan().get_image_range()[0]
                ] = imageset
    return known_sweeps
def _rummage(directories):
    """Recursively search *directories* for image sweeps and return them."""
    templates = set()
    seen_real_paths = set()
    for top in directories:
        for root, _dirs, files in os.walk(top, followlinks=True):
            real = os.path.realpath(root)
            # guard against symlink loops: never visit the same physical
            # directory twice
            if real not in seen_real_paths:
                seen_real_paths.add(real)
                templates.update(visit(root, files))
    return _get_sweeps(templates)
def write_xinfo(filename, directories, template=None, hdf5_master_files=None):
    """Discover sweeps and write an .xinfo description file to *filename*.

    directories -- directories to search for images (recursively)
    template -- optional image template to restrict the search to
    hdf5_master_files -- optional explicit list of HDF5 master files
    """
    global target_template

    target_template = template

    settings = PhilIndex.get_python_object().xia2.settings
    crystal = settings.crystal

    if not os.path.isabs(filename):
        filename = os.path.abspath(filename)

    directory = os.path.join(os.getcwd(), crystal, "setup")
    # exist_ok replaces the fragile, locale-dependent check of
    # 'File exists' in the OSError message text
    os.makedirs(directory, exist_ok=True)

    # if we have given a template and directory on the command line, just
    # look there (i.e. not in the subdirectories)
    if CommandLine.get_template() and CommandLine.get_directory():
        # xia2 image=$(dials.data get -q x4wide)/X4_wide_M1S4_2_0001.cbf
        templates = set()
        for directory in CommandLine.get_directory():
            templates.update(visit(directory, os.listdir(directory)))
        sweeps = _get_sweeps(templates)
    elif hdf5_master_files is not None:
        # xia2 image=$(dials.data get -q vmxi_thaumatin)/image_15799_master.h5
        sweeps = _get_sweeps(hdf5_master_files)
    else:
        # xia2 $(dials.data get -q x4wide)
        sweeps = _rummage(directories)

    with open(filename, "w") as fout:
        _write_sweeps(sweeps, fout)
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Applications/xia2setup.py",
"copies": "1",
"size": "19381",
"license": "bsd-3-clause",
"hash": 365338163598673150,
"line_mean": 29.9600638978,
"line_max": 103,
"alpha_frac": 0.5546153449,
"autogenerated": false,
"ratio": 3.7407836325033776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4795398977403378,
"avg_score": null,
"num_lines": null
} |
""" An app to draw glacier geometry on top of a background image (local plotly)
"""
from outletglacierapp import app
import os
import warnings
import itertools
import json
import numpy as np
from flask import Flask, redirect, url_for, render_template, request, jsonify, flash, session, abort, make_response, send_from_directory
from forms import MapForm, FlowLineForm, ExtractForm, MeshForm
from config import glacier_choices, datadir
import dimarray as da
from models.greenmap import get_dict_data, get_json_data, _load_data, get_coords
from models.flowline import compute_one_flowline
from models.mesh import make_2d_grid_from_contours, Point, Line, extractglacier1d
from models.glacier1d import massbalance_diag
def flash_errors(form):
    """Flash one user-visible message per validation error found on *form*."""
    for field_name, messages in form.errors.items():
        label = getattr(form, field_name).label.text
        for message in messages:
            flash(u"Error in the %s field: %s" % (label, message))
def getmeshpath(session):
    """Return the on-disk path of this session's 2-d mesh file.

    A default file name is stored in the session on first use.
    """
    if 'mesh2d' not in session:
        session['mesh2d'] = 'mesh2d.nc'
    return os.path.join(datadir, session['mesh2d'])
def getglacierpath(session):
    """Return the on-disk path of this session's 1-d glacier file.

    A default file name is stored in the session on first use.
    """
    if 'glacier1d' not in session:
        session['glacier1d'] = 'glacier1d.nc'
    return os.path.join(datadir, session['glacier1d'])
def getlinepath(session):
    """Return the on-disk path of this session's drawn-lines JSON file.

    Also repairs a legacy state where session['lines'] held a list of
    lines instead of a file name.
    """
    if 'lines' not in session:
        session['lines'] = 'lines.json'
    if type(session['lines']) is list:
        warnings.warn('lines is a list for some reason')
        session['lines'] = 'lines.json'
    return os.path.join(datadir, session['lines'])
def get_map_form(session):
    """ instantiate and define MapForm based on session parameters

    Fields left unset keep their form defaults; coords are stored in the
    session as [left, right, bottom, top].
    """
    form = MapForm()
    # update form based on session parameters
    if 'variable' in session and 'dataset' in session:
        form.dataset.data = session['variable']+' - '+ session['dataset']
    if 'coords' in session:
        print 'document coords',session['coords']
        form.left.data = session['coords'][0]
        form.right.data = session['coords'][1]
        form.bottom.data = session['coords'][2]
        form.top.data = session['coords'][3]
    if 'glacier' in session:
        form.glacier.data = session['glacier']
    if 'maxpixels' in session:
        form.maxpixels.data = session['maxpixels']
    return form
def get_form(form, session):
    """Populate *form* fields from session keys named <FormClass>_<field>.

    Be careful: the naming scheme can collide across forms sharing a field.
    """
    prefix = form.__class__.__name__ + '_'
    for field_name in form.data.keys():
        session_key = prefix + field_name
        if session_key in session:
            # assign via the field object: writing form.data[k] has no effect
            getattr(form, field_name).data = session[session_key]
    return form
def set_form(form, session):
    """Persist every field of *form* into the session as <FormClass>_<field>."""
    prefix = form.__class__.__name__ + '_'
    for field_name, value in form.data.items():
        session[prefix + field_name] = value
@app.route('/')
def index():
    """Landing page: send the user straight to the drawing view."""
    return redirect(url_for('drawing'))
@app.route('/basin')
def draw_basin():
    """Render the basin-drawing page with a map form built from the session."""
    mapform = get_map_form(session)
    return render_template('draw_basin.html', form=mapform)
@app.route('/drawing')
def drawing():
    """Main drawing page: map form, flowline form and a hidden mesh form."""
    mapform = get_map_form(session)
    meshform = get_form(MeshForm(), session)
    return render_template('drawing.html', form=mapform, flowline=FlowLineForm(),
                           meshform=meshform, hidemeshform=True)
@app.route('/googlemap')
def googlemap():
    """Google-map based drawing page (no background map form needed)."""
    meshform = get_form(MeshForm(), session)
    return render_template('googlemap.html', flowline=FlowLineForm(),
                           meshform=meshform, hidemeshform=True)
@app.route('/reset', methods=["POST"])
def reset():
    """Clear map-related session state and return to the drawing page.

    The mesh and line files are deliberately kept.
    """
    # one loop replaces five duplicated `if key in session: del` branches;
    # the membership test avoids marking the session modified needlessly
    for key in ('variable', 'dataset', 'coords', 'glacier', 'maxpixels'):
        if key in session:
            del session[key]
    return redirect(url_for('drawing'))
@app.route('/mapdata', methods=["GET"])
def mapdata():
    """ return json data to plot map on Greenland domain
    """
    form = MapForm(request.args)
    if not form.validate():
        flash_errors(form)
    # define session parameters
    variable, source = form.dataset.data.split('-')
    # save these parameters in session just in case, but is not used
    # but leave GET to make testing easier
    session['variable'] = variable.strip()
    session['dataset'] = source.strip()
    session['glacier'] = form.glacier.data
    session['maxpixels'] = form.maxpixels.data
    # update coordinates to get a fixed aspect ratio
    r = 1  # imposed width/height aspect ratio of the requested box
    # NOTE(review): currentwidth is computed but never used -- TODO confirm
    currentwidth = form.right.data - form.left.data
    width = r*(form.top.data - form.bottom.data)
    # form.right.data += (width-currentwidth)/2
    # form.left.data -= (width-currentwidth)/2
    form.right.data = form.left.data + width # maintain the left side...
    session['coords'] = [form.left.data, form.right.data, form.bottom.data, form.top.data]
    coords = session['coords'] # coordinates (can be custom)
    variable = session['variable'] # coordinates (can be custom)
    dataset = session['dataset'] # coordinates (can be custom)
    maxshape = (session['maxpixels'],)*2
    data = get_json_data(variable, dataset, coords, maxshape=maxshape)
    return make_response(data) #, type='application/json')
@app.route('/glacierinfo')
def glacierinfo():
    """Return name and coordinates for every predefined glacier (Box & Decker).

    The 'custom' pseudo-entry from the settings list is excluded.
    """
    info = []
    for name in glacier_choices:
        if name.lower() == 'custom':
            continue
        info.append({'name': name, 'coords': get_coords(name)})
    return jsonify(glacierinfo=info)
@app.route('/flowline', methods=['GET'])
def flowline():
    """ compute flowline given a starting point

    Query parameters (via FlowLineForm): x, y (start point, km), dx (step),
    maxdist (maximum length) and dataset (velocity field to trace).
    """
    # starting point in km
    # x = float(request.get('x'))
    # y = float(request.get('y'))
    # dx = float(request.get('dx'))
    # maxdist = float(request.get('maxdist'))
    # dataset = request.form.get('dataset')
    form = FlowLineForm(request.args)
    #TODO: remove maxshape argument (related to shape of loaded data) and write
    # a fortran routine !
    line = compute_one_flowline(form.x.data, form.y.data, dx=form.dx.data, maxdist=form.maxdist.data,
                                dataset=form.dataset.data, maxshape=(500,500))
    return jsonify(line=line)
@app.route('/lines', methods=['GET','POST'])
def lines():
    """GET returns the stored drawing lines; POST replaces them with the JSON body."""
    if request.method == 'POST':
        posted = request.json
        _setlines(session, posted)
        return jsonify(lines=posted)
    return jsonify(lines=_getlines(session))
def _getlines(session):
    """Load this session's drawing lines from disk ([] when none saved yet)."""
    path = getlinepath(session)
    if not os.path.exists(path):
        return []
    with open(path, 'r') as fh:
        return json.load(fh)
def _setlines(session, lines):
    """Write *lines* (a JSON-serialisable list) to this session's line file."""
    with open(getlinepath(session), 'w') as fh:
        # json.dump returns None; the original uselessly rebound `lines` to it
        json.dump(lines, fh)
@app.route('/lineslonglat', methods=['GET','POST'])
def lineslonglat():
    """Exchange the stored drawing lines in geographic (lon/lat) coordinates.

    GET returns the stored lines reprojected to lon/lat; POST accepts
    lon/lat lines, reprojects them to the map CRS (km) and stores them.
    """
    import cartopy.crs as ccrs
    from models.greenmap import CRS
    longlat = ccrs.PlateCarree()
    def transform_line(line, crs0, crs1):
        " transform a line between two coordinate systems "
        x, y = zip(*[(pt['x'], pt['y']) for pt in line['values']])
        x, y = np.array(x), np.array(y)
        # map coordinates are stored in km: convert to metres before the
        # projection, and back to km afterwards
        if crs0 != longlat:
            x *= 1e3
            y *= 1e3
        pts_xyz = crs1.transform_points(crs0, x, y)
        if crs1 != longlat:
            pts_xyz /= 1e3
        lon, lat = pts_xyz[...,0], pts_xyz[...,1]
        newvalues = [{'x':lo, 'y':la} for lo, la in zip(lon, lat)]
        if np.any(~np.isfinite(pts_xyz)):
            raise RuntimeError("nan or inf in points !")
        return {'id':line['id'], 'values':newvalues}
    if request.method == 'GET':
        lines = _getlines(session)
        longlatlines = [transform_line(line, CRS, longlat) for line in lines]
        # lines = [transform_line(line, longlat, CRS) for line in longlatlines]
        return jsonify(longlatlines=longlatlines)
    else:
        print "received longlat", request.json
        #lines = [transform_line(line, longlat, CRS) for line in request.json]
        lines = [transform_line(line, longlat, CRS) for line in request.json]
        print "transformed xy", lines
        _setlines(session, lines)
        # return jsonify(msg='all good')
        return jsonify(lines=lines)
        # return jsonify(lines=lines)
@app.route('/mesh', methods=['GET', 'POST'])
def mesh():
    """GET returns the stored 2-d mesh; POST builds a mesh from the drawn lines.

    GET requires a mesh file created by an earlier POST ("Save and Mesh").
    POST expects exactly three stored lines with ids left/middle/right.
    """
    meshpath = getmeshpath(session)
    if request.method == 'GET':
        # a missing mesh file simply propagates as an exception: the
        # original wrapped this in `try/except: raise` followed by three
        # unreachable statements (dead code removed)
        ds = da.read_nc(meshpath)
        # convert stored coordinates (metres) to km for the client
        mesh = [[{'x':x*1e-3, 'y':y*1e-3, 's':s*1e-3} for x, y in zip(xs_section, ys_section)] for xs_section, ys_section, s in zip(ds['x_coord'], ds['y_coord'], ds.x)]
        return jsonify(mesh=mesh)
    else:
        # compute mesh from the drawn outlines and store it to disk
        lines = _getlines(session)
        meshform = MeshForm(request.form)
        set_form(meshform, session)  # make request persistent
        dx = meshform.data['dx']
        ny = meshform.data['ny']
        if len(lines) == 0:
            flash('no lines found !')
            return jsonify(url=url_for('drawing'))
        elif len(lines) != 3:
            flash('3 lines expected !')
            return jsonify(url=url_for('drawing'))
        linedict = {line['id'].lower(): line['values'] for line in lines}
        if set(linedict.keys()) != {'left','right','middle'}:
            # typo fixed: "Unxpected" -> "Unexpected"
            flash('Unexpected line ids. Expected: {}, got: {}'.format(['left','right','middle'],linedict.keys()))
            return jsonify(url=url_for('drawing'))
        # convert each outline to a Line of Points in metres
        for nm in ['middle','left','right']:
            linedict[nm] = Line([Point(pt['x']*1e3, pt['y']*1e3) for pt in linedict[nm]])
        dima_mesh = make_2d_grid_from_contours(dx=dx, ny=ny, **linedict)
        dima_mesh.write_nc(meshpath, 'w')  # write mesh to disk
        return redirect(url_for('mesh'))
@app.route('/viewmesh')
def viewmesh():
    """Render the mesh/glacier page with map, extraction and mesh forms."""
    return render_template('mesh.html',
                           form=get_map_form(session),
                           extractform=get_form(ExtractForm(), session),
                           meshform=get_form(MeshForm(), session))
@app.route('/meshoutline', methods=['GET', 'POST'])
def meshoutline():
    """Extract left/middle/right outlines from the stored 2-d mesh.

    POST additionally makes the extracted outlines the session's current
    drawing lines.  Coordinates are returned in km.
    """
    meshpath = getmeshpath(session)
    if not os.path.exists(meshpath):
        raise ValueError("mesh file unavailable: "+meshpath)
    x_coord = da.read_nc(meshpath,'x_coord').values*1e-3
    y_coord = da.read_nc(meshpath,'y_coord').values*1e-3
    ni, nj = x_coord.shape
    # unused locals left/middle/right removed (dead code in the original)
    lines = [{'id':'middle', 'values':[]},
             {'id':'left', 'values':[]},
             {'id':'right', 'values':[]}]
    for i in range(ni): # loop over sections
        lines[0]['values'].append({'x':x_coord[i][int(nj/2)], 'y':y_coord[i][int(nj/2)]})
        lines[1]['values'].append({'x':x_coord[i][0], 'y':y_coord[i][0]})
        lines[2]['values'].append({'x':x_coord[i][-1], 'y':y_coord[i][-1]})
    # if POST, make it the default line
    if request.method == 'POST':
        _setlines(session, lines)
    return jsonify(lines=lines)
# @app.route('/data1d/<name:variable>/<name:dataset>', methods=['GET'])
# def extract_one_variable(variable, dataset):
# """ extract one variable from the netCDF file
# """
@app.route('/glacier1d', methods=['GET', 'POST'])
def make_glacier1d():
    """ extract data

    POST: extract 1-d glacier data along the stored 2-d mesh, write it to
    the session's glacier1d file and redirect to the figure view.
    GET is deliberately unsupported (use /figure/glacier1d instead).
    """
    meshpath = getmeshpath(session)
    glacierpath = getglacierpath(session)
    if request.method == 'POST':
        # if request.method == 'GET':
        extractform = ExtractForm(request.form)
        # extractform = ExtractForm()
        mesh = da.read_nc(meshpath)
        glacier1d = extractglacier1d(mesh, extractform.data)
        # quick fix SMB shifted upward
        # glacier1d['smb'].values += (0.2/(3600*24*365.25))
        # glacier1d['smb'].note = "increased by 0.2 m/year, uniformly"
        glacier1d.write_nc(glacierpath, 'w')
        return redirect(url_for('vizualize_glacier1d')) # get method
    elif request.method == 'GET':
        raise ValueError("no GET route for /glacier1d, try /figure/glacier1d")
@app.route("/figure/glacier1d")
def vizualize_glacier1d():
    """ return data to make a figure

    Reads the session's glacier1d file, renames variables and converts
    units for plotting, and returns JSON with per-view variable groupings
    and the per-variable data points.
    """
    # read glacier data
    glacierpath = getglacierpath(session)
    glacier1d = da.read_nc(glacierpath)
    # for the diagnostic, also add velocity divergence near surface mass balance
    glacier1d = massbalance_diag(glacier1d)
    # rename variables and change units for the plotting
    fmt = dict(
        U='surf_velocity',
        hs='surface',
        hb='bottom',
        zb='bedrock',
        W='width',
    )
    # fmt.pop(nm, nm) maps a known short name to its plot name and leaves
    # any other variable name unchanged
    glacier1d = da.Dataset({fmt.pop(nm, nm): glacier1d[nm] for nm in glacier1d.keys()})
    # meters into km
    glacier1d.axes['x'].values *= 1e-3
    glacier1d.axes['x'].units = 'km'
    for nm in ['x_coord','y_coord','width']:
        glacier1d[nm].values *= 1e-3
        glacier1d[nm].units = 'km'
    # meters/seconds into meters/year
    for nm in ['surf_velocity','balance_velocity_obs','balance_velocity_mod3D','smb','runoff']:
        glacier1d[nm].values *= 24*3600*365.25
        glacier1d[nm].units = 'meters/year'
    # group data into various views
    views = [
        {
        'id': 'elevation',
        'names' : ['bedrock','bottom','surface'],
        'xlabel' : '',
        'ylabel' : 'elevation (m)',
        },
        {
        'id': 'width',
        'names' : ['width'],
        'xlabel' : '',
        'ylabel' : 'width (km)',
        },
        {
        'id': 'velocity',
        'names' : ['surf_velocity'],
        # 'names' : ['surf_velocity','balance_velocity_obs','balance_velocity_mod3D'],
        # 'xlabel' : '',
        'xlabel' : 'distance from ice divide(km)',
        'ylabel' : 'velocity (meters/year)',
        },
        # {
        # 'id': 'mass_balance',
        # 'names' : ['cumulative_smb','ice_flux_surf_obs','ice_flux_bal_mod3D'],
        # 'xlabel' : '',
        # 'ylabel' : 'mass balance (meters^3/second)',
        # },
        # {
        # 'id': 'smb',
        # 'names' : ['smb','runoff'],
        # 'xlabel' : 'distance from ice divide(km)',
        # 'ylabel' : 'SMB (meters/year)',
        # },
    ]
    # variables to plot
    names = np.unique(list(itertools.chain(*[view['names'] for view in views]))).tolist()
    names += ['x_coord','y_coord'] # also pass along coordinates
    print names
    # replace all nan values
    missing_values = -99.99
    for k in glacier1d:
        glacier1d[k][np.isnan(glacier1d[k])] = missing_values
    # for simplicity, organize each line a list of poitns with x, y property
    sources = {}
    for nm in names:
        sources[nm] = {
            'values':[{'x':glacier1d.x[i], 'y':val} for i, val in enumerate(glacier1d[nm].values)],
            'missing_values': missing_values,
        }
    # not used for now
    units = {k:glacier1d[k].units.strip() if hasattr(glacier1d[k], 'units') else '' for k in names}
    return jsonify(views=views, sources=sources, width=350, height=120)
@app.route('/download/glacier1d.nc')
def download():
    """Serve this session's extracted 1-d glacier netCDF file for download."""
    folder, fname = os.path.split(getglacierpath(session))
    return send_from_directory(directory=folder, filename=fname)
| {
"repo_name": "perrette/webglacier1d",
"path": "outletglacierapp/views.py",
"copies": "1",
"size": "16362",
"license": "mit",
"hash": -4077231380146714000,
"line_mean": 34.0364025696,
"line_max": 168,
"alpha_frac": 0.6044493338,
"autogenerated": false,
"ratio": 3.432347388294525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4536796722094525,
"avg_score": null,
"num_lines": null
} |
# Anarchic Society Optimization Algorithm
# As seen in the paper by Amir Ahmadi-Javid
# Implemented by Juanjo Sierra
from CreateInitialSociety import *
from CurrentMovementPolicy import CalculateFicklenessIndexes, GenerateCurrentMovementPolicy
from SocietyMovementPolicy import CalculateExternalIrregularityIndexes, GenerateSocietyMovementPolicy
from PreviousMovementPolicy import CalculateInternalIrregularityIndexes, GeneratePreviousMovementPolicy
from MovementPoliciesCombination import NewPositionsPolicyBased
from UpdateHistory import *
def ASO(CostFunction, dim=10, nindividuals=20, max_eval=10000,
    fickleness_rate=0.5, external_rate=4, external_threshold=0.5,
    internal_rate=4, internal_threshold=0.5, evolution_rate=0.5,
    lower_bound=0, upper_bound=10, initial_population_lower_bound=None,
    initial_population_upper_bound=None):
    """Minimise *CostFunction* with Anarchic Society Optimization.

    Each iteration combines three movement policies (current, society,
    previous) per individual.  Records the best fitness so far at each
    10%-of-budget mark; the final best is appended by the return statement.
    """
    # Domain of the function, tuple including lower and upper bounds
    domain = (lower_bound, upper_bound)
    initial_domain = (initial_population_lower_bound, initial_population_upper_bound)
    # Create the initial society
    society, society_fitness, history, history_fitness = CreateInitialSociety(CostFunction,
        nindividuals, dim, domain, initial_domain)
    # Initial function evaluations are the same as society members
    evaluations = nindividuals
    evaluations_marker = 0
    evaluation_marks = np.array([])
    # Start the main loop
    while evaluations < max_eval:
        global_best = np.min(history_fitness)
        iteration_best = society[np.argsort(society_fitness)[0]]
        fickleness_indexes = CalculateFicklenessIndexes(society_fitness, history_fitness, fickleness_rate)
        external_indexes = CalculateExternalIrregularityIndexes(society_fitness, global_best, external_rate)
        internal_indexes = CalculateInternalIrregularityIndexes(society_fitness, history_fitness, internal_rate)
        # one candidate position per individual for each of the three policies
        current_movement_positions = np.array([GenerateCurrentMovementPolicy(
            individual, fickleness_index, iteration_best, fickleness_rate, evolution_rate, domain)
            for (individual, fickleness_index) in zip(society, fickleness_indexes)])
        society_movement_positions = np.array([GenerateSocietyMovementPolicy(
            individual, external_index, global_best, external_threshold, evolution_rate, domain)
            for (individual, external_index) in zip(society, external_indexes)])
        previous_movement_positions = np.array([GeneratePreviousMovementPolicy(
            individual, internal_index, previous_best, internal_threshold, evolution_rate, domain)
            for (individual, internal_index, previous_best) in zip(society, internal_indexes, history)])
        society, society_fitness, new_evaluations = NewPositionsPolicyBased(CostFunction, current_movement_positions, society_movement_positions, previous_movement_positions)
        history, history_fitness = UpdateHistory(society, society_fitness, history, history_fitness)
        evaluations += new_evaluations
        # record the best-so-far at every 10%-of-budget checkpoint
        if evaluations >= evaluations_marker:
            evaluation_marks = np.insert(evaluation_marks, len(evaluation_marks), np.min(history_fitness))
            evaluations_marker += max_eval / 10
        #print("Iteration {:3}, best solution: {:e}".format(iteration, np.min(history_fitness)))
return np.append(evaluation_marks, np.min(history_fitness)) | {
"repo_name": "JJSrra/Research-SocioinspiredAlgorithms",
"path": "ASO/ASO.py",
"copies": "1",
"size": "3198",
"license": "mit",
"hash": 1601501287151703300,
"line_mean": 48.2153846154,
"line_max": 168,
"alpha_frac": 0.7948717949,
"autogenerated": false,
"ratio": 3.4313304721030042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4726202267003004,
"avg_score": null,
"num_lines": null
} |
# anarchy.py by ApolloJustice
# for use with Python 3
# non PEP-8 compliant because honestly fuck that
# probably not commented because too lazy
__module_name__ = "Anarchizer"
__module_version__ = "1.0"
__module_description__ = "Makes a channel into an ANARCHY"
__author__ = "ApolloJustice"
import hexchat
def anarchize(word, word_eol, userdata):
    """Op every user in the current channel, declaring it an "anarchy".

    Requires explicit confirmation via /anarchy confirm.
    word[0] is the command name, word[1] (optional) the confirmation token.
    Always returns hexchat.EAT_ALL so the command is not forwarded.
    """
    chan = hexchat.get_info("channel")
    # a single merged guard replaces the two duplicated prompt branches and
    # the redundant third equality check of the original
    if len(word) == 1 or word[1] != 'confirm':
        print("Are you sure? Type /anarchy confirm to make %s into an anarchy." % chan)
        return hexchat.EAT_ALL
    userlist = [user.nick.lower() for user in hexchat.get_list('users')]
    UList = ' '.join(userlist)
    hexchat.command("me declares %s an anarchy!" % chan)
    hexchat.command("cs op %s" % chan)
    hexchat.command("timer 2 op %s" % UList)
    return hexchat.EAT_ALL
# Register /anarchy with HexChat; anarchize() handles the command.
hexchat.hook_command("anarchy", anarchize)
hexchat.emit_print('Notice', __module_name__ + ' [S]', '%s by %s loaded. You are using version %s of the script.' % (__module_name__, __author__, __module_version__)) | {
"repo_name": "ApolloJustice/HexChat-pyscripts",
"path": "anarchy.py",
"copies": "1",
"size": "1176",
"license": "mit",
"hash": 545094131093512800,
"line_mean": 33.6176470588,
"line_max": 166,
"alpha_frac": 0.675170068,
"autogenerated": false,
"ratio": 2.8066825775656326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3981852645565632,
"avg_score": null,
"num_lines": null
} |
"""An array class that has methods supporting the type of stencil
operations we see in finite-difference methods, like i+1, i-1, etc.
"""
from __future__ import print_function
import numpy as np
def _buf_split(b):
""" take an integer or iterable and break it into a -x, +x, -y, +y
value representing a ghost cell buffer
"""
try:
bxlo, bxhi, bylo, byhi = b
except (ValueError, TypeError):
try:
blo, bhi = b
except (ValueError, TypeError):
blo = b
bhi = b
bxlo = bylo = blo
bxhi = byhi = bhi
return bxlo, bxhi, bylo, byhi
class ArrayIndexer(np.ndarray):
    """A numpy.ndarray subclass that wraps the data region of a single
    array (d) defined on a grid, and allows us to easily do shifted
    array operations like d[i+1, j] using the ip()/jp() methods —
    the stencil accesses used by finite-difference methods.
    """

    def __new__(self, d, grid=None):
        obj = np.asarray(d).view(self)
        obj.g = grid          # the grid (supplies ilo/ihi/jlo/jhi, ng, dx/dy, ...)
        obj.c = len(d.shape)  # rank at creation; 2 means a single component
        return obj

    def __array_finalize__(self, obj):
        # called by numpy for every view/copy; propagate the grid metadata
        if obj is None:
            return
        self.g = getattr(obj, "g", None)
        self.c = getattr(obj, "c", None)

    def __array_wrap__(self, out_arr, context=None):
        return np.ndarray.__array_wrap__(self, out_arr, context)

    def v(self, buf=0, n=0, s=1):
        """return a view of the valid data region for component n, with stride
        s, and a buffer of ghost cells given by buf
        """
        return self.ip_jp(0, 0, buf=buf, n=n, s=s)

    def ip(self, shift, buf=0, n=0, s=1):
        """return a view of the data shifted by shift in the x direction.  By
        default the view is the same size as the valid region, but the
        buf can specify how many ghost cells on each side to include.
        The component is n and s is the stride
        """
        return self.ip_jp(shift, 0, buf=buf, n=n, s=s)

    def jp(self, shift, buf=0, n=0, s=1):
        """return a view of the data shifted by shift in the y direction.  By
        default the view is the same size as the valid region, but the
        buf can specify how many ghost cells on each side to include.
        The component is n and s is the stride
        """
        return self.ip_jp(0, shift, buf=buf, n=n, s=s)

    def ip_jp(self, ishift, jshift, buf=0, n=0, s=1):
        """return a view of the data shifted by ishift in the x direction and
        jshift in the y direction.  By default the view is the same
        size as the valid region, but the buf can specify how many
        ghost cells on each side to include.  The component is n and s
        is the stride
        """
        bxlo, bxhi, bylo, byhi = _buf_split(buf)
        c = len(self.shape)

        if c == 2:
            return np.asarray(self[self.g.ilo-bxlo+ishift:self.g.ihi+1+bxhi+ishift:s,
                                   self.g.jlo-bylo+jshift:self.g.jhi+1+byhi+jshift:s])
        else:
            return np.asarray(self[self.g.ilo-bxlo+ishift:self.g.ihi+1+bxhi+ishift:s,
                                   self.g.jlo-bylo+jshift:self.g.jhi+1+byhi+jshift:s, n])

    def lap(self, n=0, buf=0):
        """return the 5-point Laplacian"""
        l = (self.ip(-1, n=n, buf=buf) - 2*self.v(n=n, buf=buf) + self.ip(1, n=n, buf=buf))/self.g.dx**2 + \
            (self.jp(-1, n=n, buf=buf) - 2*self.v(n=n, buf=buf) + self.jp(1, n=n, buf=buf))/self.g.dy**2
        return l

    def norm(self, n=0):
        """
        find the norm of the quantity (index n) defined on the same grid,
        in the domain's valid region
        """
        c = len(self.shape)
        if c == 2:
            return np.sqrt(self.g.dx * self.g.dy *
                           np.sum((self[self.g.ilo:self.g.ihi+1, self.g.jlo:self.g.jhi+1]**2).flat))
        else:
            _tmp = self[:, :, n]
            return np.sqrt(self.g.dx * self.g.dy *
                           np.sum((_tmp[self.g.ilo:self.g.ihi+1, self.g.jlo:self.g.jhi+1]**2).flat))

    def copy(self):
        """make a copy of the array, defined on the same grid"""
        return ArrayIndexer(np.asarray(self).copy(), grid=self.g)

    def is_symmetric(self, nodal=False, tol=1.e-14, asymmetric=False):
        """return True is the data is left-right symmetric (to the tolerance
        tol) For node-centered data, set nodal=True
        """
        # prefactor to convert from symmetric to asymmetric test
        s = 1
        if asymmetric:
            s = -1

        if not nodal:
            L = self[self.g.ilo:self.g.ilo+self.g.nx//2,
                     self.g.jlo:self.g.jhi+1]
            R = self[self.g.ilo+self.g.nx//2:self.g.ihi+1,
                     self.g.jlo:self.g.jhi+1]
        else:
            # node-centered data shares the center column between halves
            L = self[self.g.ilo:self.g.ilo+self.g.nx//2+1,
                     self.g.jlo:self.g.jhi+1]
            R = self[self.g.ilo+self.g.nx//2:self.g.ihi+2,
                     self.g.jlo:self.g.jhi+1]

        e = abs(L - s*np.flipud(R)).max()
        return e < tol

    def is_asymmetric(self, nodal=False, tol=1.e-14):
        """return True is the data is left-right asymmetric (to the tolerance
        tol)---e.g, the sign flips. For node-centered data, set nodal=True
        """
        return self.is_symmetric(nodal=nodal, tol=tol, asymmetric=True)

    def fill_ghost(self, n=0, bc=None):
        """Fill the boundary conditions.  This operates on a single component,
        n.  We do periodic, reflect-even, reflect-odd, and outflow

        We need a BC object to tell us what BC type on each boundary.
        """

        # there is only a single grid, so every boundary is on
        # a physical boundary (except if we are periodic)

        # Note: we piggy-back on outflow and reflect-odd for
        # Neumann and Dirichlet homogeneous BCs respectively, but
        # this only works for a single ghost cell

        # -x boundary
        if bc.xlb in ["outflow", "neumann"]:
            if bc.xl_value is None:
                for i in range(self.g.ilo):
                    self[i, :, n] = self[self.g.ilo, :, n]
            else:
                # inhomogeneous Neumann: single ghost cell only
                self[self.g.ilo-1, :, n] = \
                    self[self.g.ilo, :, n] - self.g.dx*bc.xl_value[:]

        elif bc.xlb == "reflect-even":
            for i in range(self.g.ilo):
                self[i, :, n] = self[2*self.g.ng-i-1, :, n]

        elif bc.xlb in ["reflect-odd", "dirichlet"]:
            if bc.xl_value is None:
                for i in range(self.g.ilo):
                    self[i, :, n] = -self[2*self.g.ng-i-1, :, n]
            else:
                # inhomogeneous Dirichlet: single ghost cell only
                self[self.g.ilo-1, :, n] = \
                    2*bc.xl_value[:] - self[self.g.ilo, :, n]

        elif bc.xlb == "periodic":
            for i in range(self.g.ilo):
                self[i, :, n] = self[self.g.ihi-self.g.ng+i+1, :, n]

        # +x boundary
        if bc.xrb in ["outflow", "neumann"]:
            if bc.xr_value is None:
                for i in range(self.g.ihi+1, self.g.nx+2*self.g.ng):
                    self[i, :, n] = self[self.g.ihi, :, n]
            else:
                self[self.g.ihi+1, :, n] = \
                    self[self.g.ihi, :, n] + self.g.dx*bc.xr_value[:]

        elif bc.xrb == "reflect-even":
            for i in range(self.g.ng):
                i_bnd = self.g.ihi+1+i
                i_src = self.g.ihi-i
                self[i_bnd, :, n] = self[i_src, :, n]

        elif bc.xrb in ["reflect-odd", "dirichlet"]:
            if bc.xr_value is None:
                for i in range(self.g.ng):
                    i_bnd = self.g.ihi+1+i
                    i_src = self.g.ihi-i
                    self[i_bnd, :, n] = -self[i_src, :, n]
            else:
                self[self.g.ihi+1, :, n] = \
                    2*bc.xr_value[:] - self[self.g.ihi, :, n]

        elif bc.xrb == "periodic":
            for i in range(self.g.ihi+1, 2*self.g.ng + self.g.nx):
                self[i, :, n] = self[i-self.g.ihi-1+self.g.ng, :, n]

        # -y boundary
        if bc.ylb in ["outflow", "neumann"]:
            if bc.yl_value is None:
                for j in range(self.g.jlo):
                    self[:, j, n] = self[:, self.g.jlo, n]
            else:
                self[:, self.g.jlo-1, n] = \
                    self[:, self.g.jlo, n] - self.g.dy*bc.yl_value[:]

        elif bc.ylb == "reflect-even":
            for j in range(self.g.jlo):
                self[:, j, n] = self[:, 2*self.g.ng-j-1, n]

        elif bc.ylb in ["reflect-odd", "dirichlet"]:
            if bc.yl_value is None:
                for j in range(self.g.jlo):
                    self[:, j, n] = -self[:, 2*self.g.ng-j-1, n]
            else:
                self[:, self.g.jlo-1, n] = \
                    2*bc.yl_value[:] - self[:, self.g.jlo, n]

        elif bc.ylb == "periodic":
            for j in range(self.g.jlo):
                self[:, j, n] = self[:, self.g.jhi-self.g.ng+j+1, n]

        # +y boundary
        if bc.yrb in ["outflow", "neumann"]:
            if bc.yr_value is None:
                for j in range(self.g.jhi+1, self.g.ny+2*self.g.ng):
                    self[:, j, n] = self[:, self.g.jhi, n]
            else:
                self[:, self.g.jhi+1, n] = \
                    self[:, self.g.jhi, n] + self.g.dy*bc.yr_value[:]

        elif bc.yrb == "reflect-even":
            for j in range(self.g.ng):
                j_bnd = self.g.jhi+1+j
                j_src = self.g.jhi-j
                self[:, j_bnd, n] = self[:, j_src, n]

        elif bc.yrb in ["reflect-odd", "dirichlet"]:
            if bc.yr_value is None:
                for j in range(self.g.ng):
                    j_bnd = self.g.jhi+1+j
                    j_src = self.g.jhi-j
                    self[:, j_bnd, n] = -self[:, j_src, n]
            else:
                self[:, self.g.jhi+1, n] = \
                    2*bc.yr_value[:] - self[:, self.g.jhi, n]

        elif bc.yrb == "periodic":
            for j in range(self.g.jhi+1, 2*self.g.ng + self.g.ny):
                self[:, j, n] = self[:, j-self.g.jhi-1+self.g.ng, n]

    def pretty_print(self, n=0, fmt=None, show_ghost=True):
        """
        Print out a small dataset to the screen with the ghost cells
        a different color, to make things stand out
        """

        if fmt is None:
            # np.int/np.float64 aliases were removed from numpy's namespace
            # (NumPy >= 1.24); test the dtype kind instead.  This also
            # generalizes to any integer/floating width.
            if np.issubdtype(self.dtype, np.integer):
                fmt = "%4d"
            elif np.issubdtype(self.dtype, np.floating):
                fmt = "%10.5g"
            else:
                raise ValueError("ERROR: dtype not supported")

        # print j descending, so it looks like a grid (y increasing
        # with height)
        if show_ghost:
            ilo = 0
            ihi = self.g.qx-1
            jlo = 0
            jhi = self.g.qy-1
        else:
            ilo = self.g.ilo
            ihi = self.g.ihi
            jlo = self.g.jlo
            jhi = self.g.jhi

        for j in reversed(range(jlo, jhi+1)):
            for i in range(ilo, ihi+1):
                if (j < self.g.jlo or j > self.g.jhi or
                        i < self.g.ilo or i > self.g.ihi):
                    gc = 1
                else:
                    gc = 0

                if self.c == 2:
                    val = self[i, j]
                else:
                    val = self[i, j, n]

                if gc:
                    # ghost cells are rendered in red
                    print("\033[31m" + fmt % (val) + "\033[0m", end="")
                else:
                    print(fmt % (val), end="")
            print(" ")

        leg = """
         ^ y
         |
         +---> x
        """
        print(leg)
| {
"repo_name": "harpolea/pyro2",
"path": "mesh/array_indexer.py",
"copies": "1",
"size": "11548",
"license": "bsd-3-clause",
"hash": -8349634212112382000,
"line_mean": 33.9939393939,
"line_max": 108,
"alpha_frac": 0.4825943886,
"autogenerated": false,
"ratio": 3.163835616438356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4146430005038356,
"avg_score": null,
"num_lines": null
} |
"""An asset card."""
from csrv.model import actions
from csrv.model import events
from csrv.model import game_object
from csrv.model import timing_phases
from csrv.model.cards import installable_card
from csrv.model.cards import card_info
class Asset(installable_card.InstallableCard):
  """A corp asset card: installable from hand, rezzable once installed,
  and trashable when accessed by the runner."""

  TYPE = card_info.ASSET
  REZZABLE = True
  TRASHABLE = True

  WHEN_IN_HAND_PROVIDES_CHOICES_FOR = {
      timing_phases.CorpTurnActions: 'install_actions',
  }
  WHEN_INSTALLED_PROVIDES_CHOICES_FOR = {
      timing_phases.CorpRezCards: 'rez_actions',
  }
  WHEN_ACCESSED_PROVIDES_CHOICES_FOR = {
      timing_phases.AccessCard: 'trash_on_access_actions',
  }

  def build_actions(self):
    """Create the action objects this card can offer."""
    installable_card.InstallableCard.build_actions(self)
    self.install_action = actions.InstallAgendaAsset(
        self.game, self.player, self)
    self._rez_action = actions.RezAssetUpgrade(self.game, self.player, self)

  def install_actions(self):
    """Offer the install action while the corp still has clicks."""
    return [self.install_action] if self.player.clicks.value else []

  def rez_actions(self):
    """Offer the rez action until the card has been rezzed."""
    return [] if self.is_rezzed else [self._rez_action]

  def on_install(self):
    """Run the standard install handling, then fire the install event."""
    installable_card.InstallableCard.on_install(self)
    self.trigger_event(events.InstallAgendaAssetUpgrade(self.game, self.player))
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/cards/asset.py",
"copies": "1",
"size": "1276",
"license": "apache-2.0",
"hash": -2279621822591452000,
"line_mean": 26.1489361702,
"line_max": 80,
"alpha_frac": 0.723354232,
"autogenerated": false,
"ratio": 3.3142857142857145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4537639946285715,
"avg_score": null,
"num_lines": null
} |
"""An assortment of different useful functions."""
import os
def in_inventory(item_class, player):
    """Return True if *player* carries any item of type *item_class*.

    :param item_class: the class (or tuple of classes) to look for
    :param player: object with an ``inventory`` iterable of items
    """
    # any() short-circuits on the first match, same as the original loop
    return any(isinstance(item, item_class) for item in player.inventory)
def get_item_from_name(item_name, item_list):
    """Retrieve an item's object from its name.

    Returns the first item in *item_list* whose ``name`` equals
    *item_name*, or ``False`` if no item matches (kept for backward
    compatibility with existing callers).
    """
    ### Potential issue if multiple items share a name: first match wins.
    return next((item for item in item_list if item.name == item_name), False)
def get_indef_article(noun):
    """Get the indefinite article ('a' or 'an') that precedes a noun.

    The original version only matched lowercase letters and digits and
    returned None for anything else (e.g. 'Apple'); this version folds
    case and defaults to 'a' for non-vowel leading characters.
    """
    if not noun:
        return 'a'
    return 'an' if noun[0].lower() in 'aeiou' else 'a'
def clrscn():
    """Clear the terminal screen (``cls`` on Windows, ``clear`` elsewhere)."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def number_strings(*strings):
    """Print each string preceded by its 1-based position.

    Output format is ``<number> <string>`` per line.  Returns None.
    """
    # enumerate(start=1) replaces the original manual `number += 1`
    for number, string in enumerate(strings, start=1):
        print(number, string)
| {
"repo_name": "allanburleson/python-adventure-game",
"path": "pag/utils.py",
"copies": "2",
"size": "1096",
"license": "mit",
"hash": 8675506937762092000,
"line_mean": 24.488372093,
"line_max": 63,
"alpha_frac": 0.6295620438,
"autogenerated": false,
"ratio": 3.6411960132890364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5270758057089037,
"avg_score": null,
"num_lines": null
} |
"""An assortment of preconfigured client factories that represent "turnkey"
clients. This is particularly useful for a CLI based application that would
like to allow the user to select one or more preconfigured test patterns
(including Behaviours, Policy, etc)."""
import httplib
from ..behaviours import *
from policies import *
from client import Client
class TestFactory(object):
    """A preconfigured client recipe.

    Stores a human-readable description together with the behaviour
    tuples and keyword options used to build a Client; calling the
    factory instantiates a fresh Client from those parameters.
    """
    def __init__(self, description, *behaviour_tuples, **kwargs):
        self.description = description
        self.behaviour_tuples = behaviour_tuples
        self.kwargs = kwargs

    def __call__(self):
        params = self.behaviour_tuples
        options = self.kwargs
        return Client.from_parameters(*params, **options)
# Turnkey test patterns, selectable by name through `test_factory_map`.

Plain = TestFactory(
    "Endless sequence of 99 HTTP OK and 1 HTTP Not Found (404)",
    (PlainResponse(), 99),
    (PlainResponse(status=httplib.NOT_FOUND), 1),
    policy=Repeat
)
LightSleep = TestFactory(
    "Random choice of 99% HTTP OK and 1% 0.5 second sleep",
    (PlainResponse(), 99),
    (Sleeping(sleep_duration=0.5), 1),
)
HeavySleep = TestFactory(
    "Random choice of 95% HTTP OK and 5% 2 second sleep",
    (PlainResponse(), 95),
    (Sleeping(sleep_duration=2), 5),
)
# NOTE(review): this assignment shadows the SIGSEGV behaviour class pulled in
# by `from ..behaviours import *`; the `SIGSEGV()` call on the right still
# resolves to the behaviour class because it is evaluated before rebinding.
SIGSEGV = TestFactory(
    "50/50 chance of HTTP OK or SIGSEGV",
    (PlainResponse(), 50),
    (SIGSEGV(), 50),
)
# Collect every TestFactory defined in this module, keyed by its global name.
test_factory_map = dict((name, obj) for name, obj in globals().iteritems()
                        if isinstance(obj, TestFactory))
| {
"repo_name": "yaniv-aknin/labour",
"path": "labour/tester/factories.py",
"copies": "1",
"size": "1409",
"license": "mit",
"hash": -6622904260678328000,
"line_mean": 29.6304347826,
"line_max": 76,
"alpha_frac": 0.6806245564,
"autogenerated": false,
"ratio": 3.717678100263852,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4898302656663852,
"avg_score": null,
"num_lines": null
} |
# An assortment of utilities.
from contextlib import contextmanager
@contextmanager
def restoring_sels(view):
    """Context manager that snapshots *view*'s selections and restores
    them on exit.

    Fix: restoration now happens in a ``finally`` clause, so selections
    are restored even if the managed block raises (the original lost
    them on an exception).
    """
    old_sels = list(view.sel())
    try:
        yield
    finally:
        view.sel().clear()
        for s in old_sels:
            # XXX: If the buffer has changed in the meantime, this won't work well.
            view.sel().add(s)
def has_dirty_buffers(window):
    """Return True if any view in *window* has unsaved changes.

    Note: the original returned None (falsy) for the clean case; this
    returns an explicit bool, which is truthiness-compatible for all
    existing callers.
    """
    return any(v.is_dirty() for v in window.views())
def show_ipanel(window, caption='', initial_text='', on_done=None,
                on_change=None, on_cancel=None):
    """Open an input panel on *window* and return the created panel view."""
    panel_view = window.show_input_panel(
        caption, initial_text, on_done, on_change, on_cancel)
    return panel_view
def is_view(view):
    """
    Returns `True` if @view is a normal view — i.e. neither a widget
    nor the console.
    """
    return not is_widget(view) and not is_console(view)
def is_widget(view):
    """
    Returns `True` if @view is a widget (as reported by its settings).
    """
    settings = view.settings()
    return settings.get('is_widget')
def is_console(view):
    """
    Returns `True` if @view seems to be ST3's console.
    """
    # XXX: Is this reliable?
    settings_attr = getattr(view, 'settings')
    return settings_attr is None
| {
"repo_name": "himacro/Vintageous",
"path": "vi/sublime.py",
"copies": "9",
"size": "1098",
"license": "mit",
"hash": -2908037155276550700,
"line_mean": 21.875,
"line_max": 79,
"alpha_frac": 0.5947176685,
"autogenerated": false,
"ratio": 3.4746835443037973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
"""An async GitHub API library"""
__version__ = "5.0.1.dev"
import http
from typing import Any, Optional
class GitHubException(Exception):
    """Root of the exception hierarchy for this library.

    Catch this to handle any error raised by gidgethub.
    """
class ValidationFailure(GitHubException):
    """An exception representing failed validation of a webhook event."""

    # Raised when a webhook payload's signature does not match; see:
    # https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/securing-your-webhooks#validating-payloads-from-github
class HTTPException(GitHubException):
    """A general exception to represent HTTP responses.

    The HTTP status code is stored on :attr:`status_code`; the exception
    message defaults to the status phrase when no detail is supplied.
    """

    def __init__(self, status_code: http.HTTPStatus, *args: Any) -> None:
        self.status_code = status_code
        detail = args if args else (status_code.phrase,)
        super().__init__(*detail)
class RedirectionException(HTTPException):
    """Exception for 3XX HTTP responses."""
    # Pure marker subclass; the status code carries the specifics.
class BadRequest(HTTPException):
    """The request is invalid.

    Used for 4XX HTTP errors.
    """
    # https://docs.github.com/en/free-pro-team@latest/rest/overview/resources-in-the-rest-api#client-errors
class BadRequestUnknownError(BadRequest):
    """A bad request whose response body is not JSON."""

    def __init__(self, response: str) -> None:
        # Keep the raw (non-JSON) body so callers can inspect it.
        self.response = response
        super().__init__(http.HTTPStatus.UNPROCESSABLE_ENTITY)
class RateLimitExceeded(BadRequest):
    """Request rejected due to the rate limit being exceeded.

    The limit details are stored on :attr:`rate_limit`.
    """

    # Technically rate_limit is of type gidgethub.sansio.RateLimit, but a
    # circular import comes about if you try to properly declare it.
    def __init__(self, rate_limit: Any, *args: Any) -> None:
        self.rate_limit = rate_limit
        detail = args if args else ("rate limit exceeded",)
        super().__init__(http.HTTPStatus.FORBIDDEN, *detail)
class InvalidField(BadRequest):
    """A field in the request is invalid.

    Represented by a 422 HTTP Response. Details of what fields were
    invalid are stored in the errors attribute.
    """

    def __init__(self, errors: Any, *args: Any) -> None:
        """Store the error details."""
        self.errors = errors
        super().__init__(http.HTTPStatus.UNPROCESSABLE_ENTITY, *args)
class ValidationError(BadRequest):
    """A request was unable to be completed.

    Represented by a 422 HTTP response. Details of what went wrong
    are stored in the *errors* attribute.
    """
    # NOTE(review): structurally identical to InvalidField; kept separate so
    # callers can distinguish the two 422 failure modes by type.

    def __init__(self, errors: Any, *args: Any) -> None:
        """Store the error details."""
        self.errors = errors
        super().__init__(http.HTTPStatus.UNPROCESSABLE_ENTITY, *args)
class GitHubBroken(HTTPException):
    """Exception for 5XX HTTP responses."""
    # Server-side failure; retrying later may succeed.
class GraphQLException(GitHubException):
    """Base exception for the GraphQL v4 API."""

    def __init__(self, message: str, response: Any) -> None:
        # Keep the full GraphQL response for debugging/inspection.
        self.response = response
        super().__init__(message)
class BadGraphQLRequest(GraphQLException):
    """A 4XX HTTP response."""

    def __init__(self, status_code: http.HTTPStatus, response: Any) -> None:
        # NOTE(review): `assert` is stripped under `python -O`; this guards a
        # programming error (wrong class for the status), not external input.
        assert 399 < status_code < 500
        self.status_code = status_code
        super().__init__(response["message"], response)
class GraphQLAuthorizationFailure(BadGraphQLRequest):
    """401 HTTP response to a bad oauth token."""

    def __init__(self, response: Any) -> None:
        # Fixed 401 status; only the response payload varies.
        super().__init__(http.HTTPStatus(401), response)
class QueryError(GraphQLException):
    """An error occurred while attempting to handle a GraphQL v4 query."""

    def __init__(self, response: Any) -> None:
        # Surface the first reported error as the exception message.
        super().__init__(response["errors"][0]["message"], response)
class GraphQLResponseTypeError(GraphQLException):
    """The GraphQL response has an unexpected content type."""

    def __init__(self, content_type: Optional[str], response: Any) -> None:
        # `!r` already quotes strings; the old message wrapped the repr in a
        # second, redundant pair of quotes (e.g. "'None'" for a missing type).
        super().__init__(
            f"Response had an unexpected content-type: {content_type!r}", response
        )
| {
"repo_name": "brettcannon/gidgethub",
"path": "gidgethub/__init__.py",
"copies": "1",
"size": "3996",
"license": "apache-2.0",
"hash": -370685802248629100,
"line_mean": 26.5586206897,
"line_max": 139,
"alpha_frac": 0.6546546547,
"autogenerated": false,
"ratio": 4.048632218844984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203286873544984,
"avg_score": null,
"num_lines": null
} |
""" An asynchronous computation.
"""
import logging
from tensorflow.core.framework import graph_pb2
from google.protobuf.json_format import MessageToJson
from .proto import computation_pb2
from .utils import Path
from .row import CellWithType, as_python_object, as_pandas_object
__all__ = ['Computation']
logger = logging.getLogger('karps')
class Computation(object):
    """ An asynchronous computation.

    This object provides access to the different lifetimes of a computation:
     - original computation graph (with functional attributes)
     - static computation graph (after unrolling of the functions)
     - pinned graph (after optimization, which will be provided to the backend)
     - results and stats (including for spark, the detail of the various plans)
    """

    def __init__(self, session_id_p, computation_id_p,
                 channel, target_fetch_paths, final_unpack, return_mode):
        # The proto of the session id.
        self._session_id_p = session_id_p
        # The proto of the computation id.
        self._computation_id_p = computation_id_p
        # The GRPC channel (an iterator of ComputationStreamResponse messages).
        self._channel = channel
        # The paths to fetch (list of strings).
        self._target_fetch_paths = target_fetch_paths
        # Bool, indicates whether the head of the list should be returned.
        self._final_unpack = final_unpack
        # The return mode for deserializing the data: 'proto', 'python'
        # or 'pandas'.
        self._return_mode = return_mode
        # All the results that we have received so far.
        # Type: Path -> ComputationResult (merged incrementally).
        self._results = {}
        # The compilation phases.
        self._compilation_phases = []
        # The extra phases that we can also access.
        # They are all the phases related to Spark.
        # Type: string -> graph_pb2
        self._extra_phases = {}
        # Quick lookup to access the nodes of the phases for in-place updates.
        # Type: (string, string) -> NodeDef
        # key is (phase, node name)
        self._extra_by_name = {}
        # The final profiling trace. This is expected once at the end of
        # the computation.
        self._profiling_trace = None

    def values(self):
        """ Returns the fetches (or the unique fetch if there is only one).

        Blocks until the fetches are available or until an error is raised.
        """
        # Poll the channel until every target path has a final result.
        while not self._values():
            self._progress()
        return self._values()

    def compiler_step(self, step_name):
        """ Returns the given compiler phase.
        """
        # Compilation phases arrive as a batch; block until they do.
        while not self._compilation_phases:
            self._progress()
        # Case-insensitive lookup across regular and Spark ("extra") phases.
        for comp_phase in self._compilation_phases:
            if comp_phase.phase_name.lower() == step_name.lower():
                return comp_phase
        for (spark_phase_name, spark_phase_graph) in self._extra_phases.items():
            if spark_phase_name.lower() == step_name.lower():
                return spark_phase_graph
        step_names = [comp_phase.phase_name for comp_phase in self._compilation_phases]
        extra_step_names = list(self._extra_phases.keys())
        logger.warning("Could not find compiler step %s. Available steps are %s and %s",
                       step_name, step_names, extra_step_names)
        return None

    def profiling_trace(self):
        """ Profiling traces to understand the running time of this computation.

        :return: an object that can be understood by standard profiler.
        """
        # The trace arrives once, at the end of the computation.
        while self._profiling_trace is None:
            self._progress()
        return self._profiling_trace

    def dump_profile(self, filename=None):
        """Writes the profile in a file (or returns it as a string if no file
        is provided).

        The profile can be read in Google Chrome, using the 'about://tracing'
        inspector.
        """
        trace_data = self.profiling_trace()
        # Assemble the Chrome trace-event JSON document by hand.
        ss = ",\n".join([MessageToJson(x) for x in trace_data.chrome_events])
        ss = """{
"traceEvents": [""" + ss + "]}"
        if filename is not None:
            with open(filename, mode="w") as f:
                f.write(ss)
        else:
            return ss

    def _values(self):
        # Returns the values if they are all done, None otherwise.
        res = []
        for p in self._target_fetch_paths:
            if p not in self._results:
                return None
            cr = self._results[p]
            # Still in-flight: not ready yet.
            if cr.status in (computation_pb2.SCHEDULED, computation_pb2.RUNNING, computation_pb2.UNUSED):
                return None
            assert cr.status in [computation_pb2.FINISHED_SUCCESS, computation_pb2.FINISHED_FAILURE], (cr.status, cr)
            if cr.final_error:
                raise Exception(cr.final_error)
            elif cr.final_result:
                res.append(CellWithType(cr.final_result))
            else:
                # Finished, but the payload has not been merged in yet.
                return None
        if self._final_unpack:
            res = res[0]
        # Deserialize according to the requested return mode.
        if self._return_mode == 'proto':
            return res
        if self._return_mode == 'python':
            return as_python_object(res)
        if self._return_mode == 'pandas':
            return as_pandas_object(res)

    def _progress(self):
        """ Attempts to make progress by blocking on the connection until an
        update is received.
        """
        logger.debug("Calling _progress")
        # Read one more value from the channel.
        # type: ComputationStreamResponse
        csr = next(self._channel)
        logger.debug("channel: got value %s: %s", type(csr), str(csr))
        if csr.HasField("start_graph"):
            logger.debug("channel: received graph (discarding)")
        if csr.HasField("pinned_graph"):
            logger.debug("channel: received pinned graph (discarding)")
        if csr.HasField("compilation_result"):
            logger.debug("channel: received compilation results")
            # Did we receive some steps?
            if csr.compilation_result.compilation_graph:
                logger.debug("channel: received compilation steps")
                self._compilation_phases = csr.compilation_result.compilation_graph
        if csr.HasField("computation_trace"):
            logger.debug("channel: received profiling results")
            self._profiling_trace = csr.computation_trace
        if csr.results:
            # Type: ComputationResult
            for res in csr.results.results:
                assert res.local_path, (res, csr)
                path = Path(res.local_path)
                logger.debug("channel: received result for %s: %s" % (path, res))
                # Merge partial updates into the stored result for this path.
                if path not in self._results:
                    self._results[path] = computation_pb2.ComputationResult()
                current = self._results[path]
                current.MergeFrom(res)
                if res.spark_stats:
                    # Fold the Spark plan/RDD details into the extra phases.
                    for sti in res.spark_stats.parsed:
                        self._progress_extra_phase("parsed", sti.proto)
                    for sti in res.spark_stats.physical:
                        self._progress_extra_phase("physical", sti.proto)
                    for rddi in res.spark_stats.rdd_info:
                        self._progress_extra_phase("rdd", rddi.proto)

    def _progress_extra_phase(self, phase_name, node_def):
        # Merge a single NodeDef into the named extra (Spark) phase graph.
        if not node_def.name:
            return
        # Find the graph
        if phase_name not in self._extra_phases:
            self._extra_phases[phase_name] = graph_pb2.GraphDef()
        graph = self._extra_phases[phase_name]
        # Find the node
        key = (phase_name, node_def.name)
        if key not in self._extra_by_name:
            self._extra_by_name[key] = node_def
            graph.node.extend([node_def])
            return  # No update necessary
        node_def_orig = self._extra_by_name[key]
        node_def_orig.MergeFrom(node_def)
| {
"repo_name": "tjhunter/karps",
"path": "python/karps/computation.py",
"copies": "1",
"size": "6966",
"license": "apache-2.0",
"hash": 2213053648288413400,
"line_mean": 36.6540540541,
"line_max": 111,
"alpha_frac": 0.6637955785,
"autogenerated": false,
"ratio": 3.7941176470588234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4957913225558823,
"avg_score": null,
"num_lines": null
} |
'''An asynchronous multi-process `HTTP proxy server`_. It works for both
``http`` and ``https`` (tunneled) requests.
Managing Headers
=====================
It is possible to add middleware to manipulate the original request headers.
If the header middleware is
an empty list, the proxy passes requests and responses unmodified.
This is an implementation for a forward-proxy which can be used
to retrieve any type of source from the Internet.
To run the server::
python manage.py
An header middleware is a callable which receives the wsgi *environ* and
the list of request *headers*. By default the example uses:
.. autofunction:: x_forwarded_for
To run with different headers middleware create a new script and do::
from proxyserver.manage import server
if __name__ == '__main__':
server(headers_middleware=[...]).start()
Implemenation
===========================
.. autoclass:: ProxyServerWsgiHandler
:members:
:member-order:
.. _`HTTP proxy server`: http://en.wikipedia.org/wiki/Proxy_server
'''
import io
import logging
from functools import partial
import asyncio
import pulsar
from pulsar import HttpException, task, async, add_errback, as_coroutine
from pulsar.apps import wsgi, http
from pulsar.utils.httpurl import Headers
from pulsar.utils.log import LocalMixin, local_property
SERVER_SOFTWARE = 'Pulsar-proxy-server/%s' % pulsar.version
ENVIRON_HEADERS = ('content-type', 'content-length')
USER_AGENT = SERVER_SOFTWARE
logger = logging.getLogger('pulsar.proxyserver')
def x_forwarded_for(environ, headers):
    '''Append the client address as an *x-forwarded-for* header.'''
    remote_addr = environ['REMOTE_ADDR']
    headers.add_header('x-forwarded-for', remote_addr)
class user_agent:
    '''Header middleware that overrides the user-agent header with a
    fixed value supplied at construction time.'''

    def __init__(self, agent):
        self.agent = agent

    def __call__(self, environ, headers):
        headers['user-agent'] = self.agent
class ProxyServerWsgiHandler(LocalMixin):
    '''WSGI middleware for an asynchronous proxy server.

    To perform processing on headers you can pass a list of
    ``headers_middleware``.
    An headers middleware is a callable which accepts two parameters, the wsgi
    *environ* dictionary and the *headers* container.
    '''
    def __init__(self, headers_middleware=None):
        self.headers_middleware = headers_middleware or []

    @local_property
    def http_client(self):
        '''The :class:`.HttpClient` used by this proxy middleware for
        accessing upstream resources'''
        # created lazily, once per actor (local_property caches the value)
        return http.HttpClient(decompress=False, store_cookies=False)

    @task
    def __call__(self, environ, start_response):
        # WSGI entry point: forward the incoming request upstream.
        uri = environ['RAW_URI']
        logger.debug('new request for %r' % uri)
        if not uri or uri.startswith('/'):  # No proper uri, raise 404
            raise HttpException(status=404)
        if environ.get('HTTP_EXPECT') != '100-continue':
            # read the full request body before forwarding
            stream = environ.get('wsgi.input') or io.BytesIO()
            data = yield from as_coroutine(stream.read())
        else:
            # 100-continue: defer the body until upstream acknowledges
            data = None
        request_headers = self.request_headers(environ)
        method = environ['REQUEST_METHOD']
        # CONNECT opens a blind tunnel (https); anything else is proxied
        # as a plain request/response exchange.
        if method == 'CONNECT':
            response = ProxyTunnel(environ, start_response)
        else:
            response = ProxyResponse(environ, start_response)
        request = self.http_client.request(method, uri, data=data,
                                           headers=request_headers,
                                           version=environ['SERVER_PROTOCOL'],
                                           pre_request=response.pre_request)
        # route upstream failures into the wsgi response
        add_errback(async(request), response.error)
        return response

    def request_headers(self, environ):
        '''Fill request headers from the environ dictionary and
        modify them via the list of :attr:`headers_middleware`.

        The returned headers will be sent to the target uri.
        '''
        headers = Headers(kind='client')
        # HTTP_FOO_BAR environ keys become foo-bar headers
        for k in environ:
            if k.startswith('HTTP_'):
                head = k[5:].replace('_', '-')
                headers[head] = environ[k]
        # content-type/content-length live outside the HTTP_ namespace
        for head in ENVIRON_HEADERS:
            k = head.replace('-', '_').upper()
            v = environ.get(k)
            if v:
                headers[head] = v
        for middleware in self.headers_middleware:
            middleware(environ, headers)
        return headers
############################################################################
# RESPONSE OBJECTS
class ProxyResponse(object):
'''Asynchronous wsgi response for http requests
'''
_started = False
_headers = None
_done = False
def __init__(self, environ, start_response):
self.environ = environ
self.start_response = start_response
self.queue = asyncio.Queue()
def __iter__(self):
while True:
if self._done:
try:
yield self.queue.get_nowait()
except asyncio.QueueEmpty:
break
else:
yield async(self.queue.get())
def pre_request(self, response, exc=None):
self._started = True
response.bind_event('data_processed', self.data_processed)
def error(self, exc):
if not self._started:
request = wsgi.WsgiRequest(self.environ)
content_type = request.content_types.best_match(
('text/html', 'text/plain'))
uri = self.environ['RAW_URI']
msg = 'Could not find %s' % uri
logger.info(msg=msg)
if content_type == 'text/html':
html = wsgi.HtmlDocument(title=msg)
html.body.append('<h1>%s</h1>' % msg)
data = html.render()
resp = wsgi.WsgiResponse(504, data, content_type='text/html')
elif content_type == 'text/plain':
resp = wsgi.WsgiResponse(504, msg, content_type='text/html')
else:
resp = wsgi.WsgiResponse(504, '')
self.start_response(resp.status, resp.get_headers())
self._done = True
self.queue.put_nowait(resp.content[0])
@task
def data_processed(self, response, exc=None, **kw):
'''Receive data from the requesting HTTP client.'''
status = response.get_status()
if status == '100 Continue':
stream = self.environ.get('wsgi.input') or io.BytesIO()
body = yield from stream.read()
response.transport.write(body)
if response.parser.is_headers_complete():
if self._headers is None:
headers = self.remove_hop_headers(response.headers)
self._headers = Headers(headers, kind='server')
# start the response
self.start_response(status, list(self._headers))
body = response.recv_body()
if response.parser.is_message_complete():
self._done = True
self.queue.put_nowait(body)
def remove_hop_headers(self, headers):
for header, value in headers:
if header.lower() not in wsgi.HOP_HEADERS:
yield header, value
class ProxyTunnel(ProxyResponse):
    '''Asynchronous wsgi response for https requests
    '''
    def pre_request(self, response, exc=None):
        '''Start the tunnel.

        This is a callback fired once a connection with upstream server is
        established.

        Write back to the client the 200 Connection established message.

        After this the downstream connection consumer will upgrade to the
        DownStreamTunnel.
        '''
        # Upgrade downstream protocol consumer
        # set the request to None so that start_request is not called
        assert response._request.method == 'CONNECT'
        self._started = True
        response._request = None
        upstream = response._connection
        dostream = self.environ['pulsar.connection']
        # cross-wire the two connections so bytes flow blindly both ways
        dostream.upgrade(partial(StreamTunnel, upstream))
        upstream.upgrade(partial(StreamTunnel, dostream))
        response.finished()
        self.start_response('200 Connection established', [])
        # send empty byte so that headers are sent
        self.queue.put_nowait(b'')
        self._done = True
        return response
class StreamTunnel(pulsar.ProtocolConsumer):
    ''':class:`.ProtocolConsumer` handling encrypted messages from
    downstream client and upstream server.

    This consumer is created as an upgrade of the standard Http protocol
    consumer.

    .. attribute:: tunnel

        Connection to the downstream client or upstream server.
    '''
    # Unused by the tunnel itself; kept for protocol-consumer compatibility.
    headers = None
    status_code = None

    def __init__(self, tunnel, loop=None):
        super(StreamTunnel, self).__init__(loop)
        self.tunnel = tunnel

    def connection_made(self, connection):
        # tear down the peer side when this side drops
        connection.bind_event('connection_lost', self._close_tunnel)

    def data_received(self, data):
        # blindly relay bytes to the other end of the tunnel
        try:
            return self.tunnel.write(data)
        except Exception:
            # ignore write failures on an already-closed tunnel
            if not self.tunnel.closed:
                raise

    def _close_tunnel(self, arg, exc=None):
        if not self.tunnel.closed:
            self._loop.call_soon(self.tunnel.close)
def server(name='proxy-server', headers_middleware=None,
           server_software=None, **kwargs):
    '''Create a WSGI Proxy Server.

    :param name: server name.
    :param headers_middleware: optional list of header middleware
        callables; defaults to ``[x_forwarded_for]``.
    :param server_software: optional server software string; defaults to
        ``SERVER_SOFTWARE``.
    '''
    middleware = headers_middleware
    if middleware is None:
        middleware = [x_forwarded_for]
    handler = ProxyServerWsgiHandler(middleware)
    kwargs['server_software'] = server_software or SERVER_SOFTWARE
    return wsgi.WSGIServer(handler, name=name, **kwargs)
if __name__ == '__main__':
server().start()
| {
"repo_name": "tempbottle/pulsar",
"path": "examples/proxyserver/manage.py",
"copies": "5",
"size": "9763",
"license": "bsd-3-clause",
"hash": -752189318456567900,
"line_mean": 33.4982332155,
"line_max": 78,
"alpha_frac": 0.6099559562,
"autogenerated": false,
"ratio": 4.350713012477718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7460668968677718,
"avg_score": null,
"num_lines": null
} |
"""An asynchronous cyclus server that provides a JSON API over websockets.
The webserver has a number of 'events' that it may send or receive. These
in turn affect how a cyclus simulation runs.
The server, which operates both asynchronously and in parallel, has five
top-level tasks which it manages:
* The cyclus simulation object, run on a separate thread,
* A action consumer, which executes actions that have been queued either
by the cyclus simulation or the user,
* A websockets server that sends and receives JSON-formatted events from
the client.
* A heartbeat that sends special events every so often.
* A monitor for presenting data about certain other future actions.
For purposes of this document, the following terminology is used:
**event:** A JSON object / dictionary that contains behaviour instructions.
**message:** The string form of an event.
**action:** A delayed asynchronous coroutine function that carries out the
behaviour specified in a corresponding event. These do the actual work of
the event system.
**repeating action:** An action coroutine function (or event name) or a list
of the coroutine function and arguments that is added to the action queue
each time step of the simulation. This enables pausing each time step,
streaming table data, etc.
**task:** A future object that results from calling an action. See
asyncio for more details.
Events
======
Events are JSON-formatted strings that represent JSON-objects (dicts) at
their top-most level. All events must have an "event" key whose value is
the string name that distinguishes the event from all other kinds of events.
Often times, events may have parameters that they send/recieve. These live
in the "params" key as a JSON object / dict.
Events may be conceptually divided into server-sent, client-sent events, and
bidirectional events which are sent by either the client or the server.
Server Events
-------------
Server-sent events are those that the server sends to the client. These are
often responses to requests for data about the state of the simulation.
They typically contain a "data" key which holds data about the simulation
state. They may also have a "success" key, whose value is true/false, that
specifies whether the data was able to be computed.
**heartbeat:** A simple event the lets the client know that the server is
still alive. The data value is the approximate time of the next heartbeat
in seconds::
{"event": "heartbeat", "data": val}
**loaded:** A simple message that says that a simulation has been loaded.
{"event": "loaded",
"params": {"status": "ok"},
"data": null
}
**registry:** The in-memory backend registy value in its current form::
{"event": "registry",
"params": null,
"data": ["table0", "table1", ...]
}
**table_names:** The current file system backend table names::
{"event": "table_names",
"params": null,
"data": ["table0", "table1", ...]
}
Client Events
-------------
Client events are often requests originating from users. They may either
express a request for behaviour (pause the simulation, restart, etc.) or
a request for data. These events may or may not have additional parameters,
depending on the type of request.
**deregister_tables:** Remove table names from the in-memory backend registry.
A registry event from the server will follow the completion of this event::
{"event": "deregister_tables",
"params": {"tables": ["table0", "table1", ...]}
}
**load:** Loads the input file in the simulation and starts running the simulation::
{"event": "load"}
**pause:** Pauses the simulation until it is unpaused::
{"event": "pause"}
**register_tables:** Add table names to the in-memory backend registry.
A registry event from the server will follow the completion of this event::
{"event": "register_tables",
"params": {"tables": ["table0", "table1", ...]}
}
**registry_request:** A simple request for the in-memory backend registry::
{"event": "registry_request"}
**shutdown:** A request to shut down the server::
{"event": "shutdown",
"params": {"when": "empty" or "now"}
}
**table_names_request:** A simple request for the table names present in the
file system backend::
{"event": "table_names_request"}
**unpause:** Unpauses the simulation by canceling the pause task::
{"event": "unpause"}
Bidirectional Events
--------------------
These are events that may logically originate from either the client or the
server. Certian keys in the event may or not be present depending on the
sender, but the event name stays the same.
**agent_annotations:** This event requests and returns the agent annotations
for a given agent spec. If the data field is null, this is a request to the
server for annotation information. If the data field is a dict, it represents
the annotations for the spec::
{"event": "agent_annotations",
"params": {"spec": "<path>:<lib>:<name>"},
"data": null or object
}
**echo:** Echos back a single string parameter. When requesting an echo,
the data key need not be present::
    {"event": "echo",
"params": {"s": value},
"data": value
}
**sleep:** The requester instructs the reciever to sleep for n seconds::
{"event": "sleep", "params": {"n": value}}
**table_data:** Data about a table with the conditions and other parameters
applied. If the client sends this event without the "data" key, the server
will respond with the requested table::
{"event": "table_data",
"params": {"table": "<name of table>",
"conds": ["<list of condition lists, if any>"],
"orient": "<orientation of JSON, see Pandas>"},
"data": {"<keys>": "<values subject to orientation>"}
}
Command Line Interface
======================
You may launch the cyclus server by running::
$ python -m cyclus.server input.xml
Most of the arguments are relatively self-explanatory. However, the CLI here
also allows you to load initial and repeating actions. The syntax for this
is an event name followed by parameter tokens (which must contain an equals
sign)::
$ python -m cyclus.server input.xml --repeating-actions sleep n=1
You may load many actions by repeating the name-params pattern:
$ python -m cyclus.server input.xml --repeating-actions \
sleep n=1 \
table_data table="TimeSeriesPower"
Note that everything right of an equals sign in a parameter token is passed
to Python's eval() builtin function. It therefore must be valid Python.
For string values, make sure that you properly escape quotation marks as
per the syntax of your shell language.
"""
from __future__ import print_function, unicode_literals

import errno
import json
import logging
import socket
import sys
from argparse import ArgumentParser, Action

import cyclus.events
from cyclus.events import EVENT_ACTIONS, MONITOR_ACTIONS
from cyclus.simstate import SimState
from cyclus.system import asyncio, concurrent_futures, websockets
async def run_sim(state, loop, executor):
    """Run the cyclus simulation on ``executor`` (normally another thread).

    The resulting future is recorded under the ``'run'`` key of
    ``state.tasks`` so other coroutines can monitor or cancel it; this
    coroutine then waits for the simulation to finish.
    """
    task = loop.run_in_executor(executor, state.run)
    state.tasks['run'] = task
    await asyncio.wait([task])
async def action_consumer(state):
    """Drain the action queue, running queued actions concurrently.

    Every action currently on ``state.action_queue`` is scheduled as a
    task; the whole batch is then awaited before sleeping for
    ``state.frequency`` seconds and polling the queue again.
    """
    while True:
        batch = []
        while not state.action_queue.empty():
            action = state.action_queue.get()
            batch.append(asyncio.ensure_future(action()))
        if batch:
            await asyncio.wait(batch)
        await asyncio.sleep(state.frequency)
async def action_monitor(state):
    """Await queued monitoring actions in the order they were received.

    Monitoring actions (status reports, sending signals, cancelling other
    tasks) are consumed strictly one at a time; between polls of the
    queue the coroutine sleeps at least 0.05 seconds.
    """
    while True:
        while not state.monitor_queue.empty():
            next_action = state.monitor_queue.get()
            await next_action()
        await asyncio.sleep(max(0.05, state.frequency))
async def get_send_data():
    """Asynchronously grab the next outbound message from the send queue."""
    return await cyclus.events.STATE.send_queue.get()
async def queue_message_action(message):
    """Parse a JSON event ``message`` and queue its corresponding action.

    The event name is looked up first in ``EVENT_ACTIONS`` and then in
    ``MONITOR_ACTIONS``; the instantiated action coroutine is placed on
    the matching queue of the global simulation state.

    :param str message: JSON-encoded event with an ``"event"`` key and an
        optional ``"params"`` object.
    :raises KeyError: if the event name has no registered action.
    """
    state = cyclus.events.STATE
    event = json.loads(message)
    params = event.get("params", {})
    kind = event["event"]
    if kind in EVENT_ACTIONS:
        action = EVENT_ACTIONS[kind]
        state.action_queue.put(action(state, **params))
    elif kind in MONITOR_ACTIONS:
        action = MONITOR_ACTIONS[kind]
        state.monitor_queue.put(action(state, **params))
    else:
        # The implicit string concatenation here previously dropped the
        # spaces between the parts, producing an unreadable message.
        raise KeyError(kind + " action could not be found in either "
                       "EVENT_ACTIONS or MONITOR_ACTIONS.")
async def websocket_handler(websocket, path):
    """Sends and receives data via a websocket.

    Waits simultaneously on an incoming client message and on the next
    outbound message from the send queue, services whichever completes
    first, and cancels the still-pending task.
    """
    while True:
        recv_task = asyncio.ensure_future(websocket.recv())
        send_task = asyncio.ensure_future(get_send_data())
        # Race the two tasks; only one of them is acted upon per cycle.
        done, pending = await asyncio.wait([recv_task, send_task],
                                           return_when=asyncio.FIRST_COMPLETED)
        # handle incoming
        if recv_task in done:
            message = recv_task.result()
            await queue_message_action(message)
        else:
            recv_task.cancel()
        # handle sending of data
        if send_task in done:
            message = send_task.result()
            await websocket.send(message)
        else:
            send_task.cancel()
async def heartbeat(state):
    """Periodically push a heartbeat event onto the send queue.

    The heartbeat lets the client detect a disconnected server: each
    message carries the approximate time (in seconds) until the next
    heartbeat. The period is re-read every cycle so that changes to
    ``state.heartbeat_frequency`` take effect on the fly.
    """
    template = '{{"event": "heartbeat", "data": {f}}}'
    freq = state.heartbeat_frequency
    message = template.format(f=freq)
    while True:
        if freq != state.heartbeat_frequency:
            freq = state.heartbeat_frequency
            message = template.format(f=freq)
        await state.send_queue.put(message)
        await asyncio.sleep(freq)
class EventCLIAction(Action):
    """Argparse action that parses action specs from the command line.

    A token without an ``=`` starts a new action spec; a token of the
    form ``name=pyexpr`` attaches a parameter to the most recent action.
    The text right of ``=`` is passed to ``eval()`` (documented CLI
    behavior), so only trusted input should ever be supplied.
    """
    def __init__(self, option_strings, dest, nargs='+', **kwargs):
        super().__init__(option_strings, dest, nargs=nargs, **kwargs)

    def __call__(self, parser, ns, values, option_string=None):
        specs = []
        for tok in values:
            if '=' not in tok:
                # must have new action name
                specs.append((tok, {}))
            else:
                if not specs:
                    # Previously this crashed with an IndexError; give a
                    # clear diagnostic instead.
                    parser.error('parameter token %r given before any '
                                 'action name' % tok)
                params = specs[-1][1]
                name, _, s = tok.partition('=')
                ctx = {}  # isolate the evaluation context
                val = eval(s, ctx, ctx)
                params[name] = val
        setattr(ns, self.dest, specs)
def make_parser():
    """Build and return the argument parser for the cyclus server CLI."""
    parser = ArgumentParser("cyclus", description="Cyclus Server CLI")
    parser.add_argument('-o', '--output-path', dest='output_path',
                        default=None, help='output path')
    parser.add_argument('--debug', action='store_true', default=False,
                        dest='debug', help="runs the server in debug mode.")
    parser.add_argument('--host', dest='host', default='localhost',
                        help='hostname to run the server on')
    parser.add_argument('-p', '--port', dest='port', type=int, default=4242,
                        help='port to run the server on')
    parser.add_argument('-n', '--nthreads', type=int, dest='nthreads',
                        default=16,
                        help='Maximum number of thread workers to run with.')
    parser.add_argument('-r', '--repeating-actions', action=EventCLIAction,
                        dest='repeating_actions', default=(),
                        help='list of repeating actions')
    parser.add_argument('-i', '--initial-actions', action=EventCLIAction,
                        dest='initial_actions', default=(),
                        help='list of initial actions to queue')
    parser.add_argument('input_file', nargs='?', default='<no-input-file>',
                        help='path to input file')
    return parser
def _start_debug(loop):
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('websockets.server')
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
loop.set_debug(True)
def _find_open_port(host, port, debug=False):
found = False
while not found:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((host, port))
except socket.error as e:
if debug:
msg = '[cyclus-server] port {} not available, trying port {}'
print(msg.format(port, port+1), file=sys.stderr)
if e.errno == 98:
port += 1
continue
else:
raise
finally:
s.close()
found = True
return port
def main(args=None):
    """Main cyclus server entry point.

    Parses CLI arguments, builds the global simulation state, queues the
    initial and repeating actions, locates an open port, and then runs
    the simulation, action consumers, heartbeat, and websocket server
    together on the event loop until completion.
    """
    p = make_parser()
    ns = p.parse_args(args=args)
    cyclus.events.STATE = state = SimState(input_file=ns.input_file,
                                           output_path=ns.output_path,
                                           memory_backend=True,
                                           debug=ns.debug)
    # load initial and repeating actions
    for kind, params in ns.initial_actions:
        if kind in EVENT_ACTIONS:
            action = EVENT_ACTIONS[kind]
        else:
            action = MONITOR_ACTIONS[kind]
        # place all initial actions in action queue, even if it is a monitor
        # action. this enables shutdown to happen after all actions when
        # issued from the command line.
        state.action_queue.put(action(state, **params))
    state.repeating_actions.extend(ns.repeating_actions)
    # start up tasks
    executor = concurrent_futures.ThreadPoolExecutor(max_workers=ns.nthreads)
    state.executor = executor
    loop = state.loop = asyncio.get_event_loop()
    if ns.debug:
        _start_debug(loop)
    # fall forward to the next available port if the requested one is bound
    open_port = _find_open_port(ns.host, ns.port, debug=ns.debug)
    if open_port != ns.port:
        msg = "port {} already bound, next available port is {}"
        print(msg.format(ns.port, open_port), file=sys.stderr)
    ns.port = open_port
    if ns.debug:
        print("initializing websockets at ws://{}:{}".format(ns.host, ns.port))
    server = websockets.serve(websocket_handler, ns.host, ns.port)
    print("serving cyclus at http://{}:{}".format(ns.host, ns.port))
    # run the loop!
    try:
        loop.run_until_complete(asyncio.gather(
            asyncio.ensure_future(run_sim(state, loop, executor)),
            asyncio.ensure_future(action_consumer(state)),
            asyncio.ensure_future(action_monitor(state)),
            asyncio.ensure_future(heartbeat(state)),
            asyncio.ensure_future(server),
        ))
    finally:
        if not loop.is_closed():
            loop.close()
if __name__ == '__main__':
main()
| {
"repo_name": "Baaaaam/cyclus",
"path": "cyclus/server.py",
"copies": "6",
"size": "15284",
"license": "bsd-3-clause",
"hash": -1981756631929826600,
"line_mean": 34.627039627,
"line_max": 98,
"alpha_frac": 0.6474744831,
"autogenerated": false,
"ratio": 4.032717678100264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001275534800833032,
"num_lines": 429
} |
'''A native ElasticSearch implementation for dossier.store.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
'''
from __future__ import absolute_import, division, print_function
import base64
from collections import OrderedDict, Mapping, defaultdict
import logging
import regex as re
import uuid
import cbor
from dossier.fc import FeatureCollection as FC
import yakonfig
from elasticsearch import Elasticsearch, NotFoundError, TransportError
from elasticsearch.helpers import bulk, scan
logger = logging.getLogger(__name__)
class ElasticStore(object):
'''A feature collection store on ElasticSearch.
Feature collections are maps from feature names to features.
The representation of each feature is unspecified by this
interface.
This class exposes a similar interface to the regular ``Store``
class, with a few additions:
1. Canopy scans are implemented natively with ElasticSearch,
so they are provided as methods here.
2. On all retrieval methods, the caller can pass a list of
feature names (or feature name wildcards) to retrieve.
If your FCs have lots of features, this is useful when
you only need to retrieve a small fraction of them.
.. automethod:: __init__
.. automethod:: configured
**CRUD operations**
.. automethod:: get
.. automethod:: get_many
.. automethod:: put
.. automethod:: delete
.. automethod:: delete_all
.. automethod:: delete_index
**Keyword scanning**
.. automethod:: keyword_scan
.. automethod:: keyword_scan_ids
**Scanning ids in lexicographic order**
Note that these operations may be inefficient because of
how ElasticSearch handles sorting.
.. automethod:: scan
.. automethod:: scan_ids
.. automethod:: scan_prefix
.. automethod:: scan_prefix_ids
**Low-level**
.. automethod:: sync
.. automethod:: index_scan_ids
.. automethod:: index_names
'''
config_name = 'dossier.store'
@classmethod
def configured(cls):
'''Create a new instance from the global configuration.
In order to use this, you must make sure that
:class:`ElasticStore` has been configured by :mod:`yakonfig`,
usually by passing the class to ``yakonfig.parse_args``.
'''
return cls(**yakonfig.get_global_config('dossier.store'))
    def __init__(self, hosts=None, namespace=None, type='fc',
                 feature_indexes=None, shards=10, replicas=0,
                 fulltext_indexes=None):
        '''Create a new store or connect to an existing one.
        :param hosts:
            Passed directly to ``elasticsearch.Elasticsearch``
            constructor. Required.
        :param str namespace:
            Used as the ES index name, prefixed by ``fcs_``. Required.
        :param str type:
            The ES type to use. If this is set to ``None``, then a random
            unique string is used.
        :param [str] feature_indexes:
            A list of names of features to index.
        :param int shards:
            The number of shards to use for this index. This only has an
            effect if the ES index didn't previous exist.
        :param int replicas:
            The number of replicas to use for this index. This only has
            an effect if the ES index didn't previous exist.
        :param fulltext_indexes:
            Optional spec of features to index for fulltext search.
        :rtype: :class:`ElasticStore`
        '''
        if hosts is None:
            raise yakonfig.ProgrammerError(
                'ElasticStore needs at least one host specified.')
        if namespace is None:
            raise yakonfig.ProgrammerError(
                'ElasticStore needs a namespace defined.')
        if type is None:
            # Random doc type: isolates this store from others sharing
            # the same index.
            type = unicode(uuid.uuid4())
        self.conn = Elasticsearch(hosts=hosts, timeout=60, request_timeout=60)
        self.index = 'fcs_%s' % namespace
        self.type = type
        self.shards = shards
        self.replicas = replicas
        self.indexes = OrderedDict()
        self.fulltext_indexes = OrderedDict()
        self.indexed_features = set()
        self.fulltext_indexed_features = set()
        self._normalize_feature_indexes(feature_indexes)
        self._normalize_fulltext_feature_indexes(fulltext_indexes)
        if not self.conn.indices.exists(index=self.index):
            # This can race, but that should be OK.
            # Worst case, we initialize with the same settings more than
            # once.
            self._create_index()
        mapping = self.conn.indices.get_mapping(
            index=self.index, doc_type=self.type)
        if len(mapping) == 0:
            self._create_mappings()
def get(self, content_id, feature_names=None):
'''Retrieve a feature collection.
If a feature collection with the given id does not
exist, then ``None`` is returned.
:param str content_id: Content identifier.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
:rtype: :class:`dossier.fc.FeatureCollection` or ``None``
'''
try:
resp = self.conn.get(index=self.index, doc_type=self.type,
id=eid(content_id),
_source=self._source(feature_names))
return self.fc_from_dict(resp['_source']['fc'])
except NotFoundError:
return None
except:
raise
    def get_many(self, content_ids, feature_names=None):
        '''Returns an iterable of feature collections.
        This efficiently retrieves multiple FCs corresponding to the
        list of ids given. Tuples of identifier and feature collection
        are yielded. If the feature collection for a given id does not
        exist, then ``None`` is returned as the second element of the
        tuple.
        :param [str] content_ids: List of content ids.
        :param [str] feature_names:
            A list of feature names to retrieve. When ``None``, all
            features are retrieved. Wildcards are allowed.
        :rtype: Iterable of ``(content_id, FC)``
        '''
        try:
            resp = self.conn.mget(index=self.index, doc_type=self.type,
                                  _source=self._source(feature_names),
                                  body={'ids': map(eid, content_ids)})
        except TransportError:
            # NOTE(review): a transport failure silently yields no results
            # rather than raising, so callers cannot distinguish it from an
            # empty id list. Presumably deliberate best-effort behavior --
            # confirm before relying on it.
            return
        for doc in resp['docs']:
            fc = None
            if doc['found']:
                fc = self.fc_from_dict(doc['_source']['fc'])
            yield did(doc['_id']), fc
    def put(self, items, indexes=True):
        '''Adds feature collections to the store.
        This efficiently adds multiple FCs to the store. The iterable
        of ``items`` given should yield tuples of ``(content_id, FC)``.
        :param items: Iterable of ``(content_id, FC)``.
        :param bool indexes:
            When ``True`` (the default), index fields are also written
            for features named in the configured indexes.
        '''
        actions = []
        for cid, fc in items:
            # TODO: If we store features in a columnar order, then we
            # could tell ES to index the feature values directly. ---AG
            # (But is problematic because we want to preserve the ability
            # to selectively index FCs. So we'd probably need two distinct
            # doc types.)
            idxs = defaultdict(list)
            if indexes:
                for fname in self.indexed_features:
                    if fname in fc:
                        idxs[fname_to_idx_name(fname)].extend(fc[fname])
                for fname in self.fulltext_indexed_features:
                    if fname not in fc:
                        continue
                    # A bare string feature is stored whole; otherwise the
                    # feature's values are collected into a list.
                    if isinstance(fc[fname], basestring):
                        idxs[fname_to_full_idx_name(fname)] = fc[fname]
                    else:
                        idxs[fname_to_full_idx_name(fname)].extend(fc[fname])
            actions.append({
                '_index': self.index,
                '_type': self.type,
                '_id': eid(cid),
                '_op_type': 'index',
                '_source': dict(idxs, **{
                    'fc': self.fc_to_dict(fc),
                }),
            })
        bulk(self.conn, actions, timeout=60, request_timeout=60)
    def delete(self, content_id):
        '''Deletes the corresponding feature collection.
        If the FC does not exist, then this is a no-op.
        :param str content_id: Content identifier.
        '''
        try:
            self.conn.delete(index=self.index, doc_type=self.type,
                             id=eid(content_id))
        except NotFoundError:
            # Deleting a missing document is deliberately a no-op.
            pass
def delete_all(self):
'''Deletes all feature collections.
This does not destroy the ES index, but instead only
deletes all FCs with the configured document type
(defaults to ``fc``).
'''
try:
self.conn.indices.delete_mapping(
index=self.index, doc_type=self.type)
except TransportError:
logger.warn('type %r in index %r already deleted',
self.index, self.type, exc_info=True)
def delete_index(self):
'''Deletes the underlying ES index.
Only use this if you know what you're doing. This destroys
the entire underlying ES index, which could be shared by
multiple distinct ElasticStore instances.
'''
if self.conn.indices.exists(index=self.index):
self.conn.indices.delete(index=self.index)
    def sync(self):
        '''Forces an index refresh.
        This guarantees that any previous calls to ``put`` will be
        visible in subsequent searches. (NOTE(review): ES ``refresh``
        makes documents searchable; it does not necessarily fsync them
        to disk -- that would be a flush.)
        Generally, this should only be used in test code.
        '''
        self.conn.indices.refresh(index=self.index)
    def scan(self, *key_ranges, **kwargs):
        '''Scan for FCs in the given id ranges.
        :param key_ranges:
            ``key_ranges`` should be a list of pairs of ranges. The first
            value is the lower bound id and the second value is the
            upper bound id. Use ``()`` in either position to leave it
            unbounded. If no ``key_ranges`` are given, then all FCs in
            the store are returned.
        :param [str] feature_names:
            A list of feature names to retrieve. When ``None``, all
            features are retrieved. Wildcards are allowed.
        :rtype: Iterable of ``(content_id, FC)``
        '''
        for hit in self._scan(*key_ranges, **kwargs):
            # Each hit is a raw ES document; unwrap the id and decode the FC.
            yield did(hit['_id']), self.fc_from_dict(hit['_source']['fc'])
    def scan_ids(self, *key_ranges, **kwargs):
        '''Scan for ids only in the given id ranges.
        :param key_ranges:
            ``key_ranges`` should be a list of pairs of ranges. The first
            value is the lower bound id and the second value is the
            upper bound id. Use ``()`` in either position to leave it
            unbounded. If no ``key_ranges`` are given, then all FCs in
            the store are returned.
        :rtype: Iterable of ``content_id``
        '''
        # Force source retrieval off: only document ids are needed, so
        # any caller-supplied ``feature_names`` is intentionally ignored.
        kwargs['feature_names'] = False
        for hit in self._scan(*key_ranges, **kwargs):
            yield did(hit['_id'])
    def scan_prefix(self, prefix, feature_names=None):
        '''Scan for FCs with a given prefix.
        :param str prefix: Identifier prefix.
        :param [str] feature_names:
            A list of feature names to retrieve. When ``None``, all
            features are retrieved. Wildcards are allowed.
        :rtype: Iterable of ``(content_id, FC)``
        '''
        resp = self._scan_prefix(prefix, feature_names=feature_names)
        for hit in resp:
            # Unwrap each raw ES hit into an (id, FC) pair.
            yield did(hit['_id']), self.fc_from_dict(hit['_source']['fc'])
    def scan_prefix_ids(self, prefix):
        '''Scan for ids with a given prefix.
        :param str prefix: Identifier prefix.
        :rtype: Iterable of ``content_id``
        '''
        # Source retrieval is disabled since only ids are needed.
        resp = self._scan_prefix(prefix, feature_names=False)
        for hit in resp:
            yield did(hit['_id'])
    def fulltext_scan(self, query_id=None, query_fc=None, feature_names=None,
                      preserve_order=True, indexes=None):
        '''Fulltext search.
        Yields an iterable of triples (score, identifier, FC)
        corresponding to the fulltext search results for the query FC's
        text features against the configured fulltext indexes.
        Note that, unless ``preserve_order`` is set to True, the
        ``score`` will always be 0.0, and the results will be
        unordered. ``preserve_order`` set to True will cause the
        results to be scored and be ordered by score, but you should
        expect to see a decrease in performance.
        :param str query_id: Optional query id; it is excluded from the
            results and may be used to look up ``query_fc``.
        :param query_fc: Optional query feature collection.
        :param [str] feature_names:
            A list of feature names to retrieve. When ``None``, all
            features are retrieved. Wildcards are allowed.
        :param bool preserve_order: Score and order the results (slower).
        :param indexes: Optional list of fulltext index names to search;
            defaults to all configured fulltext indexes.
        :rtype: Iterable of ``(score, content_id, FC)``
        '''
        it = self._fulltext_scan(query_id, query_fc,
                                 feature_names=feature_names,
                                 preserve_order=preserve_order,
                                 indexes=indexes)
        for hit in it:
            fc = self.fc_from_dict(hit['_source']['fc'])
            yield hit['_score'], did(hit['_id']), fc
    def fulltext_scan_ids(self, query_id=None, query_fc=None,
                          preserve_order=True, indexes=None):
        '''Fulltext search for identifiers.
        Yields an iterable of pairs (score, identifier) corresponding
        to the fulltext search results for the query FC's text features
        against the configured fulltext indexes.
        Note that, unless ``preserve_order`` is set to True, the
        ``score`` will always be 0.0, and the results will be
        unordered. ``preserve_order`` set to True will cause the
        results to be scored and be ordered by score, but you should
        expect to see a decrease in performance.
        :param str query_id: Optional query id; it is excluded from the
            results and may be used to look up ``query_fc``.
        :param query_fc: Optional query feature collection.
        :param bool preserve_order: Score and order the results (slower).
        :param indexes: Optional list of fulltext index names to search;
            defaults to all configured fulltext indexes.
        :rtype: Iterable of ``(score, content_id)``
        '''
        # feature_names=False suppresses source retrieval entirely.
        it = self._fulltext_scan(query_id, query_fc, feature_names=False,
                                 preserve_order=preserve_order,
                                 indexes=indexes)
        for hit in it:
            yield hit['_score'], did(hit['_id'])
    def keyword_scan(self, query_id=None, query_fc=None, feature_names=None):
        '''Keyword scan for feature collections.
        This performs a keyword scan using the query given. A keyword
        scan searches for FCs with terms in each of the query's indexed
        fields.
        At least one of ``query_id`` or ``query_fc`` must be provided.
        If ``query_fc`` is ``None``, then the query is retrieved
        automatically corresponding to ``query_id``.
        :param str query_id: Optional query id.
        :param query_fc: Optional query feature collection.
        :type query_fc: :class:`dossier.fc.FeatureCollection`
        :param [str] feature_names:
            A list of feature names to retrieve. When ``None``, all
            features are retrieved. Wildcards are allowed.
        :rtype: Iterable of ``(content_id, FC)``
        '''
        it = self._keyword_scan(query_id, query_fc,
                                feature_names=feature_names)
        for hit in it:
            # Unwrap each raw ES hit into an (id, FC) pair.
            fc = self.fc_from_dict(hit['_source']['fc'])
            yield did(hit['_id']), fc
    def keyword_scan_ids(self, query_id=None, query_fc=None):
        '''Keyword scan for ids.
        This performs a keyword scan using the query given. A keyword
        scan searches for FCs with terms in each of the query's indexed
        fields.
        At least one of ``query_id`` or ``query_fc`` must be provided.
        If ``query_fc`` is ``None``, then the query is retrieved
        automatically corresponding to ``query_id``.
        :param str query_id: Optional query id.
        :param query_fc: Optional query feature collection.
        :type query_fc: :class:`dossier.fc.FeatureCollection`
        :rtype: Iterable of ``content_id``
        '''
        # feature_names=False suppresses source retrieval entirely.
        it = self._keyword_scan(query_id, query_fc, feature_names=False)
        for hit in it:
            yield did(hit['_id'])
def index_scan_ids(self, fname, val):
'''Low-level keyword index scan for ids.
Retrieves identifiers of FCs that have a feature value
``val`` in the feature named ``fname``. Note that
``fname`` must be indexed.
:param str fname: Feature name.
:param str val: Feature value.
:rtype: Iterable of ``content_id``
'''
disj = []
for fname2 in self.indexes[fname]['feature_names']:
disj.append({'term': {fname_to_idx_name(fname2): val}})
query = {
'constant_score': {
'filter': {'or': disj},
},
}
hits = scan(self.conn, index=self.index, doc_type=self.type, query={
'_source': False,
'query': query,
})
for hit in hits:
yield did(hit['_id'])
def index_names(self):
'''Returns a list of all defined index names.
Note that this only includes boolean based indexes.
:rtype: list of ``unicode``
'''
return map(unicode, self.indexes.iterkeys())
def fulltext_index_names(self):
'''Returns a list of all defined fulltext index names.
:rtype: list of ``unicode``
'''
return map(unicode, self.fulltext_indexes.iterkeys())
    def _fulltext_scan(self, query_id, query_fc, preserve_order=True,
                       feature_names=None, indexes=None):
        '''Run one ES fulltext query per index, yielding raw hits.
        Results already seen (and the query itself, when ``query_id`` is
        given) are excluded from subsequent per-index queries via the
        accumulated ``ids`` filter, so hits are deduplicated.
        '''
        query_fc = self.get_query_fc(query_id, query_fc)
        ids = set([] if query_id is None else [eid(query_id)])
        indexes_to_search = indexes
        if indexes_to_search is None:
            indexes_to_search = self.fulltext_indexes.keys()
        for fname in indexes_to_search:
            if fname not in self.fulltext_indexes:
                raise ValueError(
                    'Trying to scan on non-indexed feature %s' % fname)
            features = self.fulltext_indexes[fname]
            qvals = map(unicode, query_fc.get(fname, {}).keys())
            if len(qvals) == 0:
                # No query text for this index; nothing to search.
                continue
            qmatches = []
            qfields = map(fname_to_full_idx_name, features)
            for qval in qvals:
                # Punctuated query strings are matched as whole phrases;
                # plain strings use best-fields multi-match.
                if re.search('\p{Punct}', qval):
                    match_type = 'phrase'
                else:
                    match_type = 'best_fields'
                qmatches.append({
                    'multi_match': {
                        'type': match_type,
                        'query': qval,
                        'fields': qfields,
                    }
                })
            # Any query value may match; previously seen ids are excluded.
            query = {
                'filtered': {
                    'query': {
                        'bool': {
                            'should': qmatches,
                        },
                    },
                    'filter': {
                        'not': {
                            'ids': {
                                'values': list(ids),
                            },
                        },
                    },
                },
            }
            logger.info('fulltext scanning index: %s, query: %r', fname, qvals)
            hits = scan(
                self.conn, index=self.index, doc_type=self.type,
                preserve_order=preserve_order,
                query={
                    '_source': self._source(feature_names),
                    'query': query,
                })
            for hit in hits:
                # Remember each hit so later index queries skip it.
                ids.add(eid(hit['_id']))
                yield hit
    def _keyword_scan(self, query_id, query_fc, feature_names=None):
        '''Run one ES terms query per keyword index, yielding raw hits.
        Queries are issued in index-definition order and deduplicated
        across indexes (see the discussion below).
        '''
        # Why are we running multiple scans? Why are we deduplicating?
        #
        # It turns out that, in our various systems, it can be important to
        # prioritize the order of results returned in a keyword scan based on
        # the feature index that is being searched. For example, we typically
        # want to start a keyword scan with the results from a search on
        # `NAME`, which we don't want to be mingled with the results from a
        # search on some other feature.
        #
        # The simplest way to guarantee this type of prioritization is to run
        # a query for each index in the order in which they were defined.
        #
        # This has some downsides:
        #
        # 1. We return *all* results for the first index before ever returning
        #    results for the second.
        # 2. Since we're running multiple queries, we could get back results
        #    we've already retrieved in a previous query.
        #
        # We accept (1) for now.
        #
        # To fix (2), we keep track of all ids we've seen and include them
        # as a filter in subsequent queries.
        query_fc = self.get_query_fc(query_id, query_fc)
        ids = set([] if query_id is None else [eid(query_id)])
        for fname in self.indexes:
            term_disj = self._fc_index_disjunction_from_query(query_fc, fname)
            if len(term_disj) == 0:
                # The query has no terms for this index; skip it.
                continue
            # Exclude already-seen ids, then match any of the query terms.
            query = {
                'constant_score': {
                    'filter': {
                        'and': [{
                            'not': {
                                'ids': {
                                    'values': list(ids),
                                },
                            },
                        }, {
                            'or': term_disj,
                        }],
                    },
                },
            }
            logger.info('keyword scanning index: %s', fname)
            hits = scan(
                self.conn, index=self.index, doc_type=self.type,
                query={
                    '_source': self._source(feature_names),
                    'query': query,
                })
            for hit in hits:
                # Remember each hit so later index queries skip it.
                ids.add(eid(hit['_id']))
                yield hit
def _scan(self, *key_ranges, **kwargs):
feature_names = kwargs.get('feature_names')
range_filters = self._range_filters(*key_ranges)
return scan(self.conn, index=self.index, doc_type=self.type,
_source=self._source(feature_names),
preserve_order=True,
query={
# Sorting by `_id` seems to fail spuriously and
# I have no idea why. ---AG
'sort': {'_uid': {'order': 'asc'}},
'query': {
'constant_score': {
'filter': {
'and': range_filters,
},
},
},
})
def _scan_prefix(self, prefix, feature_names=None):
query = {
'constant_score': {
'filter': {
'and': [{
'prefix': {
'_id': eid(prefix),
},
}],
},
},
}
return scan(self.conn, index=self.index, doc_type=self.type,
_source=self._source(feature_names),
preserve_order=True,
query={
# Sorting by `_id` seems to fail spuriously and
# I have no idea why. ---AG
'sort': {'_uid': {'order': 'asc'}},
'query': query,
})
def _source(self, feature_names):
'''Maps feature names to ES's "_source" field.'''
if feature_names is None:
return True
elif isinstance(feature_names, bool):
return feature_names
else:
return map(lambda n: 'fc.' + n, feature_names)
def _range_filters(self, *key_ranges):
'Creates ES filters for key ranges used in scanning.'
filters = []
for s, e in key_ranges:
if isinstance(s, basestring):
s = eid(s)
if isinstance(e, basestring):
# Make the range inclusive.
# We need a valid codepoint, so use the max.
e += u'\U0010FFFF'
e = eid(e)
if s == () and e == ():
filters.append({'match_all': {}})
elif e == ():
filters.append({'range': {'_id': {'gte': s}}})
elif s == ():
filters.append({'range': {'_id': {'lte': e}}})
else:
filters.append({'range': {'_id': {'gte': s, 'lte': e}}})
if len(filters) == 0:
return [{'match_all': {}}]
else:
return filters
    def _create_index(self):
        'Create the index'
        try:
            # Generous timeouts: index creation can be slow on a busy
            # cluster and the client default is much shorter.
            self.conn.indices.create(
                index=self.index, timeout=60, request_timeout=60, body={
                    'settings': {
                        'number_of_shards': self.shards,
                        'number_of_replicas': self.replicas,
                    },
                })
        except TransportError:
            # Hope that this is an "index already exists" error...
            # NOTE(review): this also swallows unrelated transport
            # failures; matching the error status/type would be safer.
            logger.warn('index already exists? OK', exc_info=True)
            pass
    def _create_mappings(self):
        'Create the field type mapping.'
        self.conn.indices.put_mapping(
            index=self.index, doc_type=self.type,
            timeout=60, request_timeout=60,
            body={
                self.type: {
                    # Raw feature data under `fc.*` is not indexed
                    # (index: no); only the derived idx_* fields from
                    # `_get_index_mappings` are searchable.
                    'dynamic_templates': [{
                        'default_no_analyze_fc': {
                            'match': 'fc.*',
                            'mapping': {'index': 'no'},
                        },
                    }],
                    '_all': {
                        'enabled': False,
                    },
                    '_id': {
                        'index': 'not_analyzed', # allows range queries
                    },
                    'properties': self._get_index_mappings(),
                },
            })
        # It is possible to create an index and quickly launch a request
        # that will fail because the index hasn't been set up yet. Usually,
        # you'll get a "no active shards available" error.
        #
        # Since index creation is a very rare operation (it only happens
        # when the index doesn't already exist), we sit and wait for the
        # cluster to become healthy.
        self.conn.cluster.health(index=self.index, wait_for_status='yellow')
def _get_index_mappings(self):
'Retrieve the field mappings. Useful for debugging.'
maps = {}
for fname in self.indexed_features:
config = self.indexes.get(fname, {})
print(fname, config)
maps[fname_to_idx_name(fname)] = {
'type': config.get('es_index_type', 'integer'),
'store': False,
'index': 'not_analyzed',
}
for fname in self.fulltext_indexed_features:
maps[fname_to_full_idx_name(fname)] = {
'type': 'string',
'store': False,
'index': 'analyzed',
}
return maps
def _get_field_types(self):
'Retrieve the field types. Useful for debugging.'
mapping = self.conn.indices.get_mapping(
index=self.index, doc_type=self.type)
return mapping[self.index]['mappings'][self.type]['properties']
def _normalize_fulltext_feature_indexes(self, fulltext_indexes):
for x in fulltext_indexes or []:
if isinstance(x, Mapping):
assert len(x) == 1, 'only one mapping per index entry allowed'
name = x.keys()[0]
features = x[name]
else:
name = x
features = [x]
self.fulltext_indexes[name] = features
for fname in features:
self.fulltext_indexed_features.add(fname)
def _normalize_feature_indexes(self, feature_indexes):
for x in feature_indexes or []:
if isinstance(x, Mapping):
assert len(x) == 1, 'only one mapping per index entry allowed'
name = x.keys()[0]
if isinstance(x[name], Mapping):
index_type = x[name]['es_index_type']
features = x[name]['feature_names']
else:
index_type = 'integer'
features = x[name]
else:
name = x
features = [x]
index_type = 'integer'
self.indexes[name] = {
'feature_names': features,
'es_index_type': index_type,
}
for fname in features:
self.indexed_features.add(fname)
def _fc_index_disjunction_from_query(self, query_fc, fname):
'Creates a disjunction for keyword scan queries.'
if len(query_fc.get(fname, [])) == 0:
return []
terms = query_fc[fname].keys()
disj = []
for fname in self.indexes[fname]['feature_names']:
disj.append({'terms': {fname_to_idx_name(fname): terms}})
return disj
def fc_to_dict(self, fc):
d = {}
for name, feat in fc.to_dict().iteritems():
# This is a hack to drop the clean_visible feature because it
# is not necessary to store it and it is large. We simply need
# to index it.
if name == '#clean_visible':
continue
d[name] = base64.b64encode(cbor.dumps(feat))
return d
def fc_from_dict(self, fc_dict):
d = {}
for name, feat in fc_dict.iteritems():
d[name] = cbor.loads(base64.b64decode(feat))
return FC(d)
def get_query_fc(self, query_id, query_fc):
if query_fc is None:
if query_id is None:
raise ValueError(
'one of query_id or query_fc must not be None')
query_fc = self.get(query_id)
if query_fc is None:
raise KeyError(query_id)
return query_fc
def fc_bytes(self, fc_dict):
'''Take a feature collection in dict form and count its size in bytes.
'''
num_bytes = 0
for _, feat in fc_dict.iteritems():
num_bytes += len(feat)
return num_bytes
    def count_bytes(self, filter_preds):
        '''Count bytes of all feature collections whose key satisfies one of
        the predicates in ``filter_preds``. The byte counts are binned
        by filter predicate.

        Note that an FC's bytes are added to *every* predicate it
        satisfies, and the whole index is scanned to do the counting.
        '''
        num_bytes = defaultdict(int)
        # `_scan()` with no key ranges walks every document.
        for hit in self._scan():
            for filter_pred in filter_preds:
                # `did` decodes the hex-encoded ES id back to bytes.
                if filter_pred(did(hit['_id'])):
                    num_bytes[filter_pred] += self.fc_bytes(
                        hit['_source']['fc'])
        return num_bytes
class ElasticStoreSync(ElasticStore):
    '''Synchronous ElasticSearch backend.
    This is just like :class:`ElasticStore`, except it will call `sync`
    after every ``put`` and ``delete`` operation.
    This is useful for testing where it is most convenient for every
    write operation to be synchronous.
    '''
    def put(self, *args, **kwargs):
        '''Same as :meth:`ElasticStore.put`, but syncs afterwards.'''
        super(ElasticStoreSync, self).put(*args, **kwargs)
        self.sync()
    def delete(self, *args, **kwargs):
        '''Same as :meth:`ElasticStore.delete`, but syncs afterwards.'''
        super(ElasticStoreSync, self).delete(*args, **kwargs)
        self.sync()
def eid(s):
    '''Encode id (bytes) as a Unicode string.

    Each byte becomes two lowercase hex digits, so lexicographic
    order of encoded strings matches byte order of the inputs. No
    concern is given to wasting space. The inverse of ``eid`` is
    ``did``.
    '''
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    return u''.join(u'%02x' % ord(byte) for byte in s)
def did(s):
    '''Decode id (Unicode string) as a bytes.

    The inverse of ``did`` is ``eid``.
    '''
    hex_pairs = (s[i:i + 2] for i in xrange(0, len(s), 2))
    return ''.join(chr(int(pair, 16)) for pair in hex_pairs)
def idx_name_to_fname(idx_name):
    '''Strip the ``idx_`` prefix off a keyword-index field name.'''
    return idx_name[len('idx_'):]
def fname_to_idx_name(fname):
    '''Derive the ES keyword-index field name for feature ``fname``.'''
    return u'idx_' + fname.decode('utf-8')
def full_idx_name_to_fname(idx_name):
    '''Strip the ``full_idx_`` prefix off a fulltext-index field name.'''
    return idx_name[len('full_idx_'):]
def fname_to_full_idx_name(fname):
    '''Derive the ES fulltext-index field name for feature ``fname``.'''
    return u'full_idx_' + fname.decode('utf-8')
| {
"repo_name": "dossier/dossier.store",
"path": "dossier/store/elastic.py",
"copies": "1",
"size": "33398",
"license": "mit",
"hash": 1301133203779820500,
"line_mean": 36.3162011173,
"line_max": 79,
"alpha_frac": 0.5338942452,
"autogenerated": false,
"ratio": 4.395051980523753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428946225723753,
"avg_score": null,
"num_lines": null
} |
"""An attempt at making a period abstraction which does not suck.
Notably, a period abstraction which plays nice with timezones.
"""
from abc import ABCMeta, abstractmethod
from datetime import date, datetime, timedelta, time
from typing import Any, Iterator
from dateutil.tz import gettz
EUROPE_PARIS = gettz("Europe/Paris")
def _at_midnight(a_date: date, tzinfo=gettz("Europe/Paris")) -> datetime:
    """Return midnight at the start of ``a_date`` as an aware datetime
    in ``tzinfo``."""
    midnight = time(hour=0, minute=0, tzinfo=tzinfo)
    return datetime.combine(a_date, midnight)
class PeriodError(Exception):
    """Base class for period exceptions.

    Raised on invalid arguments, e.g. a value that is not strictly a
    date, or a naive (timezone-less) datetime.
    """
def raise_if_not_date(candidate: Any) -> None:
    """Raise an exception if the given value is not strictly a date."""
    # That looks weird, Pythonic way would be to use `isinstance`, but
    # as `datetime` inherits from `date`, a `datetime` would pass the
    # test (and we do not want datetimes here).
    if type(candidate) != date:
        raise PeriodError("Given value is not strictly a date")
def raise_if_not_datetime_ta(candidate: Any) -> None:
    """Raise an exception unless the given value is a timezone-aware
    ("ta") datetime.
    """
    is_a_datetime = isinstance(candidate, datetime)
    if not is_a_datetime:
        raise PeriodError("Given value is not strictly a datetime")
    if candidate.tzinfo is None:
        raise PeriodError('Given datetime is "naive" (no timezone is attached to it)')
class Period(metaclass=ABCMeta):
    """Abstract span of consecutive days (day, week, month, ...).

    A period is entirely described by its first and last day, both
    inclusive; concrete subclasses only decide which period a given
    reference date belongs to.
    """

    def __init__(self, *, first_day, last_day):
        raise_if_not_date(first_day)
        raise_if_not_date(last_day)
        self.first_day = first_day
        self.last_day = last_day

    @classmethod
    def from_reference_datetime(cls, reference_datetime, *, tzinfo=EUROPE_PARIS) -> "Period":
        """Return the period containing the given datetime in the given
        timezone.

        This one is a bit hairy, but for instance, "monday at 3am in
        Europe/Paris" and "sunday at 11pm America/Somewhere" is the same
        datetime, hence to determine the period we need to know in which
        timezone we want to "see" this datetime.
        """
        raise_if_not_datetime_ta(reference_datetime)
        local_date = reference_datetime.astimezone(tz=tzinfo).date()
        return cls.from_reference_date(local_date)

    @staticmethod
    @abstractmethod
    def from_reference_date(reference_date: date) -> "Period":
        """Return the period containing ``reference_date``."""

    def start(self, *, tzinfo=EUROPE_PARIS) -> datetime:
        """Return the first instant of the period in the given timezone
        (a timezone aware datetime).
        """
        return _at_midnight(self.first_day, tzinfo=tzinfo)

    def end(self, *, tzinfo=EUROPE_PARIS) -> datetime:
        """Return the last instant of the period in the given timezone
        (a timezone aware datetime).
        """
        return self.next().start(tzinfo=tzinfo)

    @classmethod
    def current(cls, *, tzinfo=EUROPE_PARIS) -> "Period":
        """Return the period containing the date of "today" in the
        given timezone.
        """
        return cls.from_reference_date(datetime.now(tzinfo).date())

    def next(self) -> "Period":
        """Return the period directly following the current period.

        Adding one day to our last day necessarily lands in the next
        period; that date is then used as the reference date.
        """
        day_after = self.last_day + timedelta(days=1)
        return type(self).from_reference_date(day_after)

    def previous(self) -> "Period":
        """Return the period directly preceding the current period."""
        # See `Period.next()` docstring for an explanation.
        day_before = self.first_day - timedelta(days=1)
        return type(self).from_reference_date(day_before)

    @classmethod
    def iter_between_datetime(
        cls, *, from_datetime: datetime, to_datetime: datetime, tzinfo=EUROPE_PARIS
    ) -> Iterator["Period"]:
        """Return an iterator yielding all periods between (and
        including) the two given datetimes in the given timezone.
        """
        raise_if_not_datetime_ta(from_datetime)
        raise_if_not_datetime_ta(to_datetime)
        yield from cls.iter_between_date(
            from_date=from_datetime.astimezone(tz=tzinfo).date(),
            to_date=to_datetime.astimezone(tz=tzinfo).date(),
        )

    @classmethod
    def iter_between_date(cls, *, from_date: date, to_date: date) -> Iterator["Period"]:
        """Return an iterator yielding all periods between (and
        including) the two given dates.
        """
        raise_if_not_date(from_date)
        raise_if_not_date(to_date)
        period = cls.from_reference_date(from_date)
        while True:
            yield period
            period = period.next()
            # Iteration is over once the next period starts past the end.
            if period.first_day > to_date:
                return
class Day(Period):
    # A single calendar day: first and last day coincide.
    @staticmethod
    def from_reference_date(reference_date: date) -> "Day":
        """Return the one-day period containing ``reference_date``."""
        raise_if_not_date(reference_date)
        return Day(first_day=reference_date, last_day=reference_date)
class Week(Period):
    # An ISO-8601 week: Monday (weekday 1) through Sunday (weekday 7).
    @staticmethod
    def from_reference_date(reference_date: date) -> "Week":
        """Return the ISO week containing ``reference_date``.

        ``%G`` (ISO year) and ``%V`` (ISO week) identify the week;
        appending the ISO weekday digit and parsing back with
        ``%G%V%u`` yields the week's first and last days. Note that
        strptime support for %G/%V requires Python >= 3.6.
        """
        raise_if_not_date(reference_date)
        first_day_label = reference_date.strftime("%G%V1")
        last_day_label = reference_date.strftime("%G%V7")
        return Week(
            first_day=datetime.strptime(first_day_label, "%G%V%u").date(),
            last_day=datetime.strptime(last_day_label, "%G%V%u").date(),
        )
class Month(Period):
    # A calendar month.
    @staticmethod
    def from_reference_date(reference_date: date) -> "Month":
        """Return the calendar month containing ``reference_date``."""
        raise_if_not_date(reference_date)
        start = date(reference_date.year, reference_date.month, 1)
        # Jumping 31 days from the 1st always lands in the following
        # month (no month is longer); snapping back to its 1st and
        # stepping one day back gives this month's last day.
        end = (start + timedelta(days=31)).replace(day=1) - timedelta(days=1)
        return Month(first_day=start, last_day=end)
# Maps config-friendly period names onto the matching Period subclass.
_PERIOD_TYPE_TO_PERIOD = {"daily": Day, "weekly": Week, "monthly": Month}
def period_from_string(period_string):
    """Return the Period subclass named by ``period_string``.

    Raises KeyError for names other than daily/weekly/monthly.
    """
    return _PERIOD_TYPE_TO_PERIOD[period_string]
| {
"repo_name": "ouihelp/yesaide",
"path": "yesaide/period.py",
"copies": "1",
"size": "6134",
"license": "mit",
"hash": -5793516975524761000,
"line_mean": 33.6553672316,
"line_max": 93,
"alpha_frac": 0.6493315944,
"autogenerated": false,
"ratio": 3.8194271481942716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9966786593761774,
"avg_score": 0.0003944297664994944,
"num_lines": 177
} |
import numpy as np;
import numpy.linalg as LA;
import scipy.signal as SL;
from scipy.ndimage import imread;
import matplotlib;
matplotlib.use('tkagg');
import matplotlib.pyplot as plt;
img=imread('Lenna.png')/255.0;
img=np.mean(img, axis=2);
img=(img-np.mean(img))/np.sqrt(np.var(img)+10);
#
# plt.figure(1);
# plt.imshow(img, cmap = plt.get_cmap('gray'), interpolation='nearest');
# plt.axis('off');
# plt.show();
num_centroids=50;
D=np.random.normal(size=(100, num_centroids));
D=D/np.sqrt(np.sum(D**2, axis=0));
D=D.reshape(10, 10, num_centroids);
for i in xrange(10):
S=np.zeros((503, 503, num_centroids));
for j in xrange(num_centroids):
S[:, :, j]=SL.convolve2d(img, D[:,:,j], mode="valid");
S=S*(S>=np.max(S,axis=2,keepdims=True));
for j in xrange(num_centroids):
temp=SL.convolve2d(img, S[:, :, j], mode="valid");
D[:, :, j]=temp+D[:, :, j];
D[:, :, j]=D[:,:,j]/np.sqrt(np.sum(D[:,:,j]**2));
print "[MESSAGE] Iteration %i is done" % (i);
plt.figure(1);
for i in xrange(num_centroids):
plt.subplot(5,10,i+1);
plt.imshow(D[:,:,i], cmap = plt.get_cmap('gray'), interpolation='nearest');
plt.axis('off')
plt.show(); | {
"repo_name": "duguyue100/kmeans",
"path": "conv_kmeans.py",
"copies": "1",
"size": "1314",
"license": "mit",
"hash": -1864433349766909200,
"line_mean": 24.2884615385,
"line_max": 77,
"alpha_frac": 0.6301369863,
"autogenerated": false,
"ratio": 2.643863179074447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8436182097917345,
"avg_score": 0.06756361349142032,
"num_lines": 52
} |
# An attempt to calculate the average A-level point score.
# Ended up not being able to.
#
# Update: Found out AS exam scores are also taken into account,
# at half the score of A-level. Maybe this will help you determine
# it, if for some weird reason you wanted to.
from pymongo import MongoClient

mongo = MongoClient()
db = mongo.ks5

# Schools reporting both an A-level average point score (APS) per
# entry and a per-subject A-level results breakdown for 2013.
schools = db.schools.find({
    "performance.2013.aps.a-level.entry": {"$gt": 0},
    "performance.2013.results.a-level": {"$exists": True}
})

success = 0         # calculated APS within 2.5% of the reported one
inaccurate = 0      # calculated APS off by more than 2.5%
all_suppressed = 0  # only suppressed results available
no_info = 0         # expected fields missing entirely

for school in schools:
    try:
        sum_of_scores = 0
        entries = 0
        suppressed = 0
        subjects = school["performance"]["2013"]["results"]["a-level"]
        for name, subject in subjects.items():
            try:
                # UCAS-style points per grade.
                sum_of_scores += subject["A*"] * 300
                sum_of_scores += subject["A"] * 270
                sum_of_scores += subject["B"] * 240
                sum_of_scores += subject["C"] * 210
                sum_of_scores += subject["D"] * 180
                sum_of_scores += subject["E"] * 150
                entries += subject["total"]
            except KeyError:
                # No per-grade breakdown: the subject's grades are
                # suppressed, but its entry count still matters.
                suppressed += subject["total"]
        if entries:
            real_score = school["performance"]["2013"]["aps"]["a-level"]["entry"]
            calculated_score = float(sum_of_scores) / entries
            score_of_suppressed = None
            if suppressed:
                # Back out the suppressed entries' average score from
                # the gap between reported and calculated APS, then
                # blend it back into the calculated score.
                score_of_suppressed = real_score + (entries / suppressed) * (real_score - calculated_score)
                calculated_score = (suppressed * score_of_suppressed + entries * calculated_score) / (suppressed + entries)
            percentage_error = abs(calculated_score - real_score) / real_score
            if percentage_error < 0.025:
                print("\nReal score: {0}".format(real_score))
                print("Calc score: {0}".format(calculated_score))
                # Bug fix: this print previously ran even when nothing
                # was suppressed, crashing on the undefined name.
                if score_of_suppressed is not None:
                    print("Score of suppressed: {0}".format(score_of_suppressed))
                success += 1
            else:
                inaccurate += 1
        else:
            all_suppressed += 1
    except KeyError:
        no_info += 1

print("\n{0} calculated with acceptable error.".format(success))
print("{0} calculated with significant error.".format(inaccurate))
print("{0} schools had only suppressed results.".format(all_suppressed))
print("{0} schools did not provide information.".format(no_info))
mongo.close() | {
"repo_name": "danielgavrilov/schools",
"path": "db/aps.py",
"copies": "1",
"size": "2420",
"license": "mit",
"hash": -8697362019777099000,
"line_mean": 35.1343283582,
"line_max": 123,
"alpha_frac": 0.5830578512,
"autogenerated": false,
"ratio": 3.9222042139384117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5005262065138412,
"avg_score": null,
"num_lines": null
} |
# an attempt to parse s-expressions in the jankiest manner possible

import ast  # use ast.literal_eval on text files, lol
import sys  # for getting arguments

#fileName = "cccookies-reformatted"
fileName = sys.argv[1]

# Translation table mapping characters LaTeX dislikes to spaces.
# Bug fix: str.translate needs a table keyed by code points, as built
# by str.maketrans -- a plain {str: str} dict silently matches nothing.
forbiddenChars = str.maketrans(dict.fromkeys(("\n", "\t", "\\"), " "))
#newlineChars = dict.fromkeys((r'''/'''),r'''}\\\text{''') # this is how we put multi lines in math mode

preamble = r'''\documentclass[12pt]{standalone}
\usepackage{mathtools}
\DeclareMathSizes{12}{12}{12}{12}
\begin{document}
\[
'''
postamble = r'''\]
\end{document}'''

# these hold the output
frontMatter = [] # this is going to hold the latex output
backMatter = []

# eat the input (context manager ensures the file is closed)
with open(fileName, 'r') as f:
    blah = ast.literal_eval(f.read().translate(forbiddenChars))
# digest the input
def parse_stuff(stuff):
    """Recursively emit LaTeX for a parsed s-expression into frontMatter.

    A tuple is a labelled group: its tail is rendered inside an
    rcases/alignedat environment and its head becomes the trailing
    substack label ('/' in the label marks a line break). A bare
    string becomes a single text line.
    """
    if isinstance(stuff, str):
        frontMatter.append(r'''\text{''' + stuff + r'''}\\''' + '\n')
    elif isinstance(stuff, tuple):
        frontMatter.append(r'''\begin{rcases}\begin{alignedat}{3}''' + '\n')
        for child in stuff[1:]:
            parse_stuff(child)
        frontMatter.append(r'''\end{alignedat}\end{rcases}''' + '\n')
        frontMatter.append(r'''\substack{\text{''' + stuff[0].replace(r'''/''', r'''}\\\text{''') + r'''}}\\''' + '\n')
    else:
        print("THIS SHOULD NEVER HAVE HAPPENED")
        print(type(stuff))
parse_stuff(blah)

# and poop it back out: preamble, collected body, postamble.
# Context manager closes the file even on error; this overwrites any
# existing .tex file of the same name.
with open(fileName + '.tex', 'w') as outputFile:
    outputFile.write(preamble)
    # do the things that happen in the middle
    for item in frontMatter:
        outputFile.write(item)
    for item in backMatter:
        outputFile.write(item)
    outputFile.write(postamble)
# run latex to make pdf/graphix | {
"repo_name": "jnj16180340/RecipeTypesetting",
"path": "recipes/parseRecipesToLatex.py",
"copies": "1",
"size": "1748",
"license": "bsd-3-clause",
"hash": 7865996657727096000,
"line_mean": 30.8,
"line_max": 111,
"alpha_frac": 0.6790617849,
"autogenerated": false,
"ratio": 3.0828924162257496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.426195420112575,
"avg_score": null,
"num_lines": null
} |
# An attempt to provide combinators for constructing documents.
from boxmodel import *
# A convenience wrapper to form a scope from a dictionary.
class Environ(object):
__slots__ = ('parent', 'values')
def __init__(self, parent, values):
self.parent = parent
self.values = values
def __getattr__(self, name):
if name in self.values:
return self.values[name]
return getattr(self.parent, name)
@classmethod
def let(cls, parent, values):
if len(values) > 0:
return cls(parent, values)
return parent
@classmethod
def root(cls, values={}):
env = cls(None, values)
if 'line_break' not in values:
env.values['line_break'] = line_break_greedy
if 'text_align' not in values:
env.values['text_align'] = line_justify
if 'indent' not in values:
env.values['indent'] = 0
if 'depth' not in values:
env.values['depth'] = 0
return env
def fold(item, env):
if isinstance(item, Frame):
return [item]
elif callable(item):
return item(env)
else:
return env.font(item, env.font_size, color=env.color)
def toplevel(contents, env, values={}):
env = Environ.let(env, values)
return vpack(list(vbox(contents)(env)))
# These 'folding' -combinators take input and return a closure constructing boxes for environ.
def scope(contents, values={}):
def scope_fold(env):
env = Environ.let(env, values)
for item in contents:
for box in fold(item, env):
yield box
return scope_fold
# imitates restricted horizontal mode
def hbox(contents, values={}):
def hbox_fold(env):
env = Environ.let(env, values)
boxes = []
for item in contents:
boxes.extend(fold(item, env))
yield hpack(boxes)
return hbox_fold
# imitates both vertical modes
def vbox(contents, values={}):
def vbox_fold(env):
env = Environ.let(env, values)
boxes = []
paragraph = []
for item in contents:
for box in fold(item, env):
if box.get_hint('vertical'):
if len(paragraph) > 0:
boxes.extend(env.line_break(paragraph, env))
paragraph = []
boxes.append(box)
else:
paragraph.append(box)
if len(paragraph) > 0:
boxes.extend(env.line_break(paragraph, env))
yield vpack(boxes)
return vbox_fold
def no_line_break(paragraph, env):
yield hpack(paragraph)
def line_break_greedy(paragraph, env):
indent = 0
line = []
remaining = env.page_width
breakpoint = 0
for box in paragraph:
if remaining < box.width and breakpoint > 0:
lineseg = hpack(line[:breakpoint-1])
lineseg.shift = indent
yield lineseg
line = line[breakpoint:]
breakpoint = 0
indent = env.indent
remaining = env.page_width - sum(box.width for box in line) - indent
line.append(box)
remaining -= box.width
if box.get_hint('break'):
breakpoint = len(line)
lineseg = hpack(line)
lineseg.shift = indent
yield lineseg
def line_break_greedy_justify(paragraph, env):
indent = 0
line = []
remaining = env.page_width - indent
breakpoint = 0
for box in paragraph:
if remaining < box.width and breakpoint > 0:
lineseg = hpack(line[:breakpoint-1], to_dimen=env.page_width)
lineseg.shift = indent
yield lineseg
line = line[breakpoint:]
breakpoint = 0
indent = env.indent
remaining = env.page_width - sum(box.width for box in line) - indent
line.append(box)
remaining -= box.width
if box.get_hint('break'):
breakpoint = len(line)
lineseg = hpack(line)
lineseg.shift = indent
yield lineseg
# Somewhat less clumsy implementation of minimum raggedness algorithm.
def line_break(paragraph, env):
length = len(paragraph)
page_width = env.page_width
memo = []
def penalty_of(index):
return memo[length - index][0]
def penalty(cut, width):
# Adjustment to not penalize final line
if cut == length and width*10 > page_width:
return 0
p = penalty_of(cut)
if width <= page_width:
return p + (page_width - width) ** 2
return 2**10
def cut_points(start):
cut = start + 1
width = paragraph[start].width
none_yet = True
while cut < length and (none_yet or width <= page_width):
if paragraph[cut].get_hint('break'):
yield width, cut
none_yet = False
width += paragraph[cut].width
cut += 1
if cut == length:
yield width, cut
def compute(start):
if start == length:
return (0, length)
return min(
(penalty(cut, width), cut)
for width, cut in cut_points(start))
index = length
while index >= 0:
memo.append(compute(index))
index -= 1
start = 0
while start < length:
cut = memo[length - start][1]
yield env.text_align(env, paragraph[start:cut], cut==length)
start = cut+1
def line_justify(env, line, is_last_line):
if is_last_line:
return hpack(line)
return hpack(line, to_dimen=env.page_width)
def line_left(env, line, is_last_line):
return hpack(line)
# paragraph break fold
def par(env):
box = Glue(env.font_size)
box.hint = {'vertical': True}
yield box
def brk(env):
box = Glue(0)
box.hint = {'vertical': True}
yield box
def hfil(env):
yield Glue(1, 0, 1+1j)
import math, time
# Table layouting
def table(rows, values={}):
def table_fold(env):
env = Environ.let(env, values)
tab = [[list(fold(cell, env)) for cell in row] for row in rows]
col = [0 for i in range(max(map(len, tab)))]
for row in tab:
for i, cell in enumerate(row):
col[i] = max(col[i], sum(x.width for x in cell))
box = vpack([
hpack([hpack(cell, to_dimen=w) for w, cell in zip(col, row)])
for row in tab])
if len(box) > 0:
y = box[len(box)/2].offset
if len(box)%2 == 0:
y = (y + box[len(box)/2-1].offset) * 0.5
box.height += y
box.depth -= y
yield box
return table_fold
# Primitive form of pretty printing.
def codeline(contents):
def _codeline(env):
env = Environ.let(env, {'depth': env.depth+1})
if env.depth > 1:
for item in contents:
for box in fold(item, env):
yield box
else:
row = []
for item in contents:
row.extend(fold(item, env))
boxes = list(codeline_break(env.page_width, row, 0))
yield vpack(boxes)
return _codeline
def codeline_break(width, row, indent):
remaining = width
best_depth = 10000
for box in row:
remaining -= box.width
#if remaining < 0:
best_depth = min(
best_depth,
box.get_hint('break_depth', 10000))
if best_depth >= 10000 or remaining > 0:
line = hpack(row)
line.shift = indent
yield line
else:
res, shift = [], 0
for box in row:
if box.get_hint('break_depth') == best_depth:
for subline in codeline_break(width-shift, res, indent+shift):
yield subline
res, shift = [], 20
else:
res.append(box)
for subline in codeline_break(width-shift, res, indent+shift):
yield subline
def nl(env):
k = Glue(0)
k.hint = {'break_depth': env.depth}
yield k
| {
"repo_name": "cheery/textended-edit",
"path": "minitex/__init__.py",
"copies": "1",
"size": "8041",
"license": "mit",
"hash": -5415065465998962000,
"line_mean": 28.8921933086,
"line_max": 94,
"alpha_frac": 0.5516726775,
"autogenerated": false,
"ratio": 3.8418537983755376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48935264758755376,
"avg_score": null,
"num_lines": null
} |
# An attempt to rewrite the handmade penguin sound code in Python.
# This one didn't work, but soundtest2.py is based on it and does work.
# Link: https://davidgow.net/handmadepenguin/ch8.html
import sdl2
import ctypes
def init_audio(samples_per_second, buffer_size):
    """Open the SDL audio device: 2-channel signed 16-bit LE samples
    at the given rate, with `buffer_size` samples of buffering.

    NOTE(review): the same SDL_AudioSpec object is passed as both the
    desired and the obtained spec -- confirm that is what the pysdl2
    binding expects here.
    """
    audio_settings = sdl2.SDL_AudioSpec(freq=samples_per_second,
                                        aformat=sdl2.AUDIO_S16LSB,
                                        channels=2,
                                        samples=buffer_size)
    sdl2.SDL_OpenAudio(audio_settings, audio_settings)
    # Bail out if SDL could not give us the S16LSB format we rely on.
    if audio_settings.format != sdl2.AUDIO_S16LSB:
        print("Oops! We didn't get AUDIO_S16LSB as our sample format!")
        sdl2.SDL_CloseAudio()
# Square-wave parameters.
samples_per_second = 48000
tone_hz = 256
tone_volume = 3000
running_sample_index = 0
squarewave_period = samples_per_second / tone_hz
half_squarewave_period = squarewave_period / 2
# 2 channels x 2 bytes per 16-bit sample.
bytes_per_sample = ctypes.sizeof(ctypes.c_int16) * 2
sdl2.SDL_Init(sdl2.SDL_INIT_AUDIO)
init_audio(samples_per_second, int(samples_per_second * bytes_per_sample / 10))
sound_is_playing = False
# Top up the queue towards one second's worth of audio each pass.
while True:
    target_queue_bytes = samples_per_second * bytes_per_sample
    bytes_to_write = target_queue_bytes - sdl2.SDL_GetQueuedAudioSize(1)
    if bytes_to_write > 0:
        # NOTE(review): this allocates bytes_to_write c_int16 slots
        # (twice as much memory as bytes_to_write bytes) and is then
        # indexed by *byte* offsets below -- likely part of why this
        # version never worked (see header comment).
        sound_buffer = (ctypes.c_int16 * bytes_to_write)()
        # NOTE(review): `POINTER` is undefined -- it should be
        # ctypes.POINTER, so this line raises NameError as written.
        bytes_buffer = ctypes.cast(sound_buffer, POINTER(ctypes.c_ubyte))
        sample_count = int(bytes_to_write / bytes_per_sample)
        for i in range(sample_count):
            running_sample_index += 1
            # NOTE(review): float division makes `high` truthy almost
            # always; integer division (//) was probably intended to
            # alternate the square wave's half-periods.
            high = (running_sample_index / half_squarewave_period) % 2
            sample_value = tone_volume if high else -tone_volume
            sound_buffer[i*bytes_per_sample] = ctypes.c_int16(sample_value)
            sound_buffer[i*bytes_per_sample+1] = ctypes.c_int16(sample_value)
        sdl2.SDL_QueueAudio(1, sound_buffer, bytes_to_write)
        del(sound_buffer)
    if not sound_is_playing:
        # Unpause the device once we have queued some audio.
        sdl2.SDL_PauseAudio(0)
        sound_is_playing = True
| {
"repo_name": "MageJohn/CHIP8",
"path": "soundtests/handmadepenguin_squarwave.py",
"copies": "1",
"size": "2021",
"license": "mit",
"hash": -5175925471476281000,
"line_mean": 37.1320754717,
"line_max": 79,
"alpha_frac": 0.6501731816,
"autogenerated": false,
"ratio": 3.243980738362761,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9297888998899996,
"avg_score": 0.019252984212552945,
"num_lines": 53
} |
# A natural number, N, that can be written as the sum and
# product of a given set of at least two natural numbers,
# {a1, a2, ... , ak} is called a product-sum number:
# N = a_1 + a_2 + ... + a_k = a_1 x a_2 x ... x a_k.
# For example, 6 = 1 + 2 + 3 = 1 x 2 x 3.
# For a given set of size, k, we shall call the smallest N
# with this property a minimal product-sum number. The
# minimal product-sum numbers for sets of size, k = 2, 3,
# 4, 5, and 6 are as follows.
# k = 2: 4 = 2 x 2 = 2 + 2
# k = 3: 6 = 1 x 2 x 3 = 1 + 2 + 3
# k = 4: 8 = 1 x 1 x 2 x 4 = 1 + 1 + 2 + 4
# k = 5: 8 = 1 x 1 x 2 x 2 x 2 = 1 + 1 + 2 + 2 + 2
# k = 6: 12 = 1 x 1 x 1 x 1 x 2 x 6 = 1 + 1 + 1 + 1 + 2 + 6
# Hence for 2 <= k <= 6, the sum of all the minimal product-sum
# numbers is 4 + 6 + 8 + 12 = 30; note that 8 is only counted
# once in the sum.
# In fact, as the complete set of minimal product-sum numbers
# for 2 <= k <= 12 is {4, 6, 8, 12, 15, 16}, the sum is 61.
# What is the sum of all the minimal product-sum numbers for
# 2 <= k <= 12000?
limit = 12000
# ans[k] caps the minimal product-sum number for set size k; 2k is
# always achievable (k-2 ones, one 2 and one k), so seed with that.
ans = [2 * k for k in range(12001)]
def get_product_sum(num, nprod, nsum, start):
    # `num` factors >= 2 chosen so far, with product `nprod` and sum
    # `nsum`. Padding with (nprod - nsum) ones gives a set of size
    # k = num + nprod - nsum whose sum and product are both nprod.
    k = nprod - nsum + num
    if k <= limit:
        ans[k] = min(nprod, ans[k])
    # Extend with a next factor i >= start (non-decreasing factors
    # avoid duplicate sets); the bound presumably keeps nprod within
    # ~2*limit, beyond which it cannot beat the 2k seed -- verify.
    for i in range(start, limit / nprod * 2 + 1):
        get_product_sum(num + 1, nprod * i, nsum + i, i)
get_product_sum(0, 1, 0, 2)
# Duplicates across different k are counted once via set().
print sum(set(ans[2:]))
| {
"repo_name": "cloudzfy/euler",
"path": "src/88.py",
"copies": "1",
"size": "1356",
"license": "mit",
"hash": -487902855806499600,
"line_mean": 32.9,
"line_max": 63,
"alpha_frac": 0.5737463127,
"autogenerated": false,
"ratio": 2.515769944341373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3589516257041373,
"avg_score": null,
"num_lines": null
} |
# an auc library that doesn't require sklearn
# thanks to https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/auc.py
# I made some changes to style to fit our code-base
import numpy as np
def tied_rank(x):
    """
    Computes the tied rank of elements in x.

    Ranks are 1-based; a run of equal values all receive the average
    of the ranks that run occupies (e.g. two tied smallest values
    both get rank 1.5).

    Parameters
    ----------
    x : list of numbers, numpy array

    Returns
    -------
    score : numpy array of floats
        The tied rank of each element in x
    """
    if len(x) == 0:
        # Robustness: previously this raised IndexError on empty input.
        return np.zeros(0)
    sorted_x = sorted(zip(x, range(len(x))))
    # Bug fix: this must be a float array; the previous int array
    # silently truncated tied (half-integer) ranks.
    r = np.zeros(len(x))
    cur_val = sorted_x[0][0]
    last_rank = 0
    for i in range(len(sorted_x)):
        if cur_val != sorted_x[i][0]:
            cur_val = sorted_x[i][0]
            # Close out the finished run of ties: positions last_rank
            # .. i-1 occupy ranks last_rank+1 .. i, whose mean is
            # (last_rank + 1 + i) / 2.
            for j in range(last_rank, i):
                r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
            last_rank = i
        if i == len(sorted_x) - 1:
            # Flush the final run.
            for j in range(last_rank, i + 1):
                r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
    return r
def auc(actual, posterior, pos_label=1):
    """
    Computes the area under the receiver-operater characteristic (AUC)

    This function computes the AUC error metric for binary classification.

    Parameters
    ----------
    actual : list of binary numbers, numpy array
        The ground truth value
    posterior : same type as actual
        Defines a ranking on the binary numbers, from most likely to
        be positive to least likely to be positive.
    pos_label : value in `actual` marking the positive class

    Returns
    -------
    score : double
        The AUC of posterior against actual
    """
    if len(actual) == 0:
        raise Exception('actual is empty')
    if len(posterior) == 0:
        raise Exception('posterior is empty')
    if len(actual) != len(posterior):
        raise Exception('actual and posterior lengths do not match')
    # Rank-sum (Mann-Whitney) formulation of AUC, using tied ranks.
    r = tied_rank(posterior)
    positives = np.equal(actual, pos_label)
    num_positive = sum(positives)
    if num_positive == 0:
        # Bug fix: error message previously read "actual is has no
        # positives".
        raise Exception('actual has no positives')
    if num_positive == len(actual):
        raise Exception('actual is all positives')
    num_negative = len(actual) - num_positive
    sum_positive = sum(r[positives])
    auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
           (num_negative * num_positive))
    return auc
if __name__ == "__main__":
    # Smoke test: compare this AUC against sklearn's on random data.
    from sklearn import metrics
    import random
    # Labels are 1/2 with 2 as the positive class; scores are random.
    y = np.array([1] * random.randint(1, 20) + [2] * random.randint(1, 20))
    pred = np.array([random.random() for i in xrange(len(y))])
    fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    standard = metrics.auc(fpr, tpr)
    custom = auc(y, pred, pos_label=2)
    # NOTE(review): exact float equality; a tolerance (math.isclose)
    # would be more robust if the implementations round differently.
    if standard != custom:
        raise Exception("custom AUC doesn't match SKlearn AUC, {0} != {1}".format(
            standard, custom))
| {
"repo_name": "yueranyuan/vector_edu",
"path": "learntools/libs/auc.py",
"copies": "1",
"size": "2879",
"license": "mit",
"hash": 5313534480106108000,
"line_mean": 34.5432098765,
"line_max": 85,
"alpha_frac": 0.5946509205,
"autogenerated": false,
"ratio": 3.6489226869455007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9738422964883022,
"avg_score": 0.0010301285124955345,
"num_lines": 81
} |
"""An augmented version of the dict class"""
from hawkweed.computed import PY3
from hawkweed.classes.iterable import Iterable
from hawkweed.classes.repr import Repr
from hawkweed.classes.collection import Collection
class Dict(Repr, dict, Iterable, Collection):
    """An augmented version of the dict class"""

    def reset(self, other):
        """
        Reset this dict so that it holds exactly the content of other.

        Complexity: O(n)
        params:
            other: the dict whose content should replace this one's
        returns: self
        """
        # clear() and update() both return self, so they chain.
        return self.clear().update(other)

    def reverse(self):
        """
        Exchange keys and values; when several keys share a value, the
        last key written wins.

        Complexity: O(2*n)
        returns: self
        """
        return self.reset({value: key for key, value in self.items()})

    def remove_empty(self, fun=None, filter_keys=False):
        """
        Remove pairs considered empty from the dict.

        Complexity: O(2*n)
        params:
            fun: predicate deciding which entries to keep
                 (defaults to bool())
            filter_keys: filter by keys instead of values
        returns: self
        """
        keep = fun or bool
        kept = {key: value for key, value in self.items()
                if keep(key if filter_keys else value)}
        return self.reset(kept)

    def update(self, *args, **kwargs):
        """
        Update the dictionary like dict.update(), but return self.
        returns: self
        """
        super(Dict, self).update(*args, **kwargs)
        return self

    def reduce(self, fun, acc=None):
        """
        Fold the dict into a single value using fun.

        Complexity: O(n)
        params:
            fun: a function taking (accumulator, key, value) and
                 returning the new accumulator
            acc: the initial accumulator (defaults to the first
                 (key, value) pair produced by the iterator)
        returns: the accumulated value
        """
        pairs = iter(self.items())
        if acc is None:
            # next() works on both Python 2 and 3 iterators.
            acc = next(pairs)
        for key, value in pairs:
            acc = fun(acc, key, value)
        return acc

    def clear(self):
        """
        Like dict.clear(), but returns self.
        returns: self
        """
        super(Dict, self).clear()
        return self

    def setdefault(self, *args, **kwargs):
        """
        Like dict.setdefault(), but returns self.
        returns: self
        """
        super(Dict, self).setdefault(*args, **kwargs)
        return self

    def pick(self, *keys):
        """
        Build a plain sub-dict containing only the given keys; keys not
        present in this dict are skipped.

        Complexity: O(k) where k is the number of keys
        params:
            *keys: the keys to pick
        returns: the sub-dict
        """
        return {key: self[key] for key in keys if key in self}
| {
"repo_name": "hellerve/hawkweed",
"path": "hawkweed/classes/dict_cls.py",
"copies": "1",
"size": "3376",
"license": "mit",
"hash": -7251432227096227000,
"line_mean": 26.6721311475,
"line_max": 72,
"alpha_frac": 0.5438388626,
"autogenerated": false,
"ratio": 4.525469168900805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569308031500805,
"avg_score": null,
"num_lines": null
} |
"""An augmented version of the list class"""
from hawkweed.classes.iterable import Iterable
from hawkweed.classes.repr import Repr
from hawkweed.classes.collection import Collection
class List(Repr, list, Iterable, Collection):
    """An augmented version of the list class"""

    def reset(self, other):
        """
        Resets a list to another lists content

        Complexity: O(n+m)
        params:
            other: the list this list should be resetted to
        returns: self
        """
        self.clear()
        self.extend(other)
        return self

    def remove_empty(self, fun=None):
        """
        Removes empty elements from the list.

        Complexity: O(3*n)
        params:
            fun: a function that takes an element and returns whether it
                 should be kept (defaults to bool())
        returns: self
        """
        if not fun:
            fun = bool
        # Build the survivors first, then reset in place so external
        # references to this list remain valid.
        self.reset([elem for elem in self if fun(elem)])
        return self

    def rindex(self, elem):
        """
        Gets the index of an element counted from the end of the list
        (0 is the last element). Does not copy the list.

        Complexity: O(n)
        params:
            elem: the element that should be found
        returns: the index from the end, or -1 if not found
        """
        for i, find in enumerate(reversed(self)):
            if elem == find:
                return i
        return -1

    def take(self, num):
        """
        A generator that yields the first num elements.

        Complexity: O(k) where k is the param
        params:
            num: the upper limit of the generator slice
        """
        for i, elem in enumerate(self):
            if num <= i:
                break
            yield elem

    def take_while(self, fun):
        """
        A generator that yields elements while a given function fun
        returns True.

        Complexity: O(n)
        params:
            fun: the predicate function
        """
        for elem in self:
            if fun(elem):
                yield elem
            else:
                break

    def drop(self, num):
        """
        A generator that yields the elements from num to len(List).

        Complexity: O(n+k) where k is the param
        params:
            num: the lower limit of the generator slice
        """
        for elem in self[num:]:
            yield elem

    def drop_while(self, fun):
        """
        A generator that skips elements while a given function fun
        returns True, then yields the rest.

        Complexity: O(n+k)
        params:
            fun: the predicate function
        """
        i = 0
        for i, elem in enumerate(self):
            if not fun(elem):
                break
        for elem in self[i:]:
            yield elem

    def clear(self):
        """
        A reimplemented version of clear because it only exists in Python3.

        Complexity: O(n)
        returns: self
        """
        del self[:]
        return self

    def append(self, *args, **kwargs):
        """
        Augmented append function that returns self.
        returns: self
        """
        super(List, self).append(*args, **kwargs)
        return self

    def extend(self, *args, **kwargs):
        """
        Augmented extend function that returns self.
        returns: self
        """
        super(List, self).extend(*args, **kwargs)
        return self

    def get(self, index, dflt=None):
        """
        A getter function that behaves like dict.get.

        params:
            index: the index to get (negative indices count from the end)
            dflt: the default return value (defaults to None)
        returns: the element at index, or dflt when index is out of range
        """
        # EAFP fix: the old bounds check (len(self) > index) let
        # out-of-range negative indices through and raised IndexError
        # instead of returning the default.
        try:
            return self[index]
        except IndexError:
            return dflt

    def flatten(self):
        """
        Returns a new deeply flattened list.

        Complexity: O(n)
        returns: the flattened list
        """
        flattened = []
        for item in self:
            if isinstance(item, List):
                flattened.extend(item.flatten())
            elif isinstance(item, list):
                flattened.extend(List(item).flatten())
            else:
                flattened.append(item)
        return flattened
| {
"repo_name": "hellerve/hawkweed",
"path": "hawkweed/classes/list_cls.py",
"copies": "1",
"size": "4245",
"license": "mit",
"hash": -8453638778095210000,
"line_mean": 24.4191616766,
"line_max": 83,
"alpha_frac": 0.5210836278,
"autogenerated": false,
"ratio": 4.624183006535947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034753152169445824,
"num_lines": 167
} |
"""An authentication plugin for ``repoze.who`` and SQLAlchemy."""
from zope.interface import implements
from repoze.who.interfaces import IAuthenticator
from repoze.who.interfaces import IMetadataProvider
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from lasco.models import User
from lasco.models import DBSession
class BasePlugin(object):
    """Common user-lookup machinery shared by the plugins below."""

    def get_user(self, **kwargs):
        """Return the single User matching *kwargs*, or None.

        None is returned both when no row matches and when the filter is
        ambiguous (several rows match).
        """
        session = DBSession()
        try:
            return session.query(User).filter_by(**kwargs).one()
        except (NoResultFound, MultipleResultsFound):
            return None
class SQLAlchemyAuthPlugin(BasePlugin):
    """repoze.who authenticator backed by the User table."""

    implements(IAuthenticator)

    def authenticate(self, environ, identity):
        """Return the id of the user matching the credentials, else None."""
        try:
            login = identity['login']
            password = identity['password']
        except KeyError:
            return None
        user = self.get_user(login=login)
        if user is not None and user.validate_password(password):
            return user.id
        return None
class SQLAlchemyMetadataPlugin(BasePlugin):
    """repoze.who metadata provider backed by the User table."""

    implements(IMetadataProvider)

    def add_metadata(self, environ, identity):
        """Add login, fullname and id of the identified user to *identity*.

        All three values are set to None when no matching user is found.
        """
        user = self.get_user(id=identity['repoze.who.userid'])
        if user is None:
            identity.update(login=None, fullname=None, id=None)
        else:
            identity.update(login=user.login,
                            fullname=user.fullname,
                            id=user.id)
def make_auth_plugin():
    """Return a new SQLAlchemyAuthPlugin instance."""
    return SQLAlchemyAuthPlugin()
def make_md_plugin():
    """Return a new SQLAlchemyMetadataPlugin instance."""
    return SQLAlchemyMetadataPlugin()
| {
"repo_name": "dbaty/Lasco",
"path": "lasco/whoplugins.py",
"copies": "1",
"size": "1721",
"license": "bsd-3-clause",
"hash": -6562619308561976000,
"line_mean": 25.4769230769,
"line_max": 65,
"alpha_frac": 0.635676932,
"autogenerated": false,
"ratio": 4.447028423772609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 65
} |
"""An automobile price research tool."""
import bottle
import craigslist
import json
APP = bottle.default_app()
@APP.route('/')
def html():
    """Serve the main page (till.html) from the current directory."""
    return bottle.static_file('till.html', '.')
@APP.route('/till.css')
def css():
    """Serve the combined stylesheet from the 'compiled' directory."""
    return bottle.static_file('till_combined.css', 'compiled')
@APP.route('/till.js')
def js():
    """Serve the combined JavaScript bundle from the 'compiled' directory."""
    return bottle.static_file('app_combined.js', 'compiled')
@APP.route('/ping')
def ping():
    """Health-check endpoint: respond with 204 No Content."""
    return bottle.HTTPResponse(status=204)
@APP.route('/automobiles/<city>/<query>')
def automobiles(city, query):
    """Queries Craigslist for automobiles.

    Args:
      city: The city in which to search.
      query: The search query to be executed.

    Returns:
      A JSON-encoded array of vehicles.
    """
    listings = craigslist.automobiles(city, query)
    vehicles = []
    for listing in listings:
        vehicles.append({
            'mileage': listing.mileage,
            'price': listing.price,
            'year': listing.year
        })
    bottle.response.content_type = 'application/json'
    return json.dumps({'automobiles': vehicles})
@APP.route('/cities')
def cities():
    """Gets a list of cities that can be queried.

    Returns:
      A list of city names.
    """
    payload = []
    for entry in craigslist.cities():
        payload.append({'city_id': entry.city_id, 'name': entry.name})
    bottle.response.content_type = 'application/json'
    return json.dumps({'cities': payload})
| {
"repo_name": "kjiwa/till",
"path": "server.py",
"copies": "1",
"size": "1299",
"license": "mpl-2.0",
"hash": -1091106403017044600,
"line_mean": 19.619047619,
"line_max": 68,
"alpha_frac": 0.662817552,
"autogenerated": false,
"ratio": 3.1605839416058394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43234014936058396,
"avg_score": null,
"num_lines": null
} |
"""A navigable completer for the qtconsole"""
# coding : utf-8
#-----------------------------------------------------------------------------
# Copyright (c) 2012, IPython Development Team.$
#
# Distributed under the terms of the Modified BSD License.$
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# System library imports
import IPython.utils.text as text
from IPython.external.qt import QtCore, QtGui
#--------------------------------------------------------------------------
# Return an HTML table with selected item in a special class
#--------------------------------------------------------------------------
def html_tableify(item_matrix, select=None, header=None , footer=None) :
    """Return an HTML table rendering of *item_matrix*.

    params:
        item_matrix: list of rows, each row a list of strings
        select: optional (row, col) of the cell to mark with the
            'inverted' CSS class
        header/footer: optional text repeated across every column of a
            header/footer row
    """
    if not item_matrix :
        return ''
    n_cols = len(item_matrix[0])
    rows = []
    for r, row in enumerate(item_matrix):
        cells = []
        for c, item in enumerate(row):
            if select and (r, c) == tuple(select):
                # Highlight the selected cell.
                cells.append(u'<td class="inverted">' + item + u'&nbsp; </td>')
            else:
                cells.append(u'<td>' + item + u'&nbsp; </td>')
        rows.append(u'<tr>' + u''.join(cells) + u'</tr>')
    head = u''
    foot = u''
    if header :
        head = u'<tr>' + (u'<td>' + header + u'</td>') * n_cols + u'</tr>'
    if footer :
        foot = u'<tr>' + (u'<td>' + footer + u'</td>') * n_cols + u'</tr>'
    return (u'<table class="completion" style="white-space:pre">'
            + head + u''.join(rows) + foot + u'</table>')
class SlidingInterval(object):
    """A bounded interval that follows a cursor.

    Used internally to scroll the completion view when the cursor tries
    to move beyond the edges, and to show '...' when rows are hidden.
    """

    _min = 0
    _max = 1
    _current = 0

    def __init__(self, maximum=1, width=6, minimum=0, sticky_lenght=1):
        """Create a new bounded interval.

        Any value returned by this object is bounded between *maximum*
        and *minimum*; the visible window is *width* rows tall, and
        *sticky_lenght* controls how close the window must come to an
        edge before snapping onto it.
        """
        self._min = minimum
        self._max = maximum
        self._width = width
        self._start = 0
        self._stop = self._start + self._width + 1
        self._sticky_lenght = sticky_lenght

    @property
    def current(self):
        """Current cursor position."""
        return self._current

    @current.setter
    def current(self, value):
        """Move the cursor, sliding the window when it hits an edge."""
        position = max(self._min, min(value, self._max))
        self._current = position
        if position > self._stop:
            self._stop = position
            self._start = position - self._width
        elif position < self._start:
            self._start = position
            self._stop = position + self._width
        # Snap to the boundaries when close enough.
        if abs(self._start - self._min) <= self._sticky_lenght:
            self._start = self._min
        if abs(self._stop - self._max) <= self._sticky_lenght:
            self._stop = self._max

    @property
    def start(self):
        """First visible row of the window."""
        return self._start

    @property
    def stop(self):
        """Last visible row of the window."""
        return self._stop

    @property
    def width(self):
        """Height of the visible window."""
        return self._stop - self._start

    @property
    def nth(self):
        """Cursor position relative to the start of the window."""
        return self.current - self.start
class CompletionHtml(QtGui.QWidget):
    """ A widget for tab completion, navigable by arrow keys """
    #--------------------------------------------------------------------------
    # 'QObject' interface
    #--------------------------------------------------------------------------
    # Completion state shared across the event-filter callbacks:
    #   _items           - matrix (rows x cols) of completion strings
    #   _index           - (row, col) of the currently selected item
    #   _consecutive_tab - 0: inactive, 1: list shown, 2: navigating the list
    #   _size            - (number of rows, number of columns) of _items
    #   _old_cursor      - text cursor at which the completion list is drawn
    #   _start_position  - document position where the completion started
    _items = ()
    _index = (0, 0)
    _consecutive_tab = 0
    _size = (1, 1)
    _old_cursor = None
    _start_position = 0
    _slice_start = 0
    _slice_len = 4

    def __init__(self, console_widget):
        """ Create a completion widget that is attached to the specified Qt
        text edit widget (``console_widget._control`` must be a QTextEdit or
        QPlainTextEdit); this object installs itself as an event filter on it
        so it can intercept the navigation keys.
        """
        assert isinstance(console_widget._control, (QtGui.QTextEdit, QtGui.QPlainTextEdit))
        super(CompletionHtml, self).__init__()
        self._text_edit = console_widget._control
        self._console_widget = console_widget
        self._text_edit.installEventFilter(self)
        self._sliding_interval = None
        self._justified_items = None
        # Ensure that the text edit keeps focus when widget is displayed.
        self.setFocusProxy(self._text_edit)

    def eventFilter(self, obj, event):
        """ Reimplemented to handle keyboard input and to auto-hide when the
        text edit loses focus.

        Implements a small state machine on ``_consecutive_tab``: the first
        Tab is left to the console (which shows the list), the second enters
        navigation mode, after which Tab/arrow keys move the selection and
        Return/Enter accepts it; any other key cancels the completion.
        """
        if obj == self._text_edit:
            etype = event.type()
            if etype == QtCore.QEvent.KeyPress:
                key = event.key()
                if self._consecutive_tab == 0 and key in (QtCore.Qt.Key_Tab,):
                    return False
                elif self._consecutive_tab == 1 and key in (QtCore.Qt.Key_Tab,):
                    # ok , called twice, we grab focus, and show the cursor
                    self._consecutive_tab = self._consecutive_tab+1
                    self._update_list()
                    return True
                elif self._consecutive_tab == 2:
                    if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
                        self._complete_current()
                        return True
                    if key in (QtCore.Qt.Key_Tab,):
                        self.select_right()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Down,):
                        self.select_down()
                        self._update_list()
                        return True
                    elif key in (QtCore.Qt.Key_Right,):
                        self.select_right()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Up,):
                        self.select_up()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Left,):
                        self.select_left()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Escape,):
                        self.cancel_completion()
                        return True
                    else :
                        self.cancel_completion()
                else:
                    self.cancel_completion()
            elif etype == QtCore.QEvent.FocusOut:
                self.cancel_completion()
        return super(CompletionHtml, self).eventFilter(obj, event)

    #--------------------------------------------------------------------------
    # 'CompletionHtml' interface
    #--------------------------------------------------------------------------
    def cancel_completion(self):
        """Cancel the completion

        should be called when the completer has to be dismissed

        This resets the internal state and clears the temporary buffer
        of the console where the completions are shown.
        """
        self._consecutive_tab = 0
        self._slice_start = 0
        self._console_widget._clear_temporary_buffer()
        self._index = (0, 0)
        if(self._sliding_interval):
            self._sliding_interval = None

    # NOTE(review): the diagram below appears to sketch the wrap-around
    # cases numbered 1-6 in _select_index - confirm before relying on it.
    #
    # ... 2 4 4 4 4 4 4 4 4 4 4 4 4
    # 2 2 4 4 4 4 4 4 4 4 4 4 4 4
    #
    #2 2 x x x x x x x x x x x 5 5
    #6 6 x x x x x x x x x x x 5 5
    #6 6 x x x x x x x x x x ? 5 5
    #6 6 x x x x x x x x x x ? 1 1
    #
    #3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 ...
    #3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 ...
    def _select_index(self, row, col):
        """Change the selection index, and make sure it stays in the right range

        A little more complicated than just doing modulo the number of rows
        and columns, to be sure to cycle through all elements.

        Horizontally, the elements are mapped like this:

            to r <-- a b c d e f --> to g
            to f <-- g h i j k l --> to m
            to l <-- m n o p q r --> to a

        and vertically

            a d g j m p
            b e h k n q
            c f i l o r
        """
        nr, nc = self._size
        nr = nr-1
        nc = nc-1
        # case 1
        if (row > nr and col >= nc) or (row >= nr and col > nc):
            self._select_index(0, 0)
        # case 2
        elif (row <= 0 and col < 0) or (row < 0 and col <= 0):
            self._select_index(nr, nc)
        # case 3
        elif row > nr :
            self._select_index(0, col+1)
        # case 4
        elif row < 0 :
            self._select_index(nr, col-1)
        # case 5
        elif col > nc :
            self._select_index(row+1, 0)
        # case 6
        elif col < 0 :
            self._select_index(row-1, nc)
        elif 0 <= row and row <= nr and 0 <= col and col <= nc :
            self._index = (row, col)
        else :
            raise NotImplementedError("you'r trying to go where no completion\
                  have gone before : %d:%d (%d:%d)"%(row, col, nr, nc) )

    @property
    def _slice_end(self):
        # End of the currently shown slice, or None when it would run past
        # the end of the item list.
        end = self._slice_start+self._slice_len
        if end > len(self._items) :
            return None
        return end

    def select_up(self):
        """move cursor up"""
        r, c = self._index
        self._select_index(r-1, c)

    def select_down(self):
        """move cursor down"""
        r, c = self._index
        self._select_index(r+1, c)

    def select_left(self):
        """move cursor left"""
        r, c = self._index
        self._select_index(r, c-1)

    def select_right(self):
        """move cursor right"""
        r, c = self._index
        self._select_index(r, c+1)

    def show_items(self, cursor, items):
        """ Shows the completion widget with 'items' at the position specified
        by 'cursor'.
        """
        if not items :
            return
        self._start_position = cursor.position()
        self._consecutive_tab = 1
        # Lay the flat item list out as a rows x columns matrix.
        items_m, ci = text.compute_item_matrix(items, empty=' ')
        self._sliding_interval = SlidingInterval(len(items_m)-1)
        self._items = items_m
        self._size = (ci['rows_numbers'], ci['columns_numbers'])
        self._old_cursor = cursor
        self._index = (0, 0)
        # Left-justify every cell to its column width so the rendered HTML
        # table lines up like a terminal listing.
        sjoin = lambda x : [ y.ljust(w, ' ') for y, w in zip(x, ci['columns_width'])]
        self._justified_items = list(map(sjoin, items_m))
        self._update_list(hilight=False)

    def _update_list(self, hilight=True):
        """ Update the list of completions shown in the console, highlighting
        the currently selected one unless *hilight* is False.
        """
        self._sliding_interval.current = self._index[0]
        head = None
        foot = None
        # '...' markers indicate rows scrolled out of the visible window.
        if self._sliding_interval.start > 0 :
            head = '...'
        if self._sliding_interval.stop < self._sliding_interval._max:
            foot = '...'
        items_m = self._justified_items[\
                       self._sliding_interval.start:\
                       self._sliding_interval.stop+1\
                       ]
        self._console_widget._clear_temporary_buffer()
        if(hilight):
            sel = (self._sliding_interval.nth, self._index[1])
        else :
            sel = None
        strng = html_tableify(items_m, select=sel, header=head, footer=foot)
        self._console_widget._fill_temporary_buffer(self._old_cursor, strng, html=True)

    #--------------------------------------------------------------------------
    # Protected interface
    #--------------------------------------------------------------------------
    def _complete_current(self):
        """ Perform the completion with the currently selected item.
        """
        i = self._index
        item = self._items[i[0]][i[1]]
        item = item.strip()
        if item :
            self._current_text_cursor().insertText(item)
        self.cancel_completion()

    def _current_text_cursor(self):
        """ Returns a cursor with text between the start position and the
        current position selected.
        """
        cursor = self._text_edit.textCursor()
        if cursor.position() >= self._start_position:
            cursor.setPosition(self._start_position,
                               QtGui.QTextCursor.KeepAnchor)
        return cursor
| {
"repo_name": "initNirvana/Easyphotos",
"path": "env/lib/python3.4/site-packages/IPython/qt/console/completion_html.py",
"copies": "12",
"size": "12740",
"license": "mit",
"hash": -2221621554306232600,
"line_mean": 33.3396226415,
"line_max": 110,
"alpha_frac": 0.4895604396,
"autogenerated": false,
"ratio": 4.206008583690987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012426529896679622,
"num_lines": 371
} |
"""A navigable completer for the qtconsole"""
# coding : utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import ipython_genutils.text as text
from qtconsole.qt import QtCore, QtGui
#--------------------------------------------------------------------------
# Return an HTML table with selected item in a special class
#--------------------------------------------------------------------------
def html_tableify(item_matrix, select=None, header=None , footer=None) :
    """Return an HTML table rendering of *item_matrix*.

    params:
        item_matrix: list of rows, each row a list of strings
        select: optional (row, col) of the cell to mark with the
            'inverted' CSS class
        header/footer: optional text repeated across every column of a
            header/footer row
    """
    if not item_matrix :
        return ''
    html_cols = []
    tds = lambda text : u'<td>'+text+u'&nbsp; </td>'
    trs = lambda text : u'<tr>'+text+u'</tr>'
    tds_items = [list(map(tds, row)) for row in item_matrix]
    if select :
        row, col = select
        tds_items[row][col] = u'<td class="inverted">'\
                +item_matrix[row][col]\
                +u'&nbsp; </td>'
        #select the right item
    html_cols = map(trs, (u''.join(row) for row in tds_items))
    head = ''
    foot = ''
    if header :
        head = (u'<tr>'\
            +''.join((u'<td>'+header+u'</td>')*len(item_matrix[0]))\
            +'</tr>')
    if footer :
        foot = (u'<tr>'\
            +''.join((u'<td>'+footer+u'</td>')*len(item_matrix[0]))\
            +'</tr>')
    # BUG FIX: the adjacent string literals previously concatenated to
    # ...white-space:pre"cellspacing=0>, i.e. the two attributes were not
    # whitespace-separated, producing malformed HTML. A space is required
    # between attributes.
    html = (u'<table class="completion" style="white-space:pre" '
            u'cellspacing=0>' +
            head + (u''.join(html_cols)) + foot + u'</table>')
    return html
class SlidingInterval(object):
    """A bounded interval that follows a cursor.

    Internally used to scroll the completion view when the cursor tries
    to go beyond the edges, and to show '...' when rows are hidden.
    """

    _min = 0
    _max = 1
    _current = 0

    def __init__(self, maximum=1, width=6, minimum=0, sticky_lenght=1):
        """Create a new bounded interval.

        Any value returned by this object is bounded between *maximum*
        and *minimum*; the visible window is *width* rows tall, and
        *sticky_lenght* controls how close the window must get to an
        edge before snapping onto it.
        """
        self._min = minimum
        self._max = maximum
        self._width = width
        self._start = 0
        self._stop = self._start + self._width + 1
        self._sticky_lenght = sticky_lenght

    @property
    def current(self):
        """Current cursor position."""
        return self._current

    @current.setter
    def current(self, value):
        """Move the cursor, sliding the window when an edge is crossed."""
        clamped = max(self._min, min(value, self._max))
        self._current = clamped
        if clamped > self._stop:
            self._stop = clamped
            self._start = clamped - self._width
        elif clamped < self._start:
            self._start = clamped
            self._stop = clamped + self._width
        # Snap the window onto the boundaries when close enough.
        if abs(self._start - self._min) <= self._sticky_lenght:
            self._start = self._min
        if abs(self._stop - self._max) <= self._sticky_lenght:
            self._stop = self._max

    @property
    def start(self):
        """First visible row of the window."""
        return self._start

    @property
    def stop(self):
        """Last visible row of the window."""
        return self._stop

    @property
    def width(self):
        """Height of the visible window."""
        return self._stop - self._start

    @property
    def nth(self):
        """Cursor position relative to the start of the window."""
        return self.current - self.start
class CompletionHtml(QtGui.QWidget):
    """ A widget for tab completion, navigable by arrow keys """
    #--------------------------------------------------------------------------
    # 'QObject' interface
    #--------------------------------------------------------------------------
    # Completion state shared across the event-filter callbacks:
    #   _items           - matrix (rows x cols) of completion strings
    #   _index           - (row, col) of the currently selected item
    #   _consecutive_tab - 0: inactive, 1: list shown, 2: navigating the list
    #   _size            - (number of rows, number of columns) of _items
    #   _old_cursor      - text cursor at which the completion list is drawn
    #   _start_position  - document position where the completion started
    _items = ()
    _index = (0, 0)
    _consecutive_tab = 0
    _size = (1, 1)
    _old_cursor = None
    _start_position = 0
    _slice_start = 0
    _slice_len = 4

    def __init__(self, console_widget):
        """ Create a completion widget that is attached to the specified Qt
        text edit widget (``console_widget._control`` must be a QTextEdit or
        QPlainTextEdit); this object installs itself as an event filter on it
        so it can intercept the navigation keys.
        """
        assert isinstance(console_widget._control, (QtGui.QTextEdit, QtGui.QPlainTextEdit))
        super(CompletionHtml, self).__init__()
        self._text_edit = console_widget._control
        self._console_widget = console_widget
        self._text_edit.installEventFilter(self)
        self._sliding_interval = None
        self._justified_items = None
        # Ensure that the text edit keeps focus when widget is displayed.
        self.setFocusProxy(self._text_edit)

    def eventFilter(self, obj, event):
        """ Reimplemented to handle keyboard input and to auto-hide when the
        text edit loses focus.

        Implements a small state machine on ``_consecutive_tab``: the first
        Tab is left to the console (which shows the list), the second enters
        navigation mode, after which Tab/arrow keys move the selection and
        Return/Enter accepts it; any other key cancels the completion.
        """
        if obj == self._text_edit:
            etype = event.type()
            if etype == QtCore.QEvent.KeyPress:
                key = event.key()
                if self._consecutive_tab == 0 and key in (QtCore.Qt.Key_Tab,):
                    return False
                elif self._consecutive_tab == 1 and key in (QtCore.Qt.Key_Tab,):
                    # ok , called twice, we grab focus, and show the cursor
                    self._consecutive_tab = self._consecutive_tab+1
                    self._update_list()
                    return True
                elif self._consecutive_tab == 2:
                    if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
                        self._complete_current()
                        return True
                    if key in (QtCore.Qt.Key_Tab,):
                        self.select_right()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Down,):
                        self.select_down()
                        self._update_list()
                        return True
                    elif key in (QtCore.Qt.Key_Right,):
                        self.select_right()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Up,):
                        self.select_up()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Left,):
                        self.select_left()
                        self._update_list()
                        return True
                    elif key in ( QtCore.Qt.Key_Escape,):
                        self.cancel_completion()
                        return True
                    else :
                        self.cancel_completion()
                else:
                    self.cancel_completion()
            elif etype == QtCore.QEvent.FocusOut:
                self.cancel_completion()
        return super(CompletionHtml, self).eventFilter(obj, event)

    #--------------------------------------------------------------------------
    # 'CompletionHtml' interface
    #--------------------------------------------------------------------------
    def cancel_completion(self):
        """Cancel the completion

        should be called when the completer has to be dismissed

        This resets the internal state and clears the temporary buffer
        of the console where the completions are shown.
        """
        self._consecutive_tab = 0
        self._slice_start = 0
        self._console_widget._clear_temporary_buffer()
        self._index = (0, 0)
        if(self._sliding_interval):
            self._sliding_interval = None

    # NOTE(review): the diagram below appears to sketch the wrap-around
    # cases numbered 1-6 in _select_index - confirm before relying on it.
    #
    # ... 2 4 4 4 4 4 4 4 4 4 4 4 4
    # 2 2 4 4 4 4 4 4 4 4 4 4 4 4
    #
    #2 2 x x x x x x x x x x x 5 5
    #6 6 x x x x x x x x x x x 5 5
    #6 6 x x x x x x x x x x ? 5 5
    #6 6 x x x x x x x x x x ? 1 1
    #
    #3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 ...
    #3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 ...
    def _select_index(self, row, col):
        """Change the selection index, and make sure it stays in the right range

        A little more complicated than just doing modulo the number of rows
        and columns, to be sure to cycle through all elements.

        Horizontally, the elements are mapped like this:

            to r <-- a b c d e f --> to g
            to f <-- g h i j k l --> to m
            to l <-- m n o p q r --> to a

        and vertically

            a d g j m p
            b e h k n q
            c f i l o r
        """
        nr, nc = self._size
        nr = nr-1
        nc = nc-1
        # case 1
        if (row > nr and col >= nc) or (row >= nr and col > nc):
            self._select_index(0, 0)
        # case 2
        elif (row <= 0 and col < 0) or (row < 0 and col <= 0):
            self._select_index(nr, nc)
        # case 3
        elif row > nr :
            self._select_index(0, col+1)
        # case 4
        elif row < 0 :
            self._select_index(nr, col-1)
        # case 5
        elif col > nc :
            self._select_index(row+1, 0)
        # case 6
        elif col < 0 :
            self._select_index(row-1, nc)
        elif 0 <= row and row <= nr and 0 <= col and col <= nc :
            self._index = (row, col)
        else :
            raise NotImplementedError("you'r trying to go where no completion\
                  have gone before : %d:%d (%d:%d)"%(row, col, nr, nc) )

    @property
    def _slice_end(self):
        # End of the currently shown slice, or None when it would run past
        # the end of the item list.
        end = self._slice_start+self._slice_len
        if end > len(self._items) :
            return None
        return end

    def select_up(self):
        """move cursor up"""
        r, c = self._index
        self._select_index(r-1, c)

    def select_down(self):
        """move cursor down"""
        r, c = self._index
        self._select_index(r+1, c)

    def select_left(self):
        """move cursor left"""
        r, c = self._index
        self._select_index(r, c-1)

    def select_right(self):
        """move cursor right"""
        r, c = self._index
        self._select_index(r, c+1)

    def show_items(self, cursor, items):
        """ Shows the completion widget with 'items' at the position specified
        by 'cursor'.
        """
        if not items :
            return
        self._start_position = cursor.position()
        self._consecutive_tab = 1
        # Calculate the number of characters available.
        width = self._text_edit.document().textWidth()
        char_width = QtGui.QFontMetrics(self._console_widget.font).width(' ')
        displaywidth = int(max(10, (width / char_width) - 1))
        # Lay the flat item list out as a rows x columns matrix that fits
        # within the computed display width.
        items_m, ci = text.compute_item_matrix(items, empty=' ',
                                               displaywidth=displaywidth)
        self._sliding_interval = SlidingInterval(len(items_m)-1)
        self._items = items_m
        self._size = (ci['rows_numbers'], ci['columns_numbers'])
        self._old_cursor = cursor
        self._index = (0, 0)
        # Left-justify every cell to its column width so the rendered HTML
        # table lines up like a terminal listing.
        sjoin = lambda x : [ y.ljust(w, ' ') for y, w in zip(x, ci['columns_width'])]
        self._justified_items = list(map(sjoin, items_m))
        self._update_list(hilight=False)

    def _update_list(self, hilight=True):
        """ Update the list of completions shown in the console, highlighting
        the currently selected one unless *hilight* is False.
        """
        self._sliding_interval.current = self._index[0]
        head = None
        foot = None
        # '...' markers indicate rows scrolled out of the visible window.
        if self._sliding_interval.start > 0 :
            head = '...'
        if self._sliding_interval.stop < self._sliding_interval._max:
            foot = '...'
        items_m = self._justified_items[\
                       self._sliding_interval.start:\
                       self._sliding_interval.stop+1\
                       ]
        self._console_widget._clear_temporary_buffer()
        if(hilight):
            sel = (self._sliding_interval.nth, self._index[1])
        else :
            sel = None
        strng = html_tableify(items_m, select=sel, header=head, footer=foot)
        self._console_widget._fill_temporary_buffer(self._old_cursor, strng, html=True)

    #--------------------------------------------------------------------------
    # Protected interface
    #--------------------------------------------------------------------------
    def _complete_current(self):
        """ Perform the completion with the currently selected item.
        """
        i = self._index
        item = self._items[i[0]][i[1]]
        item = item.strip()
        if item :
            self._current_text_cursor().insertText(item)
        self.cancel_completion()

    def _current_text_cursor(self):
        """ Returns a cursor with text between the start position and the
        current position selected.
        """
        cursor = self._text_edit.textCursor()
        if cursor.position() >= self._start_position:
            cursor.setPosition(self._start_position,
                               QtGui.QTextCursor.KeepAnchor)
        return cursor
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lib/site-packages/qtconsole/completion_html.py",
"copies": "10",
"size": "12836",
"license": "mit",
"hash": 8379864692038278000,
"line_mean": 33.4128686327,
"line_max": 91,
"alpha_frac": 0.4942349642,
"autogenerated": false,
"ratio": 4.168886001948684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9663120966148684,
"avg_score": null,
"num_lines": null
} |
"""Anchore Jenkins plugin security warnings collector."""
from base_collectors import SourceCollector
from collector_utilities.functions import md5_hash
from collector_utilities.type import URL
from source_model import Entities, Entity, SourceResponses
class AnchoreJenkinsPluginSecurityWarnings(SourceCollector):
    """Anchore Jenkins plugin security warnings collector."""

    # Indices of the columns in each row of the report's "data" array.
    TAG, CVE, SEVERITY, PACKAGE, FIX, CVE_URL = range(6)

    async def _landing_url(self, responses: SourceResponses) -> URL:
        """Override to return the URL of the Anchore results page."""
        base_url = await self._api_url()
        return URL(base_url + "/lastSuccessfulBuild/anchore-results")

    async def _get_source_responses(self, *urls: URL, **kwargs) -> SourceResponses:
        """Extend to fetch the Anchore security report JSON.

        The plugin stores the warnings in a folder named after the job and
        the build number, so the last successful build is looked up first.
        """
        api_url = await self._api_url()
        job_responses = await super()._get_source_responses(URL(f"{api_url}/api/json"), **kwargs)
        job_json = await job_responses[0].json()
        build_number = job_json["lastSuccessfulBuild"]["number"]
        job_name = job_json["name"]
        report_url = URL(
            f"{api_url}/{build_number}/artifact/AnchoreReport.{job_name}_{build_number}/anchore_security.json"
        )
        return await super()._get_source_responses(report_url, **kwargs)

    async def _parse_entities(self, responses: SourceResponses) -> Entities:
        """Override to turn the report rows into entities, keeping only the
        severities selected by the user."""
        wanted_severities = self._parameter("severities")
        entities = Entities()
        for response in responses:
            report = await response.json(content_type=None)
            for row in report.get("data", []):
                if row[self.SEVERITY] in wanted_severities:
                    entities.append(self._create_entity(row))
        return entities

    def _create_entity(self, vulnerability: list[str]) -> Entity:
        """Map one report row onto an entity."""
        tag = vulnerability[self.TAG]
        cve = vulnerability[self.CVE]
        package = vulnerability[self.PACKAGE]
        return Entity(
            key=md5_hash(f"{tag}:{cve}:{package}"),
            tag=tag,
            cve=cve,
            package=package,
            severity=vulnerability[self.SEVERITY],
            fix=vulnerability[self.FIX],
            url=vulnerability[self.CVE_URL],
        )
| {
"repo_name": "ICTU/quality-time",
"path": "components/collector/src/source_collectors/anchore_jenkins_plugin/security_warnings.py",
"copies": "1",
"size": "2666",
"license": "apache-2.0",
"hash": 4955147504026345000,
"line_mean": 47.4727272727,
"line_max": 120,
"alpha_frac": 0.6541635409,
"autogenerated": false,
"ratio": 4.172143974960877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5326307515860876,
"avg_score": null,
"num_lines": null
} |
"""Anchore security warnings collector."""
from base_collectors import JSONFileSourceCollector
from collector_utilities.functions import md5_hash
from source_model import Entities, Entity, SourceResponses
class AnchoreSecurityWarnings(JSONFileSourceCollector):
    """Anchore collector for security warnings."""
    async def _parse_entities(self, responses: SourceResponses) -> Entities:
        """Override to parse the Anchore security warnings.

        Returns one entity per vulnerability whose severity is in the
        configured severities parameter.
        """
        severities = self._parameter("severities")
        entities = Entities()
        for response in responses:
            json = await response.json(content_type=None)
            # Be lenient: if the report is not a JSON object, treat it as having no vulnerabilities.
            vulnerabilities = json.get("vulnerabilities", []) if isinstance(json, dict) else []
            filename = response.filename if hasattr(response, "filename") else ""  # Zipped responses have a filename
            entities.extend(
                [
                    self._create_entity(vulnerability, filename)
                    for vulnerability in vulnerabilities
                    if vulnerability["severity"] in severities
                ]
            )
        return entities
    @staticmethod
    def _create_entity(vulnerability: dict[str, str], filename: str) -> Entity:
        """Create an entity from the vulnerability."""
        return Entity(
            # Include the filename in the hash so that it is unique even when multiple images contain the
            # same package with the same vulnerability. Don't add a colon so existing hashes stay the same
            # if the source is not a zipped report (filename is an empty string in that case).
            key=md5_hash(f'{filename}{vulnerability["vuln"]}:{vulnerability["package"]}'),
            cve=vulnerability["vuln"],
            filename=filename,
            package=vulnerability["package"],
            severity=vulnerability["severity"],
            fix=vulnerability["fix"],
            url=vulnerability["url"],
        )
| {
"repo_name": "ICTU/quality-time",
"path": "components/collector/src/source_collectors/anchore/security_warnings.py",
"copies": "1",
"size": "1958",
"license": "apache-2.0",
"hash": 5193818112186416,
"line_mean": 45.619047619,
"line_max": 117,
"alpha_frac": 0.6384065373,
"autogenerated": false,
"ratio": 4.787286063569682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014091982469764783,
"num_lines": 42
} |
"""Anchore sources."""
from typing import cast
from ..meta.entity import Color
from ..meta.source import Source
from ..parameters import access_parameters, Severities, URL
from .jenkins import jenkins_access_parameters
# Metrics supported by both the Anchore and the Anchore Jenkins plugin sources.
ALL_ANCHORE_METRICS = ["security_warnings", "source_up_to_dateness"]
# Severity values as reported by Anchore, in increasing order of severity.
SEVERITIES = Severities(values=["Unknown", "Negligible", "Low", "Medium", "High", "Critical"])
# Entity attributes shared by both Anchore security warning entity definitions below.
COMMON_ENTITY_ATTRIBUTES = [
    dict(name="CVE", url="url"),
    dict(name="Package"),
    dict(name="Fix"),
    dict(
        name="Severity",
        color=dict(Critical=Color.NEGATIVE, High=Color.NEGATIVE, Medium=Color.WARNING, Low=Color.WARNING),
    ),
]
# Source definition for stand-alone Anchore vulnerability reports (JSON, possibly zipped).
ANCHORE = Source(
    name="Anchore",
    description="Anchore image scan analysis report in JSON format.",
    url="https://docs.anchore.com/current/docs/using/integration/ci_cd/inline_scanning/",
    parameters=dict(
        details_url=URL(
            name="URL to an Anchore details report in JSON format or "
            "to a zip with Anchore details reports in JSON format",
            metrics=["source_up_to_dateness"],
        ),
        severities=SEVERITIES,
        **access_parameters(
            ALL_ANCHORE_METRICS,
            source_type="an Anchore vulnerability report",
            source_type_format="JSON",
            kwargs=dict(url=dict(metrics=["security_warnings"])),
        ),
    ),
    entities=dict(
        security_warnings=dict(
            name="security warning",
            # The report filename attribute is specific to this source; the rest is shared.
            attributes=[cast(object, dict(name="Report filename", key="filename"))] + COMMON_ENTITY_ATTRIBUTES,
        )
    ),
)
# Source definition for Anchore reports produced by the Anchore Jenkins plugin.
ANCHORE_JENKINS_PLUGIN = Source(
    name="Anchore Jenkins plugin",
    description="A Jenkins job with an Anchore report produced by the Anchore Jenkins plugin.",
    url="https://plugins.jenkins.io/anchore-container-scanner/",
    parameters=dict(
        severities=SEVERITIES,
        **jenkins_access_parameters(
            ALL_ANCHORE_METRICS,
            kwargs=dict(
                url=dict(
                    # Fixed: the second example URL was missing its opening quote.
                    help="URL to a Jenkins job with an Anchore report generated by the Anchore plugin. For example, "
                    "'https://jenkins.example.org/job/anchore' or 'https://jenkins.example.org/job/anchore/job/master' "
                    "in case of a pipeline job."
                )
            ),
        ),
    ),
    entities=dict(
        security_warnings=dict(
            name="security warning", attributes=[cast(object, dict(name="Tag"))] + COMMON_ENTITY_ATTRIBUTES
        )
    ),
)
| {
"repo_name": "ICTU/quality-time",
"path": "components/server/src/data_model/sources/anchore.py",
"copies": "1",
"size": "2506",
"license": "apache-2.0",
"hash": -7854353257797641000,
"line_mean": 32.8648648649,
"line_max": 119,
"alpha_frac": 0.6177174781,
"autogenerated": false,
"ratio": 3.8852713178294573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002988795929457,
"avg_score": null,
"num_lines": null
} |
# anchorGenerator
from models.anchor import *
# main function
# main function
if __name__=='__main__':
    # TEMP: Wipe existing anchors
    # anchors = Anchor.all(size=1000)
    # Anchor.delete_all(anchors)
    # THIS IS TEMPORARY:
    # Hard-coded seed terms around the vaccination topic; each becomes an Anchor.
    anchors = {'Vaccination', 'Vaccinations', 'Vaccine', 'Vaccines', 'Inoculation', 'Immunization', 'Shot', 'Chickenpox', 'Disease', 'Diseases', 'Hepatitis A', 'Hepatitis B', 'infection', 'infections', 'measles', 'outbreak', 'mumps', 'rabies', 'tetanus', 'virus', 'autism'}
    # NOTE(review): 'seed' is unused; presumably intended for the disabled query below — confirm.
    seed = 'vaccination'
    for anchor in anchors:
        # Look up or create each anchor, find its instances, and persist it.
        a = Anchor.getOrCreate(anchor)
        a.findInstances()
        a.save()
    # The string literal below is disabled legacy code that generated anchors from an
    # Elasticsearch term aggregation; note it contains syntax errors and is not runnable as-is.
    """
    query = {
        "size": 0,
        "query": {
            "filtered": {
                "query": {
                    "query_string": {
                        "query": "*",
                        "analyze_wildcard": True
                    }
                }
            }
        },
        "aggs": {
            "2": {
                "terms": {
                    "field": "title",
                    "size": 100,
                    "order": {
                        "_count": "desc"
                    }
                }
            }
        }
    }
    response = es.search(index="crowdynews"', 'body=query)
    retrieved = now()
    anchors = {}
    # go through each retrieved document
    for hit in response['aggregations']['2']['buckets']:
        key = hit['key']
        if validKey(key):
            anchors[key] = hit['doc_count']
    addBulk(anchors)
    """
"repo_name": "ControCurator/controcurator",
"path": "python_code/anchorGenerator.py",
"copies": "1",
"size": "1229",
"license": "mit",
"hash": 963672568256329200,
"line_mean": 18.5238095238,
"line_max": 270,
"alpha_frac": 0.5573637103,
"autogenerated": false,
"ratio": 2.926190476190476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3983554186490476,
"avg_score": null,
"num_lines": null
} |
"""Anchor management.
"""
__all__ = ['Anchor']
from typing import Optional
import numpy as np
from miles import CollectiveVariables
class Anchor:
    """Anchor point.

    An anchor is a point in the space of collective variables that
    serves as a seed for a Voronoi tessellation.

    Attributes
    ----------
    collective_variables : CollectiveVariables
        Space of collective variables where the anchor belongs.
    coordinates : np.array
        Coordinates of the anchor in the space of collective
        variables.
    index : int
        Unique index identifying the anchor.

    Notes
    -----
    Equality and ordering are defined by ``index`` alone, so the hash is
    also derived from ``index`` only: equal objects must hash equally.
    """
    def __init__(self, collective_variables: 'CollectiveVariables',
                 coordinates: np.array, index: Optional[int] = None) \
            -> None:
        self.collective_variables = collective_variables
        self.coordinates = coordinates
        self.index = index
    def __repr__(self) -> str:
        return ('{}({!r}, {!r}, index={!r})'
                .format(self.__class__.__name__,
                        self.collective_variables,
                        self.coordinates,
                        self.index))
    def __eq__(self, other) -> bool:
        # Anchors are identified by index; coordinates are deliberately not compared.
        if isinstance(other, self.__class__):
            return self.index == other.index
        else:
            return False
    def __lt__(self, other) -> bool:
        return self.index < other.index
    def __hash__(self) -> int:
        # Must be consistent with __eq__: two anchors with the same index but
        # different coordinates compare equal, so the coordinates may not
        # contribute to the hash (previously they did, breaking set/dict use).
        return hash(self.index)
    def distance(self, coordinates: np.array) -> float:
        """Distance from point to anchor.

        Parameters
        ----------
        coordinates : np.array
            Coordinates of a point in the space of collective
            variables.

        Returns
        -------
        d : float
            Distance from point to anchor.
        """
        return self.collective_variables.distance(coordinates,
                                                  self.coordinates)
| {
"repo_name": "clsb/miles",
"path": "miles/anchor.py",
"copies": "1",
"size": "1977",
"license": "mit",
"hash": 4804176390494019000,
"line_mean": 25.7162162162,
"line_max": 70,
"alpha_frac": 0.5569044006,
"autogenerated": false,
"ratio": 4.741007194244604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5797911594844605,
"avg_score": null,
"num_lines": null
} |
""" Anchor Widget, use this to create the equivalent of the <a></a> tag.
Copyright(C) 2010, Martin Hellwig
Copyright(C) 2010, Luke Leighton <lkcl@lkcl.net>
License: Apache Software Foundation v2
Here is an example for using it with an image:
---------------------------------------------------------
if __name__ == '__main__':
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.Image import Image
root = RootPanel()
image = Image('http://pyj.be/img/pyjamas.128x128.png')
anchor = Anchor(Widget=image, Href='http://pyj.be')
root.add(anchor)
---------------------------------------------------------
"""
from pyjamas import DOM
from pyjamas.ui.Widget import Widget
from pyjamas.ui.ClickListener import ClickHandler
class _Attribute(object):
"Attribute definition class with method set and remove"
def __init__(self, element, attribute,
attribute_type=None, type_restriction=None):
self.element = element
self.attribute = attribute
self._type = attribute_type
self._restriction = type_restriction
def get(self):
"Get the value"
return DOM.getAttribute(self.element, self.attribute)
def set(self, value):
"Set the value"
DOM.setAttribute(self.element, self.attribute, value)
def remove(self):
"Remove the attribute from the element"
DOM.removeAttribute(self.element, self.attribute)
class _Attributes(object):
    """Container holding one _Attribute wrapper per supported anchor attribute."""
    def __init__(self, element):
        make = _Attribute
        self.name = make(element, 'name', 'cdata', 'cs')
        self.href = make(element, 'href', 'uri', 'ct')
        self.hreflang = make(element, 'hreflang', 'langcode', 'ci')
        self.type = make(element, 'type', 'content-type', 'ci')
        self.rel = make(element, 'rel', 'link-types', 'ci')
        self.rev = make(element, 'rev', 'link-types', 'ci')
        self.charset = make(element, 'charset', 'charset', 'ci')
        self.target = make(element, 'target', 'target', 'ci')
class Anchor(Widget, ClickHandler, _Attributes):
    """Anchor widget, use this to create the equivalent of the <a></a> tag.

    The attributes name, href, hreflang, type, rel, rev, charset are in the
    namespace of the Anchor instance; each has 'get', 'set' and 'remove'
    methods. For example:
        anchor = Anchor()
        anchor.href.set('http://www.dcuktec.com')
    """
    def __init__(self, **kwargs):
        # Reuse an existing DOM element if given via the 'Element' kwarg,
        # otherwise create a fresh <a> element.
        element = kwargs.pop('Element', None) or DOM.createAnchor()
        kwargs['StyleName'] = kwargs.pop('StyleName', 'gwt-Anchor')
        # Order matters: attribute wrappers and the element are set up before
        # Widget.__init__ consumes the remaining kwargs.
        _Attributes.__init__(self, element)
        self.setElement(element)
        self.widget = None
        Widget.__init__(self, **kwargs)
        ClickHandler.__init__(self)
    def setWidget(self, widget):
        """ Add child widget
        """
        widget.removeFromParent()
        widget.setParent(self)
        self.widget = widget
        DOM.appendChild(self.getElement(), widget.getElement())
    def removeWidget(self):
        """ remove child widget
        """
        self.widget.removeFromParent()
        DOM.removeChild(self.getElement(), self.widget.getElement())
        self.widget = None
    def setHref(self, url):
        """Convenience wrapper around self.href.set(url)."""
        self.href.set(url)
| {
"repo_name": "minghuascode/pyj",
"path": "library/pyjamas/ui/Anchor.py",
"copies": "1",
"size": "3373",
"license": "apache-2.0",
"hash": 904746525033527000,
"line_mean": 35.6630434783,
"line_max": 78,
"alpha_frac": 0.606581678,
"autogenerated": false,
"ratio": 3.935822637106184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5042404315106184,
"avg_score": null,
"num_lines": null
} |
# Ancient Nordic Elvish translation
import numpy
import re
# Puzzle input: one lighting instruction per line.
instructions = [line.rstrip("\n") for line in open("day6.txt", "r")]
theSize = 1000  # the grid is 1000x1000 lights
# Per-light brightness counters, all starting at zero.
lights = numpy.zeros((theSize, theSize))
def get_cells(start, end):
    """Return every (x, y) coordinate in the inclusive rectangle spanned by
    the corner strings start and end, each of format "x,y" (whitespace is
    ignored)."""
    x0, y0 = (int(n) for n in re.sub(r'\s', '', start).split(','))
    x1, y1 = (int(n) for n in re.sub(r'\s', '', end).split(','))
    cells = []
    for x in range(x0, x1 + 1):
        for y in range(y0, y1 + 1):
            cells.append((x, y))
    return cells
# Apply each instruction to the brightness grid:
# "turn on" adds 1, "turn off" subtracts 1 (floored at 0), "toggle" adds 2.
for instruction in instructions:
    inst = instruction.split()
    command = inst[0]
    if command == "turn":
        onoff = inst[1]
        # inst[2] and inst[4] are the corner coordinates; inst[3] is "through".
        cells = get_cells(inst[2], inst[4])
        if onoff == "on":
            for (x, y) in cells:
                lights[x, y] += 1
        elif onoff == "off":
            for (x, y) in cells:
                lights[x, y] -= 1
                if lights[x, y] < 0:
                    lights[x, y] = 0
    elif command == "toggle":
        cells = get_cells(inst[1], inst[3])
        for (x, y) in cells:
            lights[x, y] += 2.0
# Total brightness of all lights (Python 2 print statement).
print "Nonzero:", lights.sum()
| {
"repo_name": "ksallberg/adventofcode",
"path": "2015/src/day6_2.py",
"copies": "1",
"size": "1079",
"license": "bsd-2-clause",
"hash": -6582546130277390000,
"line_mean": 28.9722222222,
"line_max": 68,
"alpha_frac": 0.5013901761,
"autogenerated": false,
"ratio": 3.0916905444126073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40930807205126074,
"avg_score": null,
"num_lines": null
} |
## ancillary functions for composite.py
## last updated: 09.14.2017 vitti@broadinstitute.org
import sys
import os
import math
import numpy as np
import gzip
####################
## PREPARE INPUT ###
####################
def write_perpop_ihh_from_xp(infilename, outfilename, popNum = 1):
    """Extract per-population frequency/iHH columns from an XP-EHH score file.

    XP-EHH files are inconsistent about which columns hold which population's
    values, so a first pass recovers the column order by recomputing the
    XP-EHH score (log of an iHH ratio) from candidate columns and comparing
    it to the reported score. A second pass writes one tab-separated line
    per variant: <pos> <gpos> <freq> <ihh> for the requested population.

    Parameters
    ----------
    infilename : str
        XP-EHH input file; gzip is assumed when the name contains ".gz".
        The first line is a header.
    outfilename : str
        Path of the output file to write.
    popNum : int
        Which population's columns to write (1 or 2).

    Returns
    -------
    str
        outfilename, for convenience.

    Raises
    ------
    ValueError
        If popNum is not 1 or 2, or the column order could not be
        determined from any input line.
    """
    if popNum not in (1, 2):
        # Previously an unknown popNum crashed with an UnboundLocalError on writeline.
        raise ValueError("popNum must be 1 or 2, got %r" % (popNum,))

    def _open(filename):
        # Gzip is detected from the file name, matching the original convention.
        return gzip.open(filename, 'rt') if ".gz" in filename else open(filename, 'r')

    #### First pass - determine which columns hold which population's values.
    p1_ind = ihh1_ind = p2_ind = ihh2_ind = None
    with _open(infilename) as infile:
        infile.readline()  # header
        for line in infile:
            entries = line.split()
            xpehh = float(entries[7])
            entry1, entry2, entry3, entry4 = [float(item) for item in entries[3:7]]
            if entry3 == 0 or entry4 == 0:
                continue  # can't take the log of a zero ratio; try the next line
            testXp = np.log(float(entry2) / float(entry4))
            testXp2 = np.log(float(entry1) / float(entry3))
            if math.fabs(testXp - xpehh) < .001:
                p1_ind, ihh1_ind, p2_ind, ihh2_ind = 3, 4, 5, 6
            elif math.fabs(testXp2 - xpehh) < .001:
                p1_ind, ihh1_ind, p2_ind, ihh2_ind = 4, 3, 6, 5
            else:
                print(line)
                print(str(testXp))
                print(str(testXp2))
                print('check indices ' + infilename)
                break
    if p1_ind is None:
        # Previously this fell through to an UnboundLocalError in the second pass.
        raise ValueError("could not determine column order in " + infilename)

    #### Second pass - write the requested population's frequency and iHH.
    with _open(infilename) as infile, open(outfilename, 'w') as outfile:
        infile.readline()  # header
        for line in infile:
            entries = line.split()
            pos, gpos = entries[1], entries[2]
            if popNum == 1:
                writeline = pos + "\t" + gpos + "\t" + entries[p1_ind] + "\t" + entries[ihh1_ind] + "\n"
            else:
                writeline = pos + "\t" + gpos + "\t" + entries[p2_ind] + "\t" + entries[ihh2_ind] + "\n"
            outfile.write(writeline)
    print('wrote to: ' + outfilename)
    return outfilename
def write_pair_sourcefile(writefilename, ihsfilename, delihhfilename, nslfilename, xpehhfilename, freqsfilename):
    """Write a composite-score 'pair' source file listing the per-score inputs.

    The file contains one filename per line, in the fixed order expected by
    the composite scoring step: iHS, delIHH, nSL, XP-EHH, freqs.

    Returns writefilename for convenience.
    """
    # Note: an earlier version skipped writing when the file already existed
    # (dead "if True:" guard); the file is now always (re)written.
    filenames = (ihsfilename, delihhfilename, nslfilename, xpehhfilename, freqsfilename)
    with open(writefilename, 'w') as openfile:
        openfile.writelines(name + "\n" for name in filenames)
    return writefilename
def write_run_paramfile(writefilename, ihs_master_likesfile, nsl_master_likesfile, delihh_master_likesfile, xpehh_master_likesfile,
    fst_master_likesfile, deldaf_master_likesfile, cutoffline, includeline):
    """Write a composite-score run parameter file, one entry per line.

    Order (as expected by the composite scoring step): the iHS, nSL, delIHH,
    XP-EHH, Fst and delDAF master likes files, then the cutoff line and the
    include line. Returns writefilename for convenience.
    """
    # Note: an earlier version skipped writing when the file already existed
    # (dead "if True:" guard); the file is now always (re)written.
    entries = (ihs_master_likesfile, nsl_master_likesfile, delihh_master_likesfile,
               xpehh_master_likesfile, fst_master_likesfile, deldaf_master_likesfile,
               cutoffline, includeline)
    with open(writefilename, 'w') as openfile:
        openfile.writelines(entry + "\n" for entry in entries)
    return writefilename
def normalize(rawscore, mean, sd):
    """Return the z-score of rawscore given a distribution mean and standard deviation."""
    return (float(rawscore) - float(mean)) / float(sd)
| {
"repo_name": "broadinstitute/cms",
"path": "cms/combine/input_func.py",
"copies": "1",
"size": "3201",
"license": "bsd-2-clause",
"hash": 2919303304517595000,
"line_mean": 31.6632653061,
"line_max": 131,
"alpha_frac": 0.6776007498,
"autogenerated": false,
"ratio": 2.7150127226463106,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.389261347244631,
"avg_score": null,
"num_lines": null
} |
import argparse
import time
import os
import logging
from sys import platform
from threading import Timer
class TimerPy:
    """A named timer that prints and logs its start and end times."""
    def __init__(self, time, task_name):
        # Parameter is named 'time' (shadows the module only inside __init__);
        # it is the timer duration in seconds.
        self.duration = time
        self.task_name = task_name
    def start(self):
        """Announce the timer start on stdout and in the log."""
        stamp = time.strftime("%H:%M:%S", time.localtime())
        print("Starting Timer for %s at "%(self.task_name), stamp)
        logging.info("Starting Timer %s at "%(self.task_name) + stamp)
    def finish(self):
        """Notify the user that the timer expired and record the end time."""
        notify("Timer", "Timer for %s is up" %(self.task_name))
        stamp = time.strftime("%H:%M:%S", time.localtime())
        print("Ending Time: ", stamp)
        logging.info("Ending Time for %s: "%(self.task_name)+ stamp)
def notify(title, text):
    """Show a desktop notification on macOS, or print the text on Linux."""
    if platform == 'darwin':
        command = """
        osascript -e 'display notification "{}" with title "{}"'
        """.format(text, title)
        os.system(command)
    elif platform == 'linux':
        print("%s" %(text))
if __name__ == "__main__":
    # Log all timer events to timer.log with timestamps.
    logging.basicConfig(filename="timer.log",level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    parser= argparse.ArgumentParser()
    parser.add_argument("-s", help="set timer", action="store_true",dest="start",required=True)
    # The duration is given as a subcommand: "hour N", "minute N" or "second N".
    subparsers = parser.add_subparsers(title="time",help="time for the timer", dest="type")
    hour_parser=subparsers.add_parser("hour")
    hour_parser.add_argument(dest="hour", type=int)
    minute_parser = subparsers.add_parser("minute")
    minute_parser.add_argument( dest="minute", type=int)
    second_parser = subparsers.add_parser("second")
    second_parser.add_argument(dest="seconds", type=int)
    parser.add_argument("--name", dest="task_name", type=str)
    args = parser.parse_args()
    # NOTE(review): added after parse_args(), so "-p" is never parsed — confirm intent.
    parser.add_argument("-p", help="pause the timer")
    duration=0
    logging.debug(args)
    if args.start:
        logging.debug(args.start)
        # Convert the chosen subcommand's value to seconds.
        if args.type == "minute":
            duration=args.minute * 60
        if args.type == "hour":
            duration=args.hour * 60 * 60
        if args.type == "second":
            duration=args.seconds
        logging.info("Starting the timer")
        timer = TimerPy(duration, args.task_name)
        timer.start()
        print(duration)
        # threading.Timer fires timer.finish after 'duration' seconds.
        t=Timer(duration, timer.finish)
        t.start()
| {
"repo_name": "abhixec/timer",
"path": "timerpy.py",
"copies": "1",
"size": "2373",
"license": "apache-2.0",
"hash": 8553267503666584000,
"line_mean": 36.078125,
"line_max": 130,
"alpha_frac": 0.615676359,
"autogenerated": false,
"ratio": 3.6229007633587784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4738577122358778,
"avg_score": null,
"num_lines": null
} |
"""ANCP Client
Copyright (C) 2017-2021, Christian Giese (GIC-de)
SPDX-License-Identifier: MIT
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from ancp.subscriber import Subscriber
from datetime import datetime
from threading import Thread, Event, Lock
import struct
import socket
import logging
import collections
log = logging.getLogger(__name__)
VERSION_RFC = 50
class MessageType(object):
    """ANCP message type codes used in the message header."""
    ADJACENCY = 10
    PORT_MANAGEMENT = 32
    PORT_UP = 80
    PORT_DOWN = 81
    ADJACENCY_UPDATE = 85
class AdjacencyState(object):
    """States of the ANCP adjacency state machine."""
    IDLE = 1
    SYNSENT = 2
    SYNRCVD = 3
    ESTAB = 4
class MessageCode(object):
    """Adjacency message codes (carried in the low 7 bits of the code byte)."""
    SYN = 1
    SYNACK = 2
    ACK = 3
    RSTACK = 4
class TechTypes(object):
    """Access technology types carried in port up/down messages."""
    ANY = 0
    PON = 1
    DSL = 5
class ResultFields(object):
    """Result field values (packed into the top 4 bits of the result/code word)."""
    Ignore = 0x00
    Nack = 0x01
    AckAll = 0x02
    Success = 0x03
    Failure = 0x04
class ResultCodes(object):
    """Result code values (packed into the low 12 bits of the result/code word)."""
    NoResult = 0x000
class Capabilities(object):
    """Capability identifiers advertised in adjacency messages."""
    TOPO = 1
    OAM = 4
# HELPER FUNCTIONS AND CALSSES ------------------------------------------------
def tomac(v):
    """Format a 6-tuple of byte values as a colon-separated MAC address string.

    :param v: MAC address as a tuple of six ints
    :type v: tuple
    :return: MAC address, e.g. "01:02:03:04:05:06"
    :rtype: str
    """
    mac_format = "%02x:%02x:%02x:%02x:%02x:%02x"
    return mac_format % v
# ANCP CLIENT -----------------------------------------------------------------
class Client(object):
    """ANCP Client

    Implements the Access Node (AN) side of the ANCP adjacency protocol:
    ``connect()`` establishes the adjacency (keep-alives are handled by a
    background RX/TX thread), after which ``port_up()`` / ``port_down()``
    send port state messages and ``disconnect()`` tears the adjacency down.

    :param address: ANCP server address (IPv4)
    :type address: str
    :param port: ANCP port (default: 6068)
    :type port: int
    :param tech_type: tech type (default=DSL)
    :type tech_type: ancp.client.TechTypes
    :param timer: adjacency timer (default=25.0)
    :type timer: int
    :param source_address: optional source address
    :type source_address: str
    """
    def __init__(self, address, port=6068, tech_type=TechTypes.DSL, timer=25.0, source_address=None):
        self.address = str(address)
        self.port = port
        self.source_address = str(source_address) if source_address else None
        self.timer = timer  # adjacency timer (seconds between keep-alive SYNs)
        self.timeout = 1.0  # socket timeout; also paces the keep-alive check
        self._last_syn_time = None
        self._tx_lock = Lock()  # serializes writes to the socket
        self.established = Event()  # set while the adjacency is ESTAB
        self.version = VERSION_RFC
        self.tech_type = tech_type
        self.state = AdjacencyState.IDLE
        self.capabilities = [Capabilities.TOPO]
        self.transaction_id = 1
        if self.source_address:
            # create sender_name from source_address: IPv4 octets padded with two zero bytes
            _sender_name = [int(i) for i in source_address.split(".")]
            _sender_name.extend([0, 0])
            self.sender_name = tuple(_sender_name)
            # TCP socket is created in connect method
        else:
            self.sender_name = (1, 2, 3, 4, 5, 6)
            # create TCP socket
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sender_instance = 16777217
        self.sender_port = 0
        # Receiver identity is learned from the peer's adjacency messages.
        self.receiver_name = (0, 0, 0, 0, 0, 0)
        self.receiver_instance = 0
        self.receiver_port = 0
    def __repr__(self):
        if self.source_address:
            return "Client(%s:%s, %s)" % (self.address, self.port, self.source_address)
        else:
            return "Client(%s:%s)" % (self.address, self.port)
    def connect(self):
        """Connect to the ANCP server and wait for the adjacency to establish.

        Sends the initial SYN, starts the background RX/TX thread, and waits
        up to ~6 seconds for the adjacency to reach ESTAB.

        :return: True if the adjacency was established, False otherwise
        """
        if self.source_address:
            self.socket = socket.create_connection((self.address, self.port), source_address=(self.source_address, 0))
        else:
            self.socket.connect((self.address, self.port))
        self.socket.setblocking(True)
        self.socket.settimeout(self.timeout)
        self._send_syn()
        # rx / tx thread
        # NOTE(review): Thread.setDaemon() is deprecated in favor of the
        # daemon attribute on recent Python versions — confirm target version.
        self._thread = Thread(target=self._handle, name="handle")
        self._thread.setDaemon(True)
        self._thread.start()
        for _ in range(6):
            if self._thread.is_alive():
                self.established.wait(1)
            else:
                break
        if self.established.is_set():
            return True
        else:
            return False
    def disconnect(self, send_ack=False):
        """Tear down the adjacency and close the socket.

        :param send_ack: send a final ACK instead of an RSTACK
        :type send_ack: bool
        """
        if send_ack:
            self._send_ack()
        else:
            self._send_rstack()
        self._thread.join(timeout=1.0)
        self.socket.close()
        self.established.clear()
    def port_up(self, subscribers):
        """send port-up message

        For backwards compability single value ANCP subscribers are accepted.

        :param subscriber: collection of ANCP subscribers
        :type subscriber: [ancp.subscriber.Subscriber]
        """
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # this should be collections.abc.Iterable — confirm supported versions.
        if not isinstance(subscribers, collections.Iterable):
            subscribers = [subscribers]
        elif len(subscribers) == 0:
            raise ValueError("No Subscribers passed")
        self._port_updown(MessageType.PORT_UP, subscribers)
    def port_down(self, subscribers):
        """send port-down message

        For backwards compability single value ANCP subscribers are accepted.

        :param subscriber: collection of ANCP subscribers
        :type subscriber: [ancp.subscriber.Subscriber]
        """
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # this should be collections.abc.Iterable — confirm supported versions.
        if not isinstance(subscribers, collections.Iterable):
            subscribers = [subscribers]
        elif len(subscribers) == 0:
            raise ValueError("No Subscribers passed")
        self._port_updown(MessageType.PORT_DOWN, subscribers)
    # internal methods --------------------------------------------------------
    def _handle(self):
        """RX / TX Thread

        Receive loop: reads ANCP messages (ident 0x880C) from the socket and
        dispatches them by message type; socket timeouts drive keep-alive
        (re)transmission via _handle_timeout.
        """
        while True:
            try:
                b = self._recvall(4)
            except socket.timeout:
                self._handle_timeout()
            else:
                if len(b) == 0:
                    # Peer closed the connection.
                    log.warning("connection lost with %s ", tomac(self.receiver_name))
                    break
                else:
                    log.debug("received len(b) = %d", len(b))
                    (id, length) = struct.unpack("!HH", b)
                    log.debug("message rcvd length field %d", length)
                    if id != 0x880C:
                        log.error("incorrect ident 0x%x", id)
                        break
                    # Read the remainder of the message as announced by the length field.
                    b = self._recvall(length)
                    if len(b) != length:
                        log.warning("MSG_WAITALL failed")
                    log.debug("rest received len(b) = %d", len(b))
                    (ver, mtype, var) = struct.unpack_from("!BBH", b, 0)
                    s0 = self.state
                    if mtype == MessageType.ADJACENCY:
                        self._handle_adjacency(var, b)
                    elif mtype == MessageType.ADJACENCY_UPDATE:
                        self._handle_adjacency_update(var, b)
                    elif mtype == MessageType.PORT_UP:
                        log.warning("received port up in AN mode")
                    elif mtype == MessageType.PORT_DOWN:
                        log.warning("received port down in AN mode")
                    else:
                        self._handle_general(var, b)
                    # Signal waiters in connect() when the state just changed to ESTAB.
                    if s0 != self.state and self.state == AdjacencyState.ESTAB and not self.established.is_set():
                        self.established.set()
                        log.info("adjacency established with %s", tomac(self.receiver_name))
        self.established.clear()
    def _port_updown(self, message_type, subscribers):
        # Guard: port messages are only valid on an established adjacency.
        if not self.established.is_set():
            raise RuntimeError("session not established")
        self._send_port_updwn(message_type, self.tech_type, subscribers)
    def _recvall(self, toread):
        """Read exactly 'toread' bytes from the socket (b'' if the peer closed)."""
        buf = bytearray(toread)
        view = memoryview(buf)
        while toread:
            nbytes = self.socket.recv_into(view, toread)
            if nbytes == 0:
                return b''
            view = view[nbytes:] # slicing views is cheap
            toread -= nbytes
        return buf
    def _mkadjac(self, mtype, time, m, code):
        """Build an adjacency protocol message as a bytearray.

        NOTE(review): the 'time' parameter is unused; the timer field is
        packed from self.timer instead — confirm intent.
        """
        totcapslen = len(self.capabilities) * 4
        b = bytearray(40 + totcapslen)
        off = 0
        # Ident (0x880C) and total length after the 4-byte header.
        struct.pack_into("!HH", b, off, 0x880c, 36 + totcapslen)
        off += 4
        # Version, message type, timer (in 100ms units), M flag (bit 7) | code.
        struct.pack_into("!BBBB", b, off, self.version, mtype, int(self.timer * 10), (m << 7) | code)
        off += 4
        (s1, s2, s3, s4, s5, s6) = self.sender_name
        (r1, r2, r3, r4, r5, r6) = self.receiver_name
        struct.pack_into("!6B6B", b, off,
                         s1, s2, s3, s4, s5, s6,
                         r1, r2, r3, r4, r5, r6)
        off += 12
        struct.pack_into("!II", b, off, self.sender_port, self.receiver_port)
        off += 8
        struct.pack_into("!I", b, off, self.sender_instance)
        off += 4
        struct.pack_into("!I", b, off, self.receiver_instance)
        off += 5
        # Capability count and total capability length, then the capability list.
        struct.pack_into("!BH", b, off, len(self.capabilities), totcapslen)
        off += 3
        for cap in self.capabilities:
            struct.pack_into("!H", b, off, cap)
            off += 2
        return b
    def _send_adjac(self, m, code):
        """Build and send an adjacency message with the given M flag and code."""
        log.debug("send adjanecy message with code %s", (code))
        b = self._mkadjac(MessageType.ADJACENCY, self.timer * 10, m, code)
        with self._tx_lock:
            self.socket.send(b)
    def _send_syn(self):
        # SYN doubles as the keep-alive; remember when it was sent.
        self._send_adjac(0, MessageCode.SYN)
        self.state = AdjacencyState.SYNSENT
        self._last_syn_time = datetime.now()
    def _send_ack(self):
        self._send_adjac(0, MessageCode.ACK)
    def _send_synack(self):
        self._send_adjac(0, MessageCode.SYNACK)
        self.state = AdjacencyState.SYNRCVD
    def _send_rstack(self):
        self._send_adjac(0, MessageCode.RSTACK)
        self.state = AdjacencyState.SYNRCVD
    def _handle_timeout(self):
        """Called on each socket timeout: retransmit SYN / send keep-alives."""
        if self.state == AdjacencyState.SYNSENT:
            self._send_syn()
        elif self.state == AdjacencyState.ESTAB:
            # send every self.timer seconds a SYN, ... (keep-alive)
            diff = datetime.now() - self._last_syn_time
            if diff.seconds >= self.timer:
                self._send_syn()
    def _handle_syn(self):
        """React to a received SYN according to the current adjacency state."""
        log.debug("SYN received with current state %d", self.state)
        if self.state == AdjacencyState.SYNSENT:
            self._send_synack()
        elif self.state == AdjacencyState.SYNRCVD:
            self._send_synack()
        elif self.state == AdjacencyState.ESTAB:
            self._send_ack()
        elif self.state == AdjacencyState.IDLE:
            self._send_syn()
        else:
            log.warning('SYN not expected in state: %d', self.state)
    def _handle_synack(self):
        """React to a received SYNACK according to the current adjacency state."""
        log.debug("SYNACK received with current state %d", self.state)
        if self.state == AdjacencyState.SYNSENT:
            # C !C ??
            self._send_ack()
            self.state = AdjacencyState.ESTAB
        elif self.state == AdjacencyState.SYNRCVD:
            # C !C ??
            self._send_ack()
        elif self.state == AdjacencyState.ESTAB:
            self._send_ack()
        else:
            log.warning('SYNACK not expected in state: %d', self.state)
    def _handle_ack(self):
        """React to a received ACK: confirm or establish the adjacency."""
        log.debug("ACK received with current state %d", self.state)
        if self.state == AdjacencyState.ESTAB:
            self._send_ack()
        else:
            self.state = AdjacencyState.ESTAB
    def _handle_rstack(self):
        """React to a received RSTACK: disconnect unless still in SYNSENT."""
        log.debug("RSTACK received with current state %d", self.state)
        if self.state == AdjacencyState.SYNSENT:
            pass
        else:
            # disconnect
            self.disconnect(send_ack=True)
    def _handle_adjacency(self, var, b):
        """Decode an adjacency message and dispatch on its message code."""
        timer = var >> 8
        m = var & 0x80
        code = var & 0x7f
        if m == 0:
            log.error("received M flag 0 in AN mode")
            raise RuntimeError("Trying to synchronize with other AN")
        # Learn the peer's name and instance (instance is the low 24 bits).
        self.receiver_name = struct.unpack_from("!BBBBBB", b, 4)
        self.receiver_instance = struct.unpack_from("!I", b, 24)[0] & 16777215
        if code == MessageCode.SYN:
            self._handle_syn()
        elif code == MessageCode.SYNACK:
            self._handle_synack()
        elif code == MessageCode.ACK:
            self._handle_ack()
        elif code == MessageCode.RSTACK:
            self._handle_rstack()
        else:
            log.warning("unknown code %d" % code)
    def _handle_adjacency_update(self, var, b):
        # Decoded but currently unused.
        res = var >> 12
        code = var & 0xfff
    def _handle_general(self, var, b):
        # Placeholder for message types without a dedicated handler.
        pass
    def _mkgeneral(self, message_type, result, result_code, body):
        """Build a general ANCP message header and prepend it to 'body'."""
        b = bytearray(4 + 12)
        partition_id = 0
        off = 0
        struct.pack_into("!HH", b, off, 0x880c, len(b) - 4 + len(body))
        off += 4
        # Result (top 4 bits) and result code (low 12 bits) share one word.
        struct.pack_into("!BBH", b, off, self.version, message_type, (result << 12) | result_code)
        off += 4
        # Partition id (top byte) and transaction id (low 24 bits).
        struct.pack_into("!I", b, off, (partition_id << 24) | self.transaction_id)
        self.transaction_id += 1
        off += 4
        struct.pack_into("!HH", b, off, 0x8001, len(b) - 4 + len(body))
        off += 4
        return b + body
    def _send_port_updwn(self, message_type, tech_type, subscribers):
        """Build and send one concatenated port up/down message per subscriber."""
        msg = bytearray()
        for subscriber in subscribers:
            # NOTE(review): bare except — any attribute/encoding error is
            # silently treated as "not a Subscriber"; consider narrowing.
            try:
                num_tlvs, tlvs = subscriber.tlvs
            except:
                log.warning("subscriber is not of type ancp.subscriber.Subscriber: skip")
                continue
            b = bytearray(28)
            off = 20
            struct.pack_into("!xBBx", b, off, message_type, tech_type)
            off += 4
            struct.pack_into("!HH", b, off, num_tlvs, len(tlvs))
            off += 4
            msg += self._mkgeneral(message_type, ResultFields.Nack,
                                   ResultCodes.NoResult, b + tlvs)
        if len(msg) == 0:
            raise ValueError("No valid Subscriber passed")
        with self._tx_lock:
            self.socket.send(msg)
| {
"repo_name": "GIC-de/PyANCP",
"path": "ancp/client.py",
"copies": "1",
"size": "13931",
"license": "mit",
"hash": 217604155026265800,
"line_mean": 32.4879807692,
"line_max": 118,
"alpha_frac": 0.5471251166,
"autogenerated": false,
"ratio": 3.7979825517993455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48451076683993455,
"avg_score": null,
"num_lines": null
} |
"""ANCP Subscribers
Copyright (C) 2017-2021, Christian Giese (GIC-de)
SPDX-License-Identifier: MIT
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
import struct
import logging
log = logging.getLogger(__name__)
class LineState(object):
    "Line States"
    # Values carried in the STATE TLV (see TlvType.STATE and Subscriber.tlvs).
    SHOWTIME = 1
    IDLE = 2
    SILENT = 3
class DslType(object):
    "DSL Types"
    # Values carried in the TYPE TLV (see TlvType.TYPE and Subscriber.tlvs).
    ADSL = 1
    ADSL2 = 2
    ADSL2P = 3
    VDSL1 = 4
    VDSL2 = 5
    SDSL = 6
    OTHER = 0
class TlvType(object):
    "TLV Types"
    # Identification TLVs
    ACI = 0x0001  # Access-Loop-Circuit-ID
    ARI = 0x0002  # Access-Loop-Remote-ID
    AACI_ASCII = 0x0003  # Access-Aggregation-Circuit-ID-ASCII
    LINE = 0x0004  # container TLV holding the DSL line sub-TLVs below
    AACI_BIN = 0x0006  # Access-Aggregation-Circuit-ID-Binary
    # DSL line rate sub-TLVs (see Subscriber.tlvs for the source attribute)
    UP = 0x0081  # Actual-Net-Data-Rate-Upstream
    DOWN = 0x0082  # Actual-Net-Data-Rate-Downstream
    MIN_UP = 0x0083  # Minimum-Net-Data-Rate-Upstream
    MIN_DOWN = 0x0084  # Minimum-Net-Data-Rate-Downstream
    ATT_UP = 0x0085  # Attainable-Net-Data-Rate-Upstream
    ATT_DOWN = 0x0086  # Attainable-Net-Data-Rate-Downstream
    MAX_UP = 0x0087  # Maximum-Net-Data-Rate-Upstream
    MAX_DOWN = 0x0088  # Maximum-Net-Data-Rate-Downstream
    STATE = 0x008f  # DSL-Line-State (LineState)
    ACC_LOOP_ENC = 0x0090  # Access-Loop-Encapsulation (access_loop_enc)
    TYPE = 0x0091  # DSL-Type (DslType)
# Access-Loop-Encapsulation
class DataLink(object):
    "Access-Loop-Encapsulation - Data Link"
    # Packed into the top byte of the ACC_LOOP_ENC TLV by access_loop_enc().
    ATM_AAL5 = 0
    ETHERNET = 1
class Encap1(object):
    "Access-Loop-Encapsulation - Encapsulation 1"
    # Packed into the second byte of the ACC_LOOP_ENC TLV by access_loop_enc().
    NA = 0
    UNTAGGED_ETHERNET = 1
    SINGLE_TAGGED_ETHERNET = 2
    DOUBLE_TAGGED_ETHERNET = 3
class Encap2(object):
    "Access-Loop-Encapsulation - Encapsulation 2"
    # Packed into the third byte of the ACC_LOOP_ENC TLV by access_loop_enc().
    PPPOA_LLC = 1
    PPPOA_NULL = 2
    IPOA_LLC = 3
    IPOA_Null = 4
    EOAAL5_LLC_FCS = 5
    EOAAL5_LLC = 6
    EOAAL5_NULL_FCS = 7
    EOAAL5_NULL = 8
# HELPER FUNCTIONS AND CALSSES ------------------------------------------------
class TLV(object):
    """One ANCP TLV: a type code plus a value with derived sizes.

    ``len`` is the payload length in bytes; ``off`` is the space the
    payload occupies on the wire (length rounded up to 4-byte words).
    """
    __slots__ = ('type', 'val', 'len', 'off')

    def __init__(self, t, val):
        self.type = t
        self.val = val
        if isinstance(val, int):
            # plain integer value: fixed 4-byte field
            self.len = 4
            self.off = 4
        elif isinstance(val, list):
            # nested sub-TLVs: sum each child's 4-byte header + payload
            self.len = sum(4 + sub.len for sub in val)
            self.off = sum(4 + sub.off for sub in val)
        elif isinstance(val, tuple):
            # sequence of 32-bit integers (e.g. for AACI_BIN)
            self.len = 4 * len(val)
            self.off = self.len
        else:
            # string payload, padded up to the next 4-byte boundary
            self.len = len(val)
            self.off = self.len + (-self.len % 4)
def mktlvs(tlvs):
    """Serialize a list of TLV objects into a wire-format bytearray.

    Each TLV is written as a 4-byte ``!HH`` header (type, length) followed
    by its payload padded to a 4-byte boundary (pad bytes stay zero).

    :param tlvs: iterable of TLV whose values are int, str, tuple of ints,
        or a list of sub-TLVs
    :rtype: bytearray
    """
    blen = 0
    for t in tlvs:
        blen += 4 + t.off
    b = bytearray(blen)
    off = 0
    for t in tlvs:
        if isinstance(t.val, tuple):
            # sequence of 32-bit integers (e.g. for AACI_BIN)
            struct.pack_into("!HH", b, off, t.type, t.len)
            off += 4
            for i in t.val:
                struct.pack_into("!I", b, off, i)
                off += 4
        elif isinstance(t.val, list):
            # nested sub-TLVs; the container length field is the padded size
            struct.pack_into("!HH", b, off, t.type, t.off)
            off += 4
            for s in t.val:
                if isinstance(s.val, int):
                    struct.pack_into("!HHI", b, off, s.type, s.len, s.val)
                else:
                    fmt = "!HH%ds" % s.len
                    struct.pack_into(fmt, b, off, s.type, s.len,
                                     bytes(s.val, encoding='utf-8'))
                off += 4 + s.off
        elif isinstance(t.val, int):
            # plain integer value
            struct.pack_into("!HHI", b, off, t.type, t.len, t.val)
            off += 4 + t.off
        else:
            # string payload (an unreachable duplicate tuple branch that
            # could never run after the first isinstance check was removed)
            fmt = "!HH%ds" % t.len
            struct.pack_into(fmt, b, off, t.type, t.len,
                             bytes(t.val, encoding='utf-8'))
            off += 4 + t.off
    return b
def access_loop_enc(data_link, encap1, encap2):
    """Create the Access-Loop-Encapsulation TLV.

    :param data_link: The Data link type
    :type data_link: ancp.subscriber.DataLink
    :param encap1: The first Encapsulation type
    :type encap1: ancp.subscriber.Encap1
    :param encap2: The second Encapsulation type
    :type encap2: ancp.subscriber.Encap2
    :rtype: TLV
    """
    # Pack the three one-byte fields into the top bytes of a 32-bit word.
    packed = (data_link << 24) | (encap1 << 16) | (encap2 << 8)
    tlv = TLV(TlvType.ACC_LOOP_ENC, 0)
    tlv.val = packed
    # The payload holds only 3 meaningful bytes but occupies a 4-byte word.
    tlv.len = 3
    tlv.off = 4
    return tlv
# ANCP SUBSCRIBER -------------------------------------------------------------
class Subscriber(object):
    """ANCP Subscriber
    :param aci: Access-Loop-Circuit-ID
    :type aci: str
    :param ari: Access-Loop-Remote-ID
    :type ari: str
    :param aaci_bin: Access-Aggregation-Circuit-ID-Binary
    :type aaci_bin: int or tuple
    :param aaci_ascii: Access-Aggregation-Circuit-ID-ASCII
    :type aaci_ascii: str
    :param state: DSL-Line-State
    :type state: ancp.subscriber.LineState
    :param up: Actual-Net-Data-Rate-Upstream
    :type up: int
    :param down: Actual-Net-Data-Rate-Downstream
    :type down: int
    :param min_up: Minimum-Net-Data-Rate-Upstream
    :type min_up: int
    :param min_down: Minimum-Net-Data-Rate-Downstream
    :type min_down: int
    :param att_up: Attainable-Net-Data-Rate-Upstream
    :type att_up: int
    :param att_down: Attainable-Net-Data-Rate-Downstream
    :type att_down: int
    :param max_up: Maximum-Net-Data-Rate-Upstream
    :type max_up: int
    :param max_down: Maximum-Net-Data-Rate-Downstream
    :type max_down: int
    :param dsl_type: DSL-Type
    :type dsl_type: ancp.subscriber.DslType
    :param data_link: Access-Loop-Encapsulation - Data Link
    :type data_link: ancp.subscriber.DataLink
    :param encap1: Access-Loop-Encapsulation - Encapsulation 1
    :type encap1: ancp.subscriber.Encap1
    :param encap2: Access-Loop-Encapsulation - Encapsulation 2
    :type encap2: ancp.subscriber.Encap2
    """
    def __init__(self, aci, **kwargs):
        self.aci = aci
        self.ari = kwargs.get("ari")
        # Validated by the aaci_bin property setter below.
        self.aaci_bin = kwargs.get("aaci_bin")
        self.aaci_ascii = kwargs.get("aaci_ascii")
        self.state = kwargs.get("state", LineState.SHOWTIME)
        # Rates default to 0 (not None), so the UP/DOWN TLVs are always
        # emitted by the ``tlvs`` property.
        self.up = kwargs.get("up", 0)
        self.down = kwargs.get("down", 0)
        self.min_up = kwargs.get("min_up")
        self.min_down = kwargs.get("min_down")
        self.att_up = kwargs.get("att_up")
        self.att_down = kwargs.get("att_down")
        self.max_up = kwargs.get("max_up")
        self.max_down = kwargs.get("max_down")
        self.dsl_type = kwargs.get("dsl_type", DslType.OTHER)
        self.data_link = kwargs.get("data_link", DataLink.ETHERNET)
        self.encap1 = kwargs.get("encap1", Encap1.DOUBLE_TAGGED_ETHERNET)
        self.encap2 = kwargs.get("encap2", Encap2.EOAAL5_LLC)
    def __repr__(self):
        return "Subscriber(%s)" % (self.aci)
    @property
    def aaci_bin(self):
        # Access-Aggregation-Circuit-ID-Binary: int, tuple of ints, or None.
        return self._aaci_bin
    @aaci_bin.setter
    def aaci_bin(self, value):
        """Validate that value is an int, a tuple of ints, or None.

        :raises ValueError: for any other type
        """
        if value is not None:
            if isinstance(value, tuple):
                for v in value:
                    if not isinstance(v, int):
                        raise ValueError("invalid value for aaci_bin")
            elif not isinstance(value, int):
                raise ValueError("invalid value for aaci_bin")
        self._aaci_bin = value
    @property
    def tlvs(self):
        """Build this subscriber's TLV set.

        :return: tuple of (number of top-level TLVs, serialized bytearray)
        """
        tlvs = [TLV(TlvType.ACI, self.aci)]
        if self.ari is not None:
            tlvs.append(TLV(TlvType.ARI, self.ari))
        if self.aaci_bin is not None:
            tlvs.append(TLV(TlvType.AACI_BIN, self.aaci_bin))
        if self.aaci_ascii is not None:
            tlvs.append(TLV(TlvType.AACI_ASCII, self.aaci_ascii))
        # DSL LINE ATTRIBUTES: nested as sub-TLVs under one LINE TLV.
        line = [TLV(TlvType.TYPE, self.dsl_type)]
        line.append(access_loop_enc(self.data_link, self.encap1, self.encap2))
        line.append(TLV(TlvType.STATE, self.state))
        if self.up is not None:
            line.append(TLV(TlvType.UP, self.up))
        if self.down is not None:
            line.append(TLV(TlvType.DOWN, self.down))
        if self.min_up is not None:
            line.append(TLV(TlvType.MIN_UP, self.min_up))
        if self.min_down is not None:
            line.append(TLV(TlvType.MIN_DOWN, self.min_down))
        if self.att_up is not None:
            line.append(TLV(TlvType.ATT_UP, self.att_up))
        if self.att_down is not None:
            line.append(TLV(TlvType.ATT_DOWN, self.att_down))
        if self.max_up is not None:
            line.append(TLV(TlvType.MAX_UP, self.max_up))
        if self.max_down is not None:
            line.append(TLV(TlvType.MAX_DOWN, self.max_down))
        tlvs.append(TLV(TlvType.LINE, line))
        return (len(tlvs), mktlvs(tlvs))
| {
"repo_name": "GIC-de/PyANCP",
"path": "ancp/subscriber.py",
"copies": "1",
"size": "8949",
"license": "mit",
"hash": 5177740520780844000,
"line_mean": 30.6219081272,
"line_max": 96,
"alpha_frac": 0.556710247,
"autogenerated": false,
"ratio": 3.185831256674973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9242169384125105,
"avg_score": 0.00007442390997370487,
"num_lines": 283
} |
""" AND and OR Searching """
# pylint: disable=unneeded-not,invalid-name,import-error
import json
import re
from models import Blog, App, Misc, Data
def search_and(term):
    """
    Searches for 'and' results for all models
    :param term: search term
    :return: json of search results, keyed by model name
    """
    term = str(term).lower()
    result = dict()
    result["Blogs"] = search_and_relation(Blog, term)
    result["Apps"] = search_and_relation(App, term)
    result["Miscs"] = search_and_relation(Misc, term)
    result["Data"] = search_and_relation(Data, term)
    # NOTE(review): a pagination wrapper (num_results/objects/page) used to
    # be built here but was never returned; the dead code has been removed.
    return json.dumps(result, ensure_ascii=False)
def search_or(term):
    """
    Searches for 'or' results for all models
    :param term: search term
    :return: json of search results
    """
    needle = str(term).lower()
    # Query every model with the lowercased term and key results by label.
    models = (("Blogs", Blog), ("Apps", App), ("Miscs", Misc), ("Data", Data))
    result = {label: search_or_relation(model, needle)
              for label, model in models}
    return json.dumps(result, ensure_ascii=False)
def search_and_relation(relation, term):
    """
    Search 'and' results
    :param relation: The model
    :param term: search term
    :return: list of rows in models that contain term
    """
    list_results = list()
    if not term or term == "none":
        return list_results
    result = relation.query.all()
    if result:
        exists = False
        for item in result:
            temp = dict()
            context = list()
            for key, value in item.__dict__.items():
                key = str(key)
                value = str(value)
                tkey = key.lower()
                tvalue = value.lower()
                temp[key] = value
                # NOTE(review): "_sa_instance_Misc" looks like a typo for
                # SQLAlchemy's "_sa_instance_state" attribute — confirm.
                if not "_sa_instance_Misc" in tkey and (term in tkey or term in tvalue):
                    exists = True
                    # Bold the term inside whichever side matched.
                    if term in tvalue:
                        context.append(make_pretty(key) +
                                       ": " + bold_word(value, term))
                    else:
                        context.append(make_pretty(bold_word(key, term)) +
                                       ": " + value)
            if exists:
                temp["context"] = context
                list_results += [temp]
                exists = False
    return list_results
def search_or_relation(relation, term):
    """
    Search 'or' results
    :param relation: The model
    :param term: search term
    :return: list of rows in models that contain any word of term
    """
    list_results = list()
    if not term or term == "none":
        return list_results
    # 'or' semantics: split the term and match each word independently.
    words = re.split(' ', term)
    result = relation.query.all()
    if result:
        exists = False
        for item in result:
            temp = dict()
            context = list()
            for key, value in item.__dict__.items():
                key = str(key)
                value = str(value)
                tkey = key.lower()
                tvalue = value.lower()
                temp[key] = value
                for word in words:
                    # NOTE(review): "_sa_instance_Misc" is likely meant to
                    # be SQLAlchemy's "_sa_instance_state" — confirm.
                    if not "_sa_instance_Misc" in tkey and (word in tkey or word in tvalue):
                        exists = True
                        context.append(bold_words(
                            (make_pretty(key), value), word))
            if exists:
                temp["context"] = context
                list_results += [temp]
                exists = False
    return list_results
def bold_words(tup, term):
    """
    Bold every token of a value (or the key) that contains the term.
    :param tup: (pretty key, value) pair for one model attribute
    :param term: search term
    :return: "key : context" string with matches bolded
    """
    label, value = tup
    if term in value.lower():
        pieces = []
        for token in re.split(' ', value):
            if term in token.lower():
                pieces.append(bold_word(token, term))
            else:
                pieces.append(token)
        # Reproduce the original leading-space concatenation.
        context = "".join(" " + piece for piece in pieces)
    else:
        # NOTE(review): the key is bolded here but the plain key is still
        # prefixed below, so it appears twice — confirm this is intended.
        context = bold_word(label, term)
    return label + " : " + context
def bold_word(word, term):
    """
    Wrap the first (case-insensitive) occurrence of term in bold markup.
    :param word: String that may contain term
    :param term: lowercase word to be bolded
    :return: word with the match wrapped, or "" when term is absent
    """
    pos = word.lower().find(term)
    if pos < 0:
        return ""
    end = pos + len(term)
    return (word[:pos] + '<span class="context"><strong>' +
            word[pos:end] + '</strong></span>' + word[end:])
def make_pretty(key):
    """
    Turn a snake_case attribute name into a title-cased label.
    :param key: String to make pretty
    :return: pretty string ("descriptive_name" is special-cased to "Name")
    """
    label = key.title().replace("_", " ")
    return "Name" if label == "Descriptive Name" else label
| {
"repo_name": "jasonvila/jasonvila.com",
"path": "backend/searchdb.py",
"copies": "1",
"size": "5116",
"license": "mit",
"hash": -7253679503774416000,
"line_mean": 26.2127659574,
"line_max": 92,
"alpha_frac": 0.5222830336,
"autogenerated": false,
"ratio": 4.122481869460112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144764903060112,
"avg_score": null,
"num_lines": null
} |
#a=["--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"]
def calen(m,v,mm,tt,da):
    """Fill one month into a 6-week slot grid; return the next start offset.

    m  -- weekday slot (0-6) on which this month starts
    v  -- number of days in this month
    mm -- this month's number (1-12)
    tt -- the month being searched for
    da -- the day-of-month being searched for

    When (mm, da) equals (tt, da) the weekday name is printed and the
    program exits via sys.exit().
    NOTE(review): the mapping assumes slot index 0 == SUNDAY; verify
    against a known date.  Python 2 print statements throughout.
    """
    k=m
    z=1
    j=v
    mo=mm
    ta=tt
    dy=da
    a=["--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--"]
    # Place day numbers 1..j into the grid starting at offset k.
    for i in range(k,k+j):
        a[i]=z
        z=z+1
        if j==a[i]:
            # Last day written: the next month starts on the following slot.
            g=(i+1)%7
        if mo==ta:
            if a[i]==dy:
                # Found the requested date: slot index mod 7 is the weekday.
                tk=i%7
                print "The given day is",
                if tk==0:
                    print "SUNDAY"
                elif tk==1:
                    print "MONDAY"
                elif tk==2:
                    print " TUESDAY"
                elif tk==3:
                    print "WEDNESDAY"
                elif tk==4:
                    print "THURSDAY"
                elif tk==5:
                    print "FRIDAY"
                elif tk==6:
                    print "SATURDAY"
                import sys
                sys.exit()
    return g
# Read year, month and day from the user (Python 2: raw_input).
l,mn,de=raw_input("Enter the year month and date(yy mm dd) in numbers\n").split()
l=int(l)
mn=int(mn)
de=int(de)
# n accumulates the day-of-week offset contributed by each year;
# x records whether the most recent year is common (1) or leap (2).
n=0
for y in range(1,l+1):
    x=0
    if y%4!=0:
        # common year: 365 days == 52 weeks + 1 day
        n=n+1
        x=x+1
    elif y%100!=0:
        # leap year (divisible by 4, not by 100): 366 days == 52 weeks + 2
        n=n+2
        # BUG FIX: this branch used to set x to 1 (common year), giving
        # February 28 days even though n was advanced by 2 as a leap year.
        x=x+2
    elif y%400!=0:
        # century year not divisible by 400: common year
        n=n+1
        x=x+1
    else :
        # divisible by 400: leap year
        n=n+2
        x=x+2
# Starting weekday slot for January of the target year.
# NOTE(review): the loop includes year l itself; confirm the intended
# off-by-one behaviour against a known calendar date.
s=n%7
#print "JANUARY"
p=calen(s,31,1,mn,de)
#print "FEBRAURY"
if x==1:
    q=calen(p,28,2,mn,de)
else :
    q=calen(p,29,2,mn,de)
#print "MARCH"
r=calen(q,31,3,mn,de)
#print "APRIL"
d=calen(r,30,4,mn,de)
#print "MAY"
w=calen(d,31,5,mn,de)
#print "JUNE"
l=calen(w,30,6,mn,de)
#print "JULY"
b=calen(l,31,7,mn,de)
#print "AUGUST"
i=calen(b,31,8,mn,de)
#print "SEPTEMBER"
o=calen(i,30,9,mn,de)
#print "OCTOBER"
m=calen(o,31,10,mn,de)
#print "NOVEMBER"
f=calen(m,30,11,mn,de)
#print "DECEMBER"
po=calen(f,31,12,mn,de)
| {
"repo_name": "parichitran/py-hw",
"path": "Day_Finder.py",
"copies": "1",
"size": "2018",
"license": "apache-2.0",
"hash": 8743253450909166000,
"line_mean": 23.6097560976,
"line_max": 217,
"alpha_frac": 0.3622398414,
"autogenerated": false,
"ratio": 2.351981351981352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7653004123977669,
"avg_score": 0.1122434138807365,
"num_lines": 82
} |
#Anderson Evans
#August 3rd - 2013
import pygame
import sys, glob
from pygame import *
class Player(pygame.sprite.Sprite):
    """The controllable sprite, moved with the arrow keys."""
    def __init__(self, *groups):
        super(Player, self).__init__(*groups)
        # NOTE(review): filename case differs from the '19Man.png' loaded
        # in Game.main — on a case-sensitive filesystem only one exists.
        self.image = pygame.image.load('19man.png')
        self.rect = pygame.rect.Rect((320, 240), self.image.get_size())
    def update(self, dt):
        """Move 300 px/s in the direction of any held arrow key.

        dt -- frame time in seconds.
        """
        key = pygame.key.get_pressed()
        if key[pygame.K_LEFT]:
            self.rect.x -= 300 * dt
        if key[pygame.K_RIGHT]:
            self.rect.x += 300 * dt
        if key[pygame.K_UP]:
            self.rect.y -= 300 * dt
        if key[pygame.K_DOWN]:
            self.rect.y += 300 * dt
class Game(object):
    """Owns the main loop: event handling, sprite updates and drawing."""
    def main(self, screen):
        clock = pygame.time.Clock()
        pygame.display.set_caption("19th Century Graphics")
        # NOTE(review): loaded but never blitted — sprites draw themselves.
        image = pygame.image.load('19Man.png')
        sprites = pygame.sprite.Group()
        self.player = Player(sprites)
        while 1:
            # Cap at 30 FPS; tick() returns the elapsed milliseconds.
            dt = clock.tick(30)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return
                if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                    return
            # Convert dt to seconds for the movement code.
            sprites.update(dt / 1000.)
            screen.fill((200, 200, 200))
            sprites.draw(screen)
            pygame.display.flip()
if __name__ == '__main__':
    # Entry point: create the window and run the game loop.
    pygame.init()
    # NOTE(review): 490-pixel height is unusual; possibly intended 480.
    screen = pygame.display.set_mode((640, 490))
    Game().main(screen)
| {
"repo_name": "EliCash82/proto",
"path": "pygame/MrWilliams.py",
"copies": "1",
"size": "1720",
"license": "bsd-3-clause",
"hash": 3397278293236657000,
"line_mean": 28.1525423729,
"line_max": 81,
"alpha_frac": 0.5174418605,
"autogenerated": false,
"ratio": 3.6830835117773018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700525372277302,
"avg_score": null,
"num_lines": null
} |
#Anderson Evans
#August 3rd - 2013
#much of code taken from tutorial http://www.youtube.com/watch?v=mTmJfWdZzbo
import pygame
import sys, glob
from pygame import *
class Player(pygame.sprite.Sprite):
    """Player sprite with gravity, jumping and wall-collision resolution."""
    def __init__(self, *groups):
        super(Player, self).__init__(*groups)
        self.image = pygame.image.load('19man2.png')
        self.rect = pygame.rect.Rect((320, 240), self.image.get_size())
        # True while standing on top of a wall; enables jumping.
        self.resting = False
        # Vertical velocity (positive = downward).
        self.dy = 0
    def update(self, dt, game):
        """Apply input and gravity, then resolve collisions with game.walls.

        dt   -- frame time in seconds
        game -- the Game instance providing the walls group
        """
        last = self.rect.copy()
        key = pygame.key.get_pressed()
        if key[pygame.K_LEFT]:
            self.rect.x -= 300 * dt
        if key[pygame.K_RIGHT]:
            self.rect.x += 300 * dt
        if self.resting and key[pygame.K_SPACE]:
            # Jump impulse (negative = upward).
            self.dy = -500
        # Gravity, capped at 400 terminal velocity.
        self.dy = min(400, self.dy + 40)
        self.rect.y += self.dy * dt
        new = self.rect
        self.resting = False
        for cell in pygame.sprite.spritecollide(self, game.walls, False):
            cell = cell.rect
            # Push the player back out along the edge it crossed this frame.
            if last.right <= cell.left and new.right > cell.left:
                new.right = cell.left
            if last.left >= cell.right and new.left < cell.right:
                new.left = cell.right
            if last.bottom <= cell.top and new.bottom > cell.top:
                # Landed on top of a wall: stop falling, allow jumping.
                self.resting = True
                new.bottom = cell.top
                self.dy = 0
            if last.top >= cell.bottom and new.top < cell.bottom:
                # Hit a ceiling: cancel upward motion.
                new.top = cell.bottom
                self.dy = 0
class Game(object):
    """Main loop: builds a wall border, then updates and draws sprites."""
    def main(self, screen):
        clock = pygame.time.Clock()
        pygame.display.set_caption("Hit SPACEBAR to bounce.")
        background = pygame.image.load('MrWilliamsbg2.png')
        sprites = pygame.sprite.Group()
        self.player = Player(sprites)
        self.walls = pygame.sprite.Group()
        block = pygame.image.load('block.png')
        # Build a one-tile-thick border of 32x32 wall blocks.
        for x in range(0, 640, 32):
            for y in range(0, 480, 32):
                if x in (0, 640-32) or y in (0, 480-32):
                    wall = pygame.sprite.Sprite(self.walls)
                    wall.image = block
                    wall.rect = pygame.rect.Rect((x, y), block.get_size())
        sprites.add(self.walls)
        while 1:
            # Milliseconds since the last frame, capped at 30 FPS.
            dt = clock.tick(30)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return
                if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                    return
            # Pass dt in seconds plus the game object for wall lookups.
            sprites.update(dt / 1000., self)
            screen.blit(background, (0, 0))
            sprites.draw(screen)
            pygame.display.flip()
if __name__ == '__main__':
    # Entry point: create the window and run the game loop.
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    Game().main(screen)
| {
"repo_name": "EliCash82/proto",
"path": "pygame/MrWilliams2.py",
"copies": "1",
"size": "2895",
"license": "bsd-3-clause",
"hash": -8952027171455382000,
"line_mean": 28.5408163265,
"line_max": 81,
"alpha_frac": 0.5150259067,
"autogenerated": false,
"ratio": 3.7892670157068062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48042929224068065,
"avg_score": null,
"num_lines": null
} |
#-------------Anderson.py------------------------------------------------------#
#
# Anderson.py
#
# Purpose: Visualize the Quantum Monte Carlo code from earlier on stream!
#
# Notes: This code can be run by using the following command:
# blender -b -P demon.py
#
#------------------------------------------------------------------------------#
import bpy
import numpy as np
# goes through all the data! Woo!
# Written by Kramsfasel
def parse_data(num_part=0):
    """Read ../out.dat, place spheres for frame 1, keyframe later frames.

    Each non-blank line is one particle row: six floats (two xyz points)
    plus two integer columns; a blank line marks the end of the first
    frame and fixes num_part when it was not given.
    Returns the number of frames found.
    NOTE(review): ``offset`` and ``num_part_temp`` are unused; ``input``
    shadows the builtin; the comprehension variable ``s`` shadows the
    split line (the later int conversions rely on the leaked loop value).
    """
    array = []
    offset = 0
    linesInDataSet = 0
    print("importing data from file")
    input = "../out.dat"
    num_part_temp = 0
    with open(input, 'r') as data:
        for line in data:
            if line != '\n':
                linesInDataSet +=1
                s = line.split()
                temp = [float(s) for s in line.split()]
                # Columns 6 and 7 are integral (7 is used as the object id).
                temp[7]=int (s[7])
                temp[6]=int (s[6])
                array.append(temp)
            else:
                # Blank line: first frame complete — record the particle count.
                if (num_part == 0):
                    num_part=linesInDataSet
    place_spheres(array, num_part, linesInDataSet)
    numberOfFrames = int (linesInDataSet / num_part)
    print ("found " + str(numberOfFrames) + " and " + str(num_part) + " particles in first frame")
    # NOTE(review): linesInDataSet is reused here as the frame counter.
    for linesInDataSet in range(2, numberOfFrames+1):
        if (linesInDataSet%100==0):print ("at frame " + str(linesInDataSet)+ " of " + str(numberOfFrames))
        move_spheres(array, num_part, linesInDataSet)
    return numberOfFrames
# Creates sphere material
def create_new_material(passedName, passedcolor):
    """Create a semi-transparent, slightly emissive diffuse material.

    passedName  -- name for the new material datablock
    passedcolor -- (r, g, b) diffuse color
    Returns the new material (keyframed at frame 1), or None on failure.
    """
    mat = bpy.data.materials.new(passedName)
    if mat is None:
        return mat
    mat.diffuse_color = passedcolor
    mat.diffuse_shader = 'LAMBERT'
    mat.diffuse_intensity = 1.0
    mat.specular_color = (0.9, 0.9, 0.9)
    mat.specular_shader = 'COOKTORR'
    mat.specular_intensity = 0.5
    # Nearly invisible; the color is re-keyframed per frame elsewhere.
    mat.use_transparency = True
    mat.alpha = 0.01
    mat.ambient = 0.3
    mat.emit = 0.2
    mat.keyframe_insert(data_path="diffuse_color", frame=1, index=-1)
    return mat
# places new sphere at given location
def new_sphere(diam, x, y, z, r, g, b, id):
    """Add a UV sphere named str(id) at (x, y, z) with an (r, g, b) material.

    Returns the result of the add operator.
    """
    result = bpy.ops.mesh.primitive_uv_sphere_add(
        segments=32,
        ring_count=16,
        size=diam,
        location=(x, y, z),
        rotation=(0, 0, 0))
    # The operator leaves the new sphere as the active object.
    sphere = bpy.context.active_object
    sphere.name = str(id)
    sphere.data.materials.append(create_new_material(sphere.name, (r, g, b)))
    return result
# places sphere duplicates around for fun!
def place_duplicates(x, y, z, id, ob = None):
    """Copy the active object (or *ob*) to (x, y, z) named str(id).

    The copy shares nothing with the original (mesh data is copied too)
    and is linked into the current scene.
    """
    if not ob:
        ob = bpy.context.active_object
    obs = []
    sce = bpy.context.scene
    copy = ob.copy()
    copy.location = x,y,z
    copy.data = copy.data.copy()
    copy.name = str(id)
    obs.append(copy)
    # Link each copy into the scene so it becomes visible.
    for ob in obs:
        sce.objects.link(ob)
# function to place spheres in blender
def place_spheres(array, num_part, i):
    """Create a red and a blue sphere for each first-frame particle row.

    Columns 0-2 are the red endpoint, 3-5 the blue endpoint; column 7 is
    used as the object name for both.
    NOTE(review): parameter ``i`` is immediately shadowed by the loop
    variable, so the passed value is unused; both spheres get the same
    name, so Blender will rename one of the pair.
    """
    diam = 0.2
    for i in range(0, num_part):
        print(i)
        new_sphere(diam, array[i][0], array[i][1], array[i][2],
                   1, 0, 0, array[i][7])
        new_sphere(diam, array[i][3], array[i][4], array[i][5],
                   0, 0, 1, array[i][7])
# Function to moves spheres that are already there.
def move_spheres(array, num_part, frame):
    """Keyframe sphere locations and material color for one frame.

    array    -- all particle rows, frames concatenated
    num_part -- particles per frame
    frame    -- 1-based frame number (rows start at (frame-1)*num_part)
    """
    bpy.context.scene.frame_set(frame)
    # Rows for this frame start after (frame-1) * num_part earlier rows.
    offset = int(frame * num_part - num_part)
    current_frame = bpy.context.scene.frame_current
    for i in range(offset,num_part+offset):
        # Materials were created with the object name (column 7).
        mat = bpy.data.materials[str(array[i][7])]
        mat.diffuse_color = (0,0,1)
        mat.keyframe_insert(data_path="diffuse_color", frame=frame,
                            index=-1)
        bpy.context.scene.objects[str(array[i][7])].location = (array[i][0],array[i][1],array[i][2])
        bpy.context.scene.objects[str(array[i][7])].keyframe_insert(data_path='location', frame=(current_frame))
# Creates the cage material
def create_cage (passedName):
    """Create a white wireframe material with a flat diffuse ramp."""
    cageMat = bpy.data.materials.new(passedName)
    cageMat.type = 'WIRE'
    cageMat.diffuse_color = (1,1,1)
    cageMat.diffuse_shader = 'FRESNEL'
    cageMat.diffuse_intensity = 1
    cageMat.specular_color = (1,1,1)
    cageMat.use_diffuse_ramp = True
    ramp = cageMat.diffuse_ramp
    #(pt_location_on_ramp, (r,g,b,dens_at_pt))
    values = [(0.0, (1,1,1,1)), (1.0, (1,1,1,1))]
    # Add one ramp element per value and set its position and color.
    for n,value in enumerate(values):
        ramp.elements.new((n+1)*0.2)
        elt = ramp.elements[n]
        (pos, color) = value
        elt.position = pos
        elt.color = color
    cageMat.diffuse_ramp_input = 'RESULT'
    return cageMat
# Creates cage at location
def cage_set(Box_length, sign):
    """Add a wireframe cube cage of side Box_length.

    sign -- +1/-1 selects which side of the x axis the cage sits on.
    """
    ccube = bpy.ops.mesh.primitive_cube_add(location=(sign * Box_length / 2,Box_length / 2, Box_length / 2), radius = Box_length / 2)
    # NOTE(review): the edit-mode round trip appears to be a no-op.
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.object.mode_set(mode='OBJECT')
    ob = bpy.context.object
    me = ob.data
    mat = create_cage('MaterialCage')
    me.materials.append(mat)
    return ccube
# Removes objects in scene
def remove_obj(scene):
    """Unlink every object from *scene* except the camera."""
    for obj in scene.objects:
        if obj.name == 'Camera':
            continue
        scene.objects.unlink(obj)
#defining our scene
def def_scene(box_length, bgcolor):
    """Configure camera, resolution and background for rendering.

    NOTE(review): ``bgcolor`` is never used (the call site passes the
    scene object here) and the body reads the module-level ``scene``
    global rather than a parameter; returns that global.
    """
    # Camera stuff
    '''
    x_cam = 2.2
    y_cam = 2.75
    z_cam = 1.43
    r_camx = 70
    r_camy = 0
    r_camz = 145
    '''
    # Camera position as multiples of box_length, rotation in degrees.
    x_cam = 0
    y_cam = 0.5
    z_cam = 4
    r_camx = 0
    r_camy = 0
    r_camz = 0
    scene.camera.location.x = box_length * x_cam
    scene.camera.location.y = box_length * y_cam
    scene.camera.location.z = box_length * z_cam
    scene.camera.rotation_mode = 'XYZ'
    scene.camera.rotation_euler[0] = (np.pi/180.0) * r_camx
    scene.camera.rotation_euler[1] = (np.pi/180.0) * r_camy
    scene.camera.rotation_euler[2] = (np.pi/180.0) * r_camz
    # Sets field of view
    scene.camera.data.angle = 50*(np.pi/180.0)
    bpy.data.cameras['Camera'].type = 'ORTHO'
    bpy.data.cameras['Camera'].ortho_scale = 21.0
    # Scene resolution (double 1366x768)
    scene.render.resolution_x = 1366*2
    scene.render.resolution_y = 768*2
    # Remove lighting (for now)
    remove_obj( scene )
    # sets background to be black
    bpy.data.worlds['World'].horizon_color = (0,0,0)
    return scene
# Renders movie
def render_movie(scene):
    """Render the animation to out.mp4 (MPEG4, 10 fps, no audio).

    NOTE(review): the ``scene`` parameter is immediately shadowed by
    ``bpy.context.scene``, so the argument is effectively unused.
    """
    scene = bpy.context.scene
    bpy.data.scenes[0].render.image_settings.file_format="PNG"
    # Render one still first, then switch to FFMPEG for the animation.
    bpy.ops.render.render( write_still=True )
    print("rendering movie")
    scene.sequence_editor_create()
    bpy.data.scenes["Scene"].render.fps = 10
    bpy.data.scenes["Scene"].render.image_settings.file_format = 'FFMPEG'
    bpy.data.scenes["Scene"].render.ffmpeg.format = 'MPEG4'
    bpy.data.scenes["Scene"].render.ffmpeg.audio_codec = 'NONE'
    bpy.data.scenes["Scene"].render.ffmpeg.minrate = 0
    bpy.data.scenes["Scene"].render.ffmpeg.maxrate = 30000
    bpy.data.scenes["Scene"].render.ffmpeg.codec = 'MPEG4'
    bpy.data.scenes["Scene"].render.filepath = 'out.mp4'
    bpy.data.scenes["Scene"].render.use_file_extension = False
    bpy.ops.render.render( animation=True )
# Render Scene into image
def render_img(filename):
    """Render the current scene to *filename* as a still image."""
    bpy.data.scenes['Scene'].render.filepath = filename
    bpy.ops.render.render( write_still=True )
# Module-level script: configure the scene, import the data, render the movie.
scene = bpy.context.scene
scene = def_scene(10,scene)
# NOTE(review): def_scene already calls remove_obj internally, so this
# second call is redundant (but harmless).
remove_obj(scene)
num = parse_data()
# The animation runs for exactly as many frames as the data file provides.
bpy.data.scenes["Scene"].frame_end = num
scene.update()
render_movie(scene)
#print (array)
| {
"repo_name": "Gustorn/simuleios",
"path": "QMC/visualization/Anderson.py",
"copies": "2",
"size": "8231",
"license": "mit",
"hash": 1198976324443049200,
"line_mean": 33.0123966942,
"line_max": 133,
"alpha_frac": 0.5678532378,
"autogenerated": false,
"ratio": 3.2688641779189833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4836717415718983,
"avg_score": null,
"num_lines": null
} |
#Anderson
#2-19
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
import random
#import the data
incidents = pd.read_csv('data/ArlingtonCensusFireDataYearly.csv')
#aggregate the yearly number of residential structure fires that ACFD responded to
yeardist = incidents.groupby('year').aggregate('sum')['COUNT']
#aggregate the total number of residential fires that occurred in each census tract
tractincidents = incidents.groupby('GEO.id2').aggregate('sum')['COUNT']
#delete the incidents object to save memory (someday this may be huge)
del incidents
#add 1 to all tract incidents to allow zero incident tracts a small probability of selection
tractincidents = tractincidents + 1
tractincidents.sort()
#build the cumulative distribution for selecting tracts
tractshare = tractincidents/tractincidents.sum()
tractcum = tractshare.cumsum()
###figure out how to draw from the cumulative distribution
randdraw = 0.01 #pretend this comes frmo an rng
tractind = np.where(randdraw <= tractcum)[0].min()
#slice the distribution and retrieve the tract (index) corresponding to drawn value.
tractdraw = tractcum[tractind:tractind+1].index.tolist()
print tractdraw
###derive the normal distribution approximation to use as a stopping rule
yrmean = yeardist.mean()
print yrmean
print yeardist
yrvar=yeardist.var()
####perform normal draws
yrdraw = round(np.random.normal(yrmean,sqrt(yrvar)))
print yrdraw
| {
"repo_name": "FireCARES/fire-risk",
"path": "scripts/incident_draws.py",
"copies": "2",
"size": "1476",
"license": "mit",
"hash": -2667383581319791000,
"line_mean": 35.8461538462,
"line_max": 92,
"alpha_frac": 0.7689701897,
"autogenerated": false,
"ratio": 3.3698630136986303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01502248236147217,
"num_lines": 39
} |
# AND gate. No hidden layer, basic logistic regression
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
# Set inputs and correct output values
# Truth table for logical AND: only [1,1] maps to 1.
inputs = [[0,0], [1,1], [0,1], [1,0]]
outputs = [0, 1, 0, 0]
# Set training parameters
alpha = 0.1 # Learning rate
training_iterations = 30000
# Define tensors
x = T.matrix("x")
y = T.vector("y")
# Bias term, learned alongside the weights.
b = theano.shared(value=1.0, name='b')
# Set random seed
rng = np.random.RandomState(2345)
# Initialize random weights
w_values = np.asarray(
    rng.uniform(low=-1, high=1, size=(2, 1)),
    dtype=theano.config.floatX) # Force type to 32bit float for GPU
w = theano.shared(value=w_values, name='w', borrow=True)
# Theano symbolic expressions
hypothesis = T.nnet.sigmoid(T.dot(x, w) + b) # Sigmoid/logistic activation
hypothesis = T.flatten(hypothesis) # This needs to be flattened so
                                   # hypothesis (matrix) and
                                   # y (vector) have the same shape
# cost = T.sum((y - hypothesis) ** 2) # Quadratic/squared error loss
# cost = -(y*T.log(hypothesis) + (1-y)*T.log(1-hypothesis)).sum() # Manual CE
# cost = T.nnet.categorical_crossentropy(hypothesis, y) # Categorical CE
cost = T.nnet.binary_crossentropy(hypothesis, y).mean() # Binary CE
# Plain gradient descent on both parameters.
updates_rules = [
    (w, w - alpha * T.grad(cost, wrt=w)),
    (b, b - alpha * T.grad(cost, wrt=b))
]
# Theano compiled functions
train = theano.function(inputs=[x, y], outputs=[hypothesis, cost],
                        updates=updates_rules)
predict = theano.function(inputs=[x], outputs=[hypothesis])
# Training
cost_history = []
for i in range(training_iterations):
    if (i+1) % 5000 == 0:
        # Python 2 prints; 'cost' here is the previous iteration's value.
        print "Iteration #%s: " % str(i+1)
        print "Cost: %s" % str(cost)
    h, cost = train(inputs, outputs)
    cost_history.append(cost)
# Plot training curve
plt.plot(range(1, len(cost_history)+1), cost_history)
plt.grid(True)
plt.xlim(1, len(cost_history))
plt.ylim(0, max(cost_history))
plt.title("Training Curve")
plt.xlabel("Iteration #")
plt.ylabel("Cost")
# Predictions on held-out probes (three AND-true rows, one AND-false).
test_data = [[1,1], [1,1], [1,1], [1,0]]
predictions = predict(test_data)
print predictions
| {
"repo_name": "sho-87/python-machine-learning",
"path": "Logistic Regression/and.py",
"copies": "1",
"size": "2183",
"license": "mit",
"hash": 3933415120988254700,
"line_mean": 29.3194444444,
"line_max": 78,
"alpha_frac": 0.6477324782,
"autogenerated": false,
"ratio": 3.0068870523415976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4154619530541597,
"avg_score": null,
"num_lines": null
} |
# AND gate. No hidden layer, basic logistic regression. No bias
# Demonstration - can't learn as separator goes through origin w/o a bias unit
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
# Set inputs and correct output values
# Truth table for logical AND: only [1,1] maps to 1.
inputs = [[0,0], [1,1], [0,1], [1,0]]
outputs = [0, 1, 0, 0]
# Set training parameters
alpha = 0.1 # Learning rate
training_iterations = 30000
# Define tensors
x = T.matrix("x")
y = T.vector("y")
# Set random seed
rng = np.random.RandomState(2345)
# Initialize random weights
w_values = np.asarray(
    rng.uniform(low=-1, high=1, size=(2, 1)),
    dtype=theano.config.floatX) # Force type to 32bit float for GPU
w = theano.shared(value=w_values, name='w', borrow=True)
# Theano symbolic expressions
# Without a bias the separator must pass through the origin, so this
# model cannot learn AND — the script demonstrates that failure.
hypothesis = T.nnet.sigmoid(T.dot(x, w)) # Sigmoid/logistic activation
hypothesis = T.flatten(hypothesis) # This needs to be flattened so
                                   # hypothesis (matrix) and
                                   # y (vector) have the same shape
# cost = T.sum((y - hypothesis) ** 2) # Quadratic/squared error loss
# cost = -(y*T.log(hypothesis) + (1-y)*T.log(1-hypothesis)).sum() # Manual CE
# cost = T.nnet.categorical_crossentropy(hypothesis, y) # Categorical CE
cost = T.nnet.binary_crossentropy(hypothesis, y).mean() # Binary CE
# Gradient descent on the weights only (no bias parameter).
updates_rules = [
    (w, w - alpha * T.grad(cost, wrt=w))
]
# Theano compiled functions
train = theano.function(inputs=[x, y], outputs=[hypothesis, cost],
                        updates=updates_rules)
predict = theano.function(inputs=[x], outputs=[hypothesis])
# Training
cost_history = []
for i in range(training_iterations):
    if (i+1) % 5000 == 0:
        # Python 2 prints; 'cost' here is the previous iteration's value.
        print "Iteration #%s: " % str(i+1)
        print "Cost: %s" % str(cost)
    h, cost = train(inputs, outputs)
    cost_history.append(cost)
# Plot training curve
plt.plot(range(1, len(cost_history)+1), cost_history)
plt.grid(True)
plt.xlim(1, len(cost_history))
plt.ylim(0, max(cost_history)+0.1)
plt.title("Training Curve")
plt.xlabel("Iteration #")
plt.ylabel("Cost")
# Predictions on held-out probes (three AND-true rows, one AND-false).
test_data = [[1,1], [1,1], [1,1], [1,0]]
predictions = predict(test_data)
print predictions
| {
"repo_name": "sho-87/python-machine-learning",
"path": "Logistic Regression/and_no_bias.py",
"copies": "1",
"size": "2190",
"license": "mit",
"hash": -6719973431409343000,
"line_mean": 29.8450704225,
"line_max": 78,
"alpha_frac": 0.6561643836,
"autogenerated": false,
"ratio": 3.058659217877095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9190559021864639,
"avg_score": 0.004852915922490942,
"num_lines": 71
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.