| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
class OutlineCodeItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'Link': 'Link',
'Index': 'int'
}
        self.attributeMap = {
            'Link': 'Link',
            'Index': 'Index'
        }
self.Link = None # Link
self.Index = None # int
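
# Usage sketch (illustrative, not part of the generated SDK file): populate the
# model and build a JSON-ready dict from its attributeMap. The Index value is an
# arbitrary example; Link would normally hold a Link model instance.
if __name__ == '__main__':
    item = OutlineCodeItem()
    item.Index = 3
    payload = {json_key: getattr(item, attr)
               for attr, json_key in item.attributeMap.items()}
    print(payload)  # {'Link': None, 'Index': 3}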
|
{
"content_hash": "824bb49341d4e0d6203441b88abc05a5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 28.782608695652176,
"alnum_prop": 0.5468277945619335,
"repo_name": "sohail-aspose/Aspose_Tasks_Cloud",
"id": "5ee5af422aa14fee51a03757a3be53fa56d30ef0",
"size": "685",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SDKs/Aspose.Tasks_Cloud_SDK_for_Python/asposetaskscloud/models/OutlineCodeItem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "530694"
},
{
"name": "Objective-C",
"bytes": "395232"
},
{
"name": "PHP",
"bytes": "224405"
},
{
"name": "Python",
"bytes": "322562"
},
{
"name": "Ruby",
"bytes": "280"
}
],
"symlink_target": ""
}
|
import socket
import sys
client_socket = None  # ensures the name is defined if socket creation fails below
try:
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.settimeout(.150)
# print "client connected"
except socket.error, (code,message):
if client_socket:
client_socket.close()
print "Could not open socket: " + message
sys.exit(1)
def initialize_handshake(HOST, PORT): # setup socket and start the connection to the model
client_socket.connect((HOST,int(PORT)))
# print "client_socket after connect : --"+ str(client_socket) +"--"
def process(HOST, PORT, GET, client_socketport=None):
try:
# client_socket.send("GET %s HTTP/1.0\r\nHost: %s\r\n\r\n" % (GET+"&]", HOST))
        client_socket.sendall("GET %s HTTP/1.1\r\nHost: %s\r\n\r\n" % (GET, HOST))  # sendall transmits the whole request in one call
# print "send : "+GET
response = client_socket.recv(1024) # buffer size is 1024 bytes
data = response.split("\r\n")
        response = data[-4].split("]")[0]
print response
except socket.error, msg:
sys.stderr.write("[ERROR] in TCP client send and receive :%s\n " % msg)
response = ""
return response
if __name__ == "__main__":
    # host, port and the GET query string are expected as command-line arguments
    host, port, data = sys.argv[1], sys.argv[2], sys.argv[3]
    initialize_handshake(host, port)
    print process(host, port, data)
|
{
"content_hash": "627ba5b0771d7985619390dfc0770fee",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 163,
"avg_line_length": 35.729729729729726,
"alnum_prop": 0.6232980332829047,
"repo_name": "slremy/testingpubsub",
"id": "5f14eaa030c0d5e9e0746f0884eb8ceb8cc5d2c6",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myBallPlate/httpmanualclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32047"
},
{
"name": "HTML",
"bytes": "7708"
},
{
"name": "Java",
"bytes": "30511"
},
{
"name": "Makefile",
"bytes": "431"
},
{
"name": "Python",
"bytes": "95479"
}
],
"symlink_target": ""
}
|
"""Meetup API wrapper."""
from datetime import datetime
import json
import logging
import redis
import requests
__all__ = ('APIWrapper',)
_logger = logging.getLogger(__name__)
class APIWrapper:
"""Wrapper around the meetup.com API."""
def __init__(self, api_key):
self.api_key = api_key
def _get(self, method, limit, **params):
r = redis.StrictRedis()
key = 'meetup_{}_{}'.format(method, limit)
        result = r.get(key)
if not result:
url = 'http://api.meetup.com/2/{}.json'.format(method)
params['key'] = self.api_key
params['page'] = limit
resp = requests.get(url, params=params)
if resp.status_code != 200:
_logger.error('Meetup API <{}>: {}'.format(
resp.status_code, resp.json().get('details')))
message = 'Meetup returned no results for {}'.format(method)
_logger.error(message)
return []
result = resp.json().get('results', [])
r.set(key, json.dumps(result))
r.expire(key, 300)
else:
result = json.loads(result)
return result
def events(self, group_id, limit=None):
"""Return the upcoming events for the group."""
limit = limit or 20
result = self._get(
'events',
limit,
group_id=group_id,
status='upcoming',
visibility='public',
)
events = []
for event in result:
name = event.get('name', 'Unnamed event')
url = event.get('event_url', '')
description = event.get('description', '')
time = int(event.get('time', 0)) + int(event.get('utc_offset', 0))
time /= 1000
events.append({
'name': name.strip(),
'url': url,
'description': description,
'time': datetime.fromtimestamp(time),
})
return events
def photos(self, group_id, limit=None):
"""Return photos for the group."""
limit = limit or 20
result = self._get('photos', limit, group_id=group_id)
photos = []
for photo in result:
url = photo.get('photo_link')
if not url:
continue
photos.append({'url': url})
return photos
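
# Usage sketch (illustrative): the API key and group id are placeholders, and a
# Redis server on the default localhost port is assumed for the 300-second
# response cache used by _get().
if __name__ == '__main__':
    wrapper = APIWrapper(api_key='YOUR_MEETUP_API_KEY')
    for upcoming in wrapper.events(group_id=12345678, limit=5):
        print('{} {} {}'.format(upcoming['time'], upcoming['name'], upcoming['url']))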
|
{
"content_hash": "cb11d04b3674af30472cdf4348dbb846",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 27.055555555555557,
"alnum_prop": 0.5039014373716633,
"repo_name": "NYCPython/nycpython.com",
"id": "9bdfed59583670c0df835293a78bf402190630a8",
"size": "2435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nycpython/meetup/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7762"
},
{
"name": "Shell",
"bytes": "1027"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from scipy.stats import skew, kurtosis
from matplotlib import pyplot, animation
from flowstats import cluster
class DataSet(object):
"""
A single multi-variate data set
"""
def __init__(self, parameter_count):
if not isinstance(parameter_count, int):
raise TypeError("'parameter_count' must be an integer")
self._parameter_count = parameter_count
self.blobs = {}
self.length = None
self.raw_results = None # holds DPMixture object
self.results = None
@property
def labels(self):
return self.blobs.keys()
@property
def parameter_count(self):
return self._parameter_count
def add_blob(self, label, blob_data):
if not isinstance(label, int):
raise TypeError("'label' must be an integer")
if not isinstance(blob_data, np.ndarray):
raise TypeError("'blob_data' must be a NumPy 'ndarray'")
# noinspection PyUnresolvedReferences
if blob_data.shape[1] != self._parameter_count:
raise ValueError("blob does not match data set's parameter count")
if label in self.blobs.keys():
raise ValueError("This label is already in use")
else:
self.blobs[label] = blob_data
def plot_blobs(
self,
labels,
x=0,
y=1,
figure_size=(8, 8),
x_lim=None,
y_lim=None
):
pyplot.figure(figsize=figure_size)
if x_lim is not None:
pyplot.xlim(xmin=x_lim[0], xmax=x_lim[1])
if y_lim is not None:
pyplot.ylim(ymin=y_lim[0], ymax=y_lim[1])
for label in labels:
label_array = np.empty(self.blobs[label].shape[0])
label_array.fill(label)
pyplot.scatter(
self.blobs[label][:, x],
self.blobs[label][:, y],
s=4,
c=label_array,
cmap=pyplot.cm.get_cmap('jet'),
vmin=min(self.blobs.keys()),
vmax=max(self.blobs.keys()),
edgecolors='none',
alpha=0.7
)
pyplot.show()
def _create_results_dataframe(self, results, n_components, n_iterations):
component_dicts = []
for iter_i in range(0, n_components * n_iterations, n_components):
iter_components = results[iter_i:iter_i + n_components]
            n_iter = iter_i // n_components  # floor division keeps this an int under Python 3 as well
for component_i, _component in enumerate(iter_components):
component_dict = {
'iteration': n_iter,
'component': component_i,
'weight': _component.pi,
}
for loc_i in range(0, self._parameter_count):
component_dict['loc' + str(loc_i)] = _component.mu[loc_i]
component_dicts.append(component_dict)
return pd.DataFrame(component_dicts)
def cluster(
self,
component_count,
burn_in,
iteration_count,
random_seed,
initial_conditions=None,
model='dp',
normed=False
):
if self.results is not None:
raise ValueError("Data set already has clustering results")
model = cluster.DPMixtureModel(
component_count,
iteration_count,
burn_in,
model=model
)
if initial_conditions is not None:
# should check keys of initial values, the
# shapes & values should be taken care of in FlowStats
model.load_pi(initial_conditions['pis'])
model.load_mu(initial_conditions['mus'])
model.load_sigma(initial_conditions['sigmas'])
self.raw_results = model.fit(
np.vstack(self.blobs.values()),
device=0,
seed=random_seed,
munkres_id=False,
verbose=True,
normed=normed
)
self.results = self._create_results_dataframe(
self.raw_results,
component_count,
self.raw_results.niter
)
def add_results(self, dp_mixture):
if not isinstance(dp_mixture, cluster.DPMixture):
raise TypeError("Data set results must be a 'DPMixture'")
elif self.raw_results is not None:
raise ValueError("Data set already has clustering results")
if len(dp_mixture) % dp_mixture.niter != 0:
raise ValueError("Failed to parse DPMixture components")
iteration_count = dp_mixture.niter
        component_count = len(dp_mixture) // iteration_count  # floor division keeps this an int under Python 3 as well
self.raw_results = dp_mixture
self.results = self._create_results_dataframe(
self.raw_results,
component_count,
iteration_count
)
def test_component(self, component_dataframe, ignore_weight=False):
"""
Tests a given component dataframe for convergence, returning
True for converged components
:param component_dataframe: Pandas dataframe
:param ignore_weight: boolean
:return: boolean
"""
# define our acceptable bounds
skew_range = [-0.6, 0.6]
kurt_range = [-1.5, 0.75] # accept shorter tails for bang-on data
weight_low = 0.008
# perform weight test first if not ignored
if not ignore_weight:
if component_dataframe.weight.mean() < weight_low:
return False
if skew(component_dataframe.weight) < skew_range[0]:
return False
if skew(component_dataframe.weight) > skew_range[1]:
return False
if kurtosis(component_dataframe.weight) < kurt_range[0]:
return False
if kurtosis(component_dataframe.weight) > kurt_range[1]:
return False
# now for the component parameter locations
for param in ['loc'+str(i) for i in range(self._parameter_count)]:
if skew(component_dataframe[param]) < skew_range[0]:
return False
if skew(component_dataframe[param]) > skew_range[1]:
return False
if kurtosis(component_dataframe[param]) < kurt_range[0]:
return False
if kurtosis(component_dataframe[param]) > kurt_range[1]:
return False
# all tests passed
return True
def get_valid_components(self, ignore_weight=False):
if self.raw_results is None:
raise ValueError("Data set has no saved results")
# list of good components to return
good_comps = []
for comp in self.results.component.unique():
comp_data = self.results[self.results.component == comp]
comp_passed = self.test_component(
comp_data,
ignore_weight=ignore_weight
)
if comp_passed:
good_comps.append(comp)
return good_comps
def get_log_likelihood_trace(self):
if self.raw_results is None:
raise ValueError("Data set has no saved results")
log_likelihoods = []
data = np.vstack(self.blobs.values())
for i in range(self.raw_results.niter):
dp_mixture_iter = self.raw_results.get_iteration(i)
log_likelihoods.append(
dp_mixture_iter.log_likelihood(data)
)
return log_likelihoods
def plot_log_likelihood_trace(self):
log_likelihoods = self.get_log_likelihood_trace()
n_iterations = self.raw_results.niter
fig = pyplot.figure(figsize=(16, 4))
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Log likelihood trace')
ax.plot(
range(n_iterations),
log_likelihoods,
'dodgerblue',
lw='1.0',
alpha=0.8
)
return fig
def plot_param_iteration_trace(
self,
component,
parameter,
figure_size=None
):
if figure_size is not None:
fig = pyplot.figure(figsize=figure_size)
else:
fig = pyplot.figure(figsize=(16, 4))
param = 'loc' + str(parameter)
ds_comp = self.results[self.results.component == component]
comp_param_skew = skew(ds_comp[param])
comp_param_kurt = kurtosis(ds_comp[param])
ax = fig.add_subplot(1, 1, 1)
ax.set_title(
'Component: %d, Param: %s, Skew: %.2f, Kurt: %.2f' %
(component, param, comp_param_skew, comp_param_kurt)
)
ax.set_xlim(0, len(ds_comp.iteration))
ax.set_ylim(ds_comp[param].min()/1.5, ds_comp[param].max())
ax.plot(
ds_comp.iteration,
ds_comp[param],
'dodgerblue',
lw='0.5',
alpha=0.8
)
pyplot.show()
def plot_iteration_traces(self, component):
fig = pyplot.figure(figsize=(16, 4 * self._parameter_count))
subplot_n = 1
ds_comp = self.results[self.results.component == component]
for param in ['loc'+str(i) for i in range(self._parameter_count)]:
ax = fig.add_subplot(self._parameter_count, 1, subplot_n)
ax.set_title(
'Component: %d, Param: %s' %
(component, param)
)
ax.set_xlim(0, len(ds_comp.iteration))
ax.set_ylim(ds_comp[param].min()/1.5, ds_comp[param].max())
ax.plot(
ds_comp.iteration,
ds_comp[param],
'dodgerblue',
lw='0.5',
alpha=0.8
)
ax2 = ax.twinx()
ax2.set_xlim(0, len(ds_comp.iteration))
ax2.set_ylim(0.0, 1.0)
ax2.plot(
ds_comp.iteration,
ds_comp.weight,
'sienna',
lw='0.5',
alpha=0.5
)
ax2.fill_between(
ds_comp.iteration,
ds_comp.weight,
where=ds_comp.iteration >= 0,
interpolate=True,
color='salmon',
lw='1',
alpha=0.5)
subplot_n += 1
pyplot.show()
def get_classifications(self, iteration, labels):
dp_mixture_iter = self.raw_results.get_iteration(iteration)
raw_data = np.vstack([self.blobs[label] for label in labels])
classifications = dp_mixture_iter.classify(raw_data)
return classifications
def plot_classifications(self, iteration, x=0, y=1, x_lim=None, y_lim=None):
dp_mixture_iter = self.raw_results.get_iteration(iteration)
raw_data = np.vstack(self.blobs.values())
classifications = dp_mixture_iter.classify(raw_data)
pyplot.figure(figsize=(8, 8))
cmap = pyplot.cm.get_cmap('jet')
pyplot.scatter(
raw_data[:, x],
raw_data[:, y],
s=8,
c=classifications,
edgecolors='none',
cmap=cmap,
vmax=len(dp_mixture_iter) - 1,
alpha=1.0
)
if x_lim is not None:
pyplot.xlim(xmin=x_lim[0])
pyplot.xlim(xmax=x_lim[1])
if y_lim is not None:
pyplot.ylim(ymin=y_lim[0])
pyplot.ylim(ymax=y_lim[1])
for i, dp_cluster in enumerate(dp_mixture_iter):
pyplot.text(
dp_cluster.mu[x],
dp_cluster.mu[y],
str(i),
va='center',
ha='center',
color='lime',
size=14,
bbox=dict(facecolor='black')
)
pyplot.title('Iteration %d' % iteration)
pyplot.show()
def plot_animated_trace(
self,
x=0,
y=1,
x_lim=None,
y_lim=None,
iter_start=0
):
def update_plot(frame):
pyplot.title('Iteration: %d' % frame)
scatter.set_array(classifications[frame - iter_start])
n_iterations = self.raw_results.niter
n_clusters = len(self.raw_results.get_iteration(0))
raw_data = np.vstack(self.blobs.values())
classifications = []
for i in range(iter_start, n_iterations):
new_iter = self.raw_results.get_iteration(i)
classifications.append(new_iter.classify(raw_data))
fig = pyplot.figure(figsize=(8, 8))
ax = fig.add_subplot(111, axisbg='gray')
scatter = ax.scatter(
raw_data[:, x],
raw_data[:, y],
s=16,
c=classifications[0], # start with 1st iteration
edgecolors='none',
cmap=pyplot.cm.get_cmap('jet'),
vmax=n_clusters - 1,
alpha=1.0
)
if x_lim is not None:
ax.set_xlim(x_lim[0], x_lim[1])
if y_lim is not None:
ax.set_ylim(y_lim[0], y_lim[1])
animated_plot = animation.FuncAnimation(
fig,
update_plot,
interval=150,
frames=xrange(iter_start, n_iterations),
fargs=()
)
pyplot.title('Fitted clusters')
pyplot.show()
return animated_plot
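
# Usage sketch (illustrative): cluster two synthetic 2-D blobs with the
# GPU-backed DPMixtureModel from flowstats. All parameter values below are
# arbitrary examples, and a working flowstats/CUDA install is assumed.
if __name__ == '__main__':
    ds = DataSet(parameter_count=2)
    ds.add_blob(1, np.random.multivariate_normal([0, 0], np.eye(2), 500))
    ds.add_blob(2, np.random.multivariate_normal([4, 4], np.eye(2), 500))
    ds.cluster(component_count=4, burn_in=100, iteration_count=200, random_seed=123)
    print(ds.get_valid_components())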
|
{
"content_hash": "2cd869650cfbfe020f1250bba3b0e510",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 80,
"avg_line_length": 29.665934065934067,
"alnum_prop": 0.5272632982664098,
"repo_name": "whitews/dpconverge",
"id": "bedb36fb51f5d3533a8b0fc266af608d6ed91d19",
"size": "13498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dpconverge/data_set.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43612"
}
],
"symlink_target": ""
}
|
import argparse
import inspect
import json
import sys
from ast import literal_eval
import six
from validation import ValidationException
class ConfigOption(object):
    def __init__(self, default=None, description='', validators=None, type=None):
        self.default = default
        self.description = description
        self.type = type  # optional expected type for the option's value
        self.validators = validators or list()
class Configurator(object):
def __init__(self, file):
self._file = file
def loadConfig(self):
try:
with open(self._file, 'r') as f:
obj = json.load(f)
return obj
        except (IOError, ValueError):
return dict()
def applyConfig(self):
obj = self.loadConfig()
for key, val in six.iteritems(obj):
setattr(sys.modules[__name__], key, val)
def saveConfig(self, obj):
with open(self._file, 'w') as f:
json.dump(obj, f)
def main(self):
parser = argparse.ArgumentParser(description='Configure your application.')
choices = ['all']
options = dict()
for (name, value) in inspect.getmembers(sys.modules[__name__], lambda x: isinstance(x, ConfigOption)):
options[name] = value
choices.append(name)
parser.add_argument('option', choices=choices, help='The configuration option to configure.')
args = parser.parse_args()
option = args.option
if option == 'all':
cfg = self.loadConfig()
for name, configObject in six.iteritems(options):
try:
                    data = Configurator._queryUser(name, configObject, cfg[name] if name in cfg else configObject.default)
cfg[name] = data
except Exception as ex:
six.print_('Failed to configure {0}: {1}'.format(name, repr(ex)))
six.print_('Saving configuration')
self.saveConfig(cfg)
elif option == 'help':
six.print_('Not implemented.')
else:
configObject = getattr(sys.modules[__name__], option)
cfg = self.loadConfig()
try:
                data = Configurator._queryUser(option, configObject, cfg[option] if option in cfg else configObject.default)
cfg[option] = data
except Exception as ex:
six.print_('Failed to configure {0}: {1}'.format(option, repr(ex)))
six.print_('Saving configuration')
self.saveConfig(cfg)
@staticmethod
def _queryUser(name, configObject, default):
data = six.moves.input('ConfigOption[{name}, {default}]: '.format(name=name, default=default))
try:
data = literal_eval(data)
        except (ValueError, SyntaxError):
data = str(data)
if data == 'null':
data = None
elif isinstance(data, six.string_types) and len(data) == 0:
data = default
for val in configObject.validators:
try:
data = val(data)
except ValidationException as ex:
                six.print_('Failed to validate {0}: {1}'.format(name, ex))
raise Exception('Failed to validate.')
return data
localPort = ConfigOption(description='The local port to forward')
forwards = ConfigOption(description='The forwards for breadproxy in a dictionary in the format of {\'protocolName\': \'clientString\'}')
configurator = Configurator('breadproxy.json')
if __name__ == '__main__':
configurator.main()
else:
configurator.applyConfig()
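
# Usage sketch (illustrative): run the module directly to prompt for and persist
# an option, then import it elsewhere to read the stored value. The file name
# breadproxy.json comes from the Configurator instance above; the value shown is
# only an example.
#
#   $ python config.py localPort      # prompts: ConfigOption[localPort, None]:
#
#   import config
#   port = config.localPort           # plain value substituted by applyConfig()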
|
{
"content_hash": "beda7cb088397ea682d378c3252741a7",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 136,
"avg_line_length": 27.318181818181817,
"alnum_prop": 0.6895174708818635,
"repo_name": "systocrat/bread",
"id": "60c03fc636d13fc6595c96496733f15e8acf99bc",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16494"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import time
import os
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from tests.web.frontend.models import IndexPage, SearchPage
pytestmark = pytest.mark.uses_web
@pytest.fixture()
def page(driver, base_url):
page = IndexPage(driver, root_uri=base_url)
page.get('')
return page
@pytest.mark.xfail()
@pytest.mark.timeout(45)
@pytest.mark.usefixtures('live_server')
class TestIndexPage(object):
''' Tests for the main Index page '''
def test_title(self, page):
assert 'Marvin' in page.w.title
def test_goto_random(self, page):
assert 'Marvin' in page.w.title
page.imagepage.click()
time.sleep(1)
assert 'random' in page.w.current_url
def test_goto_search(self, page):
page.searchpage.click()
time.sleep(1)
assert 'Search' in page.w.title
assert 'search' in page.w.current_url
results = page.w.find_elements(By.ID, "search_results")
assert len(results) == 0
@pytest.fixture()
def search_page(driver, base_url):
page = SearchPage(driver, root_uri=base_url)
page.get('search/')
return page
@pytest.mark.xfail()
@pytest.mark.timeout(45)
@pytest.mark.usefixtures('live_server')
class TestSearchPage(object):
''' Tests for the main Search page '''
def test_title(self, search_page):
assert 'Search' in search_page.w.title
def test_search(self, search_page):
assert search_page.results is None
search_page.searchbox = 'nsa.z < 0.1'
search_page.searchbox = Keys.RETURN
assert search_page.results is not None
assert search_page.table is not None
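
# A conftest-style sketch of the 'driver' and 'base_url' fixtures these tests
# assume (illustrative; the real project wires them up elsewhere, and the
# browser choice and address below are assumptions):
#
#   import pytest
#   from selenium import webdriver
#
#   @pytest.fixture(scope='session')
#   def base_url():
#       return 'http://localhost:5000/marvin/'
#
#   @pytest.fixture()
#   def driver():
#       browser = webdriver.Chrome()
#       yield browser
#       browser.quit()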
|
{
"content_hash": "4d9dd931572dd7d5a2f73018b5b9e77e",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 64,
"avg_line_length": 27.515625,
"alnum_prop": 0.6740488358886996,
"repo_name": "sdss/marvin",
"id": "a2b0fe902d91e2e8e1c61ef11e8e7a675c4da3e3",
"size": "1992",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/web/frontend/test_frontend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "166739"
},
{
"name": "HTML",
"bytes": "91250"
},
{
"name": "JavaScript",
"bytes": "247561"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1706012"
},
{
"name": "SCSS",
"bytes": "266310"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
}
|
import math
import sys
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.model.set_decoder_beam_size(self.beam_size)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(
self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs
) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception(
"expected src_tokens or source in net input. input keys: "
+ str(net_input.keys())
)
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"):
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        )  # contains lists of dictionaries of information about the hypothesis being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
with torch.autograd.profiler.record_function(
"EnsembleModel: forward_decoder"
):
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
            # Record attention scores; only supported when avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx)
unfin_idx = torch.div(bbsz_idx, beam_size, rounding_mode="trunc")
sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx)
# Create a set of "{sent}{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# For every finished beam item
# sentence index in the current (possibly reduced) batch
seen = (sent << 32) + unfin_idx
unique_seen: List[int] = torch.unique(seen).tolist()
if self.match_source_len:
condition = step > torch.index_select(src_lengths, 0, unfin_idx)
eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores)
sent_list: List[int] = sent.tolist()
for i in range(bbsz_idx.size()[0]):
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent_list[i]]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent_list[i]].append(
{
"tokens": tokens_clone[i],
"score": eos_scores[i],
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for unique_s in unique_seen:
# check termination conditions for this sentence
unique_sent: int = unique_s >> 32
unique_unfin_idx: int = unique_s - (unique_sent << 32)
if not finished[unique_sent] and self.is_finished(
step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size
):
finished[unique_sent] = True
newly_finished.append(unique_unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min(
[
m.max_decoder_positions()
for m in self.models
if hasattr(m, "max_decoder_positions")
]
+ [sys.maxsize]
)
def set_decoder_beam_size(self, beam_size):
"""Set beam size for efficient beamable enc-dec attention."""
if beam_size > 1:
for model in self.models:
if hasattr(model, "set_beam_size"):
model.set_beam_size(beam_size)
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
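
# Usage sketch (illustrative, not part of fairseq): load a trained ensemble and
# decode one prepared batch. The checkpoint path is a placeholder, and the batch
# is assumed to come from a task dataloader with the usual "net_input" keys.
#
#   from fairseq import checkpoint_utils
#
#   models, _cfg, task = checkpoint_utils.load_model_ensemble_and_task(["checkpoint_best.pt"])
#   generator = SequenceGenerator(models, task.target_dictionary, beam_size=5)
#   hypos = generator.generate(models, sample)   # sample: batch dict with "net_input"
#   best_tokens = hypos[0][0]["tokens"]          # top hypothesis for the first sentence
#   print(task.target_dictionary.string(best_tokens))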
|
{
"content_hash": "eff57fd8ce8eb1b00f82d6dd8f214dfd",
"timestamp": "",
"source": "github",
"line_count": 992,
"max_line_length": 110,
"avg_line_length": 39.921370967741936,
"alnum_prop": 0.5491894348770264,
"repo_name": "pytorch/fairseq",
"id": "7d323d85e7c9ca9325c77f12c3a95d259459ede7",
"size": "39780",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/sequence_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import signal
import subprocess
import argparse
import time
import math
import random
from multiprocessing import Process
from functools import reduce
import numpy as np
import pickle
import unittest
import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import io
from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
from dist_simnet_bow import TestDistSimnetBow2x2, DATA_URL, DATA_MD5
class TestDistSaveLoad2x2(TestDistSimnetBow2x2):
def _load_persistable_vars(self, executor, dirname, program):
def _is_checkpoint_var(var):
"""
            Checkpoints do not save or load every variable. Variables of type
            FEED_MINIBATCH, FETCH_LIST, or RAW, and variables whose names end
            with @GRAD, are discarded.
            :param var: Variable
"""
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.RAW:
return False
            # "@GRAD" variables hold gradients; the checkpoint does not save them.
            if "@GRAD" in var.name:
                return False
            # ".trainer_" variables belong to distributed training; the checkpoint does not save them.
            if ".trainer_" in var.name:
                return False
            # ".block" variables belong to distributed training; the checkpoint does not save them.
            if ".block" in var.name:
                return False
if "tmp_" in var.name:
return False
return var.persistable
io.load_vars(
executor,
dirname=dirname,
main_program=program,
predicate=_is_checkpoint_var,
filename=None)
def run_pserver(self, args):
self.get_model(batch_size=2)
# NOTE: pserver should not call memory optimize
t = self.get_transpiler(args.trainer_id,
fluid.default_main_program(), args.endpoints,
args.trainers, args.sync_mode, False,
args.current_endpoint)
pserver_prog = t.get_pserver_program(args.current_endpoint)
startup_prog = t.get_startup_program(args.current_endpoint,
pserver_prog)
need_load = bool(int(os.getenv("LOAD", "0")))
model_dir = os.getenv("MODEL_DIR", "")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
if need_load and model_dir:
fluid.io.load_persistables(exe, model_dir, pserver_prog)
exe.run(pserver_prog)
def run_trainer(self, args):
test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
self.get_model(batch_size=2)
if args.mem_opt:
fluid.memory_optimize(fluid.default_main_program(), skip_grads=True)
if args.update_method == "pserver":
t = self.get_transpiler(args.trainer_id,
fluid.default_main_program(),
args.endpoints, args.trainers,
args.sync_mode)
trainer_prog = t.get_trainer_program()
else:
trainer_prog = fluid.default_main_program()
if args.use_cuda:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
startup_exe = fluid.Executor(place)
startup_exe.run(fluid.default_startup_program())
strategy = fluid.ExecutionStrategy()
strategy.num_threads = 1
strategy.allow_op_delay = False
build_stra = fluid.BuildStrategy()
if args.use_reduce:
build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
else:
build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
exe = fluid.ParallelExecutor(
args.use_cuda,
loss_name=avg_cost.name,
exec_strategy=strategy,
build_strategy=build_stra)
feed_var_list = [
var for var in trainer_prog.global_block().vars.values()
if var.is_data
]
feeder = fluid.DataFeeder(feed_var_list, place)
reader_generator = train_reader()
def get_data():
origin_batch = next(reader_generator)
if args.update_method == "pserver" and args.use_reader_alloc:
new_batch = []
for offset, item in enumerate(origin_batch):
if offset % 2 == args.trainer_id:
new_batch.append(item)
return new_batch
else:
return origin_batch
need_save = bool(int(os.getenv("SAVE", "0")))
model_dir = os.getenv("MODEL_DIR", "")
save_mode = os.getenv("SAVE_MODE", "")
if save_mode == "LOCAL":
if need_save:
for _ in six.moves.xrange(RUN_STEP):
loss, = exe.run(fetch_list=[avg_cost.name],
feed=feeder.feed(get_data()))
if need_save and model_dir:
io.save_persistables(startup_exe, model_dir, trainer_prog)
var = np.array(fluid.global_scope().find_var('__fc_b__').get_tensor(
))
if six.PY2:
print(pickle.dumps(np.ravel(var).tolist()))
else:
sys.stdout.buffer.write(pickle.dumps(np.ravel(var).tolist()))
elif save_mode == "DIST":
skip_steps = int(os.getenv("SKIP_STEPS"))
loss = None
if need_save:
for idx in six.moves.xrange(8):
loss, = exe.run(fetch_list=[avg_cost.name],
feed=feeder.feed(get_data()))
if need_save and model_dir and idx == skip_steps and args.trainer_id == 0:
io.save_persistables(startup_exe, model_dir,
trainer_prog)
else:
for idx in six.moves.xrange(8):
data = get_data()
if idx <= skip_steps:
continue
loss, = exe.run(fetch_list=[avg_cost.name],
feed=feeder.feed(data))
if six.PY2:
print(pickle.dumps(loss.tolist()))
else:
sys.stdout.buffer.write(pickle.dumps(loss.tolist()))
else:
raise Exception("save_mode must be LOCAL or DIST")
if __name__ == "__main__":
paddle.dataset.common.download(DATA_URL, 'simnet', DATA_MD5, "train")
runtime_main(TestDistSaveLoad2x2)
|
{
"content_hash": "6e030c9877d4167f955270956f5c40a9",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 96,
"avg_line_length": 35.865284974093264,
"alnum_prop": 0.5443513435423288,
"repo_name": "baidu/Paddle",
"id": "f0f13a9d49c5b84521aa3e00bdcabe0c494853a7",
"size": "7535",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/dist_save_load.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "217842"
},
{
"name": "C++",
"bytes": "2771237"
},
{
"name": "CMake",
"bytes": "113670"
},
{
"name": "Cuda",
"bytes": "424141"
},
{
"name": "M4",
"bytes": "40913"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "892636"
},
{
"name": "Shell",
"bytes": "64351"
}
],
"symlink_target": ""
}
|
"""This file contains a syslog parser in plaso."""
import datetime
import logging
from plaso.lib import event
from plaso.lib import lexer
from plaso.lib import text_parser
from plaso.lib import timelib
from plaso.lib import utils
class SyslogLineEvent(event.TextEvent):
"""Convenience class for a syslog line event."""
DATA_TYPE = 'syslog:line'
def __init__(self, timestamp, offset, attributes):
"""Initializes the event object.
Args:
timestamp: The timestamp time value. The timestamp contains the
number of microseconds since Jan 1, 1970 00:00:00 UTC.
offset: The offset of the event.
attributes: A dict that contains the events attributes
"""
super(SyslogLineEvent, self).__init__(timestamp, attributes)
self.offset = offset
class SyslogParser(text_parser.SlowLexicalTextParser):
"""Parse text based syslog files."""
NAME = 'syslog'
  # TODO: can we change this similar to SQLite where we create an
# event specific object for different lines using a callback function.
# Define the tokens that make up the structure of a syslog file.
tokens = [
lexer.Token('INITIAL',
'(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ',
'SetMonth', 'DAY'),
lexer.Token('DAY', r'\s?(\d{1,2})\s+', 'SetDay', 'TIME'),
lexer.Token('TIME', r'([0-9:\.]+) ', 'SetTime', 'STRING_HOST'),
lexer.Token('STRING_HOST', r'^--(-)', 'ParseHostname', 'STRING'),
lexer.Token('STRING_HOST', r'([^\s]+) ', 'ParseHostname', 'STRING_PID'),
lexer.Token('STRING_PID', r'([^\:\n]+)', 'ParsePid', 'STRING'),
lexer.Token('STRING', r'([^\n]+)', 'ParseString', ''),
lexer.Token('STRING', r'\n\t', None, ''),
lexer.Token('STRING', r'\t', None, ''),
lexer.Token('STRING', r'\n', 'ParseMessage', 'INITIAL'),
lexer.Token('.', '([^\n]+)\n', 'ParseIncomplete', 'INITIAL'),
lexer.Token('.', '\n[^\t]', 'ParseIncomplete', 'INITIAL'),
lexer.Token('S[.]+', '(.+)', 'ParseString', ''),
]
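  # Illustrative walkthrough (not part of the original parser): a made-up line
  # such as "Jan 22 07:54:32 myhost sshd[1234]: Accepted password for user"
  # moves through the states INITIAL -> DAY -> TIME -> STRING_HOST ->
  # STRING_PID -> STRING; SetMonth/SetDay/SetTime store the date parts,
  # ParseHostname captures "myhost", ParsePid captures "sshd" and 1234, and
  # ParseString accumulates the message until ParseMessage fires on the newline.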
def __init__(self, pre_obj, config):
"""Initializes the syslog parser.
Args:
pre_obj: Preprocessor object. If the year cannot be determined
from the input the current year is assumed. The year
can be set to a specific value by defining it in the
preprocessor object, e.g. pre_obj.year = 2012.
config: A configuration object.
"""
super(SyslogParser, self).__init__(pre_obj, config, True)
# Set the initial year to 0 (fixed in the actual Parse method)
    # TODO: this is a HACK to get the tests working; let's discuss this
self._year_use = getattr(pre_obj, 'year', 0)
self._last_month = 0
# TODO: move to formatter.
self.source_long = 'Log File'
# Set some additional attributes.
self.attributes['reporter'] = ''
self.attributes['pid'] = ''
def GetYear(self, stat, zone):
"""Retrieves the year either from the input file or from the settings."""
time = getattr(stat, 'crtime', 0)
if not time:
time = getattr(stat, 'ctime', 0)
if not time:
current_year = timelib.GetCurrentYear()
logging.error((
          u'Unable to determine year of syslog file.\nDefaulting to: '
u'{0:d}').format(current_year))
return current_year
try:
timestamp = datetime.datetime.fromtimestamp(time, zone)
except ValueError as exception:
current_year = timelib.GetCurrentYear()
logging.error(
u'Unable to determine year of syslog file with error: {0:s}\n'
u'Defaulting to: {1:d}'.format(exception, current_year))
return current_year
return timestamp.year
def ParseLine(self, zone):
"""Parse a single line from the syslog file.
This method extends the one from TextParser slightly, adding
the context of the reporter and pid values found inside syslog
files.
Args:
zone: The timezone of the host computer.
Returns:
An EventObject that is constructed from the syslog entry.
"""
if not self._year_use:
# TODO: Find a decent way to actually calculate the correct year
# from the syslog file, instead of relying on stats object.
stat = self.file_entry.GetStat()
self._year_use = self.GetYear(stat, zone)
if not self._year_use:
# TODO: Make this sensible, not have the year permanent.
self._year_use = 2012
month_compare = int(self.attributes['imonth'])
if month_compare and self._last_month > month_compare:
self._year_use += 1
self._last_month = int(self.attributes['imonth'])
self.attributes['iyear'] = self._year_use
return super(SyslogParser, self).ParseLine(zone)
def ParseHostname(self, match, **_):
"""Parses the hostname.
This is a callback function for the text parser (lexer) and is
called by the STRING_HOST lexer state.
Args:
match: A regular expression match group that contains the match
by the lexer.
"""
self.attributes['hostname'] = match.group(1)
def ParsePid(self, match, **_):
"""Parses the process identifier (PID).
This is a callback function for the text parser (lexer) and is
called by the STRING_PID lexer state.
Args:
match: A regular expression match group that contains the match
by the lexer.
"""
# TODO: Change this logic and rather add more Tokens that
# fully cover all variations of the various PID stages.
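    # Illustrative examples (not from the original source): a match such as
    # "sshd[1234]" yields reporter='sshd' and pid=1234, while a bare match such
    # as "CRON" sets reporter='CRON' and leaves pid unchanged.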
line = match.group(1)
if line[-1] == ']':
splits = line.split('[')
if len(splits) == 2:
self.attributes['reporter'], pid = splits
else:
pid = splits[-1]
self.attributes['reporter'] = '['.join(splits[:-1])
try:
self.attributes['pid'] = int(pid[:-1])
except ValueError:
self.attributes['pid'] = 0
else:
self.attributes['reporter'] = line
def ParseString(self, match, **_):
"""Parses a (body text) string.
This is a callback function for the text parser (lexer) and is
called by the STRING lexer state.
Args:
match: A regular expression match group that contains the match
by the lexer.
"""
self.attributes['body'] += utils.GetUnicodeString(match.group(1))
def PrintLine(self):
"""Prints a log line."""
self.attributes['iyear'] = 2012
return super(SyslogParser, self).PrintLine()
# TODO: this is a rough initial implementation to get this working.
def CreateEvent(self, timestamp, offset, attributes):
"""Creates a syslog line event.
This overrides the default function in TextParser to create
syslog line events instead of text events.
Args:
timestamp: The timestamp time value. The timestamp contains the
number of microseconds since Jan 1, 1970 00:00:00 UTC.
offset: The offset of the event.
attributes: A dict that contains the events attributes.
Returns:
A text event (SyslogLineEvent).
"""
return SyslogLineEvent(timestamp, offset, attributes)
|
{
"content_hash": "32bd1408163c570baaa8a8a91c64e52a",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 78,
"avg_line_length": 34.08653846153846,
"alnum_prop": 0.6334273624823695,
"repo_name": "iwm911/plaso",
"id": "757be7b9faadf5b5a1bb10de432395af65bdca20",
"size": "7788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/syslog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2431825"
},
{
"name": "Shell",
"bytes": "21885"
},
{
"name": "VHDL",
"bytes": "2100224"
}
],
"symlink_target": ""
}
|
from Board import Board
from Board import Card
from copy import deepcopy
from BestFirstSolitaire import BestFirstSolitaire
from Solitaire import find
import unittest
class TestBestFirstSolitaire(unittest.TestCase):
def setUp(self):
self.sol = BestFirstSolitaire()
self.cards = [Card(color, num) for num in range(1, 10) \
for color in range(3)]
self.cards += [Card(color, None) for color in range(3, 6) \
for i in range(4)]
self.cards += [Card(6, None)]
self.moves = []
def testTableauToStock(self):
board = Board(self.cards)
self.sol.__getTableauToStock__(board, self.moves)
self.assertEqual(len(self.moves), 8)
#for move in self.moves:
#print self.sol.getChild(deepcopy(board), move)
def testMovesFromStock(self):
board = Board(self.cards)
board.stock[find(board.stock, None)] = board.tableau[-1].pop()
board.stock[find(board.stock, None)] = board.tableau[-2].pop()
board.stock[find(board.stock, None)] = board.tableau[-3].pop()
board.tableau[-1] = []
self.sol.__getMovesFromStock__(board, self.moves)
self.assertEqual(len(self.moves), 12)
#for move in self.moves:
#print self.sol.getChild(deepcopy(board), move)
def testTableauToFoundation(self):
board = Board(self.cards)
self.sol.__getTableauToFoundation__(board, self.moves)
self.assertEqual(len(self.moves), 3)
#for move in self.moves:
#print self.sol.getChild(deepcopy(board), move)
def testTableauToTableau(self):
cards = [Card(color, num) for color in range(3) \
for num in range(9, 0, -1)]
cards += [Card(color, None) for color in range(3, 6) \
for i in range(4)]
cards += [Card(6, None)]
board = Board(cards)
self.sol.__getTableauToTableau__(board, self.moves)
#for move in sorted(self.moves):
#print self.sol.getChild(deepcopy(board), move)
def testTableauToTableauToEmpty(self):
cards = self.cards
cards.reverse()
board = Board(cards)
board.tableau[0] = []
self.sol.__getTableauToTableau__(board, self.moves)
self.assertEqual(len(self.moves), 7)
#for move in self.moves:
#print self.sol.getChild(deepcopy(board), move)
def testTableauToTableauMixed(self):
board = Board(self.cards)
for i in range(len(board.tableau)):
for j in range(i, len(board.tableau[0])):
board.tableau[i][j], board.tableau[j][i] = \
board.tableau[j][i],board.tableau[i][j]
board.tableau[0] = []
self.sol.__getTableauToTableau__(board, self.moves)
self.assertEqual(len(self.moves), 9)
#for move in sorted(self.moves):
#print self.sol.getChild(deepcopy(board), move)
def testNextMove(self):
board = Board(self.cards)
print board
for move in self.sol.nextMove(board):
print self.sol.getChild(deepcopy(board), move)
|
{
"content_hash": "97d9aef5e974f6c34e43e0c3b6ec1b0b",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 70,
"avg_line_length": 39.1375,
"alnum_prop": 0.6026828489300543,
"repo_name": "davidxk/SolitaireBot",
"id": "25abff8e8932d42c213ed1aebc044de3dc3dbc89",
"size": "3131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/TestBestFirstSolitaire.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50120"
}
],
"symlink_target": ""
}
|
import dota2api
import joblib
import time
api = dota2api.Initialise(api_key='')
my_id = 76561197997856332
private_id = 4294967295 # a private profile player's 32-bit Steam ID
hist = api.get_match_history(account_id=my_id)#, matches_requested=1)
print(len(hist['matches']))
cur_smid = hist['matches'][0]['match_seq_num'] # 0 = newest, -1 = oldest
print(cur_smid)
# keeping only very high skill (skill=3), captain mode (game_mode=2) matches
kept_matches = {}
def get_matches(cur_smid):
try:
matches = api.get_match_history_by_seq_num(start_at_match_seq_num=cur_smid,
game_mode=2)['matches']
except:
print("waiting on the API")
time.sleep(3) # wait 3 seconds
return get_matches(cur_smid)
for m in matches:
cur_smid = m['match_seq_num']
try:
d = api.get_match_details(match_id=m['match_id'])
except:
print("waiting on the API")
time.sleep(3) # wait 3 seconds
return get_matches(cur_smid)
if d['game_mode'] == 2: #and d['human_players'] == 10:
mid = m['match_id']
if mid not in kept_matches:
for p in d['players']:
pid = p['account_id']
if pid != private_id:
try:
tm = api.get_match_history(account_id=pid, skill=3,
start_at_match_id=mid, matches_requested=1)
except:
print("waiting on the API")
time.sleep(3) # wait 3 seconds
return get_matches(cur_smid)
if len(tm['matches']):
kept_matches[mid] = d
break
return cur_smid + 1
for _ in xrange(1000):
cur_smid = get_matches(cur_smid)
joblib.dump(kept_matches, 'matches.joblib', compress=5)
if cur_smid <= 0:
break
print("dumped", len(kept_matches), "so far")
|
{
"content_hash": "da0e10351849123c92e105e448c72396",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 83,
"avg_line_length": 36.89090909090909,
"alnum_prop": 0.5243962543124692,
"repo_name": "syhw/dota2_stats",
"id": "1fc10d2489652773477838028b00da3416705849",
"size": "2029",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "get_matches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "5703"
},
{
"name": "Python",
"bytes": "9086"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import math
from fontTools.misc.transform import Transform
"""
This is a more sophisticated approach to performing math on transformation matrices.
Traditionally glyphMath applies the math straight to the elements in the matrix.
By decomposing the matrix into offset, scale and rotation factors, the interpolations
are much more natural, or more intuitive.
This could help in complex glyphs in which the rotation of a component plays an important role.
This MathTransform object itself has its own interpolation method. But in order to be able
to participate in (for instance) superpolator math, it is necessary to keep the
offset, scale and rotation decomposed for more than one math operation.
So, MathTransform decomposes the matrix, ShallowTransform carries it through the math,
then MathTransform is used again to compose the new matrix. If you don't need to math with
the transformation object itself, the MathTransform object is fine.
MathTransform by Frederik Berlaen
Transformation decomposition algorithm from
http://dojotoolkit.org/reference-guide/1.9/dojox/gfx.html#decompose-js
http://dojotoolkit.org/license
"""
def matrixToMathTransform(matrix):
""" Take a 6-tuple and return a ShallowTransform object."""
if isinstance(matrix, ShallowTransform):
return matrix
off, scl, rot = MathTransform(matrix).decompose()
return ShallowTransform(off, scl, rot)
def mathTransformToMatrix(mathTransform):
""" Take a ShallowTransform object and return a 6-tuple. """
m = MathTransform().compose(mathTransform.offset, mathTransform.scale, mathTransform.rotation)
return tuple(m)
class ShallowTransform(object):
""" A shallow math container for offset, scale and rotation. """
def __init__(self, offset, scale, rotation):
self.offset = offset
self.scale = scale
self.rotation = rotation
def __repr__(self):
return "<ShallowTransform offset(%3.3f,%3.3f) scale(%3.3f,%3.3f) rotation(%3.3f,%3.3f)>"%(self.offset[0], self.offset[1], self.scale[0], self.scale[1], self.rotation[0], self.rotation[1])
def __add__(self, other):
newOffset = self.offset[0]+other.offset[0],self.offset[1]+other.offset[1]
newScale = self.scale[0]+other.scale[0],self.scale[1]+other.scale[1]
newRotation = self.rotation[0]+other.rotation[0],self.rotation[1]+other.rotation[1]
return self.__class__(newOffset, newScale, newRotation)
def __sub__(self, other):
newOffset = self.offset[0]-other.offset[0],self.offset[1]-other.offset[1]
newScale = self.scale[0]-other.scale[0],self.scale[1]-other.scale[1]
newRotation = self.rotation[0]-other.rotation[0],self.rotation[1]-other.rotation[1]
return self.__class__(newOffset, newScale, newRotation)
def __mul__(self, factor):
if isinstance(factor, (int, float)):
fx = fy = float(factor)
else:
fx, fy = float(factor[0]), float(factor[1])
newOffset = self.offset[0]*fx,self.offset[1]*fy
newScale = self.scale[0]*fx,self.scale[1]*fy
newRotation = self.rotation[0]*fx,self.rotation[1]*fy
return self.__class__(newOffset, newScale, newRotation)
__rmul__ = __mul__
def __truediv__(self, factor):
""" XXX why not __div__ ?"""
if isinstance(factor, (int, float)):
fx = fy = float(factor)
else:
            fx, fy = float(factor[0]), float(factor[1])
if fx==0 or fy==0:
raise ZeroDivisionError((fx, fy))
newOffset = self.offset[0]/fx,self.offset[1]/fy
newScale = self.scale[0]/fx,self.scale[1]/fy
newRotation = self.rotation[0]/fx,self.rotation[1]/fy
return self.__class__(newOffset, newScale, newRotation)
def asTuple(self):
m = MathTransform().compose(self.offset, self.scale, self.rotation)
return tuple(m)
class MathTransform(object):
""" A Transform object that can compose and decompose the matrix into offset, scale and rotation."""
transformClass = Transform
def __init__(self, *matrixes):
matrix = self.transformClass()
if matrixes:
if isinstance(matrixes[0], (int, float)):
matrixes = [matrixes]
for m in matrixes:
matrix = matrix.transform(m)
self.matrix = matrix
def _get_matrix(self):
return (self.xx, self.xy, self.yx, self.yy, self.dx, self.dy)
def _set_matrix(self, matrix):
self.xx, self.xy, self.yx, self.yy, self.dx, self.dy = matrix
matrix = property(_get_matrix, _set_matrix)
def __repr__(self):
return "< %.8f %.8f %.8f %.8f %.8f %.8f >" % (self.xx, self.xy, self.yx, self.yy, self.dx, self.dy)
def __len__(self):
return 6
def __getitem__(self, index):
return self.matrix[index]
def __getslice__(self, i, j):
return self.matrix[i:j]
def __eq__(self, other):
return str(self) == str(other)
## transformations
def translate(self, x=0, y=0):
return self.__class__(self.transformClass(*self.matrix).translate(x, y))
def scale(self, x=1, y=None):
return self.__class__(self.transformClass(*self.matrix).scale(x, y))
def rotate(self, angle):
return self.__class__(self.transformClass(*self.matrix).rotate(angle))
def rotateDegrees(self, angle):
return self.rotate(math.radians(angle))
def skew(self, x=0, y=0):
return self.__class__(self.transformClass(*self.matrix).skew(x, y))
def skewDegrees(self, x=0, y=0):
return self.skew(math.radians(x), math.radians(y))
def transform(self, other):
return self.__class__(self.transformClass(*self.matrix).transform(other))
def reverseTransform(self, other):
return self.__class__(self.transformClass(*self.matrix).reverseTransform(other))
def inverse(self):
return self.__class__(self.transformClass(*self.matrix).inverse())
def copy(self):
return self.__class__(self.matrix)
## tools
def scaleSign(self):
if self.xx * self.yy < 0 or self.xy * self.yx > 0:
return -1
return 1
def eq(self, a, b):
return abs(a - b) <= 1e-6 * (abs(a) + abs(b))
def calcFromValues(self, r1, m1, r2, m2):
m1 = abs(m1)
m2 = abs(m2)
return (m1 * r1 + m2 * r2) / (m1 + m2)
def transpose(self):
return self.__class__(self.xx, self.yx, self.xy, self.yy, 0, 0)
def decompose(self):
self.translateX = self.dx
self.translateY = self.dy
self.scaleX = 1
self.scaleY = 1
self.angle1 = 0
self.angle2 = 0
if self.eq(self.xy, 0) and self.eq(self.yx, 0):
self.scaleX = self.xx
self.scaleY = self.yy
elif self.eq(self.xx * self.yx, -self.xy * self.yy):
self._decomposeScaleRotate()
elif self.eq(self.xx * self.xy, -self.yx * self.yy):
self._decomposeRotateScale()
else:
transpose = self.transpose()
(vx1, vy1), (vx2, vy2) = self._eigenvalueDecomposition(self.matrix, transpose.matrix)
u = self.__class__(vx1, vx2, vy1, vy2, 0, 0)
(vx1, vy1), (vx2, vy2) = self._eigenvalueDecomposition(transpose.matrix, self.matrix)
vt = self.__class__(vx1, vy1, vx2, vy2, 0, 0)
s = self.__class__(self.__class__().reverseTransform(u), self, self.__class__().reverseTransform(vt))
vt._decomposeScaleRotate()
self.angle1 = -vt.angle2
u._decomposeRotateScale()
self.angle2 = -u.angle1
self.scaleX = s.xx * vt.scaleX * u.scaleX
self.scaleY = s.yy * vt.scaleY * u.scaleY
return (self.translateX, self.translateY), (self.scaleX, self.scaleY), (self.angle1, self.angle2)
def _decomposeScaleRotate(self):
sign = self.scaleSign()
a = (math.atan2(self.yx, self.yy) + math.atan2(-sign * self.xy, sign * self.xx)) * .5
c = math.cos(a)
s = math.sin(a)
if c == 0: ## ????
c = 0.0000000000000000000000000000000001
if s == 0:
s = 0.0000000000000000000000000000000001
self.angle2 = -a
self.scaleX = self.calcFromValues(self.xx / float(c), c, -self.xy / float(s), s)
self.scaleY = self.calcFromValues(self.yy / float(c), c, self.yx / float(s), s)
def _decomposeRotateScale(self):
sign = self.scaleSign()
a = (math.atan2(sign * self.yx, sign * self.xx) + math.atan2(-self.xy, self.yy)) * .5
c = math.cos(a)
s = math.sin(a)
if c == 0:
c = 0.0000000000000000000000000000000001
if s == 0:
s = 0.0000000000000000000000000000000001
self.angle1 = -a
self.scaleX = self.calcFromValues(self.xx / float(c), c, self.yx / float(s), s)
self.scaleY = self.calcFromValues(self.yy / float(c), c, -self.xy / float(s), s)
def _eigenvalueDecomposition(self, *matrixes):
m = self.__class__(*matrixes)
b = -m.xx - m.yy
c = m.xx * m.yy - m.xy * m.yx
d = math.sqrt(abs(b * b - 4 * c))
if b < 0:
d *= -1
l1 = -(b + d) * .5
l2 = c / float(l1)
        vx1 = vy1 = None
if l1 - m.xx != 0:
vx1 = m.xy / (l1 - m.xx)
vy1 = 1
elif m.xy != 0:
vx1 = 1
vy1 = (l1 - m.xx) / m.xy
elif m.yx != 0:
vx1 = (l1 - m.yy) / m.yx
vy1 = 1
elif l1 - m.yy != 0:
vx1 = 1
vy1 = m.yx / (l1 - m.yy)
vx2 = vy2 = None
if l2 - m.xx != 0:
vx2 = m.xy / (l2 - m.xx)
vy2 = 1
elif m.xy != 0:
vx2 = 1
vy2 = (l2 - m.xx) / m.xy
elif m.yx != 0:
vx2 = (l2 - m.yy) / m.yx
vy2 = 1
elif l2 - m.yy != 0:
vx2 = 1
vy2 = m.yx / (l2 - m.yy)
if self.eq(l1, l2):
vx1 = 1
vy1 = 0
vx2 = 0
vy2 = 1
d1 = math.sqrt(vx1 * vx1 + vy1 * vy1)
d2 = math.sqrt(vx2 * vx2 + vy2 * vy2)
vx1 /= d1
vy1 /= d1
vx2 /= d2
vy2 /= d2
return (vx1, vy1), (vx2, vy2)
def compose(self, translate, scale, angle):
translateX, translateY = translate
scaleX, scaleY = scale
angle1, angle2 = angle
matrix = self.transformClass()
matrix = matrix.translate(translateX, translateY)
matrix = matrix.rotate(angle2)
matrix = matrix.scale(scaleX, scaleY)
matrix = matrix.rotate(angle1)
return self.__class__(matrix)
def _interpolate(self, v1, v2, value):
return v1 * (1 - value) + v2 * value
def interpolate(self, other, value):
if isinstance(value, (int, float)):
x = y = value
else:
x, y = value
self.decompose()
other.decompose()
translateX = self._interpolate(self.translateX, other.translateX, x)
translateY = self._interpolate(self.translateY, other.translateY, y)
scaleX = self._interpolate(self.scaleX, other.scaleX, x)
scaleY = self._interpolate(self.scaleY, other.scaleY, y)
angle1 = self._interpolate(self.angle1, other.angle1, x)
angle2 = self._interpolate(self.angle2, other.angle2, y)
return self.compose((translateX, translateY), (scaleX, scaleY), (angle1, angle2))
class FontMathWarning(Exception): pass
def _interpolateValue(data1, data2, value):
return data1 * (1 - value) + data2 * value
def _linearInterpolationTransformMatrix(matrix1, matrix2, value):
""" Linear, 'oldstyle' interpolation of the transform matrix."""
return tuple(_interpolateValue(matrix1[i], matrix2[i], value) for i in range(len(matrix1)))
def _polarDecomposeInterpolationTransformation(matrix1, matrix2, value):
""" Interpolate using the MathTransform method. """
m1 = MathTransform(matrix1)
m2 = MathTransform(matrix2)
return tuple(m1.interpolate(m2, value))
def _mathPolarDecomposeInterpolationTransformation(matrix1, matrix2, value):
""" Interpolation with ShallowTransfor, wrapped by decompose / compose actions."""
off, scl, rot = MathTransform(matrix1).decompose()
m1 = ShallowTransform(off, scl, rot)
off, scl, rot = MathTransform(matrix2).decompose()
m2 = ShallowTransform(off, scl, rot)
m3 = m1 + value * (m2-m1)
m3 = MathTransform().compose(m3.offset, m3.scale, m3.rotation)
return tuple(m3)
if __name__ == "__main__":
from random import random
import sys
import doctest
sys.exit(doctest.testmod().failed)
|
{
"content_hash": "36b9704c090e769e98a884494dd871d0",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 195,
"avg_line_length": 35.256198347107436,
"alnum_prop": 0.5937646507266761,
"repo_name": "typesupply/fontMath",
"id": "e8db0b7e58ef010aad71df63dce95003484dcd9d",
"size": "12798",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/fontMath/mathTransform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153343"
}
],
"symlink_target": ""
}
|
from django.utils import timezone
import mock
import pytest
from addons.wiki.tests.factories import NodeWikiFactory
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from osf.models import Guid
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
CommentFactory,
)
from rest_framework import exceptions
@pytest.mark.django_db
class CommentReportsMixin(object):
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def contributor(self):
return AuthUserFactory()
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def payload(self, user):
return {
'data': {
'id': user._id,
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
    # check if all necessary features are set up in the subclass
@pytest.fixture()
def private_project(self):
raise NotImplementedError
@pytest.fixture()
def comment(self):
raise NotImplementedError
@pytest.fixture()
def private_url(self):
raise NotImplementedError
@pytest.fixture()
def public_project(self):
raise NotImplementedError
@pytest.fixture()
def public_comment(self):
raise NotImplementedError
@pytest.fixture()
def public_url(self):
raise NotImplementedError
@pytest.fixture()
def comment_level(self):
raise NotImplementedError
def test_private_node_view_reports_auth_misc(
self, app, user, contributor, non_contrib, private_url):
# test_private_node_logged_out_user_cannot_view_reports
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
# test_private_node_logged_in_non_contrib_cannot_view_reports
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_private_node_only_reporting_user_can_view_reports
res = app.get(private_url, auth=user.auth)
assert res.status_code == 200
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert len(report_json) == 1
assert user._id in report_ids
# test_private_node_reported_user_does_not_see_report
res = app.get(private_url, auth=contributor.auth)
assert res.status_code == 200
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert len(report_json) == 0
assert contributor._id not in report_ids
def test_public_node_view_report_auth_misc(
self, app, user, contributor, non_contrib, public_url):
# test_public_node_logged_out_user_cannot_view_reports
res = app.get(public_url, expect_errors=True)
assert res.status_code == 401
# test_public_node_only_reporting_contributor_can_view_report
res = app.get(public_url, auth=user.auth)
assert res.status_code == 200
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert len(report_json) == 1
assert user._id in report_ids
# test_public_node_reported_user_does_not_see_report
res = app.get(public_url, auth=contributor.auth)
assert res.status_code == 200
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert len(report_json) == 0
assert contributor._id not in report_ids
# test_public_node_non_contrib_does_not_see_other_user_reports
res = app.get(public_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 200
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert len(report_json) == 0
assert non_contrib._id not in report_ids
def test_public_node_non_contrib_reporter_can_view_own_report(
self, app, non_contrib, public_comment, public_url):
public_comment.reports[non_contrib._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
public_comment.save()
res = app.get(public_url, auth=non_contrib.auth)
assert res.status_code == 200
report_json = res.json['data']
report_ids = [report['id'] for report in report_json]
assert len(report_json) == 1
assert non_contrib._id in report_ids
def test_public_node_private_comment_level_non_contrib_cannot_see_reports(
self, app, non_contrib, public_project, public_url):
public_project.comment_level = 'private'
public_project.save()
res = app.get(public_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_invalid_report_comment(self, app, user, private_url):
# test_report_comment_invalid_type
payload = {
'data': {
'type': 'Not a valid type.',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
res = app.post_json_api(
private_url, payload,
auth=user.auth, expect_errors=True
)
assert res.status_code == 409
# test_report_comment_no_type
payload = {
'data': {
'type': '',
'attributes': {
'category': 'spam',
'message': 'delicious spam'
}
}
}
res = app.post_json_api(
private_url, payload,
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_report_comment_invalid_spam_category
category = 'Not a valid category'
payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': category,
'message': 'delicious spam'
}
}
}
res = app.post_json_api(
private_url, payload,
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == '\"' + \
category + '\"' + ' is not a valid choice.'
def test_report_comment_allow_blank_message(
self, app, user, contributor, private_project, comment):
comment_new = CommentFactory(
node=private_project,
user=contributor,
target=comment.target)
url = '/{}comments/{}/reports/'.format(API_BASE, comment_new._id)
payload = {
'data': {
'type': 'comment_reports',
'attributes': {
'category': 'spam',
'message': ''
}
}
}
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == user._id
assert res.json['data']['attributes']['message'] == payload['data']['attributes']['message']
def test_private_node_report_comment_auth_misc(
self, app, user, contributor,
non_contrib, private_project,
private_url, comment, payload
):
# test_private_node_logged_out_user_cannot_report_comment
res = app.post_json_api(private_url, payload, expect_errors=True)
assert res.status_code == 401
# test_private_node_logged_in_non_contrib_cannot_report_comment
res = app.post_json_api(
private_url, payload,
auth=non_contrib.auth, expect_errors=True
)
assert res.status_code == 403
# test_private_node_logged_in_contributor_can_report_comment
comment_new = CommentFactory(
node=private_project,
user=contributor,
target=comment.target)
url = '/{}comments/{}/reports/'.format(API_BASE, comment_new._id)
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == user._id
def test_user_cannot_report_comment_condition(
self, app, user, contributor, private_url, payload):
# test_user_cannot_report_own_comment
res = app.post_json_api(
private_url, payload,
auth=contributor.auth, expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You cannot report your own comment.'
# test_user_cannot_report_comment_twice
# User cannot report the comment again
res = app.post_json_api(
private_url, payload,
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Comment already reported.'
def test_public_node_report_comment_auth_misc(
self, app, user, contributor,
non_contrib, public_project,
public_url, public_comment, payload
):
# def test_public_node_logged_out_user_cannot_report_comment(self):
res = app.post_json_api(public_url, payload, expect_errors=True)
assert res.status_code == 401
# def test_public_node_contributor_can_report_comment(self):
comment = CommentFactory(
node=public_project,
user=contributor,
target=public_comment.target)
url = '/{}comments/{}/reports/'.format(API_BASE, comment._id)
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['id'] == user._id
# def test_public_node_non_contrib_can_report_comment(self):
""" Test that when a public project allows any osf user to
        comment (comment_level == 'public'), non-contributors
can also report comments.
"""
res = app.post_json_api(public_url, payload, auth=non_contrib.auth)
assert res.status_code == 201
assert res.json['data']['id'] == non_contrib._id
def test_public_node_private_comment_level_non_contrib_cannot_report_comment(
self, app, non_contrib, public_project, public_url):
public_project.comment_level = 'private'
public_project.save()
res = app.get(public_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
class TestCommentReportsView(CommentReportsMixin):
# private_project_comment_reports
@pytest.fixture()
def private_project(self, user, contributor):
private_project = ProjectFactory.create(is_public=False, creator=user)
private_project.add_contributor(contributor=contributor, save=True)
return private_project
@pytest.fixture()
def comment(self, user, contributor, private_project):
comment = CommentFactory(node=private_project, user=contributor)
comment.reports = comment.reports or {}
comment.reports[user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
comment.save()
return comment
@pytest.fixture()
def private_url(self, user, comment):
return '/{}comments/{}/reports/'.format(API_BASE, comment._id)
# public_project_comment_reports
@pytest.fixture()
def public_project(self, user, contributor):
public_project = ProjectFactory.create(
is_public=True, creator=user, comment_level='public')
public_project.add_contributor(contributor=contributor, save=True)
return public_project
@pytest.fixture()
def public_comment(self, user, contributor, public_project):
public_comment = CommentFactory(node=public_project, user=contributor)
public_comment.reports = public_comment.reports or {}
public_comment.reports[user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
public_comment.save()
return public_comment
@pytest.fixture()
def public_url(self, user, public_comment):
return '/{}comments/{}/reports/'.format(API_BASE, public_comment._id)
class TestWikiCommentReportsView(CommentReportsMixin):
# private_project_comment_reports
@pytest.fixture()
def private_project(self, user, contributor):
private_project = ProjectFactory.create(is_public=False, creator=user)
private_project.add_contributor(contributor=contributor, save=True)
return private_project
@pytest.fixture()
def wiki(self, user, private_project):
with mock.patch('osf.models.AbstractNode.update_search'):
return NodeWikiFactory(node=private_project, user=user)
@pytest.fixture()
def comment(self, user, contributor, private_project, wiki):
comment = CommentFactory(
node=private_project,
target=Guid.load(wiki._id),
user=contributor
)
comment.reports = comment.reports or {}
comment.reports[user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
comment.save()
return comment
@pytest.fixture()
def private_url(self, user, comment):
return '/{}comments/{}/reports/'.format(API_BASE, comment._id)
# public_project_comment_reports
@pytest.fixture()
def public_project(self, user, contributor):
public_project = ProjectFactory.create(
is_public=True, creator=user, comment_level='public')
public_project.add_contributor(contributor=contributor, save=True)
return public_project
@pytest.fixture()
def public_wiki(self, user, public_project):
with mock.patch('osf.models.AbstractNode.update_search'):
return NodeWikiFactory(node=public_project, user=user)
@pytest.fixture()
def public_comment(self, user, contributor, public_project, public_wiki):
public_comment = CommentFactory(
node=public_project,
target=Guid.load(public_wiki._id),
user=contributor
)
public_comment.reports = public_comment.reports or {}
public_comment.reports[user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
public_comment.save()
return public_comment
@pytest.fixture()
def public_url(self, user, public_comment):
return '/{}comments/{}/reports/'.format(API_BASE, public_comment._id)
class TestFileCommentReportsView(CommentReportsMixin):
# private_project_comment_reports
@pytest.fixture()
def private_project(self, user, contributor):
private_project = ProjectFactory.create(is_public=False, creator=user)
private_project.add_contributor(contributor=contributor, save=True)
return private_project
@pytest.fixture()
def file(self, user, private_project):
return test_utils.create_test_file(private_project, user)
@pytest.fixture()
def comment(self, user, contributor, private_project, file):
comment = CommentFactory(
node=private_project,
target=file.get_guid(),
user=contributor)
comment.reports = comment.reports or {}
comment.reports[user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
comment.save()
return comment
@pytest.fixture()
def private_url(self, user, comment):
return '/{}comments/{}/reports/'.format(API_BASE, comment._id)
# public_project_comment_reports
@pytest.fixture()
def public_project(self, user, contributor):
public_project = ProjectFactory.create(
is_public=True, creator=user, comment_level='public')
public_project.add_contributor(contributor=contributor, save=True)
return public_project
@pytest.fixture()
def public_file(self, user, public_project):
return test_utils.create_test_file(public_project, user)
@pytest.fixture()
def public_comment(self, user, contributor, public_project, public_file):
public_comment = CommentFactory(
node=public_project,
target=public_file.get_guid(),
user=contributor)
public_comment.reports = public_comment.reports or {}
public_comment.reports[user._id] = {
'category': 'spam',
'text': 'This is spam',
'date': timezone.now(),
'retracted': False,
}
public_comment.save()
return public_comment
@pytest.fixture()
def public_url(self, user, public_comment):
return '/{}comments/{}/reports/'.format(API_BASE, public_comment._id)
|
{
"content_hash": "71cf6d5751e55adf101c6f89a2d771c4",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 100,
"avg_line_length": 35.85311871227364,
"alnum_prop": 0.5963297603681463,
"repo_name": "chennan47/osf.io",
"id": "d80977fa5f05a47cca462aee356d926a37ba8e1a",
"size": "17819",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "api_tests/comments/views/test_comment_report_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110839"
},
{
"name": "HTML",
"bytes": "236223"
},
{
"name": "JavaScript",
"bytes": "1830647"
},
{
"name": "Mako",
"bytes": "665098"
},
{
"name": "Python",
"bytes": "7650137"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
"""Contains User class"""
from .base_user import BaseUser
class User(BaseUser):
"""User class"""
USERNAME_FIELD = 'username'
EMAIL_FIELD = 'email'
PASSWORD_FIELD = 'password'
def __init__(self):
"""Initialize class attributes"""
# set all attributes to None because we will validate first
# before assigning values
self.username = None
self.email = None
self.password = None
@property
def get_username(self):
"""
gets username value
:return:
"""
return getattr(self, User.USERNAME_FIELD)
@property
def get_email(self):
"""
gets email value
:return:
"""
return getattr(self, User.EMAIL_FIELD)
def verify_password(self, password):
"""verify provided password with the stored password"""
return self._verify_password(password)
def __str__(self):
return '<%(username)s obj>' % dict(username=self.get_username)
def __repr__(self):
return self.__str__()
|
{
"content_hash": "97a1a4a13330accccfe3e8d5767bd4ea",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 70,
"avg_line_length": 24.74418604651163,
"alnum_prop": 0.5789473684210527,
"repo_name": "gr1d99/shopping-list",
"id": "7614cfd8d1b00fdaeeb95e4df253cd24fc9478c0",
"size": "1064",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "shopping_app/db/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "556"
},
{
"name": "HTML",
"bytes": "33617"
},
{
"name": "JavaScript",
"bytes": "1239"
},
{
"name": "Python",
"bytes": "54053"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtSql, QtCore
import numpy as np
import pickle, re, os
import acq4.Manager
import acq4.util.DataManager as DataManager
import collections
import acq4.util.functions as functions
import acq4.util.advancedTypes as advancedTypes
def quoteList(strns):
"""Given a list of strings, return a single string like '"string1", "string2",...'
Note: in SQLite, double quotes are for escaping table and column names;
single quotes are for string literals.
"""
return ','.join(['"'+s+'"' for s in strns])
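# For example, quoteList(['Dir', 'Source']) returns '"Dir","Source"' (illustrative).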
class SqliteDatabase:
"""Encapsulates an SQLITE database through QtSql to make things a bit more pythonic.
Arbitrary SQL may be executed by calling the db object directly, eg: db('select * from table')
Using the select() and insert() methods will do automatic type conversions and allows
any picklable objects to be directly stored in BLOB type fields. (it is not necessarily
safe to store pickled objects in TEXT fields)
NOTE: Data types in SQLITE work differently than in most other DBs--each value may take any type
regardless of the type specified by its column.
"""
def __init__(self, fileName=':memory:'):
self.db = QtSql.QSqlDatabase.addDatabase("QSQLITE")
self.db.setDatabaseName(fileName)
self.db.open()
self._readTableList()
def close(self):
self.db.close()
def exe(self, cmd, data=None, toDict=True, toArray=False):
"""Execute an SQL query. If data is provided, it should be a list of dicts and each will
be bound to the query and executed sequentially. Returns the query object."""
q = QtSql.QSqlQuery(self.db)
if data is None:
self._exe(q, cmd)
else:
res = []
if not q.prepare(cmd):
print "SQL Query:\n %s" % cmd
raise Exception("Error preparing SQL query (query is printed above): %s" % str(q.lastError().text()))
for d in data:
#print len(d)
for k, v in d.iteritems():
q.bindValue(':'+k, v)
#print k, v, type(v)
#print "==execute with bound data=="
#print cmd
#print q.boundValues()
#for k, v in q.boundValues().iteritems():
#print str(k), v.typeName()
self._exe(q)
if toArray:
return self._queryToArray(q)
elif toDict:
return self._queryToDict(q)
else:
return q
def __call__(self, *args, **kargs):
return self.exe(*args, **kargs)
def select(self, table, fields='*', sql='', toDict=True, toArray=False):
"""fields should be a list of field names"""
if fields != '*':
if isinstance(fields, basestring):
fields = fields.split(',')
qf = []
for f in fields:
if f == '*':
qf.append(f)
else:
qf.append('"'+f+'"')
fields = ','.join(qf)
#fields = quoteList(fields)
cmd = "SELECT %s FROM %s %s" % (fields, table, sql)
#print cmd
q = self.exe(cmd, toDict=toDict, toArray=toArray)
#return self._queryToDict(q)
return q
def insert(self, table, records=None, replaceOnConflict=False, ignoreExtraColumns=False, addExtraColumns=False, **args):
"""Insert records (a dict or list of dicts) into table.
If records is None, a single record may be specified via keyword arguments.
Arguments:
ignoreExtraColumns: If True, ignore any extra columns in the data that do not exist in the table
addExtraColumns: If True, add any columns that exist in the data but do not yet exist in the table
(NOT IMPLEMENTED YET)
"""
## can we optimize this by using batch execution?
if records is None:
records = [args]
if type(records) is not list:
records = [records]
if len(records) == 0:
return
ret = []
        ## Remember that _prepareData may change the number of columns!
records = self._prepareData(table, records, removeUnknownColumns=ignoreExtraColumns)
fields = records[0].keys()
insert = "INSERT"
if replaceOnConflict:
insert += " OR REPLACE"
#print "Insert:", fields
cmd = "%s INTO %s (%s) VALUES (%s)" % (insert, table, quoteList(fields), ','.join([':'+f for f in fields]))
#print len(fields), len(records[0]), len(self.tableSchema(table))
self.exe(cmd, records)
def delete(self, table, where):
cmd = "DELETE FROM %s WHERE %s" % (table, where)
return self(cmd)
def update(self, table, vals, where=None, rowid=None):
"""Update records in the DB.
Arguments:
vals: dict of {field: value} pairs
where: SQL clause specifying rows to update
rowid: int row IDs. Used instead of 'where'"""
if where is None:
if rowid is None:
raise Exception("Must specify 'where' or 'rowids'")
else:
where = "rowid=%d" % rowid
setStr = ', '.join(['"%s"=:%s' % (k, k) for k in vals])
data = self._prepareData(table, [vals])
cmd = "UPDATE %s SET %s WHERE %s" % (table, setStr, where)
return self(cmd, data)
def lastInsertRow(self):
q = self("select last_insert_rowid()")
return q[0].values()[0]
def replace(self, *args, **kargs):
return self.insert(*args, replaceOnConflict=True, **kargs)
def createTable(self, table, fields, sql=""):
"""Create a table in the database.
table: (str) the name of the table to create
fields: (list) a list of strings defining columns in the table.
These usually look like '"FieldName" type'
OR
(dict) a dictionary of 'FieldName': 'type' pairs
Types may be any string, but are typically int, real, text, or blob.
"""
#print "create table", table, ', '.join(fields)
if isinstance(fields, list):
fieldStr = ','.join(fields)
elif isinstance(fields, dict):
fieldStr = ', '.join(['"%s" %s' % (n, t) for n,t in fields.iteritems()])
self('CREATE TABLE %s (%s) %s' % (table, fieldStr, sql))
self._readTableList()
def hasTable(self, table):
return table in self.tables ## this is a case-insensitive operation
def tableSchema(self, table):
return self.tables[table] ## this is a case-insensitive operation
def _exe(self, query, cmd=None):
"""Execute an SQL query, raising an exception if there was an error. (internal use only)"""
if cmd is None:
ret = query.exec_()
else:
ret = query.exec_(cmd)
if not ret:
if cmd is not None:
print "SQL Query:\n %s" % cmd
raise Exception("Error executing SQL (query is printed above): %s" % str(query.lastError().text()))
else:
raise Exception("Error executing SQL: %s" % str(query.lastError().text()))
if str(query.executedQuery())[:6].lower() == 'create':
self._readTableList()
def _prepareData(self, table, data, removeUnknownColumns=False):
"""Massage data so it is ready for insert into the DB. (internal use only)
- data destined for BLOB fields is pickled
- numerical fields convert to int or float
- text fields convert to unicode
"""
## This can probably be optimized a bit..
#rec = data[0]
funcs = {}
## determine the functions to use for each field.
schema = self.tableSchema(table)
for k in schema:
#if k not in schema:
#raise Exception("Table %s has no field named '%s'. Schema is: %s" % (table, k, str(schema)))
typ = schema[k].lower()
if typ == 'blob':
funcs[k] = lambda obj: QtCore.QByteArray(pickle.dumps(obj))
elif typ == 'int':
funcs[k] = int
elif typ == 'real':
funcs[k] = float
elif typ == 'text':
funcs[k] = str
else:
funcs[k] = lambda obj: obj
newData = []
for rec in data:
newRec = {}
for k in rec:
if removeUnknownColumns and (k not in schema):
#print "skip column", k
continue
#print "include column", k
try:
newRec[k] = funcs[k](rec[k])
except:
newRec[k] = rec[k]
if k.lower() != 'rowid':
if k not in schema:
raise Exception("Field '%s' not present in table '%s'" % (k, table))
print "Warning: Setting %s field %s.%s with type %s" % (schema[k], table, k, str(type(rec[k])))
newData.append(newRec)
#print "new data:", newData
return newData
def _queryToDict(self, q):
res = []
while q.next():
res.append(self._readRecord(q.record()))
return res
def _queryToArray(self, q):
recs = self._queryToDict(q)
if len(recs) < 1:
#return np.array([]) ## need to return empty array *with correct fields*, but this is very difficult, so just return None
return None
rec1 = recs[0]
dtype = functions.suggestRecordDType(rec1)
#print rec1, dtype
arr = np.empty(len(recs), dtype=dtype)
arr[0] = tuple(rec1.values())
for i in xrange(1, len(recs)):
arr[i] = tuple(recs[i].values())
return arr
def _readRecord(self, rec):
data = collections.OrderedDict()
for i in range(rec.count()):
f = rec.field(i)
n = str(f.name())
if rec.isNull(i):
val = None
else:
val = rec.value(i)
if isinstance(val, QtCore.QByteArray):
val = pickle.loads(str(val))
#v = rec.value(i) ## required when not using V2 API for QVariant
#t = v.type()
#if t in [QtCore.QVariant.Int, QtCore.QVariant.LongLong]:
#val = v.toInt()[0]
#if t in [QtCore.QVariant.Double]:
#val = v.toDouble()[0]
#elif t == QtCore.QVariant.String:
#val = str(v.toString())
#elif t == QtCore.QVariant.ByteArray:
#val = pickle.loads(str(v.toByteArray()))
data[n] = val
return data
def _readTableList(self):
"""Reads the schema for each table, extracting the field names and types."""
res = self.select('sqlite_master', ['name', 'sql'], "where type = 'table'")
ident = r"(\w+|'[^']+'|\"[^\"]+\")"
#print "READ:"
tables = advancedTypes.CaselessDict()
for rec in res:
#print rec
sql = rec['sql'].replace('\n', ' ')
#print sql
m = re.match(r"\s*create\s+table\s+%s\s*\(([^\)]+)\)" % ident, sql, re.I)
#print m.groups()
fieldstr = m.groups()[1].split(',')
fields = advancedTypes.CaselessDict()
#print fieldstr
#print fieldstr
for f in fieldstr:
#print " ", f
m = re.findall(ident, f)
#print " ", m
if len(m) < 2:
typ = ''
else:
typ = m[1].strip('\'"')
field = m[0].strip('\'"')
fields[field] = typ
tables[rec['name']] = fields
self.tables = tables
#print tables
class AnalysisDatabase(SqliteDatabase):
"""Defines the structure for DBs used for analysis. Essential features are:
- a table of control parameters "DbParameters"
- a table defining relationships between tables "TableRelationships"
    - a table assigning ownership of data tables to analysis modules
- Directories created by data manager can be added automatically to DB
- Automatic creation of views that join together directory hierarchies
"""
def __init__(self, dbFile, baseDir=None):
create = False
self._baseDir = None
if not os.path.exists(dbFile):
create = True
if baseDir is None:
raise Exception("Must specify a base directory when creating a database.")
#self.db = SqliteDatabase(dbFile)
SqliteDatabase.__init__(self, dbFile)
self.file = dbFile
if create:
self.initializeDb()
self.setBaseDir(baseDir)
def initializeDb(self):
self.createTable("DbParameters", ['"Param" text unique', '"Value" text'])
## Table1.Column refers to Table2.ROWID
self.createTable("TableRelationships", ['"Table1" text', '"Column" text', '"Table2" text'])
self.createTable("DataTableOwners", ['"Owner" text', '"Table" text unique on conflict abort'])
def baseDir(self):
"""Return a dirHandle for the base directory used for all file names in the database."""
if self._baseDir is None:
dirName = self.ctrlParam('BaseDirectory')
self._baseDir = DataManager.getHandle(dirName)
return self._baseDir
def setBaseDir(self, baseDir):
"""Sets the base dir which prefixes all file names in the database. Must be a DirHandle."""
self.setCtrlParam('BaseDirectory', baseDir.name())
self._baseDir = baseDir
def ctrlParam(self, param):
return self.select('DbParameters', ['Value'], "where Param='%s'"%param)[0]['Value']
def setCtrlParam(self, param, value):
self.replace('DbParameters', {'Param': param, 'Value': value})
def createDirTable(self, dirHandle, tableName=None, fields=None):
"""Creates a new table for storing directories similar to dirHandle"""
parent = dirHandle.parent()
fields = ['"Dir" text'] + fields
if tableName is None:
#info = dirHandle.info()
#tableName = info['dirType']
tableName = self.dirTypeName(dirHandle)
if parent is not self.baseDir():
fields = ['"Source" int'] + fields
self.linkTables(tableName, "Source", self.dirTypeName(parent))
self.createTable(tableName, fields)
return tableName
def linkTables(self, table1, col, table2):
"""Declare a key relationship between two tables. Values in table1.column are ROWIDs from table 2"""
self.insert('TableRelationships', Table1=table1, Column=col, Table2=table2)
def addDir(self, handle, table=None):
"""Create a record based on a DirHandle and its meta-info.
If no table is specified, use the dirType attribute as table name"""
info = handle.info().deepcopy()
## determine parent directory, make sure parent is in DB.
parent = handle.parent()
parentRowId = None
if parent.isManaged() and parent is not self.baseDir():
pTable, parentRowId = self.addDir(parent)
#if table is None:
#table = info.get('dirType', None)
#if table is None:
#raise Exception("Dir %s has no dirType; can not add to DB automatically." % handle.name())
if table is None:
table = self.dirTypeName(handle)
if not self.hasTable(table):
fields = acq4.Manager.getManager().suggestedDirFields(handle).keys()
for k in info:
if k not in fields:
fields.append(k)
spec = ["'%s' text"%k for k in fields]
#db.createTable(table, spec)
self.createDirTable(handle, table, spec)
## make sure dir is not already in DB.
## if it is, just return the row ID
rid = self.getDirRowID(handle, table)
if rid is not None:
return table, rid
if parentRowId is not None:
info['Source'] = parentRowId
info['Dir'] = handle.name(relativeTo=self.baseDir())
self.insert(table, info, ignoreExtraColumns=True)
return table, self.lastInsertRow()
def getDirRowID(self, dirHandle, table=None):
if table is None:
#info = dirHandle.info()
#if 'dirType' not in info:
#raise Exception("Directory '%s' has no dirType attribute." % dirHandle.name())
#table = info['dirType']
table = self.dirTypeName(dirHandle)
if not self.hasTable(table):
return None
rec = self.select(table, ['rowid'], "where Dir='%s'" % dirHandle.name(relativeTo=self.baseDir()))
if len(rec) < 1:
return None
#print rec[0]
return rec[0]['rowid']
def getDir(self, table, rowid):
res = self.select(table, ['Dir'], 'where rowid=%d'%rowid)
if len(res) < 1:
raise Exception('rowid %d does not exist in %s' % (rowid, table))
#print res
return self.baseDir()[res[0]['Dir']]
def dirTypeName(self, dh):
info = dh.info()
type = info.get('dirType', None)
if type is None:
if 'protocol' in info:
if 'sequenceParams' in info:
type = 'ProtocolSequence'
else:
type = 'Protocol' ## an individual protocol run, NOT a single run from within a sequence
else:
try:
if self.dirTypeName(dh.parent()) == 'ProtocolSequence':
type = 'Protocol'
else:
raise Exception()
except:
raise Exception("Can't determine type for dir %s" % dh.name())
return type
### TODO: No more 'purpose', just use 'owner.purpose' instead
def listTablesOwned(self, owner):
res = self.select("DataTableOwners", ["Table"], sql="where Owner='%s'" % owner)
return [x['Table'] for x in res]
def listTables(self):
return self.tables.keys()
def takeOwnership(self, table, owner):
self.insert("DataTableOwners", {'Table': table, "Owner": owner})
def tableOwner(self, table):
res = self.select("DataTableOwners", ["Owner"], sql='where "Table"=\'%s\'' % table)
if len(res) == 0:
return None
return res[0]['Owner']
def describeData(self, data):
"""Given a dict or record array, return a table description suitable for creating / checking tables."""
fields = collections.OrderedDict()
if isinstance(data, list): ## list of dicts is ok
data = data[0]
if isinstance(data, np.ndarray):
for i in xrange(len(data.dtype)):
name = data.dtype.names[i]
typ = data.dtype[i].kind
if typ == 'i':
typ = 'int'
elif typ == 'f':
typ = 'real'
elif typ == 'S':
typ = 'text'
else:
typ = 'blob'
fields[name] = typ
elif isinstance(data, dict):
for name, v in data.iteritems():
if functions.isFloat(v):
typ = 'real'
elif functions.isInt(v):
typ = 'int'
elif isinstance(v, basestring):
typ = 'text'
else:
typ = 'blob'
fields[name] = typ
else:
raise Exception("Can not describe data of type '%s'" % type(data))
return fields
def checkTable(self, table, owner, fields, links=[], create=False):
## Make sure target table exists and has correct columns, links to input file
if not self.hasTable(table):
if create:
## create table
self.createTable(table, fields)
for field, ptable in links:
self.linkTables(table, field, ptable)
self.takeOwnership(table, owner)
else:
raise Exception("Table %s does not exist." % table)
else:
## check table for ownership, columns
if self.tableOwner(table) != owner:
raise Exception("Table %s is not owned by %s." % (table, owner))
ts = self.tableSchema(table)
for f in fields:
if f not in ts: ## this is a case-insensitive operation
raise Exception("Table has different data structure: Missing field %s" % f)
elif ts[f].lower() != fields[f].lower(): ## type names are case-insensitive too
raise Exception("Table has different data structure: Field '%s' type is %s, should be %s" % (f, ts[f], fields[f]))
return True
if __name__ == '__main__':
print "Avaliable DB drivers:", list(QtSql.QSqlDatabase.drivers())
db = SqliteDatabase()
db("create table 't' ('int' int, 'real' real, 'text' text, 'blob' blob)")
## Test insertion and retrieval of different data types into each field type
vals = [('int', 1), ('float', 1.5), ('int-float', 10.0), ('int-string', '10'), ('float-string', '3.1415'), ('string', 'Stringy'), ('object', [1,'x']), ('byte-string', 'str\1\2\0str')]
for name, val in vals:
db('delete from t')
db.insert('t', int=val, real=val, text=val, blob=val)
print "Insert %s (%s):" % (name, repr(val))
print " ", db.select('t')[0]
print "Table extraction test:"
for name, val in vals:
#db('delete from t')
db.insert('t', int=val, real=val, text=val, blob=val)
#print "Insert %s (%s):" % (name, repr(val))
#print " ", db.select('t')[0]
print db.select('t', toArray=True)
|
{
"content_hash": "961b2c77ffa4a6edb69d713d4f9c058d",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 187,
"avg_line_length": 39.95070422535211,
"alnum_prop": 0.5334479111581174,
"repo_name": "mgraupe/acq4",
"id": "3f2917958ff5e02196c6737d902fef97d750ac0a",
"size": "22716",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "acq4/util/database/AnalysisDatabase_ver0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "757367"
},
{
"name": "C++",
"bytes": "1222891"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "6110588"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from tweepy import API, StreamListener
import json, time, sys
class TWaiter(StreamListener):
# see Tweepy for more info
def __init__(self, api = None, label = 'default_collection'):
self.api = api or API()
self.counter = 0
self.label = label
self.output = open(label + '.' + time.strftime('%b%d-%H%M') + '.txt', 'w')
self.deleted = open('deleted_tweets.txt', 'a')
def on_data(self, data):
# the presence of 'in_reply_to_status' indicates a "normal" tweet
# the presence of 'delete' indicates a tweet that was deleted after posting
if 'in_reply_to_status' in data:
self.on_status(data)
elif 'delete' in data:
delete = json.loads(data)['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
def on_status(self, status):
# for now we want only the text of the tweet and the id.
text = str(json.dumps(json.loads(status)['text']))
id = str(json.dumps(json.loads(status)['id_str']))
self.output.write("id:" + " " + id[1:-1] + ", " + "text:" + " " + text[1:-1] + "\n")
self.counter += 1
# stop at 500 tweets for testing
# increase this number to get bigger data!
if self.counter >= 500:
self.output.close()
print "Finished collecting tweets."
sys.exit()
# should exit more gracefully.
return
def on_delete(self, status_id, user_id):
self.deleted.write(str(status_id) + "\n")
return
def on_error(self, status_code):
sys.stderr.write('Error: ' + str(status_code) + "\n")
return False
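# Hedged usage sketch (added for illustration, not in the original file): wiring
# TWaiter into a pre-4.0 tweepy Stream. The OAuth credentials below are
# placeholders and the track keyword is arbitrary.
if __name__ == '__main__':
    from tweepy import OAuthHandler, Stream
    auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')      # placeholder values
    auth.set_access_token('ACCESS_TOKEN', 'ACCESS_SECRET')      # placeholder values
    listener = TWaiter(api=API(auth), label='test_collection')
    Stream(auth, listener).filter(track=['python'])             # runs until the listener exits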
|
{
"content_hash": "a95621b1421e3e20397a274bc3341eee",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 92,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.5632984901277585,
"repo_name": "marciw/sentiment",
"id": "382f85021022ece036d3bf69a9346692b57b4f46",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twaiter.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4603"
}
],
"symlink_target": ""
}
|
import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCServer
import cluster_constants
from cluster_constants import *
import utils
from utils import *
import os
import subprocess
import re
def main():
print ("init")
server = SimpleXMLRPCServer(('0.0.0.0', int(SLAVE_DAEMON_PORT)))
server.register_introspection_functions()
server.register_instance(Node())
print ("starting server")
server.serve_forever()
class Node:
def __init__(self):
pass
def backup_config_files(self):
# tested
for fil in get_conf_files():
dirname = os.path.dirname(os.path.realpath(fil))
src_fname = os.path.basename(os.path.realpath(fil))
dst_fname = src_fname+".backup."+get_timestamp()
backup_dir = dirname+"/backup"
self.create_dir(backup_dir)
cmd = "cp "+fil+" "+backup_dir+"/"+dst_fname
get_result(cmd)
return True
def is_zkfc_running(self):
# tested
'''
return true if the zkfc process is running
'''
return is_process_running_by_name("org.apache.hadoop.hdfs.tools.DFSZKFailoverController")
def stop_zkfc(self):
# tested
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh stop zkfc"
if self.is_zkfc_running():
get_result(cmd)
return True
def start_zkfc(self):
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh start zkfc"
if not self.is_zkfc_running():
get_result(cmd)
return True
def clean_hadoop_temp_namenode_dir(self):
# tested
path = re.match("^(.*)/files/app/hadoop", HADOOP_HOME).group(1)+"/app/data/dfs/name"
cmd = "rm -rf "+path
get_result(cmd)
return True
def clean_hadoop_temp_datanode_dir(self):
# tested
path = re.match("^(.*)/files/app/hadoop", HADOOP_HOME).group(1)+"/app/data/dfs/data"
cmd = "rm -rf "+path
get_result(cmd)
return True
def clean_hadoop_temp_dirs(self):
# tested
self.clean_hadoop_temp_namenode_dir()
self.clean_hadoop_temp_datanode_dir()
return True
def clean_journalnode_dir(self):
# tested
cmd="rm -rf "+JOURNALNODE_EDITS_DIR+"/*"
get_result(cmd)
return True
def is_namenode_running(self):
# tested
'''
return true if the namenode process is running
'''
return is_process_running_by_name("org.apache.hadoop.hdfs.server.namenode.NameNode")
def stop_namenode(self):
# tested
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh stop namenode"
if self.is_namenode_running():
get_result(cmd)
return True
def start_namenode(self):
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh start namenode"
if not self.is_namenode_running():
get_result(cmd)
return True
def is_datanode_running(self):
'''
return true if the datanode process is running
'''
return is_process_running_by_name("org.apache.hadoop.hdfs.server.datanode.DataNode")
def stop_datanode(self):
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh stop datanode"
if self.is_datanode_running():
get_result(cmd)
return True
def start_datanode(self):
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh start datanode"
if not self.is_datanode_running():
get_result(cmd)
return True
def format_namenode(self):
cmd = HADOOP_HOME+"/bin/hdfs namenode -format"
get_result(cmd)
return True
def bootstrap_standby(self):
# tested, idemp
cmd = HADOOP_HOME+"/bin/hdfs namenode -bootstrapStandby -force -nonInteractive"
get_result(cmd)
return True
def jn_initialize_shared_edits(self):
# tested
cmd = HADOOP_HOME+"/bin/hdfs namenode -initializeSharedEdits -force -nonInteractive"
get_result(cmd)
return True
def is_journalnode_running(self):
#tested
return is_process_running_by_name("org.apache.hadoop.hdfs.qjournal.server.JournalNode")
def start_journalnode(self):
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh start journalnode"
if not self.is_journalnode_running():
get_result(cmd)
return True
def stop_journalnode(self):
cmd = HADOOP_HOME+"/sbin/hadoop-daemon.sh stop journalnode"
if self.is_journalnode_running():
get_result(cmd)
return True
def is_hiveserver_running(self):
#tested
return is_process_running_by_name("org.apache.hive.service.server.HiveServer2")
def start_hiveserver(self):
# tested
cmd = HIVE_HOME+"/bin/hive --service hiveserver2 > "+HIVE_HOME+"/../../hive_log/hiveserver2.log"
if not self.is_hiveserver_running():
spawn(cmd)
return True
def stop_hiveserver(self):
# tested
if self.is_hiveserver_running():
kill_by_name("org.apache.hive.service.server.HiveServer2")
return True
def update_host_for_hiveservers(self):
hive_xml = XML_File(hive_site_xml_path)
hive_xml.set_property("hive.server2.thrift.bind.host", get_current_hostname())
hive_xml.write(hive_site_xml_path)
return True
def get_zk_nodes(self):
# tested
'''
returns list of server ip addresses of the zk ensemble
[zkserver-ip1, zkserver-ip2, ...]
'''
conf_file = ZOOKEEPER_CONF_DIR + "/zoo.cfg"
cmd = "grep ^server\. " + conf_file + " | cut -f2 -d= | cut -f1 -d:"
return [get_ipaddr(x) for x in get_result(cmd)[0]]
def format_ZK(self):
cmd = "hdfs zkfc -formatZK -force -nonInteractive"
get_result(cmd)
return True
def is_zookeeper_running(self):
return is_process_running_by_name("org.apache.zookeeper.server.quorum.QuorumPeerMain")
def start_zookeeper(self):
cmd = ZOOKEEPER_HOME + "/bin/zkServer.sh start"
if not self.is_zookeeper_running():
get_result(cmd)
return True
def stop_zookeeper(self):
if self.is_zookeeper_running():
cmd = ZOOKEEPER_HOME + "/bin/zkServer.sh stop"
get_result(cmd)
return True
def passwordless_ssh_works(self, target):
#tested
pipe = subprocess.Popen("ssh -oNumberOfPasswordPrompts=0 "+target+" \"echo hello\"", shell=True)
if pipe.wait() == 0:
return True
else:
return False
def start_hadoop_services(self):
assert(get_hostname(MASTER_NODE) == get_current_hostname())
cmd = HADOOP_HOME+"/sbin/start-dfs.sh"
get_result(cmd)
cmd = HADOOP_HOME+"/sbin/start-yarn.sh"
get_result(cmd)
return True
def stop_hadoop_services(self):
assert(get_hostname(MASTER_NODE) == get_current_hostname())
cmd = HADOOP_HOME+"/sbin/stop-yarn.sh"
get_result(cmd)
cmd = HADOOP_HOME+"/sbin/stop-dfs.sh"
get_result(cmd)
return True
def is_metastore_running(self):
# tested
return is_process_running_by_name("org.apache.hadoop.hive.metastore.HiveMetaStore")
def start_metastore(self):
cmd = HIVE_HOME+"/bin/hive --service metastore > "+HIVE_HOME+"/../../hive_log/metastore.log"
if not self.is_metastore_running():
spawn(cmd)
return True
def stop_metastore(self):
if self.is_metastore_running():
kill_by_name("org.apache.hadoop.hive.metastore.HiveMetaStore")
return True
def is_resourcemanager_running(self):
# tested
return is_process_running_by_name("org.apache.hadoop.yarn.server.resourcemanager.ResourceManager")
def start_resourcemanager(self):
cmd = HADOOP_HOME + "/sbin/yarn-daemon.sh start resourcemanager"
if not self.is_resourcemanager_running():
get_result(cmd)
return True
def stop_resourcemanager(self):
if self.is_resourcemanager_running():
cmd = HADOOP_HOME + "/sbin/yarn-daemon.sh stop resourcemanager"
get_result(cmd)
return True
def create_dir(self, direc):
# tested
if not os.path.isdir(direc):
os.mkdir(direc)
return True
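# Hedged usage sketch (added for illustration, not part of the original daemon):
# a master-side script would typically drive these methods over XML-RPC, e.g.
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy("http://%s:%s" % ("slave-host", SLAVE_DAEMON_PORT))  # host is a placeholder
#   if not proxy.is_namenode_running():
#       proxy.start_namenode()
#   proxy.backup_config_files()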
if __name__ == "__main__":
print ("starting")
main()
|
{
"content_hash": "e443518df3e48db5ebe36084175cdbae",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 106,
"avg_line_length": 28.18032786885246,
"alnum_prop": 0.597324025596277,
"repo_name": "Infosys/High-Availability-Hadoop",
"id": "57c442034aed4f43263073ad1e9f653cd014a3c1",
"size": "9789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slave_daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43023"
},
{
"name": "Shell",
"bytes": "2398"
}
],
"symlink_target": ""
}
|
import logging
import tkinter as tk
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
from PIL import Image
from PIL.ImageTk import PhotoImage
from core.api.grpc.wrappers import Interface, Link
from core.gui import appconfig
from core.gui import nodeutils as nutils
from core.gui.dialogs.shapemod import ShapeDialog
from core.gui.graph import tags
from core.gui.graph.edges import EDGE_WIDTH, CanvasEdge
from core.gui.graph.enums import GraphMode, ScaleOption
from core.gui.graph.node import CanvasNode, ShadowNode
from core.gui.graph.shape import Shape
from core.gui.graph.shapeutils import ShapeType, is_draw_shape, is_marker
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.graph.manager import CanvasManager
from core.gui.coreclient import CoreClient
ZOOM_IN: float = 1.1
ZOOM_OUT: float = 0.9
MOVE_NODE_MODES: Set[GraphMode] = {GraphMode.NODE, GraphMode.SELECT}
MOVE_SHAPE_MODES: Set[GraphMode] = {GraphMode.ANNOTATION, GraphMode.SELECT}
BACKGROUND_COLOR: str = "#cccccc"
class CanvasGraph(tk.Canvas):
def __init__(
self,
master: tk.BaseWidget,
app: "Application",
manager: "CanvasManager",
core: "CoreClient",
_id: int,
dimensions: Tuple[int, int],
) -> None:
super().__init__(master, highlightthickness=0, background=BACKGROUND_COLOR)
self.id: int = _id
self.app: "Application" = app
self.manager: "CanvasManager" = manager
self.core: "CoreClient" = core
self.selection: Dict[int, int] = {}
self.select_box: Optional[Shape] = None
self.selected: Optional[int] = None
self.nodes: Dict[int, CanvasNode] = {}
self.shadow_nodes: Dict[int, ShadowNode] = {}
self.shapes: Dict[int, Shape] = {}
self.shadow_core_nodes: Dict[int, ShadowNode] = {}
# map wireless/EMANE node to the set of MDRs connected to that node
self.wireless_network: Dict[int, Set[int]] = {}
self.drawing_edge: Optional[CanvasEdge] = None
self.rect: Optional[int] = None
self.shape_drawing: bool = False
self.current_dimensions: Tuple[int, int] = dimensions
self.ratio: float = 1.0
self.offset: Tuple[int, int] = (0, 0)
self.cursor: Tuple[int, int] = (0, 0)
self.to_copy: List[CanvasNode] = []
# background related
self.wallpaper_id: Optional[int] = None
self.wallpaper: Optional[Image.Image] = None
self.wallpaper_drawn: Optional[PhotoImage] = None
self.wallpaper_file: str = ""
self.scale_option: tk.IntVar = tk.IntVar(value=1)
self.adjust_to_dim: tk.BooleanVar = tk.BooleanVar(value=False)
# bindings
self.setup_bindings()
# draw base canvas
self.draw_canvas()
self.draw_grid()
def draw_canvas(self, dimensions: Tuple[int, int] = None) -> None:
if self.rect is not None:
self.delete(self.rect)
if not dimensions:
dimensions = self.manager.default_dimensions
self.current_dimensions = dimensions
self.rect = self.create_rectangle(
0,
0,
*dimensions,
outline="#000000",
fill="#ffffff",
width=1,
tags="rectangle",
)
self.configure(scrollregion=self.bbox(tk.ALL))
def setup_bindings(self) -> None:
"""
Bind any mouse events or hot keys to the matching action
"""
self.bind("<Control-c>", self.copy_selected)
self.bind("<Control-v>", self.paste_selected)
self.bind("<Control-x>", self.cut_selected)
self.bind("<Control-d>", self.delete_selected)
self.bind("<Control-h>", self.hide_selected)
self.bind("<ButtonPress-1>", self.click_press)
self.bind("<ButtonRelease-1>", self.click_release)
self.bind("<B1-Motion>", self.click_motion)
self.bind("<Delete>", self.delete_selected)
self.bind("<Control-1>", self.ctrl_click)
self.bind("<Double-Button-1>", self.double_click)
self.bind("<MouseWheel>", self.zoom)
self.bind("<Button-4>", lambda e: self.zoom(e, ZOOM_IN))
self.bind("<Button-5>", lambda e: self.zoom(e, ZOOM_OUT))
self.bind("<ButtonPress-3>", lambda e: self.scan_mark(e.x, e.y))
self.bind("<B3-Motion>", lambda e: self.scan_dragto(e.x, e.y, gain=1))
def get_shadow(self, node: CanvasNode) -> ShadowNode:
shadow_node = self.shadow_core_nodes.get(node.core_node.id)
if not shadow_node:
shadow_node = ShadowNode(self.app, self, node)
return shadow_node
def get_actual_coords(self, x: float, y: float) -> Tuple[float, float]:
actual_x = (x - self.offset[0]) / self.ratio
actual_y = (y - self.offset[1]) / self.ratio
return actual_x, actual_y
def get_scaled_coords(self, x: float, y: float) -> Tuple[float, float]:
scaled_x = (x * self.ratio) + self.offset[0]
scaled_y = (y * self.ratio) + self.offset[1]
return scaled_x, scaled_y
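# Note on the two helpers above (comment added for clarity, not in the original
# source): "actual" coordinates are session/world coordinates and "scaled" ones
# are canvas pixels after zoom/pan, related by scaled = actual * ratio + offset
# and actual = (scaled - offset) / ratio. For example, with ratio=2.0 and
# offset=(100, 50), the actual point (10, 20) maps to (120, 90) and back.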
def inside_canvas(self, x: float, y: float) -> bool:
x1, y1, x2, y2 = self.bbox(self.rect)
valid_x = x1 <= x <= x2
valid_y = y1 <= y <= y2
return valid_x and valid_y
def valid_position(self, x1: int, y1: int, x2: int, y2: int) -> bool:
valid_topleft = self.inside_canvas(x1, y1)
valid_bottomright = self.inside_canvas(x2, y2)
return valid_topleft and valid_bottomright
def draw_grid(self) -> None:
"""
Create grid.
"""
width, height = self.width_and_height()
width = int(width)
height = int(height)
for i in range(0, width, 27):
self.create_line(i, 0, i, height, dash=(2, 4), tags=tags.GRIDLINE)
for i in range(0, height, 27):
self.create_line(0, i, width, i, dash=(2, 4), tags=tags.GRIDLINE)
self.tag_lower(tags.GRIDLINE)
self.tag_lower(self.rect)
def canvas_xy(self, event: tk.Event) -> Tuple[float, float]:
"""
Convert window coordinate to canvas coordinate
"""
x = self.canvasx(event.x)
y = self.canvasy(event.y)
return x, y
def get_selected(self, event: tk.Event) -> int:
"""
Retrieve the item id that is on the mouse position
"""
x, y = self.canvas_xy(event)
overlapping = self.find_overlapping(x, y, x, y)
selected = None
for _id in overlapping:
if self.drawing_edge and self.drawing_edge.id == _id:
continue
elif _id in self.nodes:
selected = _id
elif _id in self.shapes:
selected = _id
elif _id in self.shadow_nodes:
selected = _id
return selected
def click_release(self, event: tk.Event) -> None:
"""
Draw a node or finish drawing an edge according to the current graph mode
"""
logger.debug("click release")
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
return
if self.manager.mode == GraphMode.ANNOTATION:
self.focus_set()
if self.shape_drawing:
shape = self.shapes[self.selected]
shape.shape_complete(x, y)
self.shape_drawing = False
elif self.manager.mode == GraphMode.SELECT:
self.focus_set()
if self.select_box:
x0, y0, x1, y1 = self.coords(self.select_box.id)
inside = [
x
for x in self.find_enclosed(x0, y0, x1, y1)
if "node" in self.gettags(x) or "shape" in self.gettags(x)
]
for i in inside:
self.select_object(i, True)
self.select_box.disappear()
self.select_box = None
else:
self.focus_set()
self.selected = self.get_selected(event)
logger.debug(
"click release selected(%s) mode(%s)", self.selected, self.manager.mode
)
if self.manager.mode == GraphMode.EDGE:
self.handle_edge_release(event)
elif self.manager.mode == GraphMode.NODE:
self.add_node(x, y)
elif self.manager.mode == GraphMode.PICKNODE:
self.manager.mode = GraphMode.NODE
self.selected = None
def handle_edge_release(self, _event: tk.Event) -> None:
# not drawing edge return
if not self.drawing_edge:
return
edge = self.drawing_edge
self.drawing_edge = None
# edge dst must be a node
logger.debug("current selected: %s", self.selected)
dst_node = self.nodes.get(self.selected)
if not dst_node:
edge.delete()
return
# check if node can be linked
if not edge.src.is_linkable(dst_node):
edge.delete()
return
# finalize edge creation
edge.drawing(dst_node.position())
edge.complete(dst_node)
def select_object(self, object_id: int, choose_multiple: bool = False) -> None:
"""
create a bounding box when a node is selected
"""
if not choose_multiple:
self.clear_selection()
# draw a bounding box if node hasn't been selected yet
if object_id not in self.selection:
x0, y0, x1, y1 = self.bbox(object_id)
selection_id = self.create_rectangle(
(x0 - 6, y0 - 6, x1 + 6, y1 + 6),
activedash=True,
dash="-",
tags=tags.SELECTION,
)
self.selection[object_id] = selection_id
else:
selection_id = self.selection.pop(object_id)
self.delete(selection_id)
def clear_selection(self) -> None:
"""
Clear current selection boxes.
"""
for _id in self.selection.values():
self.delete(_id)
self.selection.clear()
def move_selection(self, object_id: int, x_offset: float, y_offset: float) -> None:
select_id = self.selection.get(object_id)
if select_id is not None:
self.move(select_id, x_offset, y_offset)
def delete_selected_objects(self, _event: tk.Event = None) -> None:
edges = set()
nodes = []
for object_id in self.selection:
# delete selection box
selection_id = self.selection[object_id]
self.delete(selection_id)
# delete node and related edges
if object_id in self.nodes:
canvas_node = self.nodes.pop(object_id)
# delete related edges
while canvas_node.edges:
edge = canvas_node.edges.pop()
if edge in edges:
continue
edges.add(edge)
edge.delete()
# delete node
canvas_node.delete()
nodes.append(canvas_node)
# delete shape
if object_id in self.shapes:
shape = self.shapes.pop(object_id)
shape.delete()
self.selection.clear()
self.core.deleted_canvas_nodes(nodes)
def hide_selected(self, _event: tk.Event = None) -> None:
for object_id in self.selection:
# delete selection box
selection_id = self.selection[object_id]
self.delete(selection_id)
# hide node and related edges
if object_id in self.nodes:
canvas_node = self.nodes[object_id]
canvas_node.hide()
def show_hidden(self) -> None:
for node in self.nodes.values():
if node.hidden:
node.show()
def zoom(self, event: tk.Event, factor: float = None) -> None:
if not factor:
factor = ZOOM_IN if event.delta > 0 else ZOOM_OUT
event.x, event.y = self.canvasx(event.x), self.canvasy(event.y)
self.scale(tk.ALL, event.x, event.y, factor, factor)
self.configure(scrollregion=self.bbox(tk.ALL))
self.ratio *= float(factor)
self.offset = (
self.offset[0] * factor + event.x * (1 - factor),
self.offset[1] * factor + event.y * (1 - factor),
)
logger.debug("ratio: %s", self.ratio)
logger.debug("offset: %s", self.offset)
self.app.statusbar.set_zoom(self.ratio)
if self.wallpaper:
self.redraw_wallpaper()
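# Comment added for clarity (not in the original source): the offset update above,
# offset = offset * factor + event * (1 - factor), is what keeps the canvas point
# under the mouse cursor stationary while zooming, since every scaled coordinate
# is recomputed as actual * (ratio * factor) + new_offset.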
def click_press(self, event: tk.Event) -> None:
"""
Start drawing an edge if mouse click is on a node
"""
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
return
self.cursor = x, y
selected = self.get_selected(event)
logger.debug("click press(%s): %s", self.cursor, selected)
x_check = self.cursor[0] - self.offset[0]
y_check = self.cursor[1] - self.offset[1]
logger.debug("click press offset(%s, %s)", x_check, y_check)
is_node = selected in self.nodes
if self.manager.mode == GraphMode.EDGE and is_node:
node = self.nodes[selected]
self.drawing_edge = CanvasEdge(self.app, node)
self.organize()
if self.manager.mode == GraphMode.ANNOTATION:
if is_marker(self.manager.annotation_type):
r = self.app.toolbar.marker_frame.size.get()
self.create_oval(
x - r,
y - r,
x + r,
y + r,
fill=self.app.toolbar.marker_frame.color,
outline="",
tags=(tags.MARKER, tags.ANNOTATION),
state=self.manager.show_annotations.state(),
)
return
if selected is None:
shape = Shape(self.app, self, self.manager.annotation_type, x, y)
self.selected = shape.id
self.shape_drawing = True
self.shapes[shape.id] = shape
if selected is not None:
if selected not in self.selection:
if selected in self.shapes:
shape = self.shapes[selected]
self.select_object(shape.id)
self.selected = selected
elif selected in self.nodes:
node = self.nodes[selected]
self.select_object(node.id)
self.selected = selected
logger.debug(
"selected node(%s), coords: (%s, %s)",
node.core_node.name,
node.core_node.position.x,
node.core_node.position.y,
)
elif selected in self.shadow_nodes:
shadow_node = self.shadow_nodes[selected]
self.select_object(shadow_node.id)
self.selected = selected
logger.debug(
"selected shadow node(%s), coords: (%s, %s)",
shadow_node.node.core_node.name,
shadow_node.node.core_node.position.x,
shadow_node.node.core_node.position.y,
)
else:
if self.manager.mode == GraphMode.SELECT:
shape = Shape(self.app, self, ShapeType.RECTANGLE, x, y)
self.select_box = shape
self.clear_selection()
def ctrl_click(self, event: tk.Event) -> None:
# update cursor location
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
return
self.cursor = x, y
# handle multiple selections
logger.debug("control left click: %s", event)
selected = self.get_selected(event)
if (
selected not in self.selection
and selected in self.shapes
or selected in self.nodes
):
self.select_object(selected, choose_multiple=True)
def click_motion(self, event: tk.Event) -> None:
x, y = self.canvas_xy(event)
if not self.inside_canvas(x, y):
if self.select_box:
self.select_box.delete()
self.select_box = None
if is_draw_shape(self.manager.annotation_type) and self.shape_drawing:
shape = self.shapes.pop(self.selected)
shape.delete()
self.shape_drawing = False
return
x_offset = x - self.cursor[0]
y_offset = y - self.cursor[1]
self.cursor = x, y
if self.manager.mode == GraphMode.EDGE and self.drawing_edge is not None:
self.drawing_edge.drawing(self.cursor)
if self.manager.mode == GraphMode.ANNOTATION:
if is_draw_shape(self.manager.annotation_type) and self.shape_drawing:
shape = self.shapes[self.selected]
shape.shape_motion(x, y)
return
elif is_marker(self.manager.annotation_type):
r = self.app.toolbar.marker_frame.size.get()
self.create_oval(
x - r,
y - r,
x + r,
y + r,
fill=self.app.toolbar.marker_frame.color,
outline="",
tags=(tags.MARKER, tags.ANNOTATION),
)
return
if self.manager.mode == GraphMode.EDGE:
return
# move selected objects
if self.selection:
for selected_id in self.selection:
if self.manager.mode in MOVE_SHAPE_MODES and selected_id in self.shapes:
shape = self.shapes[selected_id]
shape.motion(x_offset, y_offset)
elif self.manager.mode in MOVE_NODE_MODES and selected_id in self.nodes:
node = self.nodes[selected_id]
node.motion(x_offset, y_offset, update=self.core.is_runtime())
elif (
self.manager.mode in MOVE_NODE_MODES
and selected_id in self.shadow_nodes
):
shadow_node = self.shadow_nodes[selected_id]
shadow_node.motion(x_offset, y_offset)
else:
if self.select_box and self.manager.mode == GraphMode.SELECT:
self.select_box.shape_motion(x, y)
def double_click(self, event: tk.Event) -> None:
selected = self.get_selected(event)
if selected is not None and selected in self.shapes:
shape = self.shapes[selected]
dialog = ShapeDialog(self.app, shape)
dialog.show()
def add_node(self, x: float, y: float) -> None:
if self.selected is not None and self.selected not in self.shapes:
return
actual_x, actual_y = self.get_actual_coords(x, y)
core_node = self.core.create_node(
actual_x,
actual_y,
self.manager.node_draw.node_type,
self.manager.node_draw.model,
)
if not core_node:
return
core_node.canvas = self.id
node = CanvasNode(self.app, self, x, y, core_node, self.manager.node_draw.image)
self.nodes[node.id] = node
self.core.set_canvas_node(core_node, node)
def width_and_height(self) -> Tuple[int, int]:
"""
retrieve canvas width and height in pixels
"""
x0, y0, x1, y1 = self.coords(self.rect)
canvas_w = abs(x0 - x1)
canvas_h = abs(y0 - y1)
return canvas_w, canvas_h
def get_wallpaper_image(self) -> Image.Image:
width = int(self.wallpaper.width * self.ratio)
height = int(self.wallpaper.height * self.ratio)
image = self.wallpaper.resize((width, height), Image.ANTIALIAS)
return image
def draw_wallpaper(
self, image: PhotoImage, x: float = None, y: float = None
) -> None:
if x is None and y is None:
x1, y1, x2, y2 = self.bbox(self.rect)
x = (x1 + x2) / 2
y = (y1 + y2) / 2
self.wallpaper_id = self.create_image((x, y), image=image, tags=tags.WALLPAPER)
self.wallpaper_drawn = image
def wallpaper_upper_left(self) -> None:
self.delete(self.wallpaper_id)
# create new scaled image, cropped if needed
width, height = self.width_and_height()
image = self.get_wallpaper_image()
cropx = image.width
cropy = image.height
if image.width > width:
cropx = width
if image.height > height:
cropy = height
cropped = image.crop((0, 0, cropx, cropy))
image = PhotoImage(cropped)
# draw on canvas
x1, y1, _, _ = self.bbox(self.rect)
x = (cropx / 2) + x1
y = (cropy / 2) + y1
self.draw_wallpaper(image, x, y)
def wallpaper_center(self) -> None:
"""
place the image at the center of canvas
"""
self.delete(self.wallpaper_id)
# dimension of the cropped image
width, height = self.width_and_height()
image = self.get_wallpaper_image()
cropx = 0
if image.width > width:
cropx = (image.width - width) / 2
cropy = 0
if image.height > height:
cropy = (image.height - height) / 2
x1 = 0 + cropx
y1 = 0 + cropy
x2 = image.width - cropx
y2 = image.height - cropy
cropped = image.crop((x1, y1, x2, y2))
image = PhotoImage(cropped)
self.draw_wallpaper(image)
def wallpaper_scaled(self) -> None:
"""
scale image based on canvas dimension
"""
self.delete(self.wallpaper_id)
canvas_w, canvas_h = self.width_and_height()
image = self.wallpaper.resize((int(canvas_w), int(canvas_h)), Image.ANTIALIAS)
image = PhotoImage(image)
self.draw_wallpaper(image)
def resize_to_wallpaper(self) -> None:
self.delete(self.wallpaper_id)
image = PhotoImage(self.wallpaper)
self.redraw_canvas((image.width(), image.height()))
self.draw_wallpaper(image)
def redraw_canvas(self, dimensions: Tuple[int, int] = None) -> None:
logger.debug("redrawing canvas to dimensions: %s", dimensions)
# reset scale and move back to original position
logger.debug("resetting scaling: %s %s", self.ratio, self.offset)
factor = 1 / self.ratio
self.scale(tk.ALL, self.offset[0], self.offset[1], factor, factor)
self.move(tk.ALL, -self.offset[0], -self.offset[1])
# reset ratio and offset
self.ratio = 1.0
self.offset = (0, 0)
# redraw canvas rectangle
self.draw_canvas(dimensions)
# redraw gridlines to new canvas size
self.delete(tags.GRIDLINE)
self.draw_grid()
self.app.manager.show_grid.click_handler()
def redraw_wallpaper(self) -> None:
if self.adjust_to_dim.get():
logger.debug("drawing wallpaper to canvas dimensions")
self.resize_to_wallpaper()
else:
option = ScaleOption(self.scale_option.get())
logger.debug("drawing canvas using scaling option: %s", option)
if option == ScaleOption.UPPER_LEFT:
self.wallpaper_upper_left()
elif option == ScaleOption.CENTERED:
self.wallpaper_center()
elif option == ScaleOption.SCALED:
self.wallpaper_scaled()
elif option == ScaleOption.TILED:
logger.warning("tiled background not implemented yet")
self.organize()
def organize(self) -> None:
for tag in tags.ORGANIZE_TAGS:
self.tag_raise(tag)
def set_wallpaper(self, filename: Optional[str]) -> None:
logger.info("setting canvas(%s) background: %s", self.id, filename)
if filename:
img = Image.open(filename)
self.wallpaper = img
self.wallpaper_file = filename
self.redraw_wallpaper()
else:
if self.wallpaper_id is not None:
self.delete(self.wallpaper_id)
self.wallpaper = None
self.wallpaper_file = None
def is_selection_mode(self) -> bool:
return self.manager.mode == GraphMode.SELECT
def create_edge(self, src: CanvasNode, dst: CanvasNode) -> CanvasEdge:
"""
create an edge between source node and destination node
"""
edge = CanvasEdge(self.app, src)
edge.complete(dst)
return edge
def copy_selected(self, _event: tk.Event = None) -> None:
if self.core.is_runtime():
logger.debug("copy is disabled during runtime state")
return
if self.selection:
logger.debug("to copy nodes: %s", self.selection)
self.to_copy.clear()
for node_id in self.selection.keys():
canvas_node = self.nodes[node_id]
self.to_copy.append(canvas_node)
def cut_selected(self, _event: tk.Event = None) -> None:
if self.core.is_runtime():
logger.debug("cut is disabled during runtime state")
return
self.copy_selected()
self.delete_selected()
def delete_selected(self, _event: tk.Event = None) -> None:
"""
delete selected nodes and any data that relates to it
"""
logger.debug("press delete key")
if self.core.is_runtime():
logger.debug("node deletion is disabled during runtime state")
return
self.delete_selected_objects()
self.app.default_info()
def paste_selected(self, _event: tk.Event = None) -> None:
if self.core.is_runtime():
logger.debug("paste is disabled during runtime state")
return
# maps original node canvas id to copy node canvas id
copy_map = {}
# the edges that will be copy over
to_copy_edges = set()
to_copy_ids = {x.id for x in self.to_copy}
for canvas_node in self.to_copy:
core_node = canvas_node.core_node
actual_x = core_node.position.x + 50
actual_y = core_node.position.y + 50
scaled_x, scaled_y = self.get_scaled_coords(actual_x, actual_y)
copy = self.core.create_node(
actual_x, actual_y, core_node.type, core_node.model
)
if not copy:
continue
node = CanvasNode(
self.app, self, scaled_x, scaled_y, copy, canvas_node.image
)
# copy configurations and services
node.core_node.services = core_node.services.copy()
node.core_node.config_services = core_node.config_services.copy()
node.core_node.emane_model_configs = deepcopy(core_node.emane_model_configs)
node.core_node.wlan_config = deepcopy(core_node.wlan_config)
node.core_node.mobility_config = deepcopy(core_node.mobility_config)
node.core_node.service_configs = deepcopy(core_node.service_configs)
node.core_node.service_file_configs = deepcopy(
core_node.service_file_configs
)
node.core_node.config_service_configs = deepcopy(
core_node.config_service_configs
)
copy_map[canvas_node.id] = node.id
self.nodes[node.id] = node
self.core.set_canvas_node(copy, node)
for edge in canvas_node.edges:
if edge.src not in to_copy_ids or edge.dst not in to_copy_ids:
if canvas_node.id == edge.src:
dst_node = self.nodes[edge.dst]
copy_edge = self.create_edge(node, dst_node)
elif canvas_node.id == edge.dst:
src_node = self.nodes[edge.src]
copy_edge = self.create_edge(src_node, node)
else:
continue
copy_link = copy_edge.link
iface1_id = copy_link.iface1.id if copy_link.iface1 else None
iface2_id = copy_link.iface2.id if copy_link.iface2 else None
options = edge.link.options
if options:
copy_edge.link.options = deepcopy(options)
if options and options.unidirectional:
asym_iface1 = None
if iface1_id is not None:
asym_iface1 = Interface(id=iface1_id)
asym_iface2 = None
if iface2_id is not None:
asym_iface2 = Interface(id=iface2_id)
copy_edge.asymmetric_link = Link(
node1_id=copy_link.node2_id,
node2_id=copy_link.node1_id,
iface1=asym_iface2,
iface2=asym_iface1,
options=deepcopy(edge.asymmetric_link.options),
)
copy_edge.redraw()
else:
to_copy_edges.add(edge)
# copy link and link config
for edge in to_copy_edges:
src_node_id = copy_map[edge.src]
dst_node_id = copy_map[edge.dst]
src_node_copy = self.nodes[src_node_id]
dst_node_copy = self.nodes[dst_node_id]
copy_edge = self.create_edge(src_node_copy, dst_node_copy)
copy_link = copy_edge.link
iface1_id = copy_link.iface1.id if copy_link.iface1 else None
iface2_id = copy_link.iface2.id if copy_link.iface2 else None
options = edge.link.options
if options:
copy_link.options = deepcopy(options)
if options and options.unidirectional:
asym_iface1 = None
if iface1_id is not None:
asym_iface1 = Interface(id=iface1_id)
asym_iface2 = None
if iface2_id is not None:
asym_iface2 = Interface(id=iface2_id)
copy_edge.asymmetric_link = Link(
node1_id=copy_link.node2_id,
node2_id=copy_link.node1_id,
iface1=asym_iface2,
iface2=asym_iface1,
options=deepcopy(edge.asymmetric_link.options),
)
copy_edge.redraw()
self.itemconfig(
copy_edge.id,
width=self.itemcget(edge.id, "width"),
fill=self.itemcget(edge.id, "fill"),
)
self.tag_raise(tags.NODE)
def scale_graph(self) -> None:
for node_id, canvas_node in self.nodes.items():
image = nutils.get_icon(canvas_node.core_node, self.app)
self.itemconfig(node_id, image=image)
canvas_node.image = image
canvas_node.scale_text()
canvas_node.scale_antennas()
for edge_id in self.find_withtag(tags.EDGE):
self.itemconfig(edge_id, width=int(EDGE_WIDTH * self.app.app_scale))
def get_metadata(self) -> Dict[str, Any]:
wallpaper_path = None
if self.wallpaper_file:
wallpaper = Path(self.wallpaper_file)
if appconfig.BACKGROUNDS_PATH == wallpaper.parent:
wallpaper_path = wallpaper.name
else:
wallpaper_path = str(wallpaper)
return dict(
id=self.id,
wallpaper=wallpaper_path,
wallpaper_style=self.scale_option.get(),
fit_image=self.adjust_to_dim.get(),
dimensions=self.current_dimensions,
)
def parse_metadata(self, config: Dict[str, Any]) -> None:
fit_image = config.get("fit_image", False)
self.adjust_to_dim.set(fit_image)
wallpaper_style = config.get("wallpaper_style", 1)
self.scale_option.set(wallpaper_style)
dimensions = config.get("dimensions")
if dimensions:
self.redraw_canvas(dimensions)
wallpaper = config.get("wallpaper")
if wallpaper:
wallpaper = Path(wallpaper)
if not wallpaper.is_file():
wallpaper = appconfig.BACKGROUNDS_PATH.joinpath(wallpaper)
logger.info("canvas(%s), wallpaper: %s", self.id, wallpaper)
if wallpaper.is_file():
self.set_wallpaper(str(wallpaper))
else:
self.app.show_error(
"Background Error", f"background file not found: {wallpaper}"
)
|
{
"content_hash": "06911c3da0cb24a6c2f83f4b2ca8b91b",
"timestamp": "",
"source": "github",
"line_count": 852,
"max_line_length": 88,
"avg_line_length": 39.005868544600936,
"alnum_prop": 0.5471669725874884,
"repo_name": "pexip/os-core-network",
"id": "e3225a4df73c9ce2859289405fb471916ab85d4a",
"size": "33233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "daemon/core/gui/graph/graph.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "92828"
},
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "329"
},
{
"name": "M4",
"bytes": "8355"
},
{
"name": "Makefile",
"bytes": "12003"
},
{
"name": "Python",
"bytes": "1664476"
},
{
"name": "Shell",
"bytes": "31246"
},
{
"name": "Tcl",
"bytes": "923461"
}
],
"symlink_target": ""
}
|
import logging
from nose.tools import assert_equal
from liboozie import conf
from liboozie.types import WorkflowAction, Coordinator
from liboozie.utils import config_gen
from oozie.tests import MockOozieApi
from useradmin.models import User
import desktop.conf as desktop_conf
from desktop.lib.test_utils import reformat_xml
LOG = logging.getLogger(__name__)
def test_valid_external_id():
action = WorkflowAction(MockOozieApi.JSON_WORKFLOW_LIST[0])
assert_equal('job_201208072118_0044', action.externalId)
assert_equal('/jobbrowser/jobs/job_201208072118_0044/single_logs', action.get_absolute_log_url())
assert_equal('/jobbrowser/jobs/job_201208072118_0044', action.get_external_id_url())
action = WorkflowAction(MockOozieApi.JSON_WORKFLOW_LIST[1])
assert_equal('-', action.externalId)
assert_equal(None, action.get_absolute_log_url())
assert_equal(None, action.get_external_id_url())
action = WorkflowAction(MockOozieApi.JSON_WORKFLOW_LIST[2])
assert_equal('', action.externalId)
assert_equal(None, action.get_absolute_log_url())
assert_equal(None, action.get_external_id_url())
action = WorkflowAction(MockOozieApi.JSON_WORKFLOW_LIST[3])
assert_equal(None, action.externalId)
assert_equal(None, action.get_absolute_log_url())
assert_equal(None, action.get_external_id_url())
def aggregate_coordinator_instances():
dates = ['1', '2', '3', '6', '7', '8', '10', '12', '15', '16', '20', '23', '30', '40']
assert_equal(['1-3', '6-8', '10-10', '12-12', '15-16', '20-20', '23-23', '30-30', '40-40'], Coordinator.aggreate(dates))
def test_config_gen():
properties = {
'user.name': 'hue',
'test.1': 'http://localhost/test?test1=test&test2=test'
}
assert_equal(reformat_xml(b"""<configuration>
<property>
<name>test.1</name>
<value><![CDATA[http://localhost/test?test1=test&test2=test]]></value>
</property>
<property>
<name>user.name</name>
<value><![CDATA[hue]]></value>
</property>
</configuration>"""), reformat_xml(config_gen(properties)))
def test_config_gen_negative():
properties = {
'user.name': 'hue<foo>bar</foo>',
'test.1': 'http://localhost/test?test1=test&test2=test]]>&test3=test'
}
assert_equal(reformat_xml(b"""<configuration>
<property>
<name>test.1</name>
<value><![CDATA[http://localhost/test?test1=test&test2=test&test3=test]]></value>
</property>
<property>
<name>user.name</name>
<value><![CDATA[hue<foo>bar</foo>]]></value>
</property>
</configuration>"""), reformat_xml(config_gen(properties)))
def test_ssl_validate():
for desktop_kwargs, conf_kwargs, expected in [
({'present': False}, {'present': False}, True),
({'present': False}, {'data': False}, False),
({'present': False}, {'data': True}, True),
({'data': False}, {'present': False}, False),
({'data': False}, {'data': False}, False),
({'data': False}, {'data': True}, True),
({'data': True}, {'present': False}, True),
({'data': True}, {'data': False}, False),
({'data': True}, {'data': True}, True),
]:
resets = [
desktop_conf.SSL_VALIDATE.set_for_testing(**desktop_kwargs),
conf.SSL_CERT_CA_VERIFY.set_for_testing(**conf_kwargs),
]
try:
assert_equal(conf.SSL_CERT_CA_VERIFY.get(), expected,
'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL_CERT_CA_VERIFY.get()))
finally:
for reset in resets:
reset()
|
{
"content_hash": "e7afa4251477aec277b01e4e0b031b49",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 123,
"avg_line_length": 34.06930693069307,
"alnum_prop": 0.6599825632083697,
"repo_name": "kawamon/hue",
"id": "db5ea9b2595ae779f926cb1f2a61a1afbab8b131",
"size": "4233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/libs/liboozie/src/liboozie/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
import os
from subprocess import call
from jawaf.conf import settings
from jawaf.management.base import BaseCommand
class Command(BaseCommand):
"""Run py.test framework, set up test databases."""
def handle(self, **options):
"""Convenience for running alembic commands."""
alembic_cmd = [
'alembic',
'--config={}'.format(
os.path.join(settings.BASE_DIR, 'migrations', 'alembic.ini'))]
alembic_cmd.extend(options['unknown'])
call(alembic_cmd)
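# Hedged usage note (added for illustration, not part of the original command):
# any extra CLI arguments are forwarded untouched to alembic, so an invocation
# such as "jawaf-admin db upgrade head" (the entry-point name is assumed here)
# runs roughly: alembic --config=<BASE_DIR>/migrations/alembic.ini upgrade head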
|
{
"content_hash": "f2988d5f30eb30fc18de3d21b6fb4db9",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 30.88235294117647,
"alnum_prop": 0.6323809523809524,
"repo_name": "danpozmanter/jawaf",
"id": "be0add32cb5c95d8b72043a3629aceeea35c0b57",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jawaf/management/commands/db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "131545"
},
{
"name": "Smarty",
"bytes": "5172"
}
],
"symlink_target": ""
}
|
import orange, math, orngTest, orngStat, random, orngMisc
##################################################################
# The orngEnsemble with CHANGED Random Forest Classifier voting!!!
##################################################################
# This function is built-in in Python 2.3,
# but we define it to be compatible with 2.2 as well
from operator import add
def sum(x):
return reduce(add, x)
########################################################################
# Boosting
inf = 100000
def BoostedLearner(learner, examples=None, t=10, name='AdaBoost.M1'):
learner = BoostedLearnerClass(learner, t, name)
if examples:
return learner(examples)
else:
return learner
class BoostedLearnerClass(orange.Learner):
def __init__(self, learner, t, name):
self.t = t
self.name = name
self.learner = learner
def __call__(self, instances, origWeight = 0):
weight = orange.newmetaid()
if origWeight:
for i in instances:
i.setweight(weight, i.getweight(origWeight))
else:
instances.addMetaAttribute(weight, 1.0)
n = len(instances)
classifiers = []
for i in range(self.t):
epsilon = 0.0
classifier = self.learner(instances, weight)
corr = []
for ex in instances:
if classifier(ex) != ex.getclass():
epsilon += ex.getweight(weight)
corr.append(0)
else:
corr.append(1)
epsilon = epsilon / float(reduce(lambda x,y:x+y.getweight(weight), instances, 0))
classifiers.append((classifier, epsilon and math.log((1-epsilon)/epsilon) or inf))
if epsilon==0 or epsilon >= 0.499:
if epsilon >= 0.499 and len(classifiers)>1:
del classifiers[-1]
instances.removeMetaAttribute(weight)
return BoostedClassifier(classifiers = classifiers, name=self.name, classvar=instances.domain.classVar)
beta = epsilon/(1-epsilon)
for e in range(n):
if corr[e]:
instances[e].setweight(weight, instances[e].getweight(weight)*beta)
f = 1/float(reduce(add, [e.getweight(weight) for e in instances]))
for e in range(n):
instances[e].setweight(weight, instances[e].getweight(weight)*f)
instances.removeMetaAttribute(weight)
return BoostedClassifier(classifiers = classifiers, name=self.name, classvar=instances.domain.classVar)
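# Comment added for clarity (not in the original source): the loop above is the
# standard AdaBoost.M1 update. With weighted error epsilon, the classifier gets
# vote weight log((1 - epsilon) / epsilon) and correctly classified examples have
# their weights multiplied by beta = epsilon / (1 - epsilon) before renormalising;
# e.g. epsilon = 0.2 gives a vote weight of log(4) ~= 1.386 and beta = 0.25.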
class BoostedClassifier(orange.Classifier):
def __init__(self, **kwds):
self.__dict__ = kwds
def __call__(self, example, resultType = orange.GetValue):
votes = [0.] * len(self.classvar.values)
for c, e in self.classifiers:
votes[int(c(example))] += e
index = orngMisc.selectBestIndex(votes)
value = orange.Value(self.classvar, index)
if resultType == orange.GetValue:
return value
sv = sum(votes)
for i in range(len(votes)):
votes[i] = votes[i]/sv
if resultType == orange.GetProbabilities:
return votes
else:
return (value, votes)
########################################################################
# Bagging
def BaggedLearner(learner=None, t=10, name='Bagging', examples=None):
learner = BaggedLearnerClass(learner, t, name)
if examples:
return learner(examples)
else:
return learner
class BaggedLearnerClass(orange.Learner):
def __init__(self, learner, t, name):
self.t = t
self.name = name
self.learner = learner
def __call__(self, examples, weight=0):
r = random.Random()
r.seed(0)
n = len(examples)
classifiers = []
for i in range(self.t):
selection = []
for i in range(n):
selection.append(r.randrange(n))
examples = orange.ExampleTable(examples)
data = examples.getitems(selection)
classifiers.append(self.learner(data, weight))
return BaggedClassifier(classifiers = classifiers, name=self.name, classvar=examples.domain.classVar)
class BaggedClassifier(orange.Classifier):
def __init__(self, **kwds):
self.__dict__ = kwds
def __call__(self, example, resultType = orange.GetValue):
freq = [0.] * len(self.classvar.values)
for c in self.classifiers:
freq[int(c(example))] += 1
index = freq.index(max(freq))
value = orange.Value(self.classvar, index)
if resultType == orange.GetValue:
return value
for i in range(len(freq)):
freq[i] = freq[i]/len(self.classifiers)
if resultType == orange.GetProbabilities:
return freq
else:
return (value, freq)
########################################################################
# Random Forests
from math import sqrt, floor
import orngTree
class SplitConstructor_AttributeSubset(orange.TreeSplitConstructor):
def __init__(self, scons, attributes, rand = None):
self.scons = scons # split constructor of original tree
self.attributes = attributes # number of attributes to consider
if rand:
self.rand = rand # a random generator
else:
self.rand = random.Random()
self.rand.seed(0)
def __call__(self, gen, weightID, contingencies, apriori, candidates, clsfr):
cand = [1]*self.attributes + [0]*(len(candidates) - self.attributes)
self.rand.shuffle(cand)
# instead of using all attributes, we invoke the split constructor only on
# a randomly chosen subset of the attributes
t = self.scons(gen, weightID, contingencies, apriori, cand, clsfr)
return t
class HARFLearner(orange.Learner):#RandomForestLearner(orange.Learner):
def __new__(cls, examples=None, weight = 0, **kwds):
self = orange.Learner.__new__(cls, **kwds)
if examples:
self.__init__(**kwds)
return self.__call__(examples, weight)
else:
return self
def __init__(self, learner=None, trees=500, attributes=None, name='High Agreement Random Forest', rand=None, callback=None, agrLevel=70):
"""random forest learner"""
self.trees = trees
self.name = name
self.learner = learner
self.attributes = attributes
self.callback = callback
# Added for HARF
self.agrLevel = agrLevel
if rand:
self.rand = rand
else:
self.rand = random.Random()
self.rand.seed(0)
if not learner:
# tree learner assembled as suggested by Breiman (2001)
smallTreeLearner = orngTree.TreeLearner(storeNodeClassifier = 0, storeContingencies=0, storeDistributions=1, minExamples=5).instance()
smallTreeLearner.split.discreteSplitConstructor.measure = smallTreeLearner.split.continuousSplitConstructor.measure = orange.MeasureAttribute_gini()
smallTreeLearner.split = SplitConstructor_AttributeSubset(smallTreeLearner.split, attributes, self.rand)
self.learner = smallTreeLearner
def __call__(self, examples, weight=0):#agrLevel,
# if number of attributes for subset is not set, use square root
if hasattr(self.learner.split, 'attributes') and not self.learner.split.attributes:
self.learner.split.attributes = int(sqrt(len(examples.domain.attributes)))
n = len(examples)
# build the forest
classifiers = []
for i in range(self.trees):
# draw bootstrap sample
selection = []
for j in range(n):
selection.append(self.rand.randrange(n))
data = examples.getitems(selection)
# build the model from the bootstrap sample
classifiers.append(self.learner(data))
if self.callback:
self.callback()
# if self.callback: self.callback((i+1.)/self.trees)
return RandomForestClassifier(classifiers = classifiers, name=self.name, domain=examples.domain, classVar=examples.domain.classVar, agrLevel = self.agrLevel)
class RandomForestClassifier(orange.Classifier):
def __init__(self, **kwds):
self.__dict__.update(kwds)
#self.agrLevel = kwds['agrLevel']
def __call__(self, example, resultType = orange.GetValue):
from operator import add
# voting for class probabilities
if resultType == orange.GetProbabilities or resultType == orange.GetBoth:
cprob = [0.] * len(self.domain.classVar.values)
for c in self.classifiers:
a = [x for x in c(example, orange.GetProbabilities)]
cprob = map(add, cprob, a)
norm = sum(cprob)
for i in range(len(cprob)):
cprob[i] = cprob[i]/norm
# voting for crisp class membership, notice that
# this may not be the same class as one obtaining the
# highest probability through probability voting
if resultType == orange.GetValue or resultType == orange.GetBoth:
cfreq = [0] * len(self.domain.classVar.values)
for c in self.classifiers:
cfreq[int(c(example))] += 1
max1 = max(cfreq)
index = cfreq.index(max1)
cfreq[index] = 0
max2 = max(cfreq)
if max1 > self.agrLevel/100.0*(max1 + max2):
cvalue = orange.Value(self.domain.classVar, index)
else:
cvalue = None
if resultType == orange.GetValue: return cvalue #, cfreq)
elif resultType == orange.GetProbabilities: return cprob
else: return (cvalue, cprob)
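# Worked example added for clarity (not in the original source): with agrLevel=70
# and 100 trees, if the top class gets 60 votes and the runner-up 30, then
# 60 > 0.70 * (60 + 30) = 63 is false, so the HARF classifier abstains and
# returns None; with 80 vs 15 votes, 80 > 0.70 * 95 = 66.5 holds and the
# majority class is returned.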
##########################################################
### MeasureAttribute_randomForests
class MeasureAttribute_randomForests(orange.MeasureAttribute):
def __init__(self, learner=None, trees = 100, attributes=None, rand=None):
self.trees = trees
self.learner = learner
self.bufexamples = None
self.attributes = attributes
if self.learner == None:
temp = HARFLearner(attributes=self.attributes)#RandomForestLearner(attributes=self.attributes)
self.learner = temp.learner
if hasattr(self.learner.split, 'attributes'):
self.origattr = self.learner.split.attributes
if rand:
self.rand = rand # a random generator
else:
self.rand = random.Random()
self.rand.seed(0)
def __call__(self, a1, a2, a3=None):
"""
Returns the importance of a given attribute, which can be given by index,
name or as an orange.Variable.
"""
attrNo = None
examples = None
if type(a1) == int: #by attr. index
attrNo, examples, apriorClass = a1, a2, a3
elif type(a1) == type("a"): #by attr. name
attrName, examples, apriorClass = a1, a2, a3
attrNo = examples.domain.index(attrName)
elif isinstance(a1, orange.Variable):
a1, examples, apriorClass = a1, a2, a3
atrs = [a for a in examples.domain.attributes]
attrNo = atrs.index(a1)
else:
contingency, classDistribution, apriorClass = a1, a2, a3
raise Exception("MeasureAttribute_rf can not be called with (contingency, classDistribution, apriorClass) as fuction arguments.")
self.buffer(examples)
return self.avimp[attrNo]*100/self.trees
def importances(self, examples):
"""
Returns the importances of all attributes in the dataset as a list. Buffered.
"""
self.buffer(examples)
return [a*100/self.trees for a in self.avimp]
def buffer(self, examples):
"""
recalculate importances if needed (new examples)
"""
recalculate = False
if examples != self.bufexamples:
recalculate = True
elif examples.version != self.bufexamples.version:
recalculate = True
if (recalculate):
self.bufexamples = examples
self.avimp = [0.0]*len(self.bufexamples.domain.attributes)
self.acu = 0
if hasattr(self.learner.split, 'attributes'):
self.learner.split.attributes = self.origattr
# if number of attributes for subset is not set, use square root
if hasattr(self.learner.split, 'attributes') and not self.learner.split.attributes:
self.learner.split.attributes = int(sqrt(len(examples.domain.attributes)))
self.importanceAcu(self.bufexamples, self.trees, self.avimp)
def getOOB(self, examples, selection, nexamples):
ooblist = filter(lambda x: x not in selection, range(nexamples))
return examples.getitems(ooblist)
def numRight(self, oob, classifier):
"""
returns the number of examples which are classified correctly
"""
right = 0
for el in oob:
if (el.getclass() == classifier(el)):
right = right + 1
return right
def numRightMix(self, oob, classifier, attr):
"""
returns the number of examples which are classified
correctly even if an attribute is shuffled
"""
n = len(oob)
perm = range(n)
self.rand.shuffle(perm)
right = 0
for i in range(n):
ex = orange.Example(oob[i])
ex[attr] = oob[perm[i]][attr]
if (ex.getclass() == classifier(ex)):
right = right + 1
return right
def importanceAcu(self, examples, trees, avimp):
"""
accumulate attribute importances into avimp over the given number of trees
"""
n = len(examples)
attrs = len(examples.domain.attributes)
attrnum = {}
for attr in range(len(examples.domain.attributes)):
attrnum[examples.domain.attributes[attr].name] = attr
# build the forest
classifiers = []
for i in range(trees):
# draw bootstrap sample
selection = []
for j in range(n):
selection.append(self.rand.randrange(n))
data = examples.getitems(selection)
# build the model from the bootstrap sample
cla = self.learner(data)
#prepare OOB data
oob = self.getOOB(examples, selection, n)
#right on unmixed
right = self.numRight(oob, cla)
presl = list(self.presentInTree(cla.tree, attrnum))
#randomize each attribute in data and test
#only those on which there was a split
for attr in presl:
#calculate number of right classifications
#if the values of this attribute are permutated randomly
rightimp = self.numRightMix(oob, cla, attr)
avimp[attr] += (float(right-rightimp))/len(oob)
self.acu += trees
def presentInTree(self, node, attrnum):
"""
returns attributes present in tree (attributes that split)
"""
if not node:
return set([])
if node.branchSelector:
j = attrnum[node.branchSelector.classVar.name]
cs = set([])
for i in range(len(node.branches)):
s = self.presentInTree(node.branches[i], attrnum)
cs = s | cs
cs = cs | set([j])
return cs
else:
return set([])
|
{
"content_hash": "539d40a85d298af27fb75f01b20111f4",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 165,
"avg_line_length": 36.79090909090909,
"alnum_prop": 0.5589943167778602,
"repo_name": "xflows/textflows",
"id": "fb6314d004bf5e9d0d7a2d458d28f5d3845cc50e",
"size": "16188",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "workflows/orngRF_HARF.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "192012"
},
{
"name": "CSS",
"bytes": "76342"
},
{
"name": "HTML",
"bytes": "363446"
},
{
"name": "JavaScript",
"bytes": "794623"
},
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "Prolog",
"bytes": "146760"
},
{
"name": "Python",
"bytes": "30267970"
},
{
"name": "Roff",
"bytes": "58306446"
},
{
"name": "Shell",
"bytes": "97"
}
],
"symlink_target": ""
}
|
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.netapp import options as na_opts
SSC_VSERVER = 'fake_vserver'
SSC_VOLUMES = ('volume1', 'volume2')
SSC_VOLUME_MAP = {
SSC_VOLUMES[0]: {
'pool_name': SSC_VOLUMES[0],
},
SSC_VOLUMES[1]: {
'pool_name': SSC_VOLUMES[1],
},
}
SSC_AGGREGATES = ('aggr1', 'aggr2')
SSC = {
'volume1': {
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_aggregate': 'aggr1',
'netapp_compression': 'false',
'netapp_dedup': 'true',
'netapp_mirrored': 'false',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'netapp_hybrid_aggregate': False,
'pool_name': 'volume1',
},
'volume2': {
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_thin_provisioned': 'true',
'netapp_aggregate': 'aggr2',
'netapp_compression': 'true',
'netapp_dedup': 'true',
'netapp_mirrored': 'true',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'FCAL',
'netapp_hybrid_aggregate': True,
'pool_name': 'volume2',
},
}
SSC_FLEXVOL_INFO = {
'volume1': {
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_aggregate': 'aggr1',
},
'volume2': {
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_thin_provisioned': 'true',
'netapp_aggregate': 'aggr2',
},
}
SSC_DEDUPE_INFO = {
'volume1': {
'netapp_dedup': 'true',
'netapp_compression': 'false',
},
'volume2': {
'netapp_dedup': 'true',
'netapp_compression': 'true',
},
}
SSC_MIRROR_INFO = {
'volume1': {
'netapp_mirrored': 'false',
},
'volume2': {
'netapp_mirrored': 'true',
},
}
SSC_AGGREGATE_INFO = {
'volume1': {
'netapp_disk_type': 'SSD',
'netapp_raid_type': 'raid_dp',
'netapp_hybrid_aggregate': False,
},
'volume2': {
'netapp_disk_type': 'FCAL',
'netapp_raid_type': 'raid_dp',
'netapp_hybrid_aggregate': True,
},
}
PROVISIONING_OPTS = {
'aggregate': 'fake_aggregate',
'thin_provisioned': True,
'snapshot_policy': None,
'language': 'en_US',
'dedupe_enabled': False,
'compression_enabled': False,
'snapshot_reserve': '12',
'volume_type': 'rw',
'size': 20,
}
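# Helper that builds a Configuration object for the given backend name,
# pre-loaded with the NetApp option groups exercised by these tests.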
def get_fake_cmode_config(backend_name):
config = configuration.Configuration(driver.volume_opts,
config_group=backend_name)
config.append_config_values(na_opts.netapp_proxy_opts)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_cluster_opts)
config.append_config_values(na_opts.netapp_san_opts)
config.append_config_values(na_opts.netapp_replication_opts)
return config
|
{
"content_hash": "50781245f7fc0ed7e3b4c21cdf40f300",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 67,
"avg_line_length": 27.75,
"alnum_prop": 0.5864864864864865,
"repo_name": "Nexenta/cinder",
"id": "e104f9098ed19f8a940d006aaff9b73467afc8c8",
"size": "3964",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18007018"
},
{
"name": "Shell",
"bytes": "13543"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['MovingMedian'] , ['Seasonal_Second'] , ['ARX'] );
|
{
"content_hash": "5067abf39b67ed60f7c74e25ca46cf5d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 85,
"avg_line_length": 39.5,
"alnum_prop": 0.7088607594936709,
"repo_name": "antoinecarme/pyaf",
"id": "ee885fcb4f76657b2552daeb8326be835b6b80da",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_Second_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
TODO: add comments
"""
import os
import shutil
from tow.modules import load_module
from tow.dockerfile import Dockerfile
from tow.attrs import process_attrs
from tow import templates
import subprocess
import sys
import json
import collections
TOW_VOLUME = "/tow"
def project_paths():
current_dir = os.getcwd()
return (current_dir,
os.path.join(current_dir, "Dockerfile"),
os.path.join(current_dir, "mapping.py"),
os.path.join(current_dir, "templates"),
os.path.join(current_dir, "files"),
os.path.join(current_dir, "attributes"))
def prepare_workingdir(workingdir):
if os.path.exists(workingdir):
shutil.rmtree(workingdir, ignore_errors=True)
os.mkdir(workingdir)
def dequote(s):
"""
If a string has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the string unchanged.
"""
if (s[0] == s[-1]) and s.startswith(("'", '"')):
return s[1:-1]
return s
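# parse_env_arg turns a docker-style "-e NAME=value" argument into a (name, value) tuple,
# e.g. parse_env_arg("FOO=bar") -> ("FOO", "bar"); a bare "FOO" falls back to os.environ.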
def parse_env_arg(env_arg):
if env_arg:
env_pair = env_arg.split("=")
if len(env_pair) < 2:
return (env_pair[0], os.getenv(env_pair[0], ""))
else:
return (env_pair[0], dequote("".join(env_pair[1:])))
def parse_envfile(env_file_name):
envs = []
with open(env_file_name, "r") as envfile:
for envfile_line in envfile.readlines():
if not envfile_line.strip().startswith("#"):
envs.append(parse_env_arg(envfile_line.strip()))
return envs
def get_env_args(args):
envs = {}
i = 0
while i < len(args):
arg = args[i]
if arg == "-e":
i = i + 1
(env_name, env_var) = parse_env_arg(args[i].strip())
envs[env_name] = env_var
i = i + 1
elif arg == "--env":
i = i + 1
while i < len(args) and not args[i].startswith("-"):
(env_name, env_var) = parse_env_arg(args[i].strip())
envs[env_name] = env_var
i = i + 1
elif arg == "--env-file":
i = i + 1
env_file_name = args[i].strip()
env_vars = parse_envfile(env_file_name)
            envs.update({env_name: env_var for (env_name, env_var) in env_vars})
i = i + 1
else:
i = i + 1
return envs
def copy_files(workingdir, files_path, mapping):
for fm in mapping.get("files", []):
src = fm[0]
src_file_path = os.path.join(files_path, src)
if os.path.exists(src_file_path):
dst_file_path = os.path.join(workingdir, src)
file_path_dir = os.path.dirname(dst_file_path)
if not os.path.exists(file_path_dir):
os.makedirs(file_path_dir)
shutil.copy2(src_file_path, dst_file_path)
else:
print "file %s doesn't exists" % src_file_path
def init_tow(env_args={}, attributes_name="default", mapping_name="mapping"):
(current_dir, dockerfile_path,
mappingfile_path, templates_path,
files_path, attributes_path) = project_paths()
file_mapping = load_module({}, current_dir, "mapping")
mapping = getattr(file_mapping, mapping_name, {})
workingdir = os.path.join(current_dir, ".tow")
prepare_workingdir(workingdir)
dockerfile = Dockerfile(dockerfile_path)
# TODO: print warn into logs that you try to read env but it is not defined
envs = collections.defaultdict(lambda: "")
envs.update(dockerfile.envs())
# envs passed as params has more priority then Dockerfile envs
envs.update(env_args)
attrs = process_attrs(envs, attributes_path, attributes_name)
# process templates
for fm in mapping.get("templates", []):
src = fm[0]
src_template_path = os.path.join(templates_path, src)
if os.path.exists(src_template_path):
processed_template_path = os.path.join(workingdir, src)
template_path_dir = os.path.dirname(processed_template_path)
if not os.path.exists(template_path_dir):
os.makedirs(template_path_dir)
templates.process(os.path.dirname(src_template_path),
os.path.basename(src_template_path),
processed_template_path, attrs)
else:
print "WARN: template file %s doesn't exists" % src_template_path
copy_files(workingdir, files_path, mapping)
# Transform dict with mapping to list of file tuples that exists in .tow dir
handled_file_mapping = [fm for fm_list in mapping.values()
for fm in fm_list
if os.path.exists(os.path.join(workingdir, fm[0]))]
# Init mapping file
templates.process_template("mapping.sh.tmpl",
os.path.join(workingdir, "mapping.sh"),
{"mapping": handled_file_mapping,
"volume_name": TOW_VOLUME})
return (handled_file_mapping, dockerfile, envs, attrs, workingdir)
def get_linked_container_variables(args):
envs = {}
if "--link" in args:
link_idx = args.index("--link")
link_idx = link_idx + 1
link_info = args[link_idx]
(name, alias) = link_info.split(":")
current_name_idx = args.index("--name")
current_name_idx = current_name_idx + 1
current_name = args[current_name_idx]
p = subprocess.Popen(["docker", "inspect", name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if err:
print "ERROR: Problem to make docker inspect for %s, %s" % (name, err)
sys.exit(1)
linked_info = json.loads(out.strip(), object_pairs_hook=collections.OrderedDict)
exposed_ports = [port for port in linked_info[0]["Config"]["ExposedPorts"].keys()]
linked_envs = [env for env in linked_info[0]["Config"]["Env"]
if not (env.startswith("HOME") or env.startswith("PATH"))]
ip_address = linked_info[0]["NetworkSettings"]["IPAddress"]
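        # Mimic the environment variables Docker injects for --link'ed containers (ALIAS_NAME, ALIAS_PORT_*, ALIAS_ENV_*).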
envs["%s_NAME" % alias.upper()] = "/%s/%s" % (current_name, alias)
if exposed_ports:
            first_port, first_proto = exposed_ports[0].split("/")
            envs["%s_PORT" % alias.upper()] = "%s://%s:%s" % (first_proto,
                                                              ip_address,
                                                              first_port)
for exposed_port in exposed_ports:
port, proto = exposed_port.split("/")
envs["%s_PORT_%s_%s" % (alias.upper(), port, proto.upper())] = "%s://%s:%s" % (proto,
ip_address,
port)
envs["%s_PORT_%s_%s_PROTO" % (alias.upper(), port, proto.upper())] = proto
envs["%s_PORT_%s_%s_PORT" % (alias.upper(), port, proto.upper())] = port
envs["%s_PORT_%s_%s_ADDR" % (alias.upper(), port, proto.upper())] = ip_address
if linked_envs:
for linked_env in linked_envs:
linked_env_name, linked_env_value = linked_env.split("=")
envs["%s_ENV_%s" % (alias.upper(), linked_env_name)] = linked_env_value
return envs
|
{
"content_hash": "278dafbf3a9f16c695e13a9b321dc429",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 106,
"avg_line_length": 37.665,
"alnum_prop": 0.5446701181468206,
"repo_name": "yurinnick/tow",
"id": "1ceb2c860027384a3ac65ce0691b9aa3412bff06",
"size": "7533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tow/commands/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30076"
},
{
"name": "Shell",
"bytes": "765"
}
],
"symlink_target": ""
}
|
import os
import sys
from distutils.core import Command, setup
from distutils.command.build_ext import build_ext
from traceback import print_exc
cython_available = False
try:
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
cython_available = True
except ImportError, e:
pass
try:
import nose
except ImportError:
nose = None
def get_ext_modules():
if not cython_available:
print 'WARNING: cython not available, proceeding with pure python implementation.'
return []
try:
import meinheld
except ImportError, e:
print 'WARNING: meinheld must be installed to build cython version of meinheld-zeromq (%s).' % e
return []
try:
import zmq
except ImportError, e:
print 'WARNING: pyzmq(==2.2.0) must be installed to build cython version of meinheld-zeromq (%s).' % e
return []
return [Extension('meinheld_zeromq.core', ['meinheld_zeromq/core.pyx'], include_dirs=zmq.get_includes())]
__version__ = (0, 0, 1)
setup(
name = 'meinheld_zeromq',
version = '.'.join([str(x) for x in __version__]),
packages = ['meinheld_zeromq'],
ext_modules = get_ext_modules(),
author = 'INADA Naoki',
author_email = 'songofacandy@gmail.com',
url = 'http://github.com/methane/meinheld-zeromq',
description = 'meinheld compatibility layer for pyzmq',
long_description=open('README.rst').read(),
install_requires = ['pyzmq==2.2.0', 'meinheld'],
license = 'New BSD',
)
|
{
"content_hash": "d7c6d323052b3edd9e48d373d03c6dfa",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 110,
"avg_line_length": 29.576923076923077,
"alnum_prop": 0.6644993498049415,
"repo_name": "methane/meinheld-zeromq",
"id": "067259c087935f35cdb29624662da30e81432f06",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9724"
}
],
"symlink_target": ""
}
|
import random
from battlesnake.plugins.contrib.arena_master.puppets import outbound_commands as ai_commands
# This is the maximum number of tics that a unit can be speed 0.0 with
# a destination before we try to assign a new one.
MAX_IDLE_COUNTER = 25
def get_logical_roam_destination(puppet, edge_padding=8):
"""
Since we're relying on smaller maps to make sure that our AI eventually
    stumbles across an enemy, pick a random hex that is neither on nor near
    the map boundary.
:param ArenaMasterPuppet puppet:
:param int edge_padding: This is the closest a unit can get to the
map borders. This increases the chance of running into enemy units
and keeps the AI away from the map edges, where they tend to suck.
:rtype: tuple
:returns: A destination tuple in the form of (x,y).
"""
map_width = puppet.map_width - 1
map_height = puppet.map_height - 1
min_x = edge_padding
max_x = map_width - edge_padding
min_y = edge_padding
max_y = map_height - edge_padding
x = min(max(random.randint(0, map_width), min_x), max_x)
y = min(max(random.randint(0, map_height), min_y), max_y)
return x, y
def find_nearest_enemy(ai_unit, enemy_units):
"""
Given a unit and a list of its opponents, find the nearest one and return it.
:param ArenaMapUnit ai_unit: The unit whose enemies to find.
:param list enemy_units: List of enemy ArenaMapUnit instances.
:rtype: ArenaMapUnit or None
:returns: The enemy nearest to the given unit, or None if no enemies
are present.
"""
nearest_unit = None
range_to_nearest = None
for enemy in enemy_units:
range_to = ai_unit.distance_to_unit(enemy)
if not nearest_unit or range_to < range_to_nearest:
range_to_nearest = range_to
nearest_unit = enemy
continue
return nearest_unit
def move_idle_units(puppet, friendly_ai_units, enemy_units):
"""
Given a list of ArenaMapUnit instances, put any idle slackers to work.
:param ArenaMasterPuppet puppet:
:param list friendly_ai_units:
    :param list enemy_units: List of enemy ArenaMapUnit instances.
"""
protocol = puppet.protocol
for unit in friendly_ai_units:
if unit.is_immobile():
# Lost cause.
continue
if unit.is_fallen():
unit.ai_idle_counter = 0
continue
if unit.speed != 0.0 and not unit.is_at_ai_destination():
# He's moving, don't bother him.
unit.ai_idle_counter = 0
continue
if unit.target_dbref != '#-1':
unit.ai_idle_counter = 0
continue
if unit.speed == 0.0 and unit.ai_last_destination and \
unit.ai_idle_counter < MAX_IDLE_COUNTER:
# This unit has a destination but is sitting still. Increment
# the idle timer, don't try to order it to do anything.
# Once the counter gets high enough, we'll fall through to
# the order section.
print "+Idle", \
unit, unit.ai_last_destination, unit.ai_idle_counter
unit.ai_idle_counter += 1
continue
if unit.ai_last_destination and not unit.is_at_ai_destination() and \
unit.ai_idle_counter < MAX_IDLE_COUNTER:
# He's got something to do, don't bother him.
continue
# We're going to tell them to do something. Reset the idle counter.
unit.ai_idle_counter = 0
print "Unit needs new orders:", unit
print " - Last destination", unit.ai_last_destination
# At this point, we've determined that the unit is idle and needs
# something to do. Start by trying to find the nearest enemy unit.
# If none can be found, we resort to roaming the map.
nearest_enemy = find_nearest_enemy(unit, enemy_units)
if nearest_enemy:
new_dest = nearest_enemy.x_coord, nearest_enemy.y_coord
print " - New destination (%s) %s" % (nearest_enemy, new_dest)
else:
new_dest = get_logical_roam_destination(puppet)
print " - New destination (roam)", new_dest
unit.ai_last_destination = new_dest
move_orders = "{ai_id} goto {x} {y}".format(
ai_id=unit.contact_id, x=new_dest[0], y=new_dest[1])
ai_commands.order_ai(protocol, puppet, move_orders)
chase_orders = "{ai_id} chasetarg on".format(ai_id=unit.contact_id)
ai_commands.order_ai(protocol, puppet, chase_orders)
def handle_ai_target_change(puppet, old_unit, new_unit):
"""
This gets called when an AI's target changes.
:param ArenaMasterPuppet puppet:
:param ArenaMapUnit old_unit: The old version of the unit in the
store. This doesn't have the new changes that were picked up.
:param ArenaMapUnit new_unit: The new unit instance generated from
polling the units on the map. The store will copy over the
changed attributes from this instance to ``old_unit`` after this
handler runs.
"""
protocol = puppet.protocol
aggressor_id = old_unit.contact_id
if new_unit.target_dbref == '#-1':
# Had a lock but lost it.
return
# If we get this far, the AI has a new lock. They need to follow
# this guy.
victim = puppet.unit_store.get_unit_by_dbref(new_unit.target_dbref)
victim_id = victim.contact_id
# We clear these out so that a new destination may be assigned immediately
# if something happens to the target and the unit comes to a stop.
old_unit.ai_last_destination = None
new_unit.ai_last_destination = None
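    # Order the aggressor to follow its new target and hold position at its optimal weapon range.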
chase_orders = "{aggressor_id} follow {victim_id}".format(
aggressor_id=aggressor_id, victim_id=victim_id)
ai_commands.order_ai(protocol, puppet, chase_orders)
follow_bearing = 180
follow_range = new_unit.ai_optimal_weap_range
position_orders = "{aggressor_id} position {follow_bearing} {follow_range}".format(
aggressor_id=aggressor_id, follow_bearing=follow_bearing,
follow_range=follow_range)
ai_commands.order_ai(protocol, puppet, position_orders)
|
{
"content_hash": "eee584fe0cdf12233b1a8d7ceefbfdef",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 93,
"avg_line_length": 37.339393939393936,
"alnum_prop": 0.6406427527998702,
"repo_name": "gtaylor/btmux_battlesnake",
"id": "f1bd7511743d438153966f656d174bd2c1fe4df7",
"size": "6161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "battlesnake/plugins/contrib/arena_master/game_modes/wave_survival/ai_strategic_logic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "396903"
}
],
"symlink_target": ""
}
|
"""
Client for interacting with Nexus.
"""
import base64
from datetime import datetime
import hashlib
import json
import logging
from subprocess import Popen, PIPE
import time
import urllib
import urlparse
import yaml
import token_utils as token_utils
from utils import (
read_openssh_public_key,
read_openssh_private_key,
canonical_time,
b64encode,
sha1_base64,
sign_with_rsa,
sign_with_sshagent)
import requests
import rsa
import paramiko.agent
import os
import sshagent
log = logging.getLogger()
class NexusClient(object):
"""
Root object for interacting with the Nexus service
"""
def __init__(self, config=None, config_file=None):
if config_file is not None:
with open(config_file, 'r') as cfg:
self.config = yaml.load(cfg.read())
elif config is not None:
self.config = config
else:
raise AttributeError("No configuration was specified")
self.server = self.config['server']
cache_config = self.config.get('cache', {
'class': 'biokbase.nexus.token_utils.InMemoryCache',
'args': [],
})
self.client = self.config['client']
if 'SSH_AUTH_SOCK' in os.environ:
self.agent = sshagent.Agent2()
sshkeys = self.agent.keys
# strip out DSA keys since they are unusable for GO
self.agent_keys = { name : sshkeys[name] for name in sshkeys.keys() if sshkeys[name].get_name() == 'ssh-rsa' }
self.agent_keyname = self.agent_keys.keys()[0] if len(self.agent_keys.keys()) == 1 else None
self.client_secret = self.config['client_secret']
default_key = os.path.expanduser( '~/.ssh/id_rsa' )
self.user_key_file = self.config.get('user_private_key_file', default_key if os.path.exists(default_key) else None)
cache_class = cache_config['class']
self.verify_ssl = self.config.get('verify_ssl', True)
mod_name = '.'.join(cache_class.split('.')[:-1])
mod = __import__(mod_name)
for child_mod_name in mod_name.split('.')[1:]:
mod = getattr(mod, child_mod_name)
cache_impl_class = getattr(mod, cache_class.split('.')[-1])
self.cache = cache_impl_class(*cache_config.get('args', []))
self.cache = token_utils.LoggingCacheWrapper(self.cache)
def validate_token(self, token):
"""
Validate that a token was issued for the specified user and client by
the server in the SigningSubject.
:param token: An authentication token provided by the client.
:return: username, client id and the server that issued the token.
:raises ValueError: If the signature is invalid, the token is expired or
            the public key could not be retrieved.
"""
return token_utils.validate_token(token, self.cache, self.verify_ssl)
def generate_request_url(self, username=None):
"""
In order for the user to authorize the client to access his data, he
must first go to the custom url provided here.
:param username: (Optional) This will pre-populate the user's info in the form
:return: A custom authorization url
"""
query_params = {
"response_type": "code",
"client_id": self.client,
}
if username is not None:
query_params['username'] = username
parts = ('https', self.server, '/goauth/authorize',
urllib.urlencode(query_params), None)
return urlparse.urlunsplit(parts)
def get_access_token_from_code(self, code):
"""
After receiving a code from the end user, this method will acquire an
access token from the server which can be used for subsequent requests.
:param code: The code which the user received after authenticating with the server and authorizing the client.
:return: Tuple containing (access_token, refresh_token, expire_time)
"""
url_parts = ('https', self.server, '/goauth/token', None, None)
result = token_utils.request_access_token(self.client,
self.client_secret, code, urlparse.urlunsplit(url_parts))
return (
result.access_token,
result.refresh_token,
time.mktime(datetime.utcnow().timetuple()) + result.expires_in
)
def rsa_get_request_token(self, username, client_id, password=None):
query_params = {
"response_type": "code",
"client_id": client_id
}
query_params = urllib.urlencode(query_params)
path = '/goauth/authorize'
method = 'GET'
headers = sign_with_rsa(self.user_key_file,
path,
method,
username,
query=query_params,
password=password)
url_parts = ('https', self.server, '/goauth/authorize', query_params, None)
url = urlparse.urlunsplit(url_parts)
response = requests.get(url, headers=headers, verify=self.verify_ssl)
return response.json()
def sshagent_get_request_token(self, username, client_id, keyname):
query_params = {
"response_type": "code",
"client_id": client_id
}
query_params = urllib.urlencode(query_params)
path = '/goauth/authorize'
method = 'GET'
headers = sign_with_sshagent(self.agent_keys[keyname],
path,
method,
username,
query=query_params)
url_parts = ('https', self.server, '/goauth/authorize', query_params, None)
url = urlparse.urlunsplit(url_parts)
response = requests.get(url, headers=headers, verify=self.verify_ssl)
return response.json()
def request_client_credential(self, client_id, password=None):
"""
This is designed to support section 4.4 of the OAuth 2.0 spec:
"The client can request an access token using only its client
credentials (or other supported means of authentication) when the
client is requesting access to the protected resources under its
control"
        If a user_key_file is defined, use the client_id and password for
        RSA authentication; if we don't have an RSA keyfile, use the
        client_id and password for BASIC auth.
"""
body = 'grant_type=client_credentials'
path = '/goauth/token'
method = 'POST'
url_parts = ('https', self.server, path, None, None)
url = urlparse.urlunsplit(url_parts)
if self.user_key_file:
headers = sign_with_rsa(self.user_key_file,
path,
method,
client_id,
body=body,
password=password)
response = requests.post(url, data={'grant_type': 'client_credentials'}, headers=headers, verify=self.verify_ssl)
elif password:
response = requests.post(url, data={'grant_type': 'client_credentials','client_id' : client_id }, auth = (client_id, password), verify=self.verify_ssl)
else:
raise Exception( "Password and legitimate user_key_file required")
return response.json()
def request_client_credential_sshagent(self, client_id=None, agent_keyname = None):
"""
This is designed to support section 4.4 of the OAuth 2.0 spec:
"The client can request an access token using only its client
credentials (or other supported means of authentication) when the
client is requesting access to the protected resources under its
control"
"""
body = 'grant_type=client_credentials'
path = '/goauth/token'
method = 'POST'
url_parts = ('https', self.server, path, None, None)
url = urlparse.urlunsplit(url_parts)
        # Handle options based on explicit parameters - ignore implicit options
        # based on instance attributes
if client_id is None:
client_id = self.client
if agent_keyname is None:
agent_keyname = self.agent_keyname
if agent_keyname and client_id:
headers = sign_with_sshagent(self.agent_keys[agent_keyname],
path,
method,
client_id,
body=body)
response = requests.post(url, data={'grant_type': 'client_credentials'}, headers=headers, verify=self.verify_ssl)
else:
raise Exception('Requires client_id and ssh agent_keyname as parameters or as part of initial config to authenticate credential request')
return response.json()
def get_user_using_access_token(self, access_token):
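        # Access tokens are pipe-delimited key=value fields; the 'un' field carries the username.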
access_token_dict = dict(field.split('=') for field in access_token.split('|'))
user_path = '/users/' + access_token_dict['un']
url_parts = ('https', self.server, user_path, None, None)
url = urlparse.urlunsplit(url_parts)
headers = {
"X-Globus-Goauthtoken": str(access_token),
"Content-Type": "application/json"
}
response = requests.get(url, headers=headers, verify=self.verify_ssl)
assert(response.status_code == requests.codes.ok)
return response.json()
|
{
"content_hash": "1d133fc58c7baee686233d9ddfccffcd",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 163,
"avg_line_length": 40.978813559322035,
"alnum_prop": 0.5889773549788027,
"repo_name": "kbase/auth",
"id": "171d62f8b56cded2489c440937d6196b042c6f25",
"size": "9671",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python-libs/biokbase/nexus/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11455"
},
{
"name": "Java",
"bytes": "89987"
},
{
"name": "Makefile",
"bytes": "6337"
},
{
"name": "Perl",
"bytes": "46731"
},
{
"name": "Python",
"bytes": "84318"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
}
|
from rlib.unroll import unrolling_iterable
class AbstractNode(object):
pass
def _get_all_child_fields(clazz):
cls = clazz
field_names = []
while cls is not AbstractNode:
if hasattr(cls, "_child_nodes_"):
field_names += cls._child_nodes_ # pylint: disable=protected-access
cls = cls.__base__
return set(field_names)
def _generate_replace_method(cls):
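    # Wrap the child-field names in an unrolling_iterable so the loop in the generated method can be unrolled during RPython translation.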
child_fields = unrolling_iterable(_get_all_child_fields(cls))
def _replace_child_with(parent_node, old_child, new_child):
was_replaced = False # pylint: disable=unused-variable
for child_slot in child_fields:
if child_slot.endswith("[*]"):
slot_name = child_slot[:-3]
nodes = getattr(parent_node, slot_name)
if nodes and old_child in nodes:
# update old list, because iterators might have a copy of it
for i, n in enumerate(nodes):
if n is old_child:
nodes[i] = new_child
setattr(
parent_node, slot_name, nodes[:]
) # TODO: figure out whether we need the copy of the list here
was_replaced = True
else:
current = getattr(parent_node, child_slot)
if current is old_child:
setattr(parent_node, child_slot, new_child)
was_replaced = True
# TODO: method recursion is a problem causing specialization more than
# once of a node if the containing method is already on the stack
# if not was_replaced:
# raise ValueError("%s was not a direct child node of %s" % (
# old_child, parent_node))
return new_child
cls.replace_child_with = _replace_child_with
def _generate_adapt_after_inlining(cls):
child_fields = unrolling_iterable(_get_all_child_fields(cls))
def _adapt_after_inlining(node, mgenc):
for child_slot in child_fields:
if child_slot.endswith("[*]"):
slot_name = child_slot[:-3]
nodes = getattr(node, slot_name)
if nodes:
for n in nodes:
n.adapt_after_inlining(mgenc)
else:
current = getattr(node, child_slot)
current.adapt_after_inlining(mgenc)
node.handle_inlining(mgenc)
cls.adapt_after_inlining = _adapt_after_inlining
def _generate_adapt_after_outer_inlined(cls):
child_fields = unrolling_iterable(_get_all_child_fields(cls))
def _adapt_after_outer_inlined(node, removed_ctx_level, mgenc_with_inlined):
for child_slot in child_fields:
if child_slot.endswith("[*]"):
slot_name = child_slot[:-3]
nodes = getattr(node, slot_name)
if nodes:
for n in nodes:
n.adapt_after_outer_inlined(
removed_ctx_level, mgenc_with_inlined
)
else:
current = getattr(node, child_slot)
current.adapt_after_outer_inlined(removed_ctx_level, mgenc_with_inlined)
node.handle_outer_inlined(removed_ctx_level, mgenc_with_inlined)
cls.adapt_after_outer_inlined = _adapt_after_outer_inlined
class NodeInitializeMetaClass(type):
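    # Attaches the generated replace/adapt helper methods to every node subclass at class-creation time.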
def __init__(cls, name, bases, dic):
type.__init__(cls, name, bases, dic)
cls._initialize_node_class() # pylint: disable=no-value-for-parameter
def _initialize_node_class(cls):
_generate_replace_method(cls)
_generate_adapt_after_inlining(cls)
_generate_adapt_after_outer_inlined(cls)
|
{
"content_hash": "27c1099784f7bb9619e0d9ded364a52d",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 88,
"avg_line_length": 37.76,
"alnum_prop": 0.5651483050847458,
"repo_name": "SOM-st/PySOM",
"id": "0ef3e080be238ab2eedc6ad77af425a582bec339",
"size": "3776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/rtruffle/abstract_node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1342"
},
{
"name": "Python",
"bytes": "538515"
},
{
"name": "Shell",
"bytes": "407"
}
],
"symlink_target": ""
}
|
"""
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import argparse
import logging
import sys
import vsc
from bitstring import BitArray
from pygen_src.riscv_instr_pkg import mtvec_mode_t, f_rounding_mode_t, \
riscv_reg_t, privileged_mode_t, \
riscv_instr_group_t
from pygen_src.target.rv32i import riscv_core_setting as rcs
@vsc.randobj
class riscv_instr_gen_config:
def __init__(self, argv):
# TODO Support for command line argument
self.main_program_instr_cnt = 100 # count of main_prog
self.sub_program_instr_cnt = [] # count of sub_prog
self.debug_program_instr_cnt = 0 # count of debug_rom
self.debug_sub_program_instr_cnt = [] # count of debug sub_progrms
# Commenting out for now
# self.data_page_pattern = list(
# map(lambda dta_pg: dta_pg.name, data_pattern_t))
# dicts for exception_cause_t & interrupt_cause_t Enum classes
self.m_mode_exception_delegation = {}
self.s_mode_exception_delegation = {}
self.m_mode_interrupt_delegation = {}
self.s_mode_interrupt_delegation = {}
# init_privileged_mode default to MACHINE_MODE
self.init_privileged_mode = privileged_mode_t.MACHINE_MODE
self.mstatus = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.mie = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.sstatus = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.sie = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.ustatus = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.uie = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.mstatus_mprv = 0
self.mstatus_mxr = 0
self.mstatus_sum = 0
self.mstatus_tvm = 0
self.mstatus_fs = BitArray(bin(0b0), length=2)
self.mstatus_vs = BitArray(bin(0b0), length=2)
self.mtvec_mode = vsc.rand_enum_t(mtvec_mode_t)
self.tvec_alignment = argv.tvec_alignment
self.fcsr_rm = list(map(lambda csr_rm: csr_rm.name, f_rounding_mode_t))
self.enable_sfence = 0
self.gpr = []
# Helper fields for gpr
self.gpr0 = vsc.rand_enum_t(riscv_reg_t)
self.gpr1 = vsc.rand_enum_t(riscv_reg_t)
self.gpr2 = vsc.rand_enum_t(riscv_reg_t)
self.gpr3 = vsc.rand_enum_t(riscv_reg_t)
self.scratch_reg = vsc.rand_enum_t(riscv_reg_t)
self.pmp_reg = vsc.rand_enum_t(riscv_reg_t)
self.sp = vsc.rand_enum_t(riscv_reg_t)
self.tp = vsc.rand_enum_t(riscv_reg_t)
self.ra = vsc.rand_enum_t(riscv_reg_t)
self.check_misa_init_val = 0
self.check_xstatus = 1
self.virtual_addr_translation_on = 0
# Commenting out for now
# vector_cfg = riscv_vector_cfg # TODO
# pmp_cfg = riscv_pmp_cfg # TODO
# self.mem_region = [] # TODO
# Self.amo_region = [] # TODO
self.stack_len = 5000
# Self.s_mem_region = [] # TODO
self.kernel_stack_len = 4000
self.kernel_program_instr_cnt = 400
# list of main implemented CSRs
self.invalid_priv_mode_csrs = []
self.num_of_sub_program = argv.num_of_sub_program
self.instr_cnt = argv.instr_cnt
self.num_of_tests = argv.num_of_tests
self.no_data_page = argv.no_data_page
self.no_branch_jump = argv.no_branch_jump
self.no_load_store = argv.no_load_store
self.no_csr_instr = argv.no_csr_instr
self.no_ebreak = argv.no_ebreak
self.no_dret = argv.no_dret
self.no_fence = argv.no_fence
self.no_wfi = argv.no_wfi
self.enable_unaligned_load_store = argv.enable_unaligned_load_store
self.illegal_instr_ratio = argv.illegal_instr_ratio
self.hint_instr_ratio = argv.hint_instr_ratio
self.num_of_harts = argv.num_of_harts
self.fix_sp = argv.fix_sp
self.use_push_data_section = argv.use_push_data_section
self.boot_mode_opts = argv.boot_mode_opts
if(self.boot_mode_opts):
logging.info("Got boot mode option - %0s", self.boot_mode_opts)
if(self.boot_mode_opts == "m"):
self.init_privileged_mode = privileged_mode_t.MACHINE_MODE.name
elif(self.boot_mode_opts == "s"):
self.init_privileged_mode = privileged_mode_t.SUPERVISOR_MODE.name
elif(self.boot_mode_opts == "u"):
self.init_privileged_mode = privileged_mode_t.USER_MODE.name
else:
logging.error("Illegal boot mode option - %0s", self.boot_mode_opts)
self.enable_page_table_exception = argv.enable_page_table_exception
self.no_directed_instr = argv.no_directed_instr
self.asm_test_suffix = argv.asm_test_suffix
self.enable_interrupt = argv.enable_interrupt
self.enable_nested_interrupt = argv.enable_nested_interrupt
self.enable_timer_irq = argv.enable_timer_irq
self.bare_program_mode = argv.bare_program_mode
self.enable_illegal_csr_instruction = argv.enable_illegal_csr_instruction
self.enable_access_invalid_csr_level = argv.enable_access_invalid_csr_level
self.enable_misaligned_instr = argv.enable_misaligned_instr
self.enable_dummy_csr_write = argv.enable_dummy_csr_write
self.randomize_csr = argv.randomize_csr
self.allow_sfence_exception = argv.allow_sfence_exception
self.no_delegation = argv.no_delegation
self.force_m_delegation = argv.force_m_delegation
self.force_s_delegation = argv.force_s_delegation
self.support_supervisor_mode = 0
self.disable_compressed_instr = argv.disable_compressed_instr
self.require_signature_addr = argv.require_signature_addr
if(self.require_signature_addr):
self.signature_addr = int(argv.signature_addr, 16)
else:
self.signature_addr = 0xdeadbeef
self.gen_debug_section = argv.gen_debug_section
self.enable_ebreak_in_debug_rom = argv.enable_ebreak_in_debug_rom
self.set_dcsr_ebreak = argv.set_dcsr_ebreak
self.num_debug_sub_program = argv.num_debug_sub_program
self.enable_debug_single_step = argv.enable_debug_single_step
self.single_step_iterations = 0
self.set_mstatus_tw = argv.set_mstatus_tw
self.set_mstatus_mprv = argv.set_mstatus_mprv
self.min_stack_len_per_program = 10 * (rcs.XLEN / 8)
self.max_stack_len_per_program = 16 * (rcs.XLEN / 8)
self.max_branch_step = 20
self.max_directed_instr_stream_seq = 20
self.reserved_regs = vsc.list_t(vsc.enum_t(riscv_reg_t))
self.enable_floating_point = argv.enable_floating_point
self.enable_vector_extension = argv.enable_vector_extension
self.enable_b_extension = argv.enable_b_extension
self.enable_bitmanip_groups = argv.enable_bitmanip_groups
self.dist_control_mode = 0
self.category_dist = {}
self.march_isa = argv.march_isa
if(len(self.march_isa) != 0):
rcs.supported_isa = self.march_isa
if(rcs.supported_isa != 'RV32C'):
self.disable_compressed_instr = 1
@vsc.constraint
def gpr_c(self):
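        # Keep the four helper GPRs distinct and away from reserved/special registers (sp, tp, scratch, pmp, zero, ra, gp).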
self.gpr0.not_inside(vsc.rangelist(self.sp, self.tp, self.scratch_reg, self.pmp_reg,
riscv_reg_t.ZERO, riscv_reg_t.RA, riscv_reg_t.GP))
self.gpr1.not_inside(vsc.rangelist(self.sp, self.tp, self.scratch_reg, self.pmp_reg,
riscv_reg_t.ZERO, riscv_reg_t.RA, riscv_reg_t.GP))
self.gpr2.not_inside(vsc.rangelist(self.sp, self.tp, self.scratch_reg, self.pmp_reg,
riscv_reg_t.ZERO, riscv_reg_t.RA, riscv_reg_t.GP))
self.gpr3.not_inside(vsc.rangelist(self.sp, self.tp, self.scratch_reg, self.pmp_reg,
riscv_reg_t.ZERO, riscv_reg_t.RA, riscv_reg_t.GP))
vsc.unique(self.gpr0, self.gpr1, self.gpr2, self.gpr3)
def check_setting(self):
support_64b = 0
support_128b = 0
# list of satp_mode_t from riscv_core_setting.py
stp_md_lst = rcs.SATP_MODE
# list of riscv_instr_group_t with names of riscv_instr_name_t in it.
supported_isa_lst = list(map(lambda z: z.name, riscv_instr_group_t))
# check the valid isa support
for x in rcs.supported_isa:
            if x in (supported_isa_lst[1], supported_isa_lst[3], supported_isa_lst[5],
                     supported_isa_lst[8], supported_isa_lst[11], supported_isa_lst[13],
                     supported_isa_lst[19]):
support_64b = 1
logging.info("support_64b=%d" % support_64b)
logging.debug("Supported ISA=%s" % x)
            elif x in (supported_isa_lst[14], supported_isa_lst[15]):
support_128b = 1
logging.info("support_128b=%d" % support_128b)
logging.debug("Supported ISA=%s" % x)
if (support_128b == 1) and (rcs.XLEN != 128):
logging.critical("XLEN should be set to 128 based on \
riscv_core_setting.supported_isa setting")
logging.info("XLEN Value=%d" % rcs.XLEN)
sys.exit("XLEN is not equal to 128, set it Accordingly!")
if (support_128b == 0) and (support_64b == 1) and (rcs.XLEN != 64):
logging.critical("XLEN should be set to 64 based on \
riscv_core_setting.supported_isa setting")
logging.info("XLEN Value=%d" % rcs.XLEN)
sys.exit("XLEN is not equal to 64, set it Accordingly!")
if not(support_128b or support_64b) and (rcs.XLEN != 32):
logging.critical("XLEN should be set to 32 based on \
riscv_core_setting.supported_isa setting")
logging.info("XLEN Value=%d" % rcs.XLEN)
sys.exit("XLEN is not equal to 32, set it Accordingly!")
if not(support_128b or support_64b) and not(('SV32' in stp_md_lst) or
('BARE' in stp_md_lst)):
logging.critical("SATP mode is not supported for RV32G ISA")
logging.info(stp_md_lst)
sys.exit("Supported SATP mode is not provided")
# TODO
def setup_instr_distribution(self):
pass
def init_delegation(self):
for i in self.mode_exp_lst:
if i == self.mode_exp_lst[0]:
continue
self.m_mode_exception_delegation[i] = 0
self.s_mode_exception_delegation[i] = 0
for j in self.mode_intrpt_lst:
if j == self.mode_intrpt_lst[0]:
continue
self.m_mode_interrupt_delegation[j] = 0
self.s_mode_interrupt_delegation[j] = 0
def pre_randomize(self):
# Clearing the contents of self.gpr after each randomization.
# As it is being extended in post_randomize function.
self.gpr.clear()
for x in rcs.supported_privileged_mode:
if(x == "SUPERVISOR_MODE"):
self.support_supervisor_mode = 1
def get_non_reserved_gpr(self):
pass
def post_randomize(self):
# Temporary fix for gpr_c constraint.
self.gpr.extend((self.gpr0, self.gpr1, self.gpr2, self.gpr3))
self.reserved_regs.append(self.tp)
self.reserved_regs.append(self.sp)
self.reserved_regs.append(self.scratch_reg)
self.min_stack_len_per_program = 2 * (rcs.XLEN / 8)
logging.info("min_stack_len_per_program value = %d"
% self.min_stack_len_per_program)
self.check_setting() # to check the setting is legal
# TODO, Need to change the logic once the constraints are up.
if "USER_MODE" == self.init_privileged_mode:
logging.info("mode=%s" % "USER_MODE")
self.no_wfi = 1
def get_invalid_priv_lvl_csr(self):
invalid_lvl = []
# Debug CSRs are inaccessible from all but Debug Mode
# and we cannot boot into Debug Mode.
invalid_lvl.append('D')
# TODO Need to change the logic once the constraints are up.
for mode in self.init_privileged_mode:
if mode == "MACHINE_MODE":
continue
if mode == 'SUPERVISOR_MODE':
invalid_lvl.append('M')
logging.info("supr_mode---")
logging.debug(invalid_lvl)
elif mode == 'USER_MODE':
invalid_lvl.append('S')
invalid_lvl.append('M')
logging.info("usr_mode---")
logging.debug(invalid_lvl)
else:
logging.critical("Unsupported initialization privilege mode")
# implemented_csr from riscv_core_setting.py
for x in rcs.implemented_csr:
if x[0] in invalid_lvl:
self.invalid_priv_mode_csrs.append(x)
    # This function calls all of the above-defined functions which, per the
    # SV logic, should be called in the init function. It has to be called
    # after every instance of the gen_config handle is created.
def func_call_init(self):
self.init_delegation()
# self.setup_instr_distribution() # TODO
self.get_invalid_priv_lvl_csr()
def parse_args():
parse = argparse.ArgumentParser()
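    # Command-line options mirroring the riscv-dv SV plusargs.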
parse.add_argument('--num_of_tests', help = 'num_of_tests', type = int, default = 1)
parse.add_argument('--enable_page_table_exception',
help = 'enable_page_table_exception', type = int, default = 0)
parse.add_argument('--enable_interrupt', help = 'enable_interrupt',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_nested_interrupt', help = 'enable_nested_interrupt',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_timer_irq', help = 'enable_timer_irq',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--num_of_sub_program', help = 'num_of_sub_program', type = int, default = 5)
parse.add_argument('--instr_cnt', help = 'instr_cnt', type = int, default = 200)
parse.add_argument('--tvec_alignment', help = 'tvec_alignment', type = int, default = 2)
parse.add_argument('--no_ebreak', help = 'no_ebreak', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_dret', help = 'no_dret', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_wfi', help = 'no_wfi', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_branch_jump', help = 'no_branch_jump',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_load_store', help = 'no_load_store',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_csr_instr', help = 'no_csr_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--fix_sp', help = 'fix_sp', choices = [0, 1], type = int, default = 0)
parse.add_argument('--use_push_data_section', help = 'use_push_data_section',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_illegal_csr_instruction',
help = 'enable_illegal_csr_instruction', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--enable_access_invalid_csr_level',
help = 'enable_access_invalid_csr_level', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--enable_misaligned_instr', help = 'enable_misaligned_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_dummy_csr_write', help = 'enable_dummy_csr_write',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--allow_sfence_exception', help = 'allow_sfence_exception',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_data_page', help = 'no_data_page',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_directed_instr', help = 'no_directed_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_fence', help = 'no_fence', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_delegation', help = 'no_delegation',
choices = [0, 1], type = int, default = 1)
parse.add_argument('--illegal_instr_ratio',
help = 'illegal_instr_ratio', type = int, default = 0)
parse.add_argument('--hint_instr_ratio', help = 'hint_instr_ratio', type = int, default = 0)
parse.add_argument('--num_of_harts', help = 'num_of_harts', type = int, default = rcs.NUM_HARTS)
parse.add_argument('--enable_unaligned_load_store',
help = 'enable_unaligned_load_store', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--force_m_delegation', help = 'force_m_delegation',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--force_s_delegation', help = 'force_s_delegation',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--require_signature_addr', help = 'require_signature_addr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--signature_addr', help = 'signature_addr', default = 0xdeadbeef)
parse.add_argument('--disable_compressed_instr',
help = 'disable_compressed_instr', choices = [0, 1], type = int, default = 0)
parse.add_argument('--randomize_csr', help = 'randomize_csr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--gen_debug_section', help = 'gen_debug_section',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--bare_program_mode', help = 'bare_program_mode',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--num_debug_sub_program',
help = 'num_debug_sub_program', type = int, default = 0)
parse.add_argument('--enable_ebreak_in_debug_rom',
help = 'enable_ebreak_in_debug_rom', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--set_dcsr_ebreak', help = 'set_dcsr_ebreak',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_debug_single_step',
help = 'enable_debug_single_step', choices = [0, 1], type = int, default = 0)
parse.add_argument('--set_mstatus_tw', help = 'set_mstatus_tw',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--set_mstatus_mprv', help = 'set_mstatus_mprv',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_floating_point', help = 'enable_floating_point',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_vector_extension', help = 'enable_vector_extension',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_b_extension', help = 'enable_b_extension',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_bitmanip_groups', help = 'enable_bitmanip_groups',
default = ['ZBB', 'ZBS', 'ZBP', 'ZBE', 'ZBF',
'ZBC', 'ZBR', 'ZBM', 'ZBT', 'ZB_TMP'], nargs = '*')
parse.add_argument('--boot_mode_opts', help = 'boot_mode_opts', default = "")
parse.add_argument('--asm_test_suffix', help = 'asm_test_suffix', default = "")
parse.add_argument('--march_isa', help = 'march_isa', default = [],
choices = [i.name for i in riscv_instr_group_t], nargs = '*')
parse.add_argument('--directed_instr_0', help = 'directed_instr_0',
default = "riscv_int_numeric_corner_stream,4")
parse.add_argument('--stream_name_opts', help = 'stream_name_0',
default = "riscv_load_store_rand_instr_stream")
parse.add_argument('--stream_freq_opts', help = 'stream_freq_0', default = 4)
# TODO
'''
if ($value$plusargs("tvec_alignment=%0d", tvec_alignment)) begin
tvec_alignment.rand_mode(0);
end
vector_cfg = riscv_vector_cfg::type_id::create("vector_cfg");
pmp_cfg = riscv_pmp_cfg::type_id::create("pmp_cfg");
pmp_cfg.rand_mode(pmp_cfg.pmp_randomize);
pmp_cfg.initialize(require_signature_addr);
setup_instr_distribution();
get_invalid_priv_lvl_csr();
'''
args = parse.parse_args()
return args
args = parse_args()
args_dict = vars(args)
cfg = riscv_instr_gen_config(args)
|
{
"content_hash": "84d763972d79a5abfc2ac8fe29a1e2e5",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 100,
"avg_line_length": 49.006864988558355,
"alnum_prop": 0.587551363466567,
"repo_name": "chipsalliance/Surelog",
"id": "26f49aa23831585c9005c7607d6fc180b259c910",
"size": "21416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/tests/Ibex/vendor/google_riscv-dv/pygen/pygen_src/riscv_instr_gen_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "155641"
},
{
"name": "C",
"bytes": "3114"
},
{
"name": "C++",
"bytes": "2808920"
},
{
"name": "CMake",
"bytes": "41750"
},
{
"name": "Forth",
"bytes": "81"
},
{
"name": "Makefile",
"bytes": "4820"
},
{
"name": "Nix",
"bytes": "784"
},
{
"name": "Python",
"bytes": "110922"
},
{
"name": "SWIG",
"bytes": "351"
},
{
"name": "Shell",
"bytes": "1349"
},
{
"name": "Slash",
"bytes": "37570"
},
{
"name": "SystemVerilog",
"bytes": "872314"
},
{
"name": "Tcl",
"bytes": "68865"
},
{
"name": "V",
"bytes": "1092"
},
{
"name": "Verilog",
"bytes": "495242"
}
],
"symlink_target": ""
}
|
"""
Just for some quick testing.
"""
import requests
'''
#res = requests.post('http://127.0.0.1:8089/swarm', {"locust_count": 10, "hatch_rate": 2})
#print(res.content)
def get_results():
res = requests.get('http://127.0.0.1:8089/stats/requests/csv').content.decode('utf-8')
lines = res.split('\n')[1:]
data = []
for l in lines:
ll = l.split(',')
local_data = []
for element in ll:
local_data.append(element)
data.append(local_data)
return data
'''
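# Column order assumed to match Locust's /stats/requests/csv export.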
dictionary_keys = ['Type', 'Name', '# Requests', '# Fails', '# Median', 'Average', 'Min', 'Max', 'Content Size', '# Requests / Second']
def nationals_status_data():
res = requests.get('http://127.0.0.1:8089/stats/requests/csv').content.decode('utf-8')
lines = res.split('\n')[1:]
print(lines)
data = []
for l in lines:
ll = l.split(',')
local_data = {}
dict_key_index = 0
for element in ll:
#print(element + '\t' + str(dict_key_index))
local_data[dictionary_keys[dict_key_index]] = element
dict_key_index += 1
data.append(local_data)
return data
#return JsonResponse({'data': data})
def is_locust_running():
"""This function determines if Locusts are swarming or not.
:return: Boolean.
"""
try:
requests.get('http://localhost:8089/')
return True
except Exception:
return False
print(is_locust_running())
print(nationals_status_data())
|
{
"content_hash": "dc3b0f62cd8eac01f82645ff6e48f055",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 135,
"avg_line_length": 22.483333333333334,
"alnum_prop": 0.6434395848776872,
"repo_name": "utarsuno/urbtek",
"id": "96892ebe534adef687f25a8f0956f926c0bcce3e",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "nexus_django/nexus_front_end/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "390447"
},
{
"name": "HTML",
"bytes": "80203"
},
{
"name": "JavaScript",
"bytes": "298511"
},
{
"name": "Python",
"bytes": "880231"
},
{
"name": "Shell",
"bytes": "22758"
}
],
"symlink_target": ""
}
|
__VERSION__="ete2-2.2rev1056"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'show_newick.ui'
#
# Created: Tue Jan 10 15:56:56 2012
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Newick(object):
def setupUi(self, Newick):
Newick.setObjectName("Newick")
Newick.resize(594, 397)
self.nwFormat = QtGui.QComboBox(Newick)
self.nwFormat.setGeometry(QtCore.QRect(200, 20, 51, 23))
self.nwFormat.setObjectName("nwFormat")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.nwFormat.addItem("")
self.label = QtGui.QLabel(Newick)
self.label.setGeometry(QtCore.QRect(100, 20, 91, 20))
self.label.setObjectName("label")
self.verticalLayoutWidget = QtGui.QWidget(Newick)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(300, 10, 258, 361))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.newickBox = QtGui.QTextEdit(self.verticalLayoutWidget)
self.newickBox.setObjectName("newickBox")
self.verticalLayout.addWidget(self.newickBox)
self.attrName = QtGui.QLineEdit(Newick)
self.attrName.setGeometry(QtCore.QRect(20, 80, 113, 25))
self.attrName.setObjectName("attrName")
self.pushButton = QtGui.QPushButton(Newick)
self.pushButton.setGeometry(QtCore.QRect(140, 80, 51, 29))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtGui.QPushButton(Newick)
self.pushButton_2.setGeometry(QtCore.QRect(200, 80, 51, 29))
self.pushButton_2.setObjectName("pushButton_2")
self.features_list = QtGui.QListWidget(Newick)
self.features_list.setGeometry(QtCore.QRect(20, 120, 231, 251))
self.features_list.setObjectName("features_list")
self.label_3 = QtGui.QLabel(Newick)
self.label_3.setGeometry(QtCore.QRect(60, 60, 191, 20))
self.label_3.setObjectName("label_3")
self.useAllFeatures = QtGui.QCheckBox(Newick)
self.useAllFeatures.setGeometry(QtCore.QRect(20, 370, 221, 24))
self.useAllFeatures.setObjectName("useAllFeatures")
self.retranslateUi(Newick)
QtCore.QObject.connect(self.nwFormat, QtCore.SIGNAL("activated(QString)"), Newick.update_newick)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL("released()"), Newick.add_feature)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL("released()"), Newick.del_feature)
QtCore.QObject.connect(self.useAllFeatures, QtCore.SIGNAL("released()"), Newick.set_custom_features)
QtCore.QMetaObject.connectSlotsByName(Newick)
def retranslateUi(self, Newick):
Newick.setWindowTitle(QtGui.QApplication.translate("Newick", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(0, QtGui.QApplication.translate("Newick", "0", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(1, QtGui.QApplication.translate("Newick", "1", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(2, QtGui.QApplication.translate("Newick", "2", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(3, QtGui.QApplication.translate("Newick", "3", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(4, QtGui.QApplication.translate("Newick", "4", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(5, QtGui.QApplication.translate("Newick", "5", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(6, QtGui.QApplication.translate("Newick", "6", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(7, QtGui.QApplication.translate("Newick", "7", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(8, QtGui.QApplication.translate("Newick", "8", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(9, QtGui.QApplication.translate("Newick", "9", None, QtGui.QApplication.UnicodeUTF8))
self.nwFormat.setItemText(10, QtGui.QApplication.translate("Newick", "100", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Newick", "Newick format", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("Newick", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("Newick", "Del", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Newick", "Node\'s attribute (NHX format)", None, QtGui.QApplication.UnicodeUTF8))
self.useAllFeatures.setText(QtGui.QApplication.translate("Newick", "Include all attributes in nodes", None, QtGui.QApplication.UnicodeUTF8))
|
{
"content_hash": "15f6716f8f3d1cccba3fa79531557be9",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 148,
"avg_line_length": 61.44186046511628,
"alnum_prop": 0.7072293716881151,
"repo_name": "csc8630Spring2014/Clusterizer",
"id": "1b0aa042a5d563405ac7eb2fac228932fe7dbd16",
"size": "6745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ete2/treeview/_show_newick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "326788"
},
{
"name": "Python",
"bytes": "2174874"
},
{
"name": "TeX",
"bytes": "9080"
}
],
"symlink_target": ""
}
|
"""
.. py:currentmodule:: FileFormat.Results.PhirhozEmittedCharacteristic
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Read PhirhozEmittedCharacteristic file from MCXRay.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Standard library modules.
import os.path
import csv
# Third party modules.
# Local modules.
# Project modules
import pymcxray.FileFormat.Results.BaseResults as BaseResults
# Globals and constants variables.
FIELD_DEPTH_A = "Depth (A)"
HDF5_PHIRHOZ_EMITTED_CHARACTERISTIC = "PhirhozEmittedCharacteristic"
HDF5_DEPTH_nm = "Depth (nm)"
class PhirhozEmittedCharacteristic(BaseResults.BaseResults):
def __init__(self):
super(PhirhozEmittedCharacteristic, self).__init__()
self.fieldNames = []
self.depth_A = []
self.phirhozs = {}
def read(self, regionID=0):
suffix = "_PhirhozEmittedCharacteristic_Region%i.csv" % (regionID)
filename = self.basename + suffix
filepath = os.path.join(self.path, filename)
with open(filepath, 'r') as csvFile:
reader = csv.DictReader(csvFile)
fieldnames = reader.fieldnames
assert fieldnames[0] == FIELD_DEPTH_A
self.fieldNames = fieldnames
for row in reader:
self.depth_A.append(float(row[FIELD_DEPTH_A]))
for elementSymbolLine in self.fieldNames[1:]:
symbol, _line, xrayLine = elementSymbolLine.split()
self.phirhozs.setdefault((symbol.strip(), xrayLine.strip()), []).append(float(row[elementSymbolLine]))
def write_hdf5(self, hdf5_group):
hdf5_group = hdf5_group.require_group(HDF5_PHIRHOZ_EMITTED_CHARACTERISTIC)
hdf5_group.create_dataset(HDF5_DEPTH_nm, data=self.depth_nm)
for phirhoz_name in self.phirhozs:
symbol, xray_line = phirhoz_name
group = hdf5_group.require_group(symbol)
group.create_dataset(xray_line, data=self.phirhozs[phirhoz_name])
@property
def fieldNames(self):
return self._fieldNames
@fieldNames.setter
def fieldNames(self, fieldNames):
self._fieldNames = fieldNames
@property
def depth_A(self):
return self._depth_A
@depth_A.setter
def depth_A(self, depth_A):
self._depth_A = depth_A
@property
def depth_nm(self):
depths_nm = [depth_A*0.1 for depth_A in self._depth_A]
return depths_nm
@property
def phirhozs(self):
return self._phirhozs
@phirhozs.setter
def phirhozs(self, phirhozs):
self._phirhozs = phirhozs
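# Minimal usage sketch (illustrative only, not part of the original module).
# The folder, basename and the h5py import below are assumptions chosen for
# demonstration; write_hdf5() expects an open HDF5 file or group object.
if __name__ == '__main__':
    import h5py

    results = PhirhozEmittedCharacteristic()
    results.path = "simulations"        # hypothetical results folder
    results.basename = "MySimulation"   # hypothetical simulation base name
    results.read(regionID=0)

    with h5py.File("MySimulation.h5", "w") as h5_file:
        results.write_hdf5(h5_file)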
|
{
"content_hash": "beeafed2f3c83e9ca8801e5f2411a37a",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 122,
"avg_line_length": 31.369565217391305,
"alnum_prop": 0.6212751212751213,
"repo_name": "drix00/pymcxray",
"id": "f67af212e5a739af2b1f679c62549da96ee68e52",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymcxray/FileFormat/Results/PhirhozEmittedCharacteristic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "933760"
}
],
"symlink_target": ""
}
|
import re
from collections import OrderedDict
import prestodb
from query_designer.models import Query
import time
from threading import Thread
import sys
from django.db import connections, ProgrammingError
from django.conf import settings
from aggregator.models import Variable, Dimension
from .utils import GRANULARITY_MIN_PAGES, ResultEncoder
def process(self, dimension_values='', variable='', only_headers=False, commit=True, execute=False, raw_query=False):
dimension_values = preprocess_dimension_values(dimension_values)
selects = OrderedDict()
headers = []
header_sql_types = []
columns = []
groups = []
prejoin_groups = []
c_name, v_obj, data_table_names, groups = preprocess_document(columns, groups, prejoin_groups,
header_sql_types, headers, selects, self)
# import pdb
# pdb.set_trace()
prejoin_name = None
if len(self.document['from']) > 1:
prejoin_name = extract_prejoin_name(self.document['from'])
if is_query_for_average(self.document['from']) and prejoin_name is not None:
limit, query, subquery_cnt = build_prejoin_query(prejoin_name, columns,
prejoin_groups, self)
else:
limit, query, subquery_cnt = build_query(c_name, columns, groups, selects, self)
cursor = choose_db_cursor(v_obj)
if not only_headers:
# execute query & return results
t1 = time.time()
pages = {
'current': 1,
'total': 1
}
def _count():
cursor.execute(subquery_cnt)
self.count = cursor.fetchone()[0]
self.count = None
count_failed = False
t = Thread(target=_count, args=[])
t.start()
t.join(timeout=5)
if self.count is None:
count_failed = True
self.count = 10000000
if limit is not None:
pages['total'] = (self.count - 1) / limit + 1
# apply granularity
if self.count >= GRANULARITY_MIN_PAGES and (not count_failed):
try:
granularity = int(self.document.get('granularity', 0))
except ValueError:
granularity = 0
if granularity > 1:
query = """
SELECT %s FROM (
SELECT row_number() OVER () AS row_id, * FROM (%s) AS GQ
) AS GQ_C
WHERE (row_id %% %d = 0)
""" % (','.join([c[1] for c in columns]), query, granularity)
print "Executed query:"
print query
results = []
if execute:
cursor.execute(query)
all_rows = cursor.fetchall()
print "First rows"
print all_rows[:3]
print header_sql_types
results = all_rows
# include dimension values if requested
for d_name in dimension_values:
hdx, header = [hi for hi in enumerate(headers) if hi[1]['name'] == d_name][0]
d = Dimension.objects.get(pk=selects[d_name]['column'].split('_')[-1])
if not d.non_filterable:
header['values'] = d.values
# include variable ranges if requested
if variable:
vdx, v = [vi for vi in enumerate(headers) if vi[1]['name'] == variable][0]
v['distribution'] = Variable.objects.get(pk=selects[variable]['table'].split('_')[-1]).distribution
if not only_headers:
# monitor query duration
q_time = (time.time() - t1) * 1000
if not only_headers:
response = {
'results': results,
'headers': {
'runtime_msec': q_time,
'pages': pages,
}
}
else:
response = {'headers': {}}
response['headers']['columns'] = headers
if raw_query:
response['raw_query'] = query
# store headers
self.headers = ResultEncoder(mode='postgres').encode(headers)
if self.pk and commit:
self.save()
return response
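# Illustrative sketch of the Query.document structure consumed above
# (an assumption reconstructed from the fields accessed in this module;
# the ids, names and titles are made up for demonstration only).
_EXAMPLE_QUERY_DOCUMENT = {
    'from': [{
        'type': 42,                     # Variable primary key
        'select': [
            {'name': 'i0_time', 'type': 7, 'title': 'Time',
             'aggregate': '', 'groupBy': True, 'exclude': False},
            {'name': 'i0_value', 'type': 'VALUE', 'title': 'Temperature',
             'aggregate': 'AVG', 'groupBy': False, 'exclude': False},
        ],
    }],
    'filters': '',
    'orderings': [],        # e.g. [{'name': 'i0_time', 'type': 'ASC'}]
    'limit': 100,
    'granularity': 0,
}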
def preprocess_dimension_values(dimension_values):
if dimension_values:
dimension_values = dimension_values.split(',')
else:
dimension_values = []
return dimension_values
def preprocess_document(columns, groups, prejoin_groups, header_sql_types, headers, selects, self):
data_table_names = []
for _from in self.document['from']:
v_obj = Variable.objects.get(pk=_from['type'])
for s in _from['select']:
if s['type'] != 'VALUE':
human_column_name = Dimension.objects.get(pk=s['type']).title
print human_column_name
dimension = Dimension.objects.get(pk=s['type'])
column_name = dimension.data_column_name
column_unit = dimension.unit
column_axis = dimension.axis
column_step = dimension.step
sql_type = dimension.sql_type
else:
human_column_name = Variable.objects.get(pk=_from['type']).title
print human_column_name
if v_obj.dataset.stored_at == 'UBITECH_PRESTO':
column_name = v_obj.name
else:
column_name = 'value'
column_unit = v_obj.unit
column_axis = None
column_step = None
sql_type = 'double precision'
selects[s['name']] = {'column': column_name, 'table': v_obj.data_table_name}
_from['name'] = v_obj.data_table_name
# if 'joined' not in s:
c_name = '%s.%s' % (_from['name'], selects[s['name']]['column'])
if s.get('aggregate', '') != '':
c_name_with_agg = '%s(%s)' % (s.get('aggregate'), c_name)
else:
c_name_with_agg = c_name
if not s.get('exclude', False):
header_sql_types.append(sql_type)
headers.append({
'title': human_column_name,
'name': s['name'],
'unit': column_unit,
'step': column_step,
'quote': '' if sql_type.startswith('numeric') or sql_type.startswith('double') else "'",
'isVariable': s['type'] == 'VALUE',
'axis': column_axis,
})
# add fields to select clause
columns.append((c_name_with_agg, '%s' % s['name'], '%s' % s['title'], s.get('aggregate')))
# add fields to grouping
if s.get('groupBy', False):
if str(s.get('aggregate', '')).startswith('round') or str(s.get('aggregate', '')).startswith('date'):
groups.append(c_name_with_agg)
prejoin_groups.append('%s(%s)' % (s.get('aggregate'), selects[s['name']]['column']))
else:
groups.append(c_name)
prejoin_groups.append('%s' % (selects[s['name']]['column']))
data_table_names.append(v_obj.data_table_name)
groups = list(set(groups))
return c_name_with_agg, v_obj, data_table_names, groups
def is_query_for_average(from_list):
for _from in from_list:
for s in _from['select']:
if 'type' in s and s['type'] == 'VALUE' and s['aggregate'] != 'AVG':
return False
return True
def extract_prejoin_name(from_list):
variable_id1, variable_id2 = extract_variable_ids_from_doc(from_list)
dataset_id1 = extract_dataset_id_from_varible_ids(variable_id1)
dataset_id2 = extract_dataset_id_from_varible_ids(variable_id2)
return extract_prejoin_name_for_datasets(dataset_id1, dataset_id2)
def build_prejoin_query(prejoin_name, columns, prejoin_groups, self):
select_clause = build_prejoin_select_clause(columns)
from_clause = 'FROM ' + prejoin_name + '\n'
where_clause = build_prejoin_where_clause(self, prejoin_name)
group_clause = build_group_by_clause(prejoin_groups)
order_by_clause = build_order_by_clause(self)
limit, limit_clause = build_limit_clause(self)
subquery = 'SELECT * FROM (' + select_clause + from_clause + where_clause + group_clause + order_by_clause + ') AS SQ1\n'
q = subquery + limit_clause
subquery_cnt = 'SELECT COUNT(*) FROM (' + q + ') AS SQ1\n'
print 'Initial Query:'
print subquery
q, subquery, subquery_cnt = fix_round(q, subquery, subquery_cnt)
q = fix_date_trunc(q, subquery, subquery_cnt)
return limit, q, subquery_cnt
def build_query(c_name, columns, groups, selects, self):
select_clause = build_select_clause(columns)
from_clause = build_from_clause(selects)
all_joins_for_check, join_clause = build_join_clause(c_name, selects, self)
if not is_same_range_joins(all_joins_for_check):
raise ValueError("Datasets have columns in common but actually nothing to join (ranges with nothing in common)")
where_clause = build_where_clause(self)
group_clause = build_group_by_clause(groups)
order_by_clause = build_order_by_clause(self)
limit, limit_clause = build_limit_clause(self)
# organize into subquery
subquery = 'SELECT * FROM (' + select_clause + from_clause + join_clause + where_clause + group_clause + order_by_clause + ') AS SQ1\n'
q = subquery + limit_clause
subquery_cnt = 'SELECT COUNT(*) FROM (' + q + ') AS SQ1\n'
print 'Initial Query:'
print subquery
q, subquery, subquery_cnt = fix_round(q, subquery, subquery_cnt)
q = fix_date_trunc(q, subquery, subquery_cnt)
return limit, q, subquery_cnt
def choose_db_cursor(v_obj):
if v_obj.dataset.stored_at == 'UBITECH_PRESTO':
presto_credentials = settings.DATABASES['UBITECH_PRESTO']
conn = prestodb.dbapi.connect(
host=presto_credentials['HOST'],
port=presto_credentials['PORT'],
user=presto_credentials['USER'],
catalog=presto_credentials['CATALOG'],
schema=presto_credentials['SCHEMA'],
)
cursor = conn.cursor()
else:
cursor = connections['default'].cursor()
return cursor
def extract_prejoin_name_for_datasets(dataset_id1, dataset_id2):
query = """SELECT view_name
FROM aggregator_joinofdatasets
WHERE (dataset_first_id =%s AND dataset_second_id = %s) OR
(dataset_first_id = %s AND dataset_second_id = %s) """ \
% (dataset_id1, dataset_id2, dataset_id2, dataset_id1)
cursor = connections['default'].cursor()
try:
cursor.execute(query)
except ProgrammingError as e:
print "query execution failed due to: ", e
return None
res = cursor.fetchone()
if res is not None:
return res[0]
return None
def translate_percentiles_if_needed(select_clause):
if not 'PERCENTILE_CONT' in select_clause:
return select_clause
else:
return translate_percentiles(select_clause)
def translate_percentiles(select_clause):
select_table = select_clause.split('(')
func = select_table[0]
params = select_table[1]
percentage = int(func.split('_')[2])
percentage_float = percentage / 100.0
func = 'APPROX_PERCENTILE'
params_table = params.split(')')
return func + '(' + params_table[0] + ', ' + str(percentage_float) + ')'
def build_select_clause(columns):
select_clause = 'SELECT ' + ','.join(['%s AS %s' %
(translate_percentiles_if_needed(c[0]), c[1]) for c in columns]) + '\n'
return select_clause
def build_prejoin_select_clause(columns):
select_clause = 'SELECT ' + ','.join(['%s(%s) AS %s' % (c[3], c[2], c[1]) for c in columns]) + '\n'
return select_clause
def build_from_clause(selects):
from_clause = 'FROM %s \n' % \
(selects[selects.keys()[0]]['table'])
return from_clause
def build_join_clause(c_name, selects, self):
join_clause = ''
all_joins_for_check = []
tables_in_query = set()
tables_in_query.add(selects[self.document['from'][0]['select'][0]['name']]['table'])
for _from in self.document['from'][1:]:
joins = []
joins_for_check = []
for s in _from['select']:
if 'joined' in s:
if s['name'].endswith('location_latitude'):
js = [
(s['name'], s['joined'] + '_latitude'),
(s['name'].replace('_latitude', '_longitude'), s['joined'] + '_longitude'),
]
elif s['name'].endswith('location_longitude'):
js = []
else:
js = [(s['name'], s['joined'])]
for j in js:
joins_for_check.append(((_from['type'], selects[j[0]]['column']),
(self.document['from'][0]['type'], selects[j[1]]['column'])))
if s.get('aggregate', '') != '':
c_name = '%s(%s)' % (s.get('aggregate'), c_name)
joins.append('%s(%s.%s)=%s(%s.%s)' %
(s.get('aggregate'),
_from['name'],
selects[j[0]]['column'],
s.get('aggregate'),
self.document['from'][0]['name'],
selects[j[1]]['column']))
else:
joins.append('%s.%s=%s.%s' %
(_from['name'],
selects[j[0]]['column'],
self.document['from'][0]['name'],
selects[j[1]]['column']))
print "LOOK FOR JOIN"
print selects
print _from['name']
print tables_in_query
if selects[_from['select'][0]['name']]['table'] not in tables_in_query:
tables_in_query.add(selects[_from['select'][0]['name']]['table'])
print "WE HAVE JOIN"
join_clause += 'JOIN %s ON %s\n' % \
(selects[_from['select'][0]['name']]['table'],
' AND '.join(joins))
if join_clause.replace(" ", "").replace("\n", "").replace(",", "").endswith("ON"):
raise ValueError("No common columns for all the datasets. They cannot be combined.")
all_joins_for_check.append(joins_for_check)
print "Joins to check"
print all_joins_for_check
if not is_same_range_joins(all_joins_for_check):
print "Datasets have columns in common but actually nothing to join (ranges with nothing in common)"
raise ValueError("Datasets do not match both in space and time. They cannot be combined.")
print "Query Continues"
# where
return all_joins_for_check, join_clause
def build_where_clause(self):
filters = self.document.get('filters', '')
if not filters:
where_clause = ''
else:
where_clause = self.process_filters(filters, 'presto')
if where_clause:
where_clause = 'WHERE ' + where_clause + ' \n'
return where_clause
def process_prejoin_filters(filters_json, self, view_name):
if type(filters_json) in [int, float]:
try:
filters = process_prejoin_leaf_filters(filters_json, self, view_name)
except:
return filters_json
return filters
if type(filters_json) in [str, unicode]:
try:
filters = process_prejoin_leaf_filters(filters_json, self, view_name)
except:
return filters_json
return "%s" % filters
_a = process_prejoin_filters(filters_json['a'], self, view_name)
_b = process_prejoin_filters(filters_json['b'], self, view_name)
result = '%s %s %s' % \
(('(%s)' % _a) if type(_a) not in [str, unicode, int, float] else _a,
Query.operator_to_str(filters_json['op']),
('(%s)' % _b) if type(_b) not in [str, unicode, int, float] else _b)
return result
def process_prejoin_leaf_filters(filters, self, view_name):
col_name = ''
from_order = int(filters[filters.find('i') + 1:filters.find('_')])
if from_order >= 0:
for x in self.document['from'][from_order]['select']:
if x['name'] == filters:
if x['type'] != 'VALUE':
col_name = Dimension.objects.get(pk=x['type']).data_column_name
else:
v_obj = Variable.objects.get(pk=int(self.document['from'][from_order]['type']))
if v_obj.dataset.stored_at == 'UBITECH_PRESTO':
col_name = v_obj.name
else:
col_name = 'value'
filters = view_name + '.' + col_name
return filters
def build_prejoin_where_clause(self, view_name):
filters = self.document.get('filters', '')
if not filters:
where_clause = ''
else:
where_clause = process_prejoin_filters(filters, self, view_name)
if where_clause:
where_clause = 'WHERE ' + where_clause + ' \n'
return where_clause
def build_group_by_clause(groups):
group_clause = ''
if groups:
group_clause = 'GROUP BY %s\n' % ','.join(groups)
return group_clause
def build_order_by_clause(self):
order_by_clause = ''
orderings = self.document.get('orderings', [])
if orderings:
order_by_clause = 'ORDER BY %s\n' % ','.join([(o['name'] + ' ' + o['type']) for o in orderings])
return order_by_clause
def build_limit_clause(self):
limit_clause = ''
limit = None
if 'limit' in self.document and self.document['limit']:
limit = int(self.document['limit'])
limit_clause = 'LIMIT %d\n' % limit
return limit, limit_clause
def fix_date_trunc(q, subquery, subquery_cnt):
if len(re.findall(r'date_trunc_(.*?)', subquery)) > 0:
print 'Trying to fix date_trunc'
time_trunc = str(subquery.split('date_trunc_')[1].split('(')[0])
# print
names = re.findall(r"date_trunc_" + time_trunc + "\((.*?)\)", subquery)
for name in names:
subquery = re.sub(r"date_trunc_" + time_trunc + "\((" + name + ")\)",
"date_trunc('" + time_trunc + "', " + name + ")", subquery)
# print subquery
names = re.findall(r"date_trunc_" + time_trunc + "\((.*?)\)", subquery_cnt)
for name in names:
subquery_cnt = re.sub(r"date_trunc_" + time_trunc + "\((" + name + ")\)",
"date_trunc('" + time_trunc + "', " + name + ")", subquery_cnt)
# print subquery_cnt
names = re.findall(r"date_trunc_" + time_trunc + "\((.*?)\)", q)
for name in names:
q = re.sub(r"date_trunc_" + time_trunc + "\((" + name + ")\)",
"date_trunc('" + time_trunc + "', " + name + ")", q)
# print q
return q
def fix_round(q, subquery, subquery_cnt):
if len(re.findall(r'round\d', subquery)) > 0:
print 'Trying to fix round'
# round_num = str(subquery.split('round')[1][0])
round_num = str(re.findall(r'round\d', subquery)[0])[-1]
print round_num
# names = re.findall(r"round" + round_num + "\((.*?)\)", subquery)
# print
names = re.findall(r"round" + round_num + "\((.*?)\)", subquery)
for name in names:
subquery = re.sub(r"round" + round_num + "\((" + name + ")\)",
"round(" + name + ", " + round_num + ")", subquery)
# print subquery
names = re.findall(r"round" + round_num + "\((.*?)\)", subquery_cnt)
for name in names:
subquery_cnt = re.sub(r"round" + round_num + "\((" + name + ")\)",
"round(" + name + ", " + round_num + ")", subquery_cnt)
# print subquery_cnt
names = re.findall(r"round" + round_num + "\((.*?)\)", q)
for name in names:
q = re.sub(r"round" + round_num + "\((" + name + ")\)",
"round(" + name + ", " + round_num + ")", q)
# print q
return q, subquery, subquery_cnt
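# Descriptive note (added for clarity; the column names are made up):
# fix_round and fix_date_trunc rewrite the synthetic aggregate names produced
# by the query designer into valid SQL in the query, the subquery and the
# COUNT(*) subquery, e.g.
#   "round2(temperature)"      -> "round(temperature, 2)"
#   "date_trunc_month(obs_ts)" -> "date_trunc('month', obs_ts)"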
def extract_variable_ids_from_doc(from_list):
variable_list = []
for _from in from_list:
variable_list.append(_from['type'])
return variable_list[0], variable_list[1]
def extract_dataset_id_from_varible_ids(variable_id):
query = """SELECT dataset_id
FROM aggregator_variable
WHERE id =%s """ % variable_id
cursor = connections['default'].cursor()
try:
cursor.execute(query)
except ProgrammingError as e:
print "query execution failed due to: ", e
return None
res = cursor.fetchone()
return res[0]
def is_same_range_joins(join_list):
join_chain_list = create_join_chain_list_from_joins(join_list)
print 'join_chain_list'
print join_chain_list
min_max_list = calculate_range_for_every_join_chain(join_chain_list)
print 'min_max_list'
print min_max_list
return is_valid_range_all_chains(min_max_list)
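# Descriptive note (added for clarity; the years below are made-up values):
# the chains built below link the dimensions being joined on, each dimension's
# stored min/max is looked up, and the join is rejected when the ranges cannot
# overlap. For example, joining on time a dataset covering 2000-2010 with one
# covering 2015-2020 gives max(mins)=2015 > min(maxes)=2010, so this check
# returns False and the caller raises a ValueError.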
def create_join_chain_list_from_joins(join_list):
all_joins_list, chained_dimensions, join_chain_list, list_accessed_counter = init_variables(join_list)
for join in all_joins_list:
if not (chained_dimensions.__contains__(join[0]) and chained_dimensions.__contains__(join[1])):
chain_list = update_chain_join_list(join)
update_chained_dimensions(chained_dimensions, join)
add_joins_to_chain_if_exist(all_joins_list, chain_list, chained_dimensions, list_accessed_counter)
join_chain_list.append(chain_list)
list_accessed_counter += 1
return join_chain_list
def calculate_range_for_every_join_chain(join_chain_list):
min_max_dim_chain_list = []
for join_chain in join_chain_list:
min_max_dim_list = []
for dim in join_chain:
min_dim, max_dim = get_min_max_dimension(dim)
min_max_dim_list.append((min_dim, max_dim))
min_max_dim_chain_list.append(min_max_dim_list)
# print(min_max_dim_chain_list)
return min_max_dim_chain_list
def is_valid_range_all_chains(min_max_chain_list):
for chain in min_max_chain_list:
if not is_valid_range_for_chain(chain):
return False
return True
def init_variables(join_list):
join_chain_list = []
chained_dimensions = []
all_joins_list = extract_all_joins_from_join_list(join_list)
list_accessed_counter = 0
return all_joins_list, chained_dimensions, join_chain_list, list_accessed_counter
def update_chain_join_list(join):
chain_list = [join[0], join[1]]
return chain_list
def update_chained_dimensions(chained_dimensions, join):
chained_dimensions.append(join[0])
chained_dimensions.append(join[1])
def add_joins_to_chain_if_exist(all_joins_list, chain_list, chained_dimensions, list_accessed_counter):
list_size = len(all_joins_list)
for join2 in all_joins_list[list_accessed_counter:list_size]:
add_join_if_valid(chain_list, chained_dimensions, join2)
def get_min_max_dimension(dim):
# cursor = connections['default'].cursor()
# min_max_dim_query = build_min_max_dimension_query(dim)
# try:
# cursor.execute(min_max_dim_query)
# except ProgrammingError as e:
# print "query execution failed due to: ", e
# return None
# res = cursor.fetchone()
res = None
try:
min = Variable.objects.get(pk=int(dim[0])).dimensions.get(name=dim[1]).min
max = Variable.objects.get(pk=int(dim[0])).dimensions.get(name=dim[1]).max
if min is not None and max is not None:
res = (min, max)
except:
pass
if res is not None:
return res[0], res[1]
else:
return (-1 * sys.maxint), sys.maxint
def build_min_max_dimension_query(dim):
min_max_dim_query = """
SELECT
min,
max
FROM aggregator_dimension d
INNER JOIN aggregator_variable v
ON d.variable_id = v.id
WHERE d.title = '%s' AND variable_id = %s
""" % (dim[1], dim[0])
return min_max_dim_query
def is_valid_range_for_chain(chain):
max_of_mins, min_of_maxes = initialize_minofmaxes_and_maxofmins(chain)
if min_of_maxes is None or max_of_mins is None:
return True
for dim in chain:
if dim[0] is not None and dim[0] > max_of_mins:
max_of_mins = dim[0]
if dim[1] is not None and dim[1] < min_of_maxes:
min_of_maxes = dim[1]
if min_of_maxes < max_of_mins:
return False
return True
def add_join_if_valid(chain_list, chained_dimensions, join2):
if join2[0] in chain_list and join2[1] not in chain_list:
chain_list.append(join2[1])
chained_dimensions.append(join2[1])
elif join2[1] in chain_list and join2[0] not in chain_list:
chain_list.append(join2[0])
chained_dimensions.append(join2[0])
def extract_all_joins_from_join_list(join_list):
all_joins_list = []
for lists in join_list:
all_joins_list += lists
return all_joins_list
def initialize_minofmaxes_and_maxofmins(chain):
    # advance through the chain until a dimension with both a min and a max
    # is found (one shared iterator; re-creating it each pass would never
    # move beyond the first element)
    chain_iter = iter(chain)
    first_dim = next(chain_iter, (None, None))
    max_of_mins = first_dim[0]
    min_of_maxes = first_dim[1]
    chain_size = len(chain)
    cnt = 0
    while (min_of_maxes is None or max_of_mins is None) and cnt < chain_size:
        first_dim = next(chain_iter, (None, None))
        max_of_mins = first_dim[0]
        min_of_maxes = first_dim[1]
        cnt += 1
    return max_of_mins, min_of_maxes
|
{
"content_hash": "da83766c2d4a75f411c0ec31917bee4e",
"timestamp": "",
"source": "github",
"line_count": 705,
"max_line_length": 139,
"avg_line_length": 36.66950354609929,
"alnum_prop": 0.5564753210583321,
"repo_name": "dipapaspyros/bdo_platform",
"id": "17571901035808652138f1655649e55fe10a5396",
"size": "25852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "query_designer/query_processors/presto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176900"
},
{
"name": "HTML",
"bytes": "69066"
},
{
"name": "JavaScript",
"bytes": "10644123"
},
{
"name": "Python",
"bytes": "195457"
},
{
"name": "XSLT",
"bytes": "1521"
}
],
"symlink_target": ""
}
|
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
"""
Custom Application Main Menu:
The main menu consists of several sub-menus, each of which can
be customised separately as a method of this class. The overall
composition of the menu is defined in the menu() method, which can
be customised as well:
Function Sub-Menu Access to (standard)
menu_modules() the modules menu the Eden modules
menu_admin() the Admin menu System/User Administration
menu_lang() the Language menu Selection of the GUI locale
menu_auth() the User menu Login, Logout, User Profile
menu_help() the Help menu Contact page, About page
The standard uses the MM layout class for main menu items - but you
can of course use a custom layout class which you define in layouts.py.
Additional sub-menus can simply be defined as additional functions in
this class, and then be included in the menu() method.
        Each sub-menu function returns a list of menu items; only the menu()
        function must return a layout class instance.
"""
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
main_menu = MMO()(
# Align left
MM()(
# Home link
HM(),
# Modules
cls.menu_modules()
),
# Service menus, align-right
MM(right=True)(
cls.menu_admin(),
#cls.menu_gis()
cls.menu_lang(),
cls.menu_auth(),
cls.menu_help(),
),
)
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
AUTHENTICATED = current.session.s3.system_roles.AUTHENTICATED
return [
MM("News", c="cms", f="newsfeed", args="datalist",
icon="icon-news"),
MM("Map", c="gis", f="index",
icon="icon-map"),
MM("Projects", c="project", f="project"),
MM("Requests", c="req", f="req")(
MM("Fulfill Requests", f="req"),
MM("Request Supplies", f="req", m="create", vars={"type": 1}),
MM("Request People", f="req", m="create", vars={"type": 3})
),
MM("Locations", c="org", f="facility", m="summary",
restrict=[AUTHENTICATED])(
MM("Facilities", c="org", f="facility", m="summary"),
MM("Create a Facility", c="org", f="facility", m="create")
),
MM("Contacts", c="hrm", f="staff")(
MM("People", c="hrm", f="staff"),
MM("Groups", c="hrm", f="group"),
MM("Organizations", c="org", f="organisation"),
#MM("People Registry", c="pr", f="index")
),
MM("Networks", c="org", f="group")(
#MM("Networks", c="org", f="group"),
#MM("Organizations", c="org", f="organisation"),
),
MM("Resources", c="inv", f="index")(
MM("Assets", c="asset", f="asset"),
MM("Inventory", c="inv", f="inv_item"),
MM("Stock Counts", c="inv", f="adj"),
MM("Shipments", c="inv", f="send")
),
MM("Cases", c="assess", f="building")(
MM("Building Assessments", f="building"),
MM("Canvass", f="canvass"),
),
MM("Incidents", c="event", f="event")(
MM("Events", f="event"),
MM("Incidents", f="incident"),
MM("Incident Reports", f="incident_report"),
),
MM("Survey", c="survey")(
MM("Templates", f="template"),
MM("Assessments", f="series"),
MM("Import Templates", f="question_list", m="import"),
),
]
# -------------------------------------------------------------------------
@classmethod
def menu_help(cls, **attr):
""" Help Menu """
menu_help = MM("Help", c="default", f="help",
icon="icon-question-sign", **attr
)(
MM("Contact us", f="contact"),
MM("About", f="about")
)
# -------------------------------------------------------------------
# Now add the available guided tours to the help menu
# check that a guided_tour is enabled
if current.deployment_settings.get_base_guided_tour():
# load the guided tour configuration from the database
table = current.s3db.tour_config
logged_in = current.auth.is_logged_in()
if logged_in:
query = (table.deleted == False) &\
(table.role != "")
else:
query = (table.deleted == False) &\
(table.role == "")
tours = current.db(query).select(table.id,
table.name,
table.controller,
table.function,
table.role,
)
if len(tours) > 0:
menu_help.append(SEP())
for row in tours:
menu_help.append(MM(row.name,
c=row.controller,
f=row.function,
vars={"tour":row.id},
restrict=row.role
)
)
return menu_help
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
""" Auth Menu """
auth = current.auth
logged_in = auth.is_logged_in()
if not logged_in:
self_registration = current.deployment_settings.get_security_self_registration()
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
menu_auth = MM("Login", c="default", f="user", m="login",
icon="icon-signin",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
MM("Register", f="index", m="register",
vars=dict(_next=login_next),
check=self_registration),
MM("Lost Password", m="retrieve_password")
)
else:
# Logged-in
filter_manager = current.deployment_settings.get_search_filter_manager()
user = auth.user
menu_auth = MM(user.email, c="default", f="user",
translate=False, link=False, _id="auth_menu_email",
**attr)(
MM("Logout", m="logout", _id="auth_menu_logout",
icon="icon-off",
),
MM("Profile", c="default", f="person", m="update",
icon="icon-user",
),
MM("Saved Filters", c="pr", f="filter", m="datalist",
vars={"~.pe_id": user.pe_id},
icon="icon-filter",
),
MM("Change Password", m="change_password",
icon="icon-lock",
),
# @ToDo:
#SEP(),
#MM({"name": current.T("Rapid Data Entry"),
# "id": "rapid_toggle",
# "value": current.session.s3.rapid_data_entry is True},
# f="rapid"),
)
return menu_auth
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls, **attr):
""" Language Menu """
settings = current.deployment_settings
if not settings.get_L10n_display_toolbar():
return None
s3 = current.response.s3
languages = s3.l10n_languages
lang = s3.language
request = current.request
menu_lang = MM("Language", icon="icon-comment-alt", **attr)
for language in languages:
menu_lang.append(MM(languages[language], r=request,
translate=False,
vars={"_language": language},
ltr=True,
icon="icon-check" if language == lang else "icon-check-empty"
))
return menu_lang
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
"""
Custom Controller Menus
The options menu (left-hand options menu) is individual for each
controller, so each controller has its own options menu function
in this class.
Each of these option menu functions can be customised separately,
by simply overriding (re-defining) the default function. The
options menu function must return an instance of the item layout.
The standard menu uses the M item layout class, but you can of
course also use any other layout class which you define in
layouts.py (can also be mixed).
Make sure additional helper functions in this class don't match
any current or future controller prefix (e.g. by using an
underscore prefix).
"""
# -------------------------------------------------------------------------
@staticmethod
def hrm():
""" HRM / Human Resources Management """
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
AUTHENTICATED = s3.system_roles.AUTHENTICATED
db = current.db
SUPER = lambda i: \
db(db.auth_group.uuid=="super").select(db.auth_group.id,
limitby=(0, 1),
                                                   cache=current.s3db.cache
).first().id
# Custom conditions for the check-hook, as lambdas in order
# to have them checked only immediately before rendering:
def hrm_vars():
if not s3.hrm:
if current.auth.is_logged_in():
current.s3db.hrm_vars()
else:
s3.hrm = Storage(mode="personal")
return True
manager_mode = lambda i: hrm_vars() and s3.hrm.mode is None
personal_mode = lambda i: hrm_vars() and s3.hrm.mode is not None
is_org_admin = lambda i: hrm_vars() and s3.hrm.orgs or \
ADMIN in s3.roles
settings = current.deployment_settings
teams = settings.get_hrm_teams()
use_teams = lambda i: teams
return M()(
M(settings.get_hrm_staff_label(), c="hrm", f="staff",
check=manager_mode)(
M("Create", m="create"),
M("Import", f="person", m="import",
vars={"group":"staff"}, p="create"),
),
M(teams, c="hrm", f="group",
check=[use_teams])(
M("Create", m="create"),
),
M("Organizations", c="org", f="organisation")(
M("Create", m="create",
restrict=[AUTHENTICATED]),
M("Review/Approve New", m="review"),
M("Import", m="import",
restrict=[SUPER])
),
M("Organization Types", c="org", f="organisation_type",
restrict=[ADMIN])(
M("Create", m="create"),
),
#M("Department Catalog", c="hrm", f="department",
# check=manager_mode)(
# M("Create", m="create"),
#),
M("Job Title Catalog", c="hrm", f="job_title",
check=manager_mode)(
M("Create", m="create"),
),
M("Skill Catalog", c="hrm", f="skill",
check=manager_mode)(
M("Create", m="create"),
#M("Skill Provisions", f="skill_provision"),
),
M("Personal Profile", c="hrm", f="person",
check=personal_mode, vars=dict(mode="personal")),
# This provides the link to switch to the manager mode:
M("Staff Management", c="hrm", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
M("Personal Profile", c="hrm", f="person",
check=manager_mode, vars=dict(mode="personal"))
)
# -------------------------------------------------------------------------
@staticmethod
def inv():
""" INV / Inventory """
ADMIN = current.session.s3.system_roles.ADMIN
return M()(
M("Facilities", c="inv", f="facility", m="summary")(
M("Create", m="create"),
M("Import", m="import")
),
M("Warehouse Stock", c="inv", f="inv_item")(
#M("Search Shipped Items", f="track_item"),
M("Stock Count", f="adj"),
#M("Kitting", f="kit"),
M("Import", f="inv_item", m="import", p="create"),
),
M("Reports", c="inv", f="inv_item")(
M("Warehouse Stock", f="inv_item",m="report"),
M("Expiration Report", c="inv", f="track_item",
vars=dict(report="exp")),
#M("Monetization Report", c="inv", f="inv_item",
# vars=dict(report="mon")),
#M("Utilization Report", c="inv", f="track_item",
# vars=dict(report="util")),
#M("Summary of Incoming Supplies", c="inv", f="track_item",
# vars=dict(report="inc")),
#M("Summary of Releases", c="inv", f="track_item",
# vars=dict(report="rel")),
),
#M(inv_recv_list, c="inv", f="recv")(
# M("Create", m="create"),
#),
M("Sent Shipments", c="inv", f="send")(
M("Create", m="create"),
M("Search Shipped Items", f="track_item"),
),
M("Items", c="supply", f="item")(
M("Create", m="create"),
M("Report", m="report"),
M("Import", f="catalog_item", m="import", p="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("Create", m="create"),
),
)
# -------------------------------------------------------------------------
def org(self):
""" ORG / Organization Registry """
fn = current.request.function
if fn == "group":
return M()(
M("Networks", c="org", f="group")(
M("Create", m="create"),
),
)
elif fn in ("facility", "facility_type"):
ADMIN = current.session.s3.system_roles.ADMIN
return M()(
M("Facilities", c="org", f="facility", m="summary")(
M("Create", m="create"),
#M("Review/Approve New", m="review"),
M("Import", m="import")
),
M("Facility Types", c="org", f="facility_type",
restrict=[ADMIN])(
M("Create", m="create"),
),
)
else:
# organisation, organisation_type or hrm
return self.hrm()
# -------------------------------------------------------------------------
def pr(self):
""" Person Registry """
return self.hrm()
# -------------------------------------------------------------------------
@staticmethod
def project():
""" PROJECT / Project Tracking & Management """
return M(c="project")(
M("Projects", f="project")(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
)
# -------------------------------------------------------------------------
@staticmethod
def req():
""" REQ / Request Management """
db = current.db
SUPER = lambda i: \
db(db.auth_group.uuid=="super").select(db.auth_group.id,
limitby=(0, 1),
                                                   cache=current.s3db.cache
).first().id
return M(c="req")(
M("Requests", f="req")(
M("Request Supplies", m="create", vars={"type": 1}),
M("Request People", m="create", vars={"type": 3}),
M("Fulfill Requests"),
M("List Recurring Requests", f="req_template"),
#M("Map", m="map"),
M("Report", m="report"),
M("FEMA Items Required", f="fema",
restrict=[SUPER]),
M("Search All Requested Items", f="req_item"),
M("Search All Requested Skills", f="req_skill"),
),
#M("Priority Items", f="summary_option")(
# M("Create", m="create"),
#),
M("Commitments", f="commit")(
),
M("Sent Shipments", f="send")(
#M("Create", m="create"),
#M("Search Shipped Items", f="track_item"),
),
M("Items", c="supply", f="item",
restrict=[SUPER])(
M("Create", m="create"),
M("Report", m="report"),
M("Import", f="catalog_item", m="import", p="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[SUPER])(
M("Create", m="create"),
),
)
# END =========================================================================
|
{
"content_hash": "ddece8ab66d58b61d1a21831d171f877",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 93,
"avg_line_length": 42.13636363636363,
"alnum_prop": 0.39501814259095813,
"repo_name": "devinbalkind/eden",
"id": "98a5a22a440b59a39d4efef2008cba60581c12a2",
"size": "20419",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "private/templates/NYC/menus.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import logging
import foursquare
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template,request,jsonify,json
from flask.ext.bootstrap import Bootstrap
import os
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__,template_folder=tmpl_dir)
Bootstrap(app)
app.config['BOOTSTRAP_USE_CDN'] = True
@app.route('/')
def index():
return render_template('index.html')
@app.route('/breakme/')
def breakme():
return render_template('noformserror.html')
@app.route('/social_login')
def social_login():
return render_template('home.html')
@app.route('/process/<access_token>')
def start_calculation(access_token):
client = foursquare.Foursquare(client_id='OIHK2AIH43GOZWFR4EHQRQV3QEXJWZSUBPJ2WADXFBINMRWC', client_secret='VQPEDXH2DD2RM4J1D4Y4W03NEFABOQTUWKE5ZZDXSFZPPFHR', redirect_uri='http://127.0.0.1:5000/social_login')
client.set_access_token(access_token)
response = client.users.friends()
app.logger.info(response.checksum)
# checkins = []
# for friend in response.friends.items:
# app.logger.info(friend)
# client.users(friend.id)
# friend_checkins = client.users.all_checkins()
# checkings += friend_checkins
# app.logger.info(friend_checkins)
# app.logger.info(checkings)
return jsonify(response)
if __name__ == '__main__':
app.run(debug=True)
|
{
"content_hash": "77b9b907bbc255a9851fe2fea71c47bd",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 210,
"avg_line_length": 29.73913043478261,
"alnum_prop": 0.7426900584795322,
"repo_name": "firetix/BoringFriends",
"id": "e4173677fbd13a8068a8c862a0441a464fecebe8",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "start.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "6966"
},
{
"name": "Ruby",
"bytes": "12965"
}
],
"symlink_target": ""
}
|
from vistrails.db.domain import DBWorkflow, DBAdd, DBDelete, DBAction, DBAbstraction, \
DBModule, DBConnection, DBPort, DBFunction, DBParameter, DBGroup
from vistrails.db.services.action_chain import getActionChain, getCurrentOperationDict, \
getCurrentOperations, simplify_ops
from vistrails.db import VistrailsDBException
import copy
import datetime
import getpass
import unittest
import vistrails.core.system
from itertools import chain
def update_id_scope(vistrail):
if hasattr(vistrail, 'update_id_scope'):
vistrail.update_id_scope()
else:
for action in vistrail.db_actions:
vistrail.idScope.updateBeginId('action', action.db_id+1)
if action.db_session is not None:
vistrail.idScope.updateBeginId('session', action.db_session + 1)
for operation in action.db_operations:
vistrail.idScope.updateBeginId('operation', operation.db_id+1)
if operation.vtType == 'add' or operation.vtType == 'change':
# update ids of data
vistrail.idScope.updateBeginId(operation.db_what,
getNewObjId(operation)+1)
if operation.db_data is None:
if operation.vtType == 'change':
operation.db_objectId = operation.db_oldObjId
vistrail.db_add_object(operation.db_data)
for annotation in action.db_annotations:
vistrail.idScope.updateBeginId('annotation', annotation.db_id+1)
def materializeWorkflow(vistrail, version):
# construct path up through tree and perform each action
if vistrail.db_has_action_with_id(version):
workflow = DBWorkflow()
#for action in getActionChain(vistrail, version):
# oldPerformAction(action, workflow)
performActions(getActionChain(vistrail, version),
workflow)
workflow.db_id = version
workflow.db_vistrailId = vistrail.db_id
return workflow
elif version == 0:
return DBWorkflow()
else:
raise VistrailsDBException("invalid workflow version %s" % version)
def performAction(action, workflow):
if action.actionType == 'add':
for operation in action.db_operations:
workflow.db_add_object(operation.db_data,
operation.db_parentObjType,
operation.db_parentObjId)
elif action.actionType == 'change':
for operation in action.db_operations:
workflow.db_change_object(operation.db_data,
operation.db_parentObjType,
operation.db_parentObjId)
elif action.actionType == 'delete':
for operation in action.operations:
workflow.db_delete_object(operation.db_objectId,
operation.db_what,
operation.db_parentObjType,
operation.db_parentObjId)
else:
msg = "Unrecognized action type '%s'" % action.db_actionType
raise TypeError(msg)
def performDeletes(deleteOps, workflow):
for operation in deleteOps:
workflow.db_delete_object(getOldObjId(operation), operation.db_what,
operation.db_parentObjType,
operation.db_parentObjId)
def performAdds(addOps, workflow):
for operation in addOps:
# print "operation %d: %s %s" % (operation.db_id, operation.vtType,
# operation.db_what)
# print " to: %s %s" % (operation.db_parentObjType,
# operation.db_parentObjId)
workflow.db_add_object(operation.db_data,
operation.db_parentObjType,
operation.db_parentObjId)
def performActions(actions, workflow):
# get the current actions and run addObject on the workflow
# note that delete actions have been removed and
# a change after an add is effectively an add if the add is discarded
performAdds(getCurrentOperations(actions), workflow)
def synchronize(old_vistrail, new_vistrail, current_action_id):
id_remap = {}
for action in new_vistrail.db_actions:
if action.is_new:
new_action = action.do_copy(True, old_vistrail.idScope, id_remap)
old_vistrail.db_add_action(new_action)
else:
# it must exist in the old vistrail, too
old_action = old_vistrail.db_actions_id_index[action.db_id]
# use knowledge that we replace old notes...
for annotation in action.db_deleted_annotations:
if old_action.db_has_annotation_with_id(annotation.db_id):
old_action.db_delete_annotation(annotation)
else:
# FIXME conflict!
# we know that the annotation that was there isn't anymore
#print 'possible notes conflict'
if old_action.db_has_annotation_with_key('notes'):
old_annotation = \
old_action.db_get_annotation_by_key('notes')
old_action.db_delete_annotation(old_annotation)
else:
# we don't have to do anything
pass
for annotation in action.db_annotations:
if annotation.is_new:
new_annotation = annotation.do_copy(True,
old_vistrail.idScope,
id_remap)
old_action.db_add_annotation(new_annotation)
for tag in new_vistrail.db_deleted_tags:
if old_vistrail.db_has_tag_with_id(tag.db_id):
old_vistrail.db_delete_tag(tag)
else:
# FIXME conflict!
# we know the tag that was there isn't anymore
#print 'possible tag conflict'
# we don't have to do anything here, though
pass
for tag in new_vistrail.db_tags:
if tag.is_new:
new_tag = tag.do_copy(False)
# remap id
try:
new_tag.db_id = id_remap[(DBAction.vtType, new_tag.db_id)]
except KeyError:
pass
try:
old_tag = old_vistrail.db_tags_name_index[new_tag.db_name]
except KeyError:
# FIXME conflict!
#print "tag conflict--name already used"
old_vistrail.db_delete_tag(old_tag)
try:
old_tag = old_vistrail.db_tags_id_index[new_tag.db_id]
except KeyError:
#print 'possible tag conflict -- WILL NOT GET HERE!'
old_vistrail.db_delete_tag(old_tag)
old_vistrail.db_add_tag(new_tag)
new_action_id = \
id_remap.get((DBAction.vtType, current_action_id), current_action_id)
old_vistrail.db_currentVersion = new_action_id
return new_action_id
def merge(sb, next_sb, app='', interactive = False, tmp_dir = '', next_tmp_dir = ''):
""" def merge(sb: SaveBundle, next_sb: SaveBundle, app: str,
interactive: bool, tmp_dir: str, next_tmp_dir: str) -> None
Merges two save bundles that has been annotated with checkout
information from a database.
All changes from next_sb are appended onto sb. The changes in sb can
then be uploaded to the database and use as the new working copy.
first sb is the old one from db, last vt is the new one.
if interactive is gui.merge_gui.MergeGui then the tmp_dir's must be set.
"""
vt = sb.vistrail
next_vt = next_sb.vistrail
merge_gui = interactive
MergeGUI = merge_gui.MergeGUI if merge_gui else False
skip = 0
# right now we just replace mashups and subworkflows
sb.mashups = list(next_sb.mashups)
sb.abstractions = list(next_sb.abstractions)
id_remap = {}
checkout_key = "__checkout_version_"
action_key = checkout_key + app
annotation_key = action_key + '_annotationhash'
action_annotation_key = action_key + '_actionannotationhash'
# find the highest common checkin id
checkinId = 0
if len(app) and next_vt.db_has_annotation_with_key(action_key):
co = next_vt.db_get_annotation_by_key(action_key)
#print "found checkin id annotation"
checkinId = int(co._db_value)
else:
#print "calculating checkin id"
# create unique identifiers for all actions
actions = []
actionDict = {}
for action in vt.db_actions:
unique = action._db_user + str(action._db_date)
copy_no = 0
while (unique + str(copy_no)) in actionDict:
copy_no += 1
unique = unique + str(copy_no)
actions.append(unique)
actionDict[unique] = action
actionNexts = []
actionDictNext = {}
for action in next_vt.db_actions:
unique = action._db_user + str(action._db_date)
copy_no = 0
while (unique + str(copy_no)) in actionDictNext:
copy_no += 1
unique = unique + str(copy_no)
actionNexts.append(unique)
actionDictNext[unique] = action
# find last checkin action (only works for centralized syncs)
while checkinId < len(actions) and checkinId < len(actionNexts) and \
actions[checkinId] == actionNexts[checkinId]:
checkinId += 1
if checkinId > 0:
checkinId = actionDict[actions[checkinId-1]].db_id
#print "checkinId:", checkinId
# delete previous checkout annotations in vt
deletekeys = [action_key,annotation_key,action_annotation_key]
for key in deletekeys:
while vt.db_has_annotation_with_key(key):
a = vt.db_get_annotation_by_key(key)
vt.db_delete_annotation(a)
# check if someone else have changed the annotations
mergeAnnotations = True
if len(app) and next_vt.db_has_annotation_with_key(annotation_key):
#print "found annotationhash"
co = next_vt.db_get_annotation_by_key(annotation_key)
old_hash = co._db_value
mergeAnnotations = (old_hash != vt.hashAnnotations())
#print "merge annotations:", mergeAnnotations
# check if someone else have changed the action annotations
mergeActionAnnotations = True
if len(app) and next_vt.db_has_annotation_with_key(action_annotation_key):
#print "found actionannotationhash"
co = next_vt.db_get_annotation_by_key(action_annotation_key)
old_hash = co._db_value
mergeActionAnnotations = (old_hash != vt.hashActionAnnotations())
#print "merge actionannotations:", mergeActionAnnotations
################## merge actions ######################
for action in next_vt.db_actions:
# check for identical actions
if action._db_id > checkinId:
new_action = action.do_copy(True, vt.idScope, id_remap)
vt.db_add_action(new_action)
################## merge annotations ##################
if not mergeAnnotations:
# delete removed annotations
for annotation in [a for a in vt.db_annotations]:
if not next_vt.db_has_annotation_with_id(annotation.db_id):
# delete it
vt.db_delete_annotation(annotation)
# add new and update changed annotations
for annotation in next_vt.db_annotations:
if not vt.db_has_annotation_with_id(annotation.db_id):
# new annotation
new_annotation = annotation.do_copy(True, vt.idScope, id_remap)
vt.db_add_annotation(new_annotation)
else:
old_annotation = vt.db_get_annotation_by_id(annotation.db_id)
if old_annotation.db_key != annotation.db_key:
# key changed
old_annotation.db_key = annotation.db_key
if old_annotation.db_value != annotation.db_value:
# value changed
old_annotation.db_value = annotation.db_value
else:
annotations = {}
# create dict with keys and values
for annotation in vt.db_annotations:
if annotation.db_key not in annotations:
annotations[annotation.db_key] = []
if annotation.db_value not in annotations[annotation.db_key]:
annotations[annotation.db_key].append(annotation.db_value)
# add nonexisting key-value pairs
for annotation in next_vt.db_annotations:
if annotation.db_key not in annotations or \
annotation.db_value not in annotations[annotation.db_key]:
new_annotation = annotation.do_copy(True, vt.idScope, id_remap)
vt.db_add_annotation(new_annotation)
################# merge action annotations ############
if not mergeActionAnnotations:
# delete removed action annotations
for annotation in [a for a in vt.db_actionAnnotations]:
if not next_vt.db_has_actionAnnotation_with_id(annotation.db_id):
# delete it
vt.db_delete_actionAnnotation(annotation)
if annotation.db_key == '__thumb__' and len(sb.thumbnails) > 0:
# remove thumb
thumb = '/'.join(sb.thumbnails[0].split(
'/')[:-1]) + '/' + annotation.db_value
if thumb in sb.thumbnails:
sb.thumbnails.remove(thumb)
# add new and update changed annotations
for annotation in next_vt.db_actionAnnotations:
if not vt.db_has_actionAnnotation_with_id(annotation.db_id):
# new actionAnnotation
annotation = annotation.do_copy(True, vt.idScope, id_remap)
vt.db_add_actionAnnotation(annotation)
if annotation.db_key == '__thumb__' and \
len(next_sb.thumbnails) > 0:
# add thumb
thumb = '/'.join(next_sb.thumbnails[0].split(
'/')[:-1])+'/'+ annotation.db_value
if thumb not in sb.thumbnails:
sb.thumbnails.append(thumb)
else:
old_annotation = \
vt.db_get_actionAnnotation_by_id(annotation.db_id)
if old_annotation.db_value != annotation.db_value:
# value changed
if annotation.db_key == '__thumb__' and \
len(sb.thumbnails) > 0:
# remove thumb
thumb = '/'.join(sb.thumbnails[0].split(
'/')[:-1]) + '/' + old_annotation.db_value
if thumb in sb.thumbnails:
sb.thumbnails.remove(thumb)
if annotation.db_key == '__thumb__' and \
len(next_sb.thumbnails) > 0:
# add thumb
thumb = '/'.join(next_sb.thumbnails[0].split(
'/')[:-1])+'/'+ annotation.db_value
if thumb not in sb.thumbnails:
sb.thumbnails.append(thumb)
old_annotation.db_value = annotation.db_value
old_annotation.db_date = annotation.db_date
old_annotation.db_user = annotation.db_user
else:
# construct old action index (oas)
oas = {}
for a in vt.db_actionAnnotations:
if not a.db_action_id in oas:
oas[a.db_action_id] = {}
if not a.db_key in oas[a.db_action_id]:
oas[a.db_action_id][a.db_key] = []
oas[a.db_action_id][a.db_key].append(a)
# merge per action
for new_annotation in next_vt.db_actionAnnotations:
# keep both upgrades but update action id in new
if new_annotation.db_key == '__upgrade__':
value = int(new_annotation.db_value)
if ('action', value) in id_remap:
new_annotation.db_value = str(id_remap[('action', value)])
annotation = new_annotation.do_copy(True, vt.idScope, id_remap)
vt.db_add_actionAnnotation(annotation)
elif new_annotation.db_action_id <= checkinId and \
new_annotation.db_key in oas[new_annotation.db_action_id]:
old_action = oas[new_annotation.db_action_id]
# we have a conflict
# tags should be merged (the user need to resolve)
if new_annotation.db_key == '__tag__':
# there is only one
old_annotation = old_action[new_annotation.db_key][0]
if old_annotation.db_value != new_annotation.db_value:
value = old_annotation.db_value + " or " + \
new_annotation.db_value
if interactive:
if skip == 1:
pass
elif skip == 2:
old_annotation.db_value=new_annotation.db_value
old_annotation.db_date = new_annotation.db_date
old_annotation.db_user = new_annotation.db_user
else:
v, value = MergeGUI.resolveTags(
old_annotation, new_annotation, value)
if v == merge_gui.CHOICE_OTHER_ALL:
skip = 1
elif v == merge_gui.CHOICE_OTHER:
pass
elif v == merge_gui.CHOICE_RESOLVED:
#print "Tag resolved:", value
old_annotation.db_value = value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
pass
elif v == merge_gui.CHOICE_OWN:
old_annotation.db_value = \
new_annotation.db_value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
elif v == merge_gui.CHOICE_OWN_ALL:
old_annotation.db_value = \
new_annotation.db_value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
skip = 2
else:
old_annotation.db_value = value
old_annotation.db_date = new_annotation.db_date
old_annotation.db_user = new_annotation.db_user
# notes should be merged (the user need to resolve)
elif new_annotation.db_key == '__notes__':
# there is only one
old_annotation = old_action[new_annotation.db_key][0]
if new_annotation.db_value != old_annotation.db_value:
value = ("#### conflicting versions! ####<br/>" +
"## Other version at %s by %s:%s<br/>" +
"## Your version at %s by %s:%s") % \
(str(old_annotation.db_date), old_annotation.db_user,
old_annotation.db_value, str(new_annotation.db_date),
new_annotation.db_user, new_annotation.db_value)
if interactive:
if skip == 1:
pass
elif skip == 2:
old_annotation.db_value=new_annotation.db_value
old_annotation.db_date = new_annotation.db_date
old_annotation.db_user = new_annotation.db_user
else:
v, value = MergeGUI.resolveNotes(
old_annotation, new_annotation, value)
if v == merge_gui.CHOICE_OTHER_ALL:
skip = 1
elif v == merge_gui.CHOICE_OTHER:
pass
elif v == merge_gui.CHOICE_RESOLVED:
#print "Note resolved:", value
old_annotation.db_value = value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
pass
elif v == merge_gui.CHOICE_OWN:
old_annotation.db_value = \
new_annotation.db_value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
elif v == merge_gui.CHOICE_OWN_ALL:
old_annotation.db_value = \
new_annotation.db_value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
skip = 2
else:
old_annotation.db_value = value
old_annotation.db_date = new_annotation.db_date
old_annotation.db_user = new_annotation.db_user
# thumbs should be updated (we loose the other update)
elif new_annotation.db_key == '__thumb__':
# there is only one
old_annotation = old_action[new_annotation.db_key][0]
if new_annotation.db_value != old_annotation.db_value:
if interactive:
if skip == 1:
pass
elif skip == 2:
thumb = '/'.join(sb.thumbnails[0].split(
'/')[:-1])+'/'+ old_annotation.db_value
if thumb in sb.thumbnails:
sb.thumbnails.remove(thumb)
old_annotation.db_value=new_annotation.db_value
old_annotation.db_date = new_annotation.db_date
old_annotation.db_user = new_annotation.db_user
thumb = '/'.join(next_sb.thumbnails[0].split(
'/')[:-1])+'/'+ new_annotation.db_value
if thumb not in sb.thumbnails:
sb.thumbnails.append(thumb)
else:
v = MergeGUI.resolveThumbs(old_annotation,
new_annotation, tmp_dir, next_tmp_dir)
if v == merge_gui.CHOICE_OTHER_ALL:
skip = 1
elif v == merge_gui.CHOICE_OTHER:
pass
elif v in (merge_gui.CHOICE_OWN,
merge_gui.CHOICE_OWN_ALL):
thumb = '/'.join(sb.thumbnails[0].split(
'/')[:-1])+'/'+ old_annotation.db_value
if thumb in sb.thumbnails:
sb.thumbnails.remove(thumb)
old_annotation.db_value = \
new_annotation.db_value
old_annotation.db_date = \
new_annotation.db_date
old_annotation.db_user = \
new_annotation.db_user
thumb='/'.join(next_sb.thumbnails[0].split(
'/')[:-1])+'/'+ new_annotation.db_value
if thumb not in sb.thumbnails:
sb.thumbnails.append(thumb)
if v == merge_gui.CHOICE_OWN_ALL:
skip = 2
else:
thumb = '/'.join(sb.thumbnails[0].split(
'/')[:-1])+'/'+ old_annotation.db_value
if thumb in sb.thumbnails:
sb.thumbnails.remove(thumb)
old_annotation.db_value = new_annotation.db_value
old_annotation.db_date = new_annotation.db_date
old_annotation.db_user = new_annotation.db_user
thumb = '/'.join(next_sb.thumbnails[0].split(
'/')[:-1])+'/'+ new_annotation.db_value
if thumb not in sb.thumbnails:
sb.thumbnails.append(thumb)
elif new_annotation.db_key == '__prune__': # keep old
pass
# others should be appended if not already there
else:
values = []
for old_annotation in old_action[new_annotation.db_key]:
values.append(old_annotation.db_value)
if new_annotation.db_value not in values:
annotation = new_annotation.do_copy(True, vt.idScope, \
id_remap)
vt.db_add_actionAnnotation(annotation)
else:
annotation = new_annotation.do_copy(True, vt.idScope, id_remap)
vt.db_add_actionAnnotation(annotation)
if annotation.db_key == '__thumb__':
thumb = '/'.join(next_sb.thumbnails[0].split('/')[:-1]) + \
'/' + annotation.db_value
if thumb not in sb.thumbnails:
sb.thumbnails.append(thumb)
# make this a valid checked out version
if len(app):
vt.update_checkout_version(app)
################################################################################
# Analogy methods
def find_data(what, id, op_dict):
try:
return op_dict[(what, id)].db_data
except KeyError:
msg = 'cannot find data (%s, %s)' % (what, id)
raise KeyError(msg)
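# invertOperations builds the reverse of a sequence of add/delete operations:
# each delete becomes an add that restores the original data (looked up in
# op_dict via find_data), each add becomes a delete of the object it created,
# and both lists are reversed so the inverse applies in the correct order.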
def invertOperations(op_dict, adds, deletes, do_copy=False):
inverse_ops = []
deletes.reverse()
for op in deletes:
data = find_data(op.db_what, getOldObjId(op), op_dict)
if do_copy:
data = copy.copy(data)
inv_op = DBAdd(id=-1,
what=op.db_what,
objectId=getOldObjId(op),
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType,
data=data
)
inverse_ops.append(inv_op)
adds.reverse()
for op in adds:
inv_op = DBDelete(id=-1,
what=op.db_what,
objectId=getNewObjId(op),
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType,
)
inverse_ops.append(inv_op)
return inverse_ops
def normalOperations(adds, deletes, do_copy=False):
new_ops = []
for op in deletes:
new_op = DBDelete(id=-1,
what=op.db_what,
objectId=getOldObjId(op),
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType,
)
new_ops.append(new_op)
for op in adds:
data = op.db_data
if do_copy:
data = copy.copy(op.db_data)
new_op = DBAdd(id=-1,
what=op.db_what,
objectId=getNewObjId(op),
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType,
data=data)
new_ops.append(new_op)
return new_ops
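# getPathAsAction computes a single action that transforms the workflow at
# version v1 into the workflow at version v2: it finds their shared root,
# inverts the operations unique to v1, appends the operations unique to v2,
# and simplifies the combined operation list.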
def getPathAsAction(vistrail, v1, v2, do_copy=False):
sharedRoot = getSharedRoot(vistrail, [v1, v2])
sharedActionChain = getActionChain(vistrail, sharedRoot)
sharedOperationDict = getCurrentOperationDict(sharedActionChain)
v1Actions = getActionChain(vistrail, v1, sharedRoot)
v2Actions = getActionChain(vistrail, v2, sharedRoot)
(v1AddDict, v1DeleteDict) = getOperationDiff(v1Actions,
sharedOperationDict)
(v2AddDict, v2DeleteDict) = getOperationDiff(v2Actions,
sharedOperationDict)
# need to invert one of them (v1)
v1Adds = v1AddDict.values()
v1Adds.sort(key=lambda x: x.db_id) # faster than sort(lambda x, y: cmp(x.db_id, y.db_id))
v1Deletes = v1DeleteDict.values()
v1Deletes.sort(key=lambda x: x.db_id) # faster than sort(lambda x, y: cmp(x.db_id, y.db_id))
v1InverseOps = \
invertOperations(sharedOperationDict, v1Adds, v1Deletes, do_copy)
# need to normalize ops of the other (v2)
v2Adds = v2AddDict.values()
v2Adds.sort(key=lambda x: x.db_id) # faster than sort(lambda x, y: cmp(x.db_id, y.db_id))
v2Deletes = v2DeleteDict.values()
v2Deletes.sort(key=lambda x: x.db_id) # faster than sort(lambda x, y: cmp(x.db_id, y.db_id))
v2Ops = normalOperations(v2Adds, v2Deletes, do_copy)
allOps = v1InverseOps + v2Ops
simplifiedOps = simplify_ops(allOps)
return DBAction(id=-1,
operations=simplifiedOps,
)
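# addAndFixActions replays the given actions on top of a starting operation
# dictionary, dropping any operation whose target or parent object is no
# longer present; it rewrites each action.db_operations in place and returns
# the updated dictionary.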
def addAndFixActions(startDict, actions):
curDict = copy.copy(startDict)
# print curDict
for action in actions:
# print "fixing action:", action.db_id
new_ops = []
for op in action.db_operations:
# print "op:", op.vtType, op.db_what, getOldObjId(op)
# print " ", op.db_parentObjType, op.db_parentObjId
if op.vtType == 'add':
if op.db_parentObjId is None or \
curDict.has_key((op.db_parentObjType,
op.db_parentObjId)):
curDict[(op.db_what, op.db_objectId)] = op
new_ops.append(op)
elif op.vtType == 'change':
if curDict.has_key((op.db_what, op.db_oldObjId)) and \
(op.db_parentObjId is None or \
curDict.has_key((op.db_parentObjType,
op.db_parentObjId))):
del curDict[(op.db_what, op.db_oldObjId)]
curDict[(op.db_what, op.db_newObjId)] = op
new_ops.append(op)
elif op.vtType == 'delete':
if (op.db_parentObjId is None or
curDict.has_key((op.db_parentObjType,
op.db_parentObjId))) and \
curDict.has_key((op.db_what, op.db_objectId)):
del curDict[(op.db_what, op.db_objectId)]
new_ops.append(op)
action.db_operations = new_ops
return curDict
def fixActions(vistrail, v, actions):
startingChain = getActionChain(vistrail, v)
startingDict = getCurrentOperationDict(startingChain)
addAndFixActions(startingDict, actions)
################################################################################
# Diff methods
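# getSharedRoot walks the given versions back through db_prevId until they all
# agree, returning the closest common ancestor (0, the root, in the worst
# case).  For example, with a linear history 0 -> 1 -> 2 -> 3 and
# versions = [2, 3], version 3 is first replaced by its parent 2, at which
# point both entries agree and 2 is returned.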
def getSharedRoot(vistrail, versions):
# base case is 0
current = copy.copy(versions)
while 0 not in current:
maxId = max(current)
if current.count(maxId) == len(current):
return maxId
else:
newId = vistrail.db_get_action_by_id(maxId).db_prevId
for i, v in enumerate(current):
if v == maxId:
current[i] = newId
return 0
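# getOperationDiff collects the operations in `actions` that are not already
# reflected in operationDict, returning (addDict, deleteDict) keyed by
# (what, objectId); a change operation is recorded as a delete of its old id
# and an add of its new id.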
def getOperationDiff(actions, operationDict):
addDict = {}
deleteDict = {}
for action in actions:
# print 'action: %d' % action.db_id
for operation in action.db_operations:
if operation.vtType == 'add':
# print "add: %s %s" % (operation.db_what,
# operation.db_objectId)
addDict[(operation.db_what,
operation.db_objectId)] = operation
elif operation.vtType == 'delete':
# print "del: %s %s" % (operation.db_what,
# operation.db_objectId)
if operationDict.has_key((operation.db_what,
operation.db_objectId)):
deleteDict[(operation.db_what,
operation.db_objectId)] = operation
# del operationDict[(operation.db_what,
# operation.db_objectId)]
elif addDict.has_key((operation.db_what,
operation.db_objectId)):
del addDict[(operation.db_what,
operation.db_objectId)]
else:
pass
elif operation.vtType == 'change':
# print "chg: %s %s %s" % (operation.db_what,
# operation.db_oldObjId,
# operation.db_newObjId)
if operationDict.has_key((operation.db_what,
operation.db_oldObjId)):
deleteDict[(operation.db_what,
operation.db_oldObjId)] = operation
# del operationDict[(operation.db_what,
# operation.db_oldObjId)]
elif addDict.has_key((operation.db_what,
operation.db_oldObjId)):
del addDict[(operation.db_what, operation.db_oldObjId)]
addDict[(operation.db_what,
operation.db_newObjId)] = operation
else:
msg = "Unrecognized operation '%s'" % operation.vtType
raise TypeError(msg)
return (addDict, deleteDict)
def updateOperationDict(operationDict, deleteOps, addOps):
for operation in deleteOps:
if operationDict.has_key((operation.db_what, getOldObjId(operation))):
del operationDict[(operation.db_what, getOldObjId(operation))]
else:
msg = "Illegal operation: " + operation
for operation in addOps:
operationDict[(operation.db_what, getNewObjId(operation))] = operation
return operationDict
def getObjects(actions):
objects = {}
for action in actions:
for operation in action.db_operations:
if not objects.has_key(operation.db_what):
objects[operation.db_what] = []
object = copy.copy(operation.db_data)
objects[operation.db_what].append(object)
return objects
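# getVersionDifferences splits the operations of the given versions into the
# part they all share and a per-version part.  It returns (sharedOps,
# vOnlySorted), where vOnlySorted holds one (adds, deletes, allOps) triple per
# version, each list sorted by operation id.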
def getVersionDifferences(vistrail, versions):
sharedRoot = getSharedRoot(vistrail, versions)
sharedActionChain = getActionChain(vistrail, sharedRoot)
sharedOperationDict = getCurrentOperationDict(sharedActionChain)
vOnlySorted = []
for v in versions:
vActions = getActionChain(vistrail, v, sharedRoot)
(vAddDict, vDeleteDict) = getOperationDiff(vActions,
sharedOperationDict)
vOnlyAdds = vAddDict.values()
vOnlyAdds.sort(key=lambda x: x.db_id)
vOnlyDeletes = vDeleteDict.values()
vOnlyDeletes.sort(key=lambda x: x.db_id)
vOpDict = copy.copy(sharedOperationDict)
updateOperationDict(vOpDict, vOnlyDeletes, vOnlyAdds)
vOps = vOpDict.values()
vOps.sort(key=lambda x: x.db_id)
vOnlySorted.append((vOnlyAdds, vOnlyDeletes, vOps))
sharedOps = sharedOperationDict.values()
sharedOps.sort(key=lambda x: x.db_id)
return (sharedOps, vOnlySorted)
def heuristicModuleMatch(m1, m2):
"""takes two modules and returns 1 if exact match,
0 if module names match, -1 if no match
"""
if m1.db_name == m2.db_name and m1.db_namespace == m2.db_namespace and \
m1.db_package == m2.db_package:
if m1.vtType == 'group':
# check if we have __desc__ annotation
m1_desc = None
m2_desc = None
if '__desc__' in m1.db_annotations_key_index:
m1_desc = m1.db_annotations_key_index['__desc__']
if '__desc__' in m2.db_annotations_key_index:
m2_desc = m2.db_annotations_key_index['__desc__']
if not (m1_desc and m2_desc and m1_desc == m2_desc):
# if desc's don't exactly match, return 0
# else continue and check functions
# FIXME: maybe we should check functions here
return 0
m1_functions = copy.copy(m1.db_get_functions())
m2_functions = copy.copy(m2.db_get_functions())
if len(m1_functions) != len(m2_functions):
return 0
for f1 in m1_functions[:]:
match = None
for f2 in m2_functions:
isMatch = heuristicFunctionMatch(f1, f2)
if isMatch == 1:
match = f2
break
if match is not None:
m1_functions.remove(f1)
m2_functions.remove(f2)
else:
return 0
m1_cparams = copy.copy(m1.db_get_controlParameters())
m2_cparams = copy.copy(m2.db_get_controlParameters())
if len(m1_cparams) != len(m2_cparams):
return 0
for cp1 in m1_cparams[:]:
match = None
for cp2 in m2_cparams:
isMatch = heuristicControlParameterMatch(cp1, cp2)
if isMatch == 1:
match = cp2
break
if match is not None:
m1_cparams.remove(cp1)
m2_cparams.remove(cp2)
else:
return 0
m1_annots = copy.copy(m1.db_get_annotations())
m2_annots = copy.copy(m2.db_get_annotations())
if len(m1_annots) != len(m2_annots):
return 0
for a1 in m1_annots[:]:
match = None
for a2 in m2_annots:
isMatch = heuristicAnnotationMatch(a1, a2)
if isMatch == 1:
match = a2
break
if match is not None:
m1_annots.remove(a1)
m2_annots.remove(a2)
else:
return 0
if len(m1_functions) == len(m2_functions) == \
len(m1_cparams ) == len(m2_cparams ) == \
len(m1_annots ) == len(m2_annots ) == 0:
return 1
else:
return 0
return -1
def heuristicFunctionMatch(f1, f2):
"""takes two functions and returns 1 if exact match,
0 if function names match, -1 if no match
"""
if f1.db_name == f2.db_name:
f1_parameters = copy.copy(f1.db_get_parameters())
f2_parameters = copy.copy(f2.db_get_parameters())
if len(f1_parameters) != len(f2_parameters):
return 0
for p1 in f1_parameters[:]:
match = None
for p2 in f2_parameters:
isMatch = heuristicParameterMatch(p1, p2)
if isMatch == 1:
match = p2
break
if match is not None:
f1_parameters.remove(p1)
f2_parameters.remove(match)
else:
return 0
if len(f1_parameters) == len(f2_parameters) == 0:
return 1
else:
return 0
return -1
def heuristicParameterMatch(p1, p2):
"""takes two parameters and returns 1 if exact match,
0 if partial match (types match), -1 if no match
"""
if p1.db_type == p2.db_type and p1.db_pos == p2.db_pos:
if p1.db_val == p2.db_val:
return 1
else:
return 0
return -1
def heuristicControlParameterMatch(cp1, cp2):
"""takes two control parameters and returns 1 if exact match,
0 if partial match (types match), -1 if no match
"""
if cp1.db_name == cp2.db_name:
if cp1.db_value == cp2.db_value:
return 1
else:
return 0
return -1
def heuristicAnnotationMatch(a1, a2):
"""takes two annotations and returns 1 if exact match,
0 if partial match (types match), -1 if no match
"""
if a1.db_key == a2.db_key:
if a1.db_value == a2.db_value:
return 1
else:
return 0
return -1
def heuristicConnectionMatch(c1, c2):
"""takes two connections and returns 1 if exact match,
0 if partial match (currently undefined), -1 if no match
"""
c1_ports = copy.copy(c1.db_get_ports())
c2_ports = copy.copy(c2.db_get_ports())
for p1 in c1_ports[:]:
match = None
for p2 in c2_ports:
isMatch = heuristicPortMatch(p1, p2)
if isMatch == 1:
match = p2
break
elif isMatch == 0:
match = p2
if match is not None:
c1_ports.remove(p1)
c2_ports.remove(match)
else:
return -1
if len(c1_ports) == len(c2_ports) == 0:
return 1
return -1
def heuristicPortMatch(p1, p2):
"""takes two ports and returns 1 if exact match,
0 if partial match, -1 if no match
"""
if p1.db_moduleId == p2.db_moduleId:
return 1
elif p1.db_type == p2.db_type and \
p1.db_moduleName == p2.db_moduleName and \
p1.sig == p2.sig:
return 0
return -1
def function_sig(function):
return (function.db_name,
[(param.db_type, param.db_val)
for param in function.db_get_parameters()])
def getParamChanges(m1, m2, same_vt=True, heuristic_match=True):
paramChanges = []
# need to check to see if any children of m1 and m2 are affected
m1_functions = m1.db_get_functions()
m2_functions = m2.db_get_functions()
m1_unmatched = []
m2_unmatched = []
if same_vt:
for f1 in m1_functions:
# see if m2 has f1, too
f2 = m2.db_get_function(f1.db_id)
if f2 is None:
m1_unmatched.append(f1)
else:
# function is same, check if parameters have changed
if heuristic_match:
matchValue = heuristicFunctionMatch(f1, f2)
if matchValue != 1:
paramChanges.append((function_sig(f1),
function_sig(f2)))
else:
paramChanges.append((function_sig(f1), function_sig(f2)))
for f2 in m2_functions:
# see if m1 has f2, too
if m1.db_get_function(f2.db_id) is None:
m2_unmatched.append(f2)
else:
m1_unmatched.extend(m1_functions)
m2_unmatched.extend(m2_functions)
# functionMatch = True
# f1_params = f1.db_get_parameters()
# f2_params = f2.db_get_parameters()
# for p1 in f1_params:
# if f2.db_get_parameter(p1.db_id) is None:
# functionMatch = False
# m1_unmatched.append(f1)
# break
# for p2 in f2_params:
# if f1.db_get_parameter(p2.db_id) is None:
# functionMatch = False
# m2_unmatched.append(f2)
# break
# if functionMatch:
if len(m1_unmatched) + len(m2_unmatched) > 0:
if heuristic_match and len(m1_unmatched) > 0 and len(m2_unmatched) > 0:
# do heuristic matches
            for f1 in m1_unmatched[:]:
                match = None
                matchValue = 0
                for f2 in m2_unmatched:
                    matchValue = heuristicFunctionMatch(f1, f2)
                    if matchValue == 1:
                        # best match so quit
                        match = f2
                        break
                    elif matchValue == 0:
                        # match, but not exact so continue to look
                        match = f2
                if match is not None:
                    if matchValue != 1:
                        paramChanges.append((function_sig(f1),
                                             function_sig(match)))
                    m1_unmatched.remove(f1)
                    m2_unmatched.remove(match)
for f in m1_unmatched:
paramChanges.append((function_sig(f), (None, None)))
for f in m2_unmatched:
paramChanges.append(((None, None), function_sig(f)))
return paramChanges
def getCParamChanges(m1, m2, same_vt=True, heuristic_match=True):
cparamChanges = []
# need to check to see if any children of m1 and m2 are affected
m1_cparams = m1.db_get_controlParameters()
m2_cparams = m2.db_get_controlParameters()
m1_unmatched = []
m2_unmatched = []
if same_vt:
for cp1 in m1_cparams:
            # see if m2 has cp1, too
cp2 = m2.db_get_controlParameter(cp1.db_id)
if cp2 is None:
m1_unmatched.append(cp1)
else:
# cparam is same, check if it has changed
if heuristic_match:
matchValue = heuristicControlParameterMatch(cp1, cp2)
if matchValue != 1:
cparamChanges.append(((cp1.db_name,cp1.db_value),
(cp2.db_name,cp2.db_value)))
else:
cparamChanges.append(((cp1.db_name,cp1.db_value),
(cp2.db_name,cp2.db_value)))
for cp2 in m2_cparams:
            # see if m1 has cp2, too
if m1.db_get_controlParameter(cp2.db_id) is None:
m2_unmatched.append(cp2)
else:
m1_unmatched.extend(m1_cparams)
m2_unmatched.extend(m2_cparams)
# functionMatch = True
# f1_params = f1.db_get_parameters()
# f2_params = f2.db_get_parameters()
# for p1 in f1_params:
# if f2.db_get_parameter(p1.db_id) is None:
# functionMatch = False
# m1_unmatched.append(f1)
# break
# for p2 in f2_params:
# if f1.db_get_parameter(p2.db_id) is None:
# functionMatch = False
# m2_unmatched.append(f2)
# break
# if functionMatch:
if len(m1_unmatched) + len(m2_unmatched) > 0:
if heuristic_match and len(m1_unmatched) > 0 and len(m2_unmatched) > 0:
# do heuristic matches
            for cp1 in m1_unmatched[:]:
                match = None
                matchValue = 0
                for cp2 in m2_unmatched:
                    matchValue = heuristicControlParameterMatch(cp1, cp2)
                    if matchValue == 1:
                        # best match so quit
                        match = cp2
                        break
                    elif matchValue == 0:
                        # match, but not exact so continue to look
                        match = cp2
                if match is not None:
                    if matchValue != 1:
                        cparamChanges.append(((cp1.db_name,cp1.db_value),
                                              (match.db_name,match.db_value)))
                    m1_unmatched.remove(cp1)
                    m2_unmatched.remove(match)
for cp in m1_unmatched:
cparamChanges.append(((cp.db_name,cp.db_value), (None, None)))
for cp in m2_unmatched:
cparamChanges.append(((None, None), (cp.db_name,cp.db_value)))
return cparamChanges
def getAnnotationChanges(m1, m2, same_vt=True, heuristic_match=True):
annotChanges = []
# need to check to see if any children of m1 and m2 are affected
m1_annots = m1.db_get_annotations()
m2_annots = m2.db_get_annotations()
m1_unmatched = []
m2_unmatched = []
if same_vt:
for a1 in m1_annots:
            # see if m2 has a1, too
a2 = m2.db_get_annotation(a1.db_id)
if a2 is None:
m1_unmatched.append(a1)
else:
                # annotation is same, check if it has changed
if heuristic_match:
matchValue = heuristicAnnotationMatch(a1, a2)
if matchValue != 1:
annotChanges.append(((a1.db_key,a1.db_value),
(a2.db_key,a2.db_value)))
else:
annotChanges.append(((a1.db_key,a1.db_value),
(a2.db_key,a2.db_value)))
for a2 in m2_annots:
            # see if m1 has a2, too
if m1.db_get_annotation(a2.db_id) is None:
m2_unmatched.append(a2)
else:
m1_unmatched.extend(m1_annots)
m2_unmatched.extend(m2_annots)
# functionMatch = True
# f1_params = f1.db_get_parameters()
# f2_params = f2.db_get_parameters()
# for p1 in f1_params:
# if f2.db_get_parameter(p1.db_id) is None:
# functionMatch = False
# m1_unmatched.append(f1)
# break
# for p2 in f2_params:
# if f1.db_get_parameter(p2.db_id) is None:
# functionMatch = False
# m2_unmatched.append(f2)
# break
# if functionMatch:
if len(m1_unmatched) + len(m2_unmatched) > 0:
if heuristic_match and len(m1_unmatched) > 0 and len(m2_unmatched) > 0:
# do heuristic matches
            for a1 in m1_unmatched[:]:
                match = None
                matchValue = 0
                for a2 in m2_unmatched:
                    matchValue = heuristicAnnotationMatch(a1, a2)
                    if matchValue == 1:
                        # best match so quit
                        match = a2
                        break
                    elif matchValue == 0:
                        # match, but not exact so continue to look
                        match = a2
                if match is not None:
                    if matchValue != 1:
                        annotChanges.append(((a1.db_key,a1.db_value),
                                             (match.db_key,match.db_value)))
                    m1_unmatched.remove(a1)
                    m2_unmatched.remove(match)
for cp in m1_unmatched:
annotChanges.append(((cp.db_key,cp.db_value), (None, None)))
for cp in m2_unmatched:
annotChanges.append(((None, None), (cp.db_key,cp.db_value)))
return annotChanges
def getOldObjId(operation):
if operation.vtType == 'change':
return operation.db_oldObjId
return operation.db_objectId
def getNewObjId(operation):
if operation.vtType == 'change':
return operation.db_newObjId
return operation.db_objectId
def setOldObjId(operation, id):
if operation.vtType == 'change':
operation.db_oldObjId = id
else:
operation.db_objectId = id
def setNewObjId(operation, id):
if operation.vtType == 'change':
operation.db_newObjId = id
else:
operation.db_objectId = id
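# getWorkflowDiffCommon diffs two versions of the same vistrail.  It returns a
# 13-tuple: the two materialized workflows, the shared and heuristically
# matched module pairs, the modules unique to each side, the parameter /
# control-parameter / annotation changes for matched modules, and the shared,
# matched, and side-only connection pairs.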
def getWorkflowDiffCommon(vistrail, v1, v2, heuristic_match=True):
(sharedOps, vOnlyOps) = \
getVersionDifferences(vistrail, [v1, v2])
sharedWorkflow = DBWorkflow()
performAdds(sharedOps, sharedWorkflow)
# FIXME better to do additional ops (and do deletes) or do this?
v1Workflow = DBWorkflow()
v1Ops = vOnlyOps[0][2]
performAdds(v1Ops, v1Workflow)
v2Workflow = DBWorkflow()
v2Ops = vOnlyOps[1][2]
performAdds(v2Ops, v2Workflow)
# FIXME connections do not check their ports
sharedModuleIds = []
sharedConnectionIds = []
sharedFunctionIds = {}
sharedCParameterIds = {}
sharedAnnotationIds = {}
for op in sharedOps:
if op.what == 'module' or op.what == 'abstraction' or \
op.what == 'group':
sharedModuleIds.append(getNewObjId(op))
elif op.what == 'connection':
sharedConnectionIds.append(getNewObjId(op))
elif op.what == 'function':
sharedFunctionIds[getNewObjId(op)] = op.db_parentObjId
elif op.what == 'controlParameter':
sharedCParameterIds[getNewObjId(op)] = op.db_parentObjId
elif op.what == 'annotation':
sharedAnnotationIds[getNewObjId(op)] = op.db_parentObjId
vOnlyModules = []
vOnlyConnections = []
paramChgModules = {}
cparamChgModules = {}
annotChgModules = {}
for (vAdds, vDeletes, _) in vOnlyOps:
moduleDeleteIds = []
connectionDeleteIds = []
for op in vDeletes:
if op.what == 'module' or op.what == 'abstraction' or \
op.what == 'group':
moduleDeleteIds.append(getOldObjId(op))
if getOldObjId(op) in sharedModuleIds:
sharedModuleIds.remove(getOldObjId(op))
if paramChgModules.has_key(getOldObjId(op)):
del paramChgModules[getOldObjId(op)]
elif op.what == 'function' and \
(op.db_parentObjType == 'module' or
op.db_parentObjType == 'abstraction' or
op.db_parentObjType == 'group') and \
op.db_parentObjId in sharedModuleIds:
# have a function change
paramChgModules[op.db_parentObjId] = None
sharedModuleIds.remove(op.db_parentObjId)
elif op.what == 'parameter' and op.db_parentObjType == 'function' \
and sharedFunctionIds.has_key(op.db_parentObjId):
# have a parameter change
moduleId = sharedFunctionIds[op.db_parentObjId]
if moduleId in sharedModuleIds:
paramChgModules[moduleId] = None
sharedModuleIds.remove(moduleId)
elif op.what == 'controlParameter' and \
(op.db_parentObjType == 'module' or
op.db_parentObjType == 'abstraction' or
op.db_parentObjType == 'group') and \
op.db_parentObjId in sharedCParameterIds and \
op.db_parentObjId in sharedModuleIds:
# have a control parameter change
cparamChgModules[op.db_parentObjId] = None
sharedModuleIds.remove(op.db_parentObjId)
elif op.what == 'annotation' and \
(op.db_parentObjType == 'module' or
op.db_parentObjType == 'abstraction' or
op.db_parentObjType == 'group') and \
op.db_parentObjId in sharedAnnotationIds and \
op.db_parentObjId in sharedModuleIds:
# have an annotation change
annotChgModules[op.db_parentObjId] = None
sharedModuleIds.remove(op.db_parentObjId)
elif op.what == 'connection':
connectionDeleteIds.append(getOldObjId(op))
if getOldObjId(op) in sharedConnectionIds:
sharedConnectionIds.remove(getOldObjId(op))
moduleAddIds = []
connectionAddIds = []
for op in vAdds:
if op.what == 'module' or op.what == 'abstraction' or \
op.what == 'group':
moduleAddIds.append(getNewObjId(op))
elif (op.what == 'function' and
(op.db_parentObjType == 'module' or
op.db_parentObjType == 'abstraction' or
op.db_parentObjType == 'group') and
op.db_parentObjId in sharedModuleIds):
# have a function change
paramChgModules[op.db_parentObjId] = None
sharedModuleIds.remove(op.db_parentObjId)
elif op.what == 'parameter' and op.db_parentObjType == 'function' \
and sharedFunctionIds.has_key(op.db_parentObjId):
# have a parameter change
moduleId = sharedFunctionIds[op.db_parentObjId]
if moduleId in sharedModuleIds:
paramChgModules[moduleId] = None
sharedModuleIds.remove(moduleId)
elif (op.what == 'controlParameter' and
(op.db_parentObjType == 'module' or
op.db_parentObjType == 'abstraction' or
op.db_parentObjType == 'group') and
op.db_parentObjId in sharedCParameterIds and
op.db_parentObjId in sharedModuleIds):
# have a control parameter change
cparamChgModules[op.db_parentObjId] = None
sharedModuleIds.remove(op.db_parentObjId)
elif (op.what == 'annotation' and
(op.db_parentObjType == 'module' or
op.db_parentObjType == 'abstraction' or
op.db_parentObjType == 'group') and
op.db_parentObjId in sharedAnnotationIds and
op.db_parentObjId in sharedModuleIds):
# have an annotation change
annotChgModules[op.db_parentObjId] = None
sharedModuleIds.remove(op.db_parentObjId)
elif op.what == 'connection':
                connectionAddIds.append(getNewObjId(op))
vOnlyModules.append((moduleAddIds, moduleDeleteIds))
vOnlyConnections.append((connectionAddIds, connectionDeleteIds))
sharedModulePairs = [(id, id) for id in sharedModuleIds]
v1Only = vOnlyModules[0][0]
v2Only = vOnlyModules[1][0]
for id in vOnlyModules[1][1]:
if id not in vOnlyModules[0][1]:
v1Only.append(id)
for id in vOnlyModules[0][1]:
if id not in vOnlyModules[1][1]:
v2Only.append(id)
sharedConnectionPairs = [(id, id) for id in sharedConnectionIds]
c1Only = vOnlyConnections[0][0]
c2Only = vOnlyConnections[1][0]
for id in vOnlyConnections[1][1]:
if id not in vOnlyConnections[0][1]:
c1Only.append(id)
for id in vOnlyConnections[0][1]:
if id not in vOnlyConnections[1][1]:
c2Only.append(id)
paramChgModulePairs = [(id, id) for id in paramChgModules.keys()]
cparamChgModulePairs = [(id, id) for id in cparamChgModules.keys()]
annotChgModulePairs = [(id, id) for id in annotChgModules.keys()]
# print "^^^^ SHARED MODULE PAIRS:", sharedModulePairs
c1Only, c2Only, heuristicConnectionPairs = [], [], []
if heuristic_match:
(heuristicModulePairs, heuristicConnectionPairs, v1Only, v2Only, \
c1Only, c2Only) = do_heuristic_diff(v1Workflow, v2Workflow, \
v1Only, v2Only, \
c1Only, c2Only)
paramChgModulePairs.extend(heuristicModulePairs)
cparamChgModulePairs.extend(heuristicModulePairs)
annotChgModulePairs.extend(heuristicModulePairs)
allChgModulePairs = list(set(chain(paramChgModulePairs,
cparamChgModulePairs,
annotChgModulePairs)))
(heuristicModulePairs, paramChanges, cparam_changes, annot_changes) = \
check_params_diff(v1Workflow, v2Workflow, allChgModulePairs,
True, heuristic_match)
return (v1Workflow, v2Workflow,
sharedModulePairs, heuristicModulePairs, v1Only, v2Only,
paramChanges, cparam_changes, annot_changes,
sharedConnectionPairs, heuristicConnectionPairs,
c1Only, c2Only)
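# do_heuristic_diff tries to pair up the modules and connections that the
# id-based diff left unmatched, using the heuristic*Match functions above; it
# returns the matched module and connection pairs plus the ids that remain
# unmatched on each side.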
def do_heuristic_diff(v1Workflow, v2Workflow, v1_modules, v2_modules,
v1_connections, v2_connections):
# add heuristic matches
heuristicModulePairs = []
heuristicConnectionPairs = []
v1Only = copy.copy(v1_modules)
v2Only = copy.copy(v2_modules)
c1Only = copy.copy(v1_connections)
c2Only = copy.copy(v2_connections)
# we now check all heuristic pairs for parameter changes
# match modules
# for (m1_id, m2_id) in paramChgModulePairs[:]:
# m1 = v1Workflow.db_get_module(m1_id)
# m2 = v2Workflow.db_get_module(m2_id)
# if heuristicModuleMatch(m1, m2) == 1:
# # paramChgModulePairs.remove((m1_id, m2_id))
# # heuristicModulePairs.append((m1_id, m2_id))
# pass
for m1_id in v1Only[:]:
m1 = v1Workflow.db_get_module(m1_id)
match = None
for m2_id in v2Only:
m2 = v2Workflow.db_get_module(m2_id)
isMatch = heuristicModuleMatch(m1, m2)
if isMatch == 1:
match = (m1_id, m2_id)
break
elif isMatch == 0:
match = (m1_id, m2_id)
if match is not None:
v1Only.remove(match[0])
v2Only.remove(match[1])
# we now check all heuristic pairs for parameter changes
heuristicModulePairs.append(match)
# match connections
for c1_id in c1Only[:]:
c1 = v1Workflow.db_get_connection(c1_id)
match = None
for c2_id in c2Only:
c2 = v2Workflow.db_get_connection(c2_id)
isMatch = heuristicConnectionMatch(c1, c2)
if isMatch == 1:
match = (c1_id, c2_id)
break
elif isMatch == 0:
match = (c1_id, c2_id)
if match is not None:
# don't have port changes yet
c1Only.remove(match[0])
c2Only.remove(match[1])
heuristicConnectionPairs.append(match)
return (heuristicModulePairs, heuristicConnectionPairs, v1Only, v2Only,
c1Only, c2Only)
def check_params_diff(v1Workflow, v2Workflow, paramChgModulePairs,
same_vt=True, heuristic_match=True):
matched = []
paramChanges = []
cparamChanges = []
annotChanges = []
# print "^^^^ PARAM CHG PAIRS:", paramChgModulePairs
for (m1_id, m2_id) in paramChgModulePairs:
m1 = v1Workflow.db_get_module(m1_id)
m2 = v2Workflow.db_get_module(m2_id)
moduleParamChanges = getParamChanges(m1, m2, same_vt, heuristic_match)
if len(moduleParamChanges) > 0:
paramChanges.append(((m1_id, m2_id), moduleParamChanges))
moduleCParamChanges = getCParamChanges(m1, m2, same_vt,
heuristic_match)
if len(moduleCParamChanges) > 0:
cparamChanges.append(((m1_id, m2_id), moduleCParamChanges))
moduleAnnotChanges = getAnnotationChanges(m1, m2, same_vt,
heuristic_match)
if len(moduleAnnotChanges) > 0:
annotChanges.append(((m1_id, m2_id), moduleAnnotChanges))
if len(moduleParamChanges) == len(moduleCParamChanges) == \
len(moduleAnnotChanges) == 0:
# heuristicModulePairs.append((m1_id, m2_id))
matched.append((m1_id, m2_id))
return (matched, paramChanges, cparamChanges, annotChanges)
def getWorkflowDiff(vt_pair_1, vt_pair_2, heuristic_match=True):
(vistrail_1, v_1) = vt_pair_1
(vistrail_2, v_2) = vt_pair_2
if vistrail_1 == vistrail_2:
return getWorkflowDiffCommon(vistrail_1, v_1, v_2, heuristic_match)
workflow_1 = materializeWorkflow(vistrail_1, v_1)
workflow_2 = materializeWorkflow(vistrail_2, v_2)
modules_1 = workflow_1.db_modules_id_index.keys()
modules_2 = workflow_2.db_modules_id_index.keys()
conns_1 = workflow_1.db_connections_id_index.keys()
conns_2 = workflow_2.db_connections_id_index.keys()
if heuristic_match:
(m_matches, c_matches, modules_1, modules_2, conns_1, conns_2) = \
do_heuristic_diff(workflow_1, workflow_2, modules_1, modules_2, \
conns_1, conns_2)
(m_matches, param_changes, cparam_changes, annot_changes) = \
check_params_diff(workflow_1, workflow_2,
m_matches, False,
heuristic_match)
return (workflow_1, workflow_2, [], m_matches, modules_1, modules_2,
param_changes, cparam_changes, annot_changes, [], c_matches, conns_1, conns_2)
return (workflow_1, workflow_2, [], [], modules_1, modules_2, [], [], [], [], [],
conns_1, conns_2)
################################################################################
class TestDBVistrailService(unittest.TestCase):
def test_parameter_heuristic(self):
from vistrails.core.vistrail.module_param import ModuleParam
param1 = ModuleParam(id=0, pos=0, type='String', val='abc')
param2 = ModuleParam(id=1, pos=0, type='String', val='abc')
param3 = ModuleParam(id=2, pos=1, type='Float', val='1.0')
param4 = ModuleParam(id=3, pos=0, type='String', val='def')
param5 = ModuleParam(id=4, pos=1, type='String', val='abc')
# test basic equality
assert heuristicParameterMatch(param1, param2) == 1
# test basic inequality
assert heuristicParameterMatch(param1, param3) == -1
# test partial match
assert heuristicParameterMatch(param1, param4) == 0
# test position inequality
assert heuristicParameterMatch(param1, param5) == -1
def test_function_heuristic(self):
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.vistrail.module_function import ModuleFunction
param1 = ModuleParam(id=0, pos=0, type='String', val='abc')
param2 = ModuleParam(id=1, pos=1, type='Float', val='1.0')
param3 = ModuleParam(id=2, pos=0, type='String', val='abc')
param4 = ModuleParam(id=3, pos=1, type='Float', val='1.0')
param5 = ModuleParam(id=4, pos=0, type='String', val='abc')
param6 = ModuleParam(id=5, pos=1, type='Float', val='2.0')
function1 = ModuleFunction(name='f1', parameters=[param1, param2])
function2 = ModuleFunction(name='f1', parameters=[param3, param4])
function3 = ModuleFunction(name='f1', parameters=[param5, param6])
function4 = ModuleFunction(name='f2', parameters=[param1, param2])
function5 = ModuleFunction(name='f1', parameters=[param1])
# test basic equality
assert heuristicFunctionMatch(function1, function2) == 1
# test partial match
assert heuristicFunctionMatch(function1, function3) == 0
# test basic inequality
assert heuristicFunctionMatch(function1, function4) == -1
# test length inequality
assert heuristicFunctionMatch(function1, function5) == 0
def test_module_heuristic(self):
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module import Module
param1 = ModuleParam(id=0, pos=0, type='String', val='abc')
param2 = ModuleParam(id=1, pos=1, type='Float', val='1.0')
param3 = ModuleParam(id=2, pos=0, type='String', val='abc')
param4 = ModuleParam(id=3, pos=1, type='Float', val='1.0')
param5 = ModuleParam(id=4, pos=0, type='Integer', val='2')
param6 = ModuleParam(id=5, pos=0, type='Integer', val='2')
function1 = ModuleFunction(name='f1', parameters=[param1, param2])
function2 = ModuleFunction(name='f1', parameters=[param3, param4])
function3 = ModuleFunction(name='f2', parameters=[param5])
function4 = ModuleFunction(name='f2', parameters=[param6])
function5 = ModuleFunction(name='f1', parameters=[param2, param4])
function6 = ModuleFunction(name='f2', parameters=[param5])
module1 = Module(name='m1', functions=[function1, function3])
module2 = Module(name='m1', functions=[function2, function4])
module3 = Module(name='m2', functions=[function1, function2])
module4 = Module(name='m1', functions=[function5])
module5 = Module(name='m1', functions=[function5, function6])
# test basic equality
assert heuristicModuleMatch(module1, module2) == 1
# test basic inequality
assert heuristicModuleMatch(module1, module3) == -1
# test length inequality
assert heuristicModuleMatch(module1, module4) == 0
# test parameter change inequality
assert heuristicModuleMatch(module1, module5) == 0
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e3b82867cfa2a65972d30880db1dbbdc",
"timestamp": "",
"source": "github",
"line_count": 1597,
"max_line_length": 96,
"avg_line_length": 44.69693174702567,
"alnum_prop": 0.518065031310853,
"repo_name": "celiafish/VisTrails",
"id": "5b453fd48385d4f2201599e64c1fa44121c684f8",
"size": "73261",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/db/services/vistrail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
import testtools
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
class CLITestArgs(testtools.TestCase):
def test_empty(self):
_mydict = neutronV20.parse_args_to_dict([])
self.assertEqual({}, _mydict)
def test_default_bool(self):
_specs = ['--my_bool', '--arg1', 'value1']
_mydict = neutronV20.parse_args_to_dict(_specs)
self.assertTrue(_mydict['my_bool'])
def test_bool_true(self):
_specs = ['--my-bool', 'type=bool', 'true', '--arg1', 'value1']
_mydict = neutronV20.parse_args_to_dict(_specs)
self.assertTrue(_mydict['my_bool'])
def test_bool_false(self):
_specs = ['--my_bool', 'type=bool', 'false', '--arg1', 'value1']
_mydict = neutronV20.parse_args_to_dict(_specs)
self.assertFalse(_mydict['my_bool'])
def test_nargs(self):
_specs = ['--tag', 'x', 'y', '--arg1', 'value1']
_mydict = neutronV20.parse_args_to_dict(_specs)
self.assertTrue('x' in _mydict['tag'])
self.assertTrue('y' in _mydict['tag'])
def test_badarg(self):
_specs = ['--tag=t', 'x', 'y', '--arg1', 'value1']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
def test_badarg_with_minus(self):
_specs = ['--arg1', 'value1', '-D']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
def test_goodarg_with_minus_number(self):
_specs = ['--arg1', 'value1', '-1', '-1.0']
_mydict = neutronV20.parse_args_to_dict(_specs)
self.assertEqual(['value1', '-1', '-1.0'],
_mydict['arg1'])
def test_badarg_duplicate(self):
_specs = ['--tag=t', '--arg1', 'value1', '--arg1', 'value1']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
def test_badarg_early_type_specification(self):
_specs = ['type=dict', 'key=value']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
def test_arg(self):
_specs = ['--tag=t', '--arg1', 'value1']
self.assertEqual('value1',
neutronV20.parse_args_to_dict(_specs)['arg1'])
def test_dict_arg(self):
_specs = ['--tag=t', '--arg1', 'type=dict', 'key1=value1,key2=value2']
arg1 = neutronV20.parse_args_to_dict(_specs)['arg1']
self.assertEqual('value1', arg1['key1'])
self.assertEqual('value2', arg1['key2'])
def test_dict_arg_with_attribute_named_type(self):
_specs = ['--tag=t', '--arg1', 'type=dict', 'type=value1,key2=value2']
arg1 = neutronV20.parse_args_to_dict(_specs)['arg1']
self.assertEqual('value1', arg1['type'])
self.assertEqual('value2', arg1['key2'])
def test_list_of_dict_arg(self):
_specs = ['--tag=t', '--arg1', 'type=dict',
'list=true', 'key1=value1,key2=value2']
arg1 = neutronV20.parse_args_to_dict(_specs)['arg1']
self.assertEqual('value1', arg1[0]['key1'])
self.assertEqual('value2', arg1[0]['key2'])
def test_clear_action(self):
_specs = ['--anyarg', 'action=clear']
args = neutronV20.parse_args_to_dict(_specs)
self.assertEqual(None, args['anyarg'])
def test_bad_values_str(self):
_specs = ['--strarg', 'type=str']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
def test_bad_values_list(self):
_specs = ['--listarg', 'list=true', 'type=str']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
_specs = ['--listarg', 'type=list']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
_specs = ['--listarg', 'type=list', 'action=clear']
self.assertRaises(exceptions.CommandError,
neutronV20.parse_args_to_dict, _specs)
|
{
"content_hash": "e3e3467fa1e7db6b1b8bd9ec7aa7797a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 40.407766990291265,
"alnum_prop": 0.567275348390197,
"repo_name": "neumerance/deploy",
"id": "054d533b2a111427100c0ec3cac91369b70f0c8a",
"size": "4837",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/neutronclient/tests/unit/test_casual_args.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from time import sleep
from flask import jsonify
from arps_old.models import User
from arps_old.restserver import app
def __fields_as_dict(o, fields):
return {field: getattr(o, field) for field in fields}
@app.route('/api/v1.0/users', methods=['GET', 'POST'])
def api_users():
def __to_json(user):
return __fields_as_dict(user, ['id', 'name', 'email', 'active'])
sleep(3)
users = User.query.all()
response = jsonify([__to_json(user) for user in users])
response.headers.add('Access-Control-Allow-Origin', '*')
return response
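# Example response body (illustrative values only):
#   [{"id": 1, "name": "alice", "email": "alice@example.com", "active": true}]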
|
{
"content_hash": "7191656acbda1f0dce308c7dbb45d03e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 25.863636363636363,
"alnum_prop": 0.6572934973637962,
"repo_name": "sumpfgottheit/arps",
"id": "78ac61da0c7f0a75037ff7430fcb624d643e9a12",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arps_old/restserver/views/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1069773"
},
{
"name": "HTML",
"bytes": "62088"
},
{
"name": "JavaScript",
"bytes": "649216"
},
{
"name": "Python",
"bytes": "90610"
},
{
"name": "Shell",
"bytes": "588"
}
],
"symlink_target": ""
}
|
import numpy as np
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.constrained import ConstrainedQuadraticModel
from typing import Tuple
__all__ = ['random_multi_knapsack']
def random_multi_knapsack(num_items: int,
num_bins: int,
seed: int = 32,
value_range: Tuple[int, int] = (10, 50),
weight_range: Tuple[int, int] = (10, 50),
) -> ConstrainedQuadraticModel:
"""Generate a constrained quadratic model encoding a multiple-knapsack
problem.
Given the number of items and the number of bins, generates a
multiple-knapsack problem, formulated as a :class:`~dimod.ConstrainedQuadraticModel`.
Values and weights for each item are uniformly sampled within the specified
ranges. Capacities of bins are randomly assigned.
Args:
num_items: Number of items.
num_bins: Number of bins.
seed: Seed for RNG.
value_range: Range of the randomly generated values for each item.
weight_range: Range of the randomly generated weights for each item.
Returns:
A constrained quadratic model encoding the multiple-knapsack problem.
Variables are labelled as ``x_{i}_{j}``, where ``x_{i}_{j} == 1`` means
that item ``i`` is placed in bin ``j``.
"""
rng = np.random.RandomState(seed)
weights = rng.randint(*weight_range, num_items)
values = rng.randint(*value_range, num_items)
cap_low = int(weight_range[0] * num_items / num_bins)
cap_high = int(weight_range[1] * num_items / num_bins)
capacities = rng.randint(cap_low, cap_high, num_bins)
model = ConstrainedQuadraticModel()
obj = BinaryQuadraticModel(vartype='BINARY')
x = {(i, j): obj.add_variable(f'x_{i}_{j}') for i in range(num_items) for j in range(num_bins)}
for i in range(num_items):
for j in range(num_bins):
obj.set_linear(x[(i, j)], -values[i])
model.set_objective(obj)
# Each item at most goes to one bin.
for i in range(num_items):
model.add_constraint([(x[(i, j)], 1) for j in range(num_bins)] + [(-1,)], sense="<=",
label='item_placing_{}'.format(i))
# Build knapsack capacity constraints
for j in range(num_bins):
model.add_constraint(
[(x[(i, j)], weights[i]) for i in range(num_items)] + [(-capacities[j],)],
sense="<=", label='capacity_bin_{}'.format(j))
return model
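# Illustrative usage sketch (not part of the original module): build a small
# instance and inspect its size.
if __name__ == '__main__':
    cqm = random_multi_knapsack(num_items=10, num_bins=2, seed=42)
    print(len(cqm.variables))    # 10 items x 2 bins = 20 binary variables
    print(len(cqm.constraints))  # 10 placement + 2 capacity = 12 constraints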
|
{
"content_hash": "9883746a7460e2c783ef5458a972e9fd",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 99,
"avg_line_length": 34.41891891891892,
"alnum_prop": 0.6101295641931684,
"repo_name": "dwavesystems/dimod",
"id": "c9584dd3846d10279477cfe6c6281eb3f297c706",
"size": "3158",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dimod/generators/multi_knapsack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "239337"
},
{
"name": "Cython",
"bytes": "184652"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "Python",
"bytes": "1411314"
}
],
"symlink_target": ""
}
|
from . import consumer
from . import exceptions
from . import interfaces
from . import producer
from . import records
__all__ = ['client',
'consumer',
'exceptions',
'producer',
'records']
|
{
"content_hash": "f62187f6c2a60a04f3925557e4084269",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 24,
"avg_line_length": 21.181818181818183,
"alnum_prop": 0.5793991416309013,
"repo_name": "dnguyen0304/clare",
"id": "a0951f441920efd6dc0b3a349a77c4fa03f245d5",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clare/clare/common/messaging/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "185515"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import logging
import os
import readline
import sys
from getpass import getpass
def load_config(config_file, silent=False, default_config=None, default_config_file=None):
if default_config:
config = copy.copy(default_config)
else:
config = dict()
config_path = os.path.expanduser(config_file)
if os.path.exists(config_path):
with open(config_path, 'r') as fp:
config.update(json.load(fp))
elif not silent:
if config_file == default_config_file:
logging.warning('No config file found, using default config...')
else:
raise OSError('Config file not found: {}'.format(config_file))
return config
class AutoCompleter(object):
"""
Autocompleter for list completion
"""
def __init__(self, options):
self.options = sorted(options)
self.matches = list()
def complete(self, text, state):
if state == 0:
if text:
self.matches = [s for s in self.options if s and s.startswith(text)]
else:
self.matches = self.options[:]
try:
return self.matches[state]
except IndexError:
return None
def ask_yesno(label, default="yes"):
valid = {"yes": True, "y": True, "Y": True,
"no": False, "n": False, "N": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(label + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Invalid input\n")
def ask_string(label, default=None):
string = None
while not string:
string = raw_input(label + ' ')
if not string and default:
return default
return string
def ask_password(label, default=None):
string = None
while not string:
string = getpass(label + ' ')
if not string and default:
return default
return string
def ask_list(label, items, alt=None, default=None):
completer = AutoCompleter(items)
readline.set_completer(completer.complete)
readline.set_completer_delims('')
readline.parse_and_bind('tab: complete')
item = None
while not item:
item = ask_string(label, default=default)
if item not in items:
if alt and item in alt:
item = items[alt.index(item)]
else:
print("Invalid entry (try pressing TAB)")
item = None
readline.set_completer(None)
return item
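# Illustrative usage sketch (interactive, so shown only as a comment):
#   backend = ask_list('Choose a backend:', ['tensorflow', 'theano'],
#                      default='tensorflow')
#   if ask_yesno('Start the benchmark now?', default='yes'):
#       ...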
|
{
"content_hash": "eeeed9e226e825df9f8ced4ff854f488",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 90,
"avg_line_length": 26.56637168141593,
"alnum_prop": 0.583944037308461,
"repo_name": "reinforceio/tensorforce-benchmark",
"id": "58c65b1871f30848ff1b363076d78425b1b0cdc5",
"size": "3689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_benchmark/cli/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95759"
}
],
"symlink_target": ""
}
|
from ksvd_wrapper import KSVD
|
{
"content_hash": "575aec3559025f8aaa130f6e3510efd5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.8333333333333334,
"repo_name": "guker/pyksvd",
"id": "03d13f7fc53e4ba82b7fe9b139ea91d28c15acdf",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ksvd/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
"""
Copyright (c) 2012 wong2 <wonderfuly@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from main import process
from controller import bots
# Used before restarting after an error: first clear the notifications from the error period
while True:
for bot in bots:
process(bot, True)
|
{
"content_hash": "dd9f0cf606d0f06ef9294fd7ce40367b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 38.125,
"alnum_prop": 0.7885245901639344,
"repo_name": "wangjun/xiaohuangji",
"id": "2a583476eba3a9e4125fff2ceb76eeae47f7d4c5",
"size": "1281",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "clear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106785"
},
{
"name": "Shell",
"bytes": "1729"
}
],
"symlink_target": ""
}
|
from heapq import heappush, heappop
from sys import maxint
# Represent each node as a list, ordering the elements so that a heap of nodes
# is ordered by f = g + h, with h as a first, greedy tie-breaker and num as a
# second, definite tie-breaker. Store the redundant g for fast and accurate
# calculations.
F, H, NUM, G, POS, OPEN, VALID, PARENT = xrange(8)
def astar(start_pos, neighbors, goal, start_g, cost, heuristic, limit=maxint,
debug=None):
"""Find the shortest path from start to goal.
Arguments:
start_pos - The starting position.
neighbors(pos) - A function returning all neighbor positions of the given
position.
goal(pos) - A function returning true given a goal position, false
otherwise.
start_g - The starting cost.
cost(a, b) - A function returning the cost for moving from one
position to another.
heuristic(pos) - A function returning an estimate of the total cost
remaining for reaching goal from the given position.
Overestimates can yield suboptimal paths.
limit - The maximum number of positions to search.
debug(nodes) - This function will be called with a dictionary of all
nodes.
The function returns the best path found. The returned path excludes the
starting position.
"""
# Create the start node.
nums = iter(xrange(maxint))
start_h = heuristic(start_pos)
start = [start_g + start_h, start_h, nums.next(), start_g, start_pos, True,
True, None]
# Track all nodes seen so far.
nodes = {start_pos: start}
# Maintain a heap of nodes.
heap = [start]
# Track the best path found so far.
best = start
while heap:
# Pop the next node from the heap.
current = heappop(heap)
current[OPEN] = False
# Have we reached the goal?
if goal(current[POS]):
best = current
break
# Visit the neighbors of the current node.
for neighbor_pos in neighbors(current[POS]):
neighbor_g = current[G] + cost(current[POS], neighbor_pos)
neighbor = nodes.get(neighbor_pos)
if neighbor is None:
# Limit the search.
if len(nodes) >= limit:
continue
# We have found a new node.
neighbor_h = heuristic(neighbor_pos)
neighbor = [neighbor_g + neighbor_h, neighbor_h, nums.next(),
neighbor_g, neighbor_pos, True, True, current[POS]]
nodes[neighbor_pos] = neighbor
heappush(heap, neighbor)
if neighbor_h < best[H]:
# We are approaching the goal.
best = neighbor
elif neighbor_g < neighbor[G]:
# We have found a better path to the neighbor.
if neighbor[OPEN]:
# The neighbor is already open. Finding and updating it
# in the heap would be a linear complexity operation.
# Instead we mark the neighbor as invalid and make an
# updated copy of it.
neighbor[VALID] = False
nodes[neighbor_pos] = neighbor = neighbor[:]
neighbor[F] = neighbor_g + neighbor[H]
neighbor[NUM] = nums.next()
neighbor[G] = neighbor_g
neighbor[VALID] = True
neighbor[PARENT] = current[POS]
heappush(heap, neighbor)
else:
# Reopen the neighbor.
neighbor[F] = neighbor_g + neighbor[H]
neighbor[G] = neighbor_g
neighbor[PARENT] = current[POS]
neighbor[OPEN] = True
heappush(heap, neighbor)
# Discard leading invalid nodes from the heap.
while heap and not heap[0][VALID]:
heappop(heap)
if debug is not None:
# Pass the dictionary of nodes to the caller.
debug(nodes)
# Return the best path as a list.
path = []
current = best
while current[PARENT] is not None:
path.append(current[POS])
current = nodes[current[PARENT]]
path.reverse()
return path
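# Illustrative usage sketch (not part of the original module): shortest path on
# a 4x4 grid with one partly blocked column, 4-connected moves, unit step cost,
# and a Manhattan-distance heuristic.
if __name__ == '__main__':
    walls = set([(1, 1), (1, 2), (1, 3)])
    def neighbors(pos):
        x, y = pos
        steps = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
        return [(nx, ny) for nx, ny in steps
                if 0 <= nx < 4 and 0 <= ny < 4 and (nx, ny) not in walls]
    def goal(pos):
        return pos == (3, 3)
    def cost(a, b):
        return 1
    def heuristic(pos):
        return abs(pos[0] - 3) + abs(pos[1] - 3)
    # The returned path excludes the starting position (0, 0).
    print(astar((0, 0), neighbors, goal, 0, cost, heuristic))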
|
{
"content_hash": "03a2b6e86cad403258467031d79ddf0d",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 34.72093023255814,
"alnum_prop": 0.549899531145345,
"repo_name": "MadeInPierre/RobotOS",
"id": "43cb7a5c668297a5bae98e8efba717a5ec6d98a4",
"size": "5571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot_navigation_pathfinder/src/Pathfinder/astar_pythonlib/astar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "137388"
},
{
"name": "CMake",
"bytes": "432499"
},
{
"name": "Common Lisp",
"bytes": "156639"
},
{
"name": "JavaScript",
"bytes": "63021"
},
{
"name": "Makefile",
"bytes": "360"
},
{
"name": "NewLisp",
"bytes": "10743"
},
{
"name": "Python",
"bytes": "110585"
},
{
"name": "Shell",
"bytes": "3581"
}
],
"symlink_target": ""
}
|
import os
import sys
import unittest
# Hack to support the usage of `coverage`
sys.path.append(os.path.abspath("."))
import lbuild
class BuildLogTest(unittest.TestCase):
def setUp(self):
self.repo = lbuild.repository.Repository(".")
self.repo.name = "repo"
self.module1 = lbuild.module.Module(self.repo, "module.lb", ".")
self.module1.name = "module1"
self.module1.path = "/m1"
self.module1.register_module()
self.module1a = lbuild.module.Module(self.repo, "module.lb", ".")
self.module1a.name = "module1a"
self.module1a.path = "/m1/a"
self.module1a.parent = "module1"
self.module1a.register_module()
self.module2 = lbuild.module.Module(self.repo, "module.lb", ".")
self.module2.name = "module2"
self.module2.path = "/m2"
self.module2.register_module()
def test_should_collect_operations(self):
log = lbuild.buildlog.BuildLog()
o1 = log.log(self.module1, "in1", "out1")
o2 = log.log(self.module1, "in2", "out2")
self.assertEqual(2, len(log.operations))
self.assertIn(o1, log.operations)
self.assertIn(o2, log.operations)
def test_should_raise_on_overwriting_a_file(self):
log = lbuild.buildlog.BuildLog()
log.log(self.module1, "in", "out")
self.assertRaises(lbuild.exception.BlobBuildException,
lambda: log.log(self.module1, "in", "out"))
def test_should_generate_xml(self):
log = lbuild.buildlog.BuildLog()
log.log(self.module1, "in1", "out1")
log.log(self.module2, "in2", "out2")
self.assertEqual(b"""<?xml version='1.0' encoding='UTF-8'?>
<buildlog>
<operation>
<module>repo:module1</module>
<source>in1</source>
<destination>out1</destination>
</operation>
<operation>
<module>repo:module2</module>
<source>in2</source>
<destination>out2</destination>
</operation>
</buildlog>
""", log.to_xml())
def test_should_provide_operations_per_module(self):
log = lbuild.buildlog.BuildLog()
o1 = log.log(self.module1, "in1", "out1")
o1a = log.log(self.module1a, "in1a", "out1a")
o2 = log.log(self.module2, "in2", "out2")
operations = log.get_operations_per_module("repo:module1")
self.assertIn(o1, operations)
self.assertIn(o1a, operations)
self.assertNotIn(o2, operations)
def test_should_create_local_path(self):
log = lbuild.buildlog.BuildLog()
o1 = log.log(self.module1, "/m1/in1", "out1")
self.assertEqual("/m1/in1", o1.filename_in)
self.assertEqual("in1", o1.filename_local_in)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "dc0839069bad52328cf219602084cf12",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 73,
"avg_line_length": 29.902173913043477,
"alnum_prop": 0.613958560523446,
"repo_name": "dergraaf/library-builder",
"id": "134fb6f84a79e8efec0f3c852f9fdb6775ef612b",
"size": "3023",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/buildlog_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "324"
},
{
"name": "Makefile",
"bytes": "1314"
},
{
"name": "Python",
"bytes": "162686"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# Form implementation generated from reading ui file 'acq4/analysis/modules/MosaicEditor/MosaicEditorTemplate.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(427, 123)
self.horizontalLayout = QtGui.QHBoxLayout(Form)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setSpacing(3)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.frame_3 = QtGui.QFrame(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(100)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_3.sizePolicy().hasHeightForWidth())
self.frame_3.setSizePolicy(sizePolicy)
self.frame_3.setObjectName(_fromUtf8("frame_3"))
self.gridLayout_2 = QtGui.QGridLayout(self.frame_3)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.atlasCombo = QtGui.QComboBox(self.frame_3)
self.atlasCombo.setObjectName(_fromUtf8("atlasCombo"))
self.atlasCombo.addItem(_fromUtf8(""))
self.gridLayout_2.addWidget(self.atlasCombo, 0, 0, 1, 1)
self.atlasLayout = QtGui.QGridLayout()
self.atlasLayout.setSpacing(0)
self.atlasLayout.setObjectName(_fromUtf8("atlasLayout"))
self.gridLayout_2.addLayout(self.atlasLayout, 1, 0, 1, 1)
self.horizontalLayout.addWidget(self.frame_3)
self.groupBox_2 = QtGui.QGroupBox(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout.setMargin(0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.normalizeBtn = QtGui.QPushButton(self.groupBox_2)
self.normalizeBtn.setObjectName(_fromUtf8("normalizeBtn"))
self.verticalLayout.addWidget(self.normalizeBtn)
self.blendBtn = QtGui.QPushButton(self.groupBox_2)
self.blendBtn.setObjectName(_fromUtf8("blendBtn"))
self.verticalLayout.addWidget(self.blendBtn)
self.autoRangeBtn = QtGui.QPushButton(self.groupBox_2)
self.autoRangeBtn.setObjectName(_fromUtf8("autoRangeBtn"))
self.verticalLayout.addWidget(self.autoRangeBtn)
self.tileShadingBtn = QtGui.QPushButton(self.groupBox_2)
self.tileShadingBtn.setObjectName(_fromUtf8("tileShadingBtn"))
self.verticalLayout.addWidget(self.tileShadingBtn)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout.addWidget(self.groupBox_2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(1)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.mosaicDisplayMin = QtGui.QDoubleSpinBox(self.groupBox)
self.mosaicDisplayMin.setMinimum(-1.0)
self.mosaicDisplayMin.setMaximum(65000.0)
self.mosaicDisplayMin.setSingleStep(0.25)
self.mosaicDisplayMin.setObjectName(_fromUtf8("mosaicDisplayMin"))
self.gridLayout.addWidget(self.mosaicDisplayMin, 0, 1, 1, 1)
self.label = QtGui.QLabel(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(100)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.mosaicDisplayMax = QtGui.QDoubleSpinBox(self.groupBox)
self.mosaicDisplayMax.setMaximum(65536.0)
self.mosaicDisplayMax.setSingleStep(0.25)
self.mosaicDisplayMax.setProperty("value", 2.0)
self.mosaicDisplayMax.setObjectName(_fromUtf8("mosaicDisplayMax"))
self.gridLayout.addWidget(self.mosaicDisplayMax, 1, 1, 1, 1)
self.mosaicApplyScaleBtn = QtGui.QPushButton(self.groupBox)
self.mosaicApplyScaleBtn.setObjectName(_fromUtf8("mosaicApplyScaleBtn"))
self.gridLayout.addWidget(self.mosaicApplyScaleBtn, 2, 0, 1, 2)
self.mosaicFlipUDBtn = QtGui.QPushButton(self.groupBox)
self.mosaicFlipUDBtn.setObjectName(_fromUtf8("mosaicFlipUDBtn"))
self.gridLayout.addWidget(self.mosaicFlipUDBtn, 3, 1, 1, 1)
self.mosaicFlipLRBtn = QtGui.QPushButton(self.groupBox)
self.mosaicFlipLRBtn.setObjectName(_fromUtf8("mosaicFlipLRBtn"))
self.gridLayout.addWidget(self.mosaicFlipLRBtn, 3, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem1, 4, 0, 1, 2)
self.horizontalLayout.addWidget(self.groupBox)
self.horizontalLayout.setStretch(0, 5)
self.horizontalLayout.setStretch(1, 1)
self.horizontalLayout.setStretch(2, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.atlasCombo.setItemText(0, _translate("Form", "Select Atlas...", None))
self.groupBox_2.setTitle(_translate("Form", "Image Correction", None))
self.normalizeBtn.setText(_translate("Form", "Normalize", None))
self.blendBtn.setText(_translate("Form", "Blend", None))
self.autoRangeBtn.setText(_translate("Form", "Auto Range", None))
self.tileShadingBtn.setText(_translate("Form", "Tile Shading", None))
self.groupBox.setTitle(_translate("Form", "Tile Operations", None))
self.label_2.setText(_translate("Form", "Min", None))
self.label.setText(_translate("Form", "Max", None))
self.mosaicApplyScaleBtn.setText(_translate("Form", "Apply Tile Scale", None))
self.mosaicFlipUDBtn.setText(_translate("Form", "FlipUD", None))
self.mosaicFlipLRBtn.setText(_translate("Form", "FlipLR", None))
|
{
"content_hash": "89bcb9282cc67293d450cadf9476a6ee",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 113,
"avg_line_length": 53.191489361702125,
"alnum_prop": 0.7062666666666667,
"repo_name": "meganbkratz/acq4",
"id": "b272fa799f6cad87d9c68639c1ae02288f95dbf8",
"size": "7524",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "acq4/analysis/modules/MosaicEditor/MosaicEditorTemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Arduino",
"bytes": "18651"
},
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "C",
"bytes": "705091"
},
{
"name": "C++",
"bytes": "321384"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Objective-C",
"bytes": "596020"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "5922488"
}
],
"symlink_target": ""
}
|
import unittest
import smtplib
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import sendgrid
from framework.email.tasks import send_email, _send_with_sendgrid
from website import settings
from tests.base import fake
from osf_tests.factories import fake_email
# Check if local mail server is running
SERVER_RUNNING = True
try:
s = smtplib.SMTP(settings.MAIL_SERVER)
s.quit()
except Exception as err:
SERVER_RUNNING = False
class TestEmail(unittest.TestCase):
@unittest.skipIf(not SERVER_RUNNING,
"Mailserver isn't running. Run \"invoke mailserver\".")
@unittest.skipIf(not settings.USE_EMAIL,
'settings.USE_EMAIL is False')
def test_sending_email(self):
assert_true(send_email('foo@bar.com', 'baz@quux.com', subject='no subject',
message='<h1>Greetings!</h1>', ttls=False, login=False))
def test_send_with_sendgrid_success(self):
mock_client = mock.MagicMock()
mock_client.send.return_value = 200, 'success'
from_addr, to_addr = fake_email(), fake_email()
category1, category2 = fake.word(), fake.word()
subject = fake.bs()
message = fake.text()
ret = _send_with_sendgrid(
from_addr=from_addr,
to_addr=to_addr,
subject=subject,
message=message,
mimetype='html',
client=mock_client,
categories=(category1, category2)
)
assert_true(ret)
assert_equal(mock_client.send.call_count, 1)
# First call's argument should be a Mail object with
# the correct configuration
first_call_arg = mock_client.send.call_args[0][0]
assert_is_instance(first_call_arg, sendgrid.Mail)
assert_equal(first_call_arg.from_email, from_addr)
assert_equal(first_call_arg.to[0], to_addr)
assert_equal(first_call_arg.subject, subject)
assert_in(message, first_call_arg.html)
# Categories are set
assert_equal(first_call_arg.smtpapi.data['category'], (category1, category2))
def test_send_with_sendgrid_failure_returns_false(self):
mock_client = mock.MagicMock()
mock_client.send.return_value = 400, 'failed'
from_addr, to_addr = fake_email(), fake_email()
subject = fake.bs()
message = fake.text()
ret = _send_with_sendgrid(
from_addr=from_addr,
to_addr=to_addr,
subject=subject,
message=message,
mimetype='html',
client=mock_client
)
assert_false(ret)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7d1ccde15c2d323d1d4e0edff82ce0ef",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 89,
"avg_line_length": 33.675,
"alnum_prop": 0.6117297698589458,
"repo_name": "caseyrollins/osf.io",
"id": "1ed14f35c003edfbf555acaf3fc33fc6e4c9fb1a",
"size": "2718",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/framework_tests/test_email.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93007"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "296984"
},
{
"name": "JavaScript",
"bytes": "1813961"
},
{
"name": "Mako",
"bytes": "676476"
},
{
"name": "Python",
"bytes": "8712355"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class PublicIPAddressSku(Model):
"""SKU of a public IP address.
:param name: Name of a public IP address SKU. Possible values include:
'Basic', 'Standard'
:type name: str or
~azure.mgmt.network.v2017_10_01.models.PublicIPAddressSkuName
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, name=None):
super(PublicIPAddressSku, self).__init__()
self.name = name
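# Illustrative construction (values are examples drawn from the docstring above):
#   PublicIPAddressSku(name='Basic') or PublicIPAddressSku(name='Standard')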
|
{
"content_hash": "41625b93f734fdc9931a6e9932b01452",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 26.210526315789473,
"alnum_prop": 0.6204819277108434,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "0adb0b21dcf35ac76236158114b2f2945234c488",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/public_ip_address_sku.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template1", \
path_list=[[TestAction.stop_vm, "vm1"], \
[TestAction.reinit_vm, "vm1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.reinit_vm, "vm1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.create_volume_snapshot, "vm1-root", "snapshot1"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.reinit_vm, "vm1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.create_volume_snapshot, "vm1-root", "snapshot2"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.use_volume_snapshot, "snapshot2"], \
[TestAction.start_vm, "vm1"], \
[TestAction.batch_delete_volume_snapshot, ["snapshot2"]]
])
|
{
"content_hash": "9a52bc05fff67792ea6366b2f8967585",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 51.57142857142857,
"alnum_prop": 0.4709141274238227,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "52ef1a1ff5d490e139cdf9dadac7b4e5a64fe2cd",
"size": "1083",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/snapshots/paths/path19.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import mock
import unittest2 as unittest
from quantum.rootwrap import filters
from quantum.rootwrap import wrapper
class RootwrapTestCase(unittest.TestCase):
def setUp(self):
super(RootwrapTestCase, self).setUp()
self.filters = [
filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'),
filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
filters.CommandFilter("/nonexistant/cat", "root"),
filters.CommandFilter("/bin/cat", "root")] # Keep this one last
def tearDown(self):
super(RootwrapTestCase, self).tearDown()
def test_RegExpFilter_match(self):
usercmd = ["ls", "/root"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertFalse(filtermatch is None)
self.assertEqual(filtermatch.get_command(usercmd),
["/bin/ls", "/root"])
def test_RegExpFilter_reject(self):
usercmd = ["ls", "root"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertTrue(filtermatch is None)
def test_missing_command(self):
valid_but_missing = ["foo_bar_not_exist"]
invalid = ["foo_bar_not_exist_and_not_matched"]
filtermatch = wrapper.match_filter(self.filters, valid_but_missing)
self.assertTrue(filtermatch is not None)
filtermatch = wrapper.match_filter(self.filters, invalid)
self.assertTrue(filtermatch is None)
def test_DnsmasqFilter(self):
usercmd = ['QUANTUM_RELAY_SOCKET_PATH=A', 'QUANTUM_NETWORK_ID=foobar',
'dnsmasq', 'foo']
f = filters.DnsmasqFilter("/usr/bin/dnsmasq", "root")
self.assertTrue(f.match(usercmd))
self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo'])
env = f.get_environment(usercmd)
self.assertEqual(env.get('QUANTUM_RELAY_SOCKET_PATH'), 'A')
self.assertEqual(env.get('QUANTUM_NETWORK_ID'), 'foobar')
def test_DnsmasqNetnsFilter(self):
usercmd = ['QUANTUM_RELAY_SOCKET_PATH=A', 'QUANTUM_NETWORK_ID=foobar',
'ip', 'netns', 'exec', 'foo', 'dnsmasq', 'foo']
f = filters.DnsmasqNetnsFilter("/sbin/ip", "root")
self.assertTrue(f.match(usercmd))
self.assertEqual(f.get_command(usercmd), ['/sbin/ip', 'netns', 'exec',
'foo', 'dnsmasq', 'foo'])
env = f.get_environment(usercmd)
self.assertEqual(env.get('QUANTUM_RELAY_SOCKET_PATH'), 'A')
self.assertEqual(env.get('QUANTUM_NETWORK_ID'), 'foobar')
def test_KillFilter(self):
p = subprocess.Popen(["/bin/sleep", "5"])
f = filters.KillFilter("root", "/bin/sleep", "-9", "-HUP")
f2 = filters.KillFilter("root", "/usr/bin/sleep", "-9", "-HUP")
usercmd = ['kill', '-ALRM', p.pid]
# Incorrect signal should fail
self.assertFalse(f.match(usercmd) or f2.match(usercmd))
usercmd = ['kill', p.pid]
# Providing no signal should fail
self.assertFalse(f.match(usercmd) or f2.match(usercmd))
# Providing matching signal should be allowed
usercmd = ['kill', '-9', p.pid]
self.assertTrue(f.match(usercmd) or f2.match(usercmd))
f = filters.KillFilter("root", "/bin/sleep")
f2 = filters.KillFilter("root", "/usr/bin/sleep")
usercmd = ['kill', os.getpid()]
# Our own PID does not match /bin/sleep, so it should fail
self.assertFalse(f.match(usercmd) or f2.match(usercmd))
usercmd = ['kill', 999999]
        # Nonexistent PID should fail
self.assertFalse(f.match(usercmd) or f2.match(usercmd))
usercmd = ['kill', p.pid]
# Providing no signal should work
self.assertTrue(f.match(usercmd) or f2.match(usercmd))
def test_KillFilter_no_raise(self):
"""Makes sure ValueError from bug 926412 is gone"""
f = filters.KillFilter("root", "")
# Providing anything other than kill should be False
usercmd = ['notkill', 999999]
self.assertFalse(f.match(usercmd))
# Providing something that is not a pid should be False
usercmd = ['kill', 'notapid']
self.assertFalse(f.match(usercmd))
def test_KillFilter_deleted_exe(self):
"""Makes sure deleted exe's are killed correctly"""
# See bug #1073768.
with mock.patch('os.readlink') as mock_readlink:
mock_readlink.return_value = '/bin/commandddddd (deleted)'
f = filters.KillFilter("root", "/bin/commandddddd")
usercmd = ['kill', 1234]
self.assertTrue(f.match(usercmd))
mock_readlink.assert_called_once_with("/proc/1234/exe")
def test_ReadFileFilter(self):
goodfn = '/good/file.name'
f = filters.ReadFileFilter(goodfn)
usercmd = ['cat', '/bad/file']
self.assertFalse(f.match(['cat', '/bad/file']))
usercmd = ['cat', goodfn]
self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
self.assertTrue(f.match(usercmd))
def test_IpFilter_non_netns(self):
f = filters.IpFilter('/sbin/ip', 'root')
self.assertTrue(f.match(['ip', 'link', 'list']))
def _test_IpFilter_netns_helper(self, action):
f = filters.IpFilter('/sbin/ip', 'root')
self.assertTrue(f.match(['ip', 'link', action]))
def test_IpFilter_netns_add(self):
self._test_IpFilter_netns_helper('add')
def test_IpFilter_netns_delete(self):
self._test_IpFilter_netns_helper('delete')
def test_IpFilter_netns_list(self):
self._test_IpFilter_netns_helper('list')
def test_IpNetnsExecFilter_match(self):
f = filters.IpNetnsExecFilter('/sbin/ip', 'root')
self.assertTrue(
f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']))
def test_IpNetnsExecFilter_nomatch(self):
f = filters.IpNetnsExecFilter('/sbin/ip', 'root')
self.assertFalse(f.match(['ip', 'link', 'list']))
def test_match_filter_recurses_exec_command_filter(self):
filter_list = [filters.IpNetnsExecFilter('/sbin/ip', 'root'),
filters.IpFilter('/sbin/ip', 'root')]
args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']
self.assertIsNotNone(wrapper.match_filter(filter_list, args))
    def test_match_filter_rejects_nested_exec_command_filter(self):
filter_list = [filters.IpNetnsExecFilter('/sbin/ip', 'root'),
filters.IpFilter('/sbin/ip', 'root')]
args = ['ip', 'netns', 'exec', 'foo', 'ip', 'netns', 'exec', 'bar',
'ip', 'link', 'list']
self.assertIsNone(wrapper.match_filter(filter_list, args))
def test_skips(self):
# Check that all filters are skipped and that the last matches
usercmd = ["cat", "/"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertTrue(filtermatch is self.filters[-1])
|
{
"content_hash": "235fee96225112c239fde59a0fd14ad9",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 42.6566265060241,
"alnum_prop": 0.6040107329473238,
"repo_name": "FreescaleSemiconductor/quantum",
"id": "fdede5877e54f0e95888a9cb0dd67fed8e1d9892",
"size": "7735",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/folsom",
"path": "quantum/tests/unit/test_rootwrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "2523404"
},
{
"name": "Scala",
"bytes": "3005"
},
{
"name": "Shell",
"bytes": "7879"
}
],
"symlink_target": ""
}
|
from bears.css.CSSAutoPrefixBear import CSSAutoPrefixBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
good_file = """
.example {
display: -webkit-box;
display: -ms-flexbox;
display: flex;
}
"""
bad_file = """
.example {
display: flex;
}
"""
CSSAutoPrefixBearTest = verify_local_bear(CSSAutoPrefixBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
|
{
"content_hash": "880ca5e98feef205918873025bd1aff9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.5875,
"repo_name": "IPMITMO/statan",
"id": "15893760e6dbcd585a201e5bf3e46095092523ce",
"size": "480",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "coala-bears/tests/css/CSSAutoPrefixBearTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "101"
},
{
"name": "Batchfile",
"bytes": "10931"
},
{
"name": "C",
"bytes": "28190"
},
{
"name": "C#",
"bytes": "45474"
},
{
"name": "C++",
"bytes": "335"
},
{
"name": "CSS",
"bytes": "6631"
},
{
"name": "Go",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "1564"
},
{
"name": "Java",
"bytes": "592"
},
{
"name": "JavaScript",
"bytes": "472227"
},
{
"name": "Makefile",
"bytes": "15304"
},
{
"name": "PHP",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "2312447"
},
{
"name": "Ruby",
"bytes": "447"
},
{
"name": "Shell",
"bytes": "12706"
}
],
"symlink_target": ""
}
|
import os
from twilio.rest import Client
# Find your credentials at twilio.com/console
# To set up environmental variables, see http://twil.io/secure
api_key_sid = os.environ['TWILIO_API_KEY']
api_key_secret = os.environ['TWILIO_API_KEY_SECRET']
client = Client(api_key_sid, api_key_secret)
compositionHook = client.video.composition_hooks.create(
    friendly_name = 'MyHookWithComplexVideoLayout',
audio_sources = ['listener-audio', 'presenter-audio'],
video_layout = {
'main' : {
'z_pos': 1,
'video_sources': ['screen']
},
'pip': {
'z_pos': 2,
'x_pos': 1000,
'y_pos': 30,
'width': 240,
'height': 180,
'video_sources': ['presenter-cam']
}
},
status_callback = 'http://my.server.org/callbacks',
resolution = '1280x720',
format='mp4')
print('Created Composition Hook with SID=%s' % (compositionHook.sid))
|
{
"content_hash": "761a1ea69f62beffc16217488b9c3e89",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 36.16129032258065,
"alnum_prop": 0.5057983942908117,
"repo_name": "TwilioDevEd/api-snippets",
"id": "89886c3f83783045d7d0373eda1283b5d376038e",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "video/rest/compositionhooks/complex-layout-hook/complex-layout-hook.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
"""Tests the text output of Google C++ Testing and Mocking Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
      'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\]((gtest_output_test_|gtest).cc)(\:\d+|\(\d+\))\: ',
r'\1:#: ', test_output)
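# Illustrative transformation performed by RemoveLocations() (hypothetical
# input line, shown only to make the regex above concrete):
#   'some/dir/gtest_output_test_.cc:123: Failure' -> 'gtest_output_test_.cc:#: Failure'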
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
SUPPORTS_STACK_TRACES)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual,
'\n'.join(difflib.unified_diff(
normalized_golden.split('\n'),
normalized_actual.split('\n'),
'golden', 'actual')))
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if NO_STACKTRACE_SUPPORT_FLAG in sys.argv:
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
if GENGOLDEN_FLAG in sys.argv:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests,
typed tests, stack traces, and multiple threads).
Please build this test and generate the golden file using Blaze on Linux.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
{
"content_hash": "f2ac26d782680dbb070f3501b6835526",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 79,
"avg_line_length": 34.84639498432602,
"alnum_prop": 0.6555415617128464,
"repo_name": "FabianHahn/libstore",
"id": "63763b95b91709f1d0d4ce5f2d3cc00d6be231e5",
"size": "12669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thirdparty/googletest/googletest/test/gtest_output_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40587"
},
{
"name": "C++",
"bytes": "47254"
},
{
"name": "CMake",
"bytes": "3254"
}
],
"symlink_target": ""
}
|
import math
from .utils import clip
orientations = [(1, 0), (0, 1), (-1, 0), (0, -1)]
def turn_heading(heading, inc, headings=orientations):
return headings[(headings.index(heading) + inc) % len(headings)]
def turn_right(heading):
return turn_heading(heading, -1)
def turn_left(heading):
return turn_heading(heading, +1)
def distance(a, b):
"""The distance between two (x, y) points."""
return math.hypot((a[0] - b[0]), (a[1] - b[1]))
def distance2(a, b):
"The square of the distance between two (x, y) points."
return (a[0] - b[0])**2 + (a[1] - b[1])**2
def vector_clip(vector, lowest, highest):
"""Return vector, except if any element is less than the corresponding
value of lowest or more than the corresponding value of highest, clip to
those values."""
return type(vector)(map(clip, vector, lowest, highest))
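# Illustrative values for the helpers above (assuming utils.clip bounds a
# number to the range [lowest, highest]):
#   turn_right((1, 0))                    -> (0, -1)
#   turn_left((1, 0))                     -> (0, 1)
#   distance((0, 0), (3, 4))              -> 5.0
#   vector_clip((5, -2), (0, 0), (3, 3))  -> (3, 0)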
|
{
"content_hash": "294ec99401020970eecb64e7748fbd05",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 25.647058823529413,
"alnum_prop": 0.6456422018348624,
"repo_name": "andres-root/AIND",
"id": "ef0774649df6011f668447a617e974ed69cd3332",
"size": "1173",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Therm1/Planning/Project/aimacode/grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1194859"
},
{
"name": "JavaScript",
"bytes": "9816"
},
{
"name": "Jupyter Notebook",
"bytes": "2717429"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "749403"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.insert(0, os.path.realpath(os.path.abspath('../')))
import hpe3parclient
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.coverage', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HPE 3PAR Client'
copyright = '2012-2015 Hewlett Packard Enterprise Development LP'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = hpe3parclient.version
# The full version, including alpha/beta/rc tags.
release = hpe3parclient.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'hpe3parclientdoc'
htmlhelp_basename = 'HPE3PARClient' + release.replace('.', '_')
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'hpe3parclient.tex', 'hpe3parclient Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hpe3parclient', 'hpe3parclient Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hpe3parclient', 'hpe3parclient Documentation',
'Author', 'hpe3parclient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'hpe3parclient'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2012, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
{
"content_hash": "f89329349d16a3873ff0858c1aba1cb6",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 100,
"avg_line_length": 32.07971014492754,
"alnum_prop": 0.7042014908515924,
"repo_name": "hp-storage/python-3parclient",
"id": "25d01c7975eb4b8ab656177c9772c3e029663ba8",
"size": "9278",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "623828"
},
{
"name": "Tcl",
"bytes": "3246"
}
],
"symlink_target": ""
}
|
import functools
import inspect
import os
import re
import sys
import threading
from collections import OrderedDict
import six
# noinspection PyUnresolvedReferences
from cheap_repr import cheap_repr, find_repr_function
from snoop.utils import my_cheap_repr, NO_ASTTOKENS, ArgDefaultDict, iscoroutinefunction, \
truncate_list, ensure_tuple, is_comprehension_frame, no_args_decorator
from .formatting import Event, Source
from .variables import CommonVariable, Exploding, BaseVariable
find_repr_function(six.text_type).maxparts = 100
find_repr_function(six.binary_type).maxparts = 100
find_repr_function(object).maxparts = 100
find_repr_function(int).maxparts = 999999
cheap_repr.suppression_threshold = 999999
class FrameInfo(object):
def __init__(self, frame):
self.frame = frame
self.local_reprs = {}
self.last_line_no = frame.f_lineno
self.comprehension_variables = OrderedDict()
self.source = Source.for_frame(frame)
self.is_generator = frame.f_code.co_flags & inspect.CO_GENERATOR
self.had_exception = False
if is_comprehension_frame(frame):
self.comprehension_type = (
re.match(r'<(\w+)comp>', frame.f_code.co_name).group(1).title()
+ u' comprehension'
)
else:
self.comprehension_type = ''
def update_variables(self, watch, watch_extras, event):
self.last_line_no = self.frame.f_lineno
old_local_reprs = self.local_reprs
self.local_reprs = OrderedDict(
(source, my_cheap_repr(value))
for source, value in
self.get_local_reprs(watch, watch_extras)
)
if self.comprehension_type:
for name, value_repr in self.local_reprs.items():
values = self.comprehension_variables.setdefault(name, [])
if not values or values[-1] != value_repr:
values.append(value_repr)
values[:] = truncate_list(values, 11)
if event in ('return', 'exception'):
return [
(name, ', '.join(values))
for name, values in self.comprehension_variables.items()
]
else:
return []
variables = []
for name, value_repr in self.local_reprs.items():
if name not in old_local_reprs or old_local_reprs[name] != value_repr:
variables.append((name, value_repr))
return variables
def get_local_reprs(self, watch, watch_extras):
frame = self.frame
code = frame.f_code
vars_order = code.co_varnames + code.co_cellvars + code.co_freevars + tuple(frame.f_locals.keys())
result_items = sorted(
frame.f_locals.items(),
key=lambda key_value: vars_order.index(key_value[0])
)
for variable in watch:
result_items += sorted(variable.items(frame))
for source, value in result_items:
yield source, value
for extra in watch_extras:
try:
pair = extra(source, value)
except Exception:
pass
else:
if pair is not None:
assert len(pair) == 2, "Watch extra must return pair or None"
yield pair
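# Illustrative watch extra for get_local_reprs() above (hypothetical helper,
# not part of this module): an extra is called with (source, value) and must
# return a (label, value) pair or None, e.g.
#   def len_extra(source, value):
#       try:
#           return ('len({})'.format(source), len(value))
#       except TypeError:
#           return None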
thread_global = threading.local()
internal_directories = (os.path.dirname((lambda: 0).__code__.co_filename),)
try:
# noinspection PyUnresolvedReferences
import birdseye
except ImportError:
pass
else:
internal_directories += (os.path.dirname(birdseye.__file__),)
class TracerMeta(type):
def __new__(mcs, *args, **kwargs):
result = super(TracerMeta, mcs).__new__(mcs, *args, **kwargs)
result.default = result()
return result
def __call__(cls, *args, **kwargs):
if no_args_decorator(args, kwargs):
return cls.default(args[0])
else:
return super(TracerMeta, cls).__call__(*args, **kwargs)
def __enter__(self):
return self.default.__enter__(context=1)
def __exit__(self, *args):
return self.default.__exit__(*args, context=1)
@six.add_metaclass(TracerMeta)
class Tracer(object):
def __init__(
self,
watch=(),
watch_explode=(),
depth=1,
):
self.watch = [
v if isinstance(v, BaseVariable) else CommonVariable(v)
for v in ensure_tuple(watch)
] + [
v if isinstance(v, BaseVariable) else Exploding(v)
for v in ensure_tuple(watch_explode)
]
self.frame_infos = ArgDefaultDict(FrameInfo)
self.depth = depth
assert self.depth >= 1
self.target_codes = set()
self.target_frames = set()
def __call__(self, function):
if iscoroutinefunction(function):
raise NotImplementedError("coroutines are not supported, sorry!")
self.target_codes.add(function.__code__)
@functools.wraps(function)
def simple_wrapper(*args, **kwargs):
with self:
return function(*args, **kwargs)
@functools.wraps(function)
def generator_wrapper(*args, **kwargs):
gen = function(*args, **kwargs)
method, incoming = gen.send, None
while True:
with self:
try:
outgoing = method(incoming)
except StopIteration:
return
try:
method, incoming = gen.send, (yield outgoing)
except Exception as e:
method, incoming = gen.throw, e
if inspect.isgeneratorfunction(function):
return generator_wrapper
else:
return simple_wrapper
def __enter__(self, context=0):
if not self.config.enabled:
return
calling_frame = sys._getframe(context + 1)
if not self._is_internal_frame(calling_frame):
calling_frame.f_trace = self.trace
self.target_frames.add(calling_frame)
self.config.last_frame = calling_frame
self.trace(calling_frame, 'enter', None)
stack = thread_global.__dict__.setdefault('original_trace_functions', [])
stack.append(sys.gettrace())
sys.settrace(self.trace)
def __exit__(self, exc_type, exc_value, exc_traceback, context=0):
if not self.config.enabled:
return
stack = thread_global.original_trace_functions
sys.settrace(stack.pop())
calling_frame = sys._getframe(context + 1)
self.trace(calling_frame, 'exit', None)
self.target_frames.discard(calling_frame)
self.frame_infos.pop(calling_frame, None)
def _is_internal_frame(self, frame):
return frame.f_code.co_filename.startswith(internal_directories)
def _is_traced_frame(self, frame):
return frame.f_code in self.target_codes or frame in self.target_frames
def trace(self, frame, event, arg):
if not self._is_traced_frame(frame):
if (
self.depth == 1
or self._is_internal_frame(frame)
) and not is_comprehension_frame(frame):
return None
else:
candidate = frame
i = 0
while True:
if is_comprehension_frame(candidate):
candidate = candidate.f_back
continue
i += 1
if self._is_traced_frame(candidate):
break
candidate = candidate.f_back
if i >= self.depth or candidate is None or self._is_internal_frame(candidate):
return None
thread_local = self.config.thread_local
thread_local.__dict__.setdefault('depth', -1)
frame_info = self.frame_infos[frame]
if event in ('call', 'enter'):
thread_local.depth += 1
elif self.config.last_frame and self.config.last_frame is not frame:
line_no = frame_info.last_line_no
trace_event = Event(frame_info, event, arg, thread_local.depth, line_no=line_no)
line = self.config.formatter.format_line_only(trace_event)
self.config.write(line)
if event == 'exception':
frame_info.had_exception = True
self.config.last_frame = frame
trace_event = Event(frame_info, event, arg, thread_local.depth)
if not (frame.f_code.co_name == '<genexpr>' and event not in ('return', 'exception')):
trace_event.variables = frame_info.update_variables(
self.watch,
self.config.watch_extras,
event,
)
if event in ('return', 'exit'):
del self.frame_infos[frame]
thread_local.depth -= 1
formatted = self.config.formatter.format(trace_event)
self.config.write(formatted)
return self.trace
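# Illustrative usage sketch for Tracer (assumes a configured subclass supplies
# self.config, as the snoop package does; the names below are hypothetical):
#
#   @Tracer(watch=('result',), depth=2)
#   def fib(n):
#       result = n if n < 2 else fib(n - 1) + fib(n - 2)
#       return result
#
# Each traced event is formatted by self.config.formatter and written via
# self.config.write; `watch` / `watch_explode` add extra expressions to every
# snapshot taken in update_variables() above.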
class Spy(object):
def __init__(self, config):
self.config = config
def __call__(self, *args, **kwargs):
if NO_ASTTOKENS:
raise Exception("birdseye doesn't support this version of Python")
try:
import birdseye
except ImportError:
raise Exception("You must install birdseye separately to use spy: pip install birdseye")
# Decorator without parentheses
if no_args_decorator(args, kwargs):
return self._trace(args[0])
# Decorator with parentheses and perhaps arguments
def decorator(func):
return self._trace(func, *args, **kwargs)
return decorator
def _trace(self, func, *args, **kwargs):
# noinspection PyUnresolvedReferences
from birdseye import eye
traced = eye(func)
traced = self.config.snoop(*args, **kwargs)(traced)
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
if self.config.enabled:
final_func = traced
else:
final_func = func
return final_func(*func_args, **func_kwargs)
return wrapper
|
{
"content_hash": "aac4fc9e06897ddbd880da84be5057c5",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 106,
"avg_line_length": 33.90522875816993,
"alnum_prop": 0.566843373493976,
"repo_name": "alexmojaki/executing",
"id": "1125ff9057928d9d2d224d4f04b92ccef037cf30",
"size": "10375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/samples/tracer2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "411686"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
}
|
"""
Takes an OSM way ID and turns it into a readable string that
e.g. OsmAnd can parse as well.
"""
import json
import sys
import threading
import urllib.parse
import urllib.request
from typing import Dict
from typing import cast
def query_turbo(query: str) -> str:
"""Send query to overpass turbo."""
url = "http://overpass-api.de/api/interpreter"
sock = urllib.request.urlopen(url, bytes(query, "utf-8"))
buf = sock.read()
sock.close()
return cast(str, buf.decode("utf-8"))
def query_nominatim(query: str) -> str:
"""Send query to nominatim."""
url = "http://nominatim.openstreetmap.org/search.php?"
params = {
"q": query,
"format": "json"
}
url += urllib.parse.urlencode(params)
sock = urllib.request.urlopen(url)
buf = sock.read()
sock.close()
return cast(str, buf.decode("utf-8"))
def osmify(query: str) -> str:
"""Turn query into a coordinate + address string."""
# Use nominatim to get the coordinates and the osm type/id.
elements = json.loads(query_nominatim(query))
if not elements:
return "No results from nominatim"
if len(elements) > 1:
# There are multiple elements, prefer buildings if possible.
# Example where this is useful: 'Karinthy Frigyes út 18, Budapest'.
buildings = [i for i in elements if "class" in i.keys() and i["class"] == "building"]
if buildings:
elements = buildings
element = elements[0]
lat = element["lat"]
lon = element["lon"]
object_type = element["osm_type"]
object_id = element["osm_id"]
# Use overpass to get the properties of the object.
overpass_query = """[out:json];
(
%s(%s);
);
out body;""" % (object_type, object_id)
j = json.loads(query_turbo(overpass_query))
elements = j["elements"]
if not elements:
return "No results from overpass"
element = elements[0]
city = element['tags']['addr:city']
housenumber = element['tags']['addr:housenumber']
postcode = element['tags']['addr:postcode']
street = element['tags']['addr:street']
addr = "%s %s, %s %s" % (postcode, city, street, housenumber)
# Print the result.
return "%s,%s (%s)" % (lat, lon, addr)
def worker(context: Dict[str, str]) -> None:
"""Wrapper around osmify() that has no return value."""
context["out"] = osmify(context["in"])
def spinner(context: Dict[str, str], thread: threading.Thread) -> None:
"""Shows a spinner while osmify() is in progress."""
spin_characters = "\\|/-"
spin_index = 0
while True:
thread.join(timeout=0.1)
if thread.is_alive():
sys.stderr.write("\r [%s] " % spin_characters[spin_index])
sys.stderr.flush()
spin_index = (spin_index + 1) % len(spin_characters)
continue
sys.stderr.write("\r")
sys.stderr.flush()
print(context["out"])
break
def main() -> None:
"""Commandline interface to this module."""
if len(sys.argv) > 1:
context = {"in": sys.argv[1]}
thread = threading.Thread(target=worker, args=(context,))
thread.start()
spinner(context, thread)
else:
print("usage: addr-osmify <query>")
print()
print("e.g. addr-osmify 'Mészáros utca 58/a, Budapest'")
if __name__ == "__main__":
main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
{
"content_hash": "975a02b2ffebb062db3c59ba1e91ce4f",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 93,
"avg_line_length": 27.934959349593495,
"alnum_prop": 0.6059371362048894,
"repo_name": "vmiklos/vmexam",
"id": "afff55b3736a6ce19b869e66d29258bcd749afc2",
"size": "3619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osm/addr-osmify-py/addr_osmify.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1356"
},
{
"name": "C",
"bytes": "207141"
},
{
"name": "C#",
"bytes": "6115"
},
{
"name": "C++",
"bytes": "174284"
},
{
"name": "CMake",
"bytes": "90430"
},
{
"name": "Go",
"bytes": "13344"
},
{
"name": "HTML",
"bytes": "7421"
},
{
"name": "Java",
"bytes": "33479"
},
{
"name": "JavaScript",
"bytes": "15830"
},
{
"name": "JetBrains MPS",
"bytes": "93"
},
{
"name": "Kotlin",
"bytes": "12619"
},
{
"name": "M4",
"bytes": "4410"
},
{
"name": "Makefile",
"bytes": "133045"
},
{
"name": "Objective-C",
"bytes": "6102"
},
{
"name": "PDDL",
"bytes": "2562"
},
{
"name": "PHP",
"bytes": "10859"
},
{
"name": "Perl",
"bytes": "566936"
},
{
"name": "PowerShell",
"bytes": "618"
},
{
"name": "Python",
"bytes": "185940"
},
{
"name": "Rust",
"bytes": "40567"
},
{
"name": "Shell",
"bytes": "74062"
},
{
"name": "TypeScript",
"bytes": "45072"
},
{
"name": "VBA",
"bytes": "3117"
},
{
"name": "Vim Script",
"bytes": "1105"
},
{
"name": "XSLT",
"bytes": "281"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, print_function
import os
import json
import logging
import datetime
from six import StringIO
import glob
import re
from io import BytesIO
from botocore.vendored.requests.structures import CaseInsensitiveDict
from botocore.response import StreamingBody
from gcdt.gcdt_awsclient import AWSClient
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
LOG_ALL_TRAFFIC = True # False means do not log successful requests
log = logging.getLogger(__name__)
# hdlr = logging.FileHandler('./placebo.log')
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# hdlr.setFormatter(formatter)
# log.addHandler(hdlr)
# log.setLevel(logging.DEBUG)
# TODO fix recording for kms, i.e. test_config_reader_aws.py tests
class FakeHttpResponse(object):
def __init__(self, status_code):
self.status_code = status_code
class PlaceboAWSClient(AWSClient):
def __init__(self, session, data_path):
"""Test tool to replace AWSClient. It can record and playback calls to
AWS services.
:param session: botocore session
:param data_path: basepath for your recordings
"""
self._session = session
self._client_cache = {}
self._mode = None # None, record, playback
# TODO remove _prefix
self._prefix = None # not used!!
self._data_path = data_path
self._index = {} # playback registry per service count
self._events = [] # keep track of registered events
self._filename_re = re.compile(r'.*\..*_(?P<index>\d+).json')
def record(self, services='*', operations='*'):
"""Unregister all events and switch to 'record' mode.
:param services: defaults to '*' but you can filter specific ones, too
:param operations: defaults to '*' but you can filter specific ones, too
:return:
"""
if self._mode == 'playback':
self.stop()
self._mode = 'record'
for service in services.split(','):
for operation in operations.split(','):
event = 'after-call.{0}.{1}'.format(
service.strip(), operation.strip())
log.debug('recording: %s', event)
self._events.append(event)
self._session.register(
event, self._record_data, 'placebo-record-mode')
def _record_data(self, http_response, parsed, model, **kwargs):
log.debug('_record_data')
service_name = model.service_model.endpoint_prefix
operation_name = model.name
self._save_response(service_name, operation_name, parsed,
http_response.status_code)
def _save_response(self, service, operation, response_data,
http_response=200):
"""
Store a response to the data directory. The ``operation``
should be the name of the operation in the service API (e.g.
        DescribeInstances), the ``response_data`` should be a value you want
to return from a placebo call and the ``http_response`` should be
the HTTP status code returned from the service. You can add
multiple responses for a given operation and they will be
returned in order.
"""
log.debug('save_response: %s.%s', service, operation)
filepath = self._get_new_file_path(service, operation)
log.debug('save_response: path=%s', filepath)
json_data = {'status_code': http_response,
'data': response_data}
with open(filepath, 'w') as fp:
json.dump(json_data, fp, indent=4, default=serialize_patch)
def _get_new_file_path(self, service, operation):
base_name = '{0}.{1}'.format(service, operation)
if self._prefix:
base_name = '{0}.{1}'.format(self._prefix, base_name)
log.debug('get_new_file_path: %s', base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + '*')
for file_path in glob.glob(glob_pattern):
file_name = os.path.basename(file_path)
m = self._filename_re.match(file_name)
if m:
i = int(m.group('index'))
if i > index:
index = i
index += 1
return os.path.join(
self._data_path, '{0}_{1}.json'.format(base_name, index))
def playback(self):
"""Unregister all events and switch to 'playback' mode.
:return:
"""
if self._mode == 'record':
self.stop()
if self._mode is None:
event = 'before-call.*.*'
self._events.append(event)
self._session.register(
event, self._mock_request, 'placebo-playback-mode')
self._mode = 'playback'
def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
        log.debug('_mock_request: %s.%s', service, operation)
return self._load_response(service, operation)
def _load_response(self, service, operation):
log.debug('load_response: %s.%s', service, operation)
response_file = self._get_next_file_path(service, operation)
log.debug('load_responses: %s', response_file)
with open(response_file, 'r') as fp:
response_data = json.load(fp, object_hook=deserialize)
return (FakeHttpResponse(response_data['status_code']),
response_data['data'])
def _get_next_file_path(self, service, operation):
base_name = '{0}.{1}'.format(service, operation)
if self._prefix:
base_name = '{0}.{1}'.format(self._prefix, base_name)
log.debug('get_next_file_path: %s', base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(
self._data_path, base_name + '_{0}.json'.format(index))
if os.path.exists(fn):
next_file = fn
self._index[base_name] += 1
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError('response file ({0}) not found'.format(fn))
return fn
def stop(self):
"""Unregister events and switch back to 'normal' mode.
:return:
"""
log.debug('stopping, mode=%s', self._mode)
if self._mode == 'record':
if self._session:
for event in self._events:
self._session.unregister(
event, unique_id='placebo-record-mode')
self._events = []
elif self._mode == 'playback':
if self._session:
for event in self._events:
self._session.unregister(
event, unique_id='placebo-playback-mode')
self._events = []
self._mode = None
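# Example only (not part of the original module): a sketch of the intended
# record-then-playback flow. The botocore ``session`` and the recording
# directory are assumptions supplied by the caller.
def _example_record_then_playback(session, data_path):
    client = PlaceboAWSClient(session, data_path)
    client.record(services='ec2', operations='DescribeInstances')
    # ... make the real AWS call here; the parsed response is written to
    # data_path as ec2.DescribeInstances_1.json ...
    client.stop()
    client.playback()
    # ... repeat the call; the response is now read back from that file ...
    client.stop()
    return client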
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if '__class__' in target:
class_name = target.pop('__class__')
    if '__module__' in target:
        # drop the marker so the constructors below don't receive '__module__'
        target.pop('__module__')
# Use getattr(module, class_name) for custom types if needed
if class_name == 'datetime':
target['tzinfo'] = UTC()
return datetime.datetime(**target)
if class_name == 'StreamingBody':
return StringIO(target['body'])
# Return unrecognized structures as-is
return obj
def serialize_patch(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {'__class__': obj.__class__.__name__}
try:
result['__module__'] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime.datetime):
result['year'] = obj.year
result['month'] = obj.month
result['day'] = obj.day
result['hour'] = obj.hour
result['minute'] = obj.minute
result['second'] = obj.second
result['microsecond'] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
original_text = obj.read()
# We remove a BOM here if it exists so that it doesn't get reencoded
# later on into a UTF-16 string, presumably by the json library
result['body'] = original_text.decode('utf-8-sig')
obj._raw_stream = BytesIO(original_text)
obj._amount_read = 0
return result
if isinstance(obj, CaseInsensitiveDict):
result['as_dict'] = dict(obj)
return result
raise TypeError('Type not serializable')
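# Example only (not part of the original module): a minimal round-trip sketch
# showing how serialize_patch and deserialize cooperate through the json
# module; the sample timestamp is arbitrary.
def _example_datetime_roundtrip():
    original = datetime.datetime(2017, 1, 2, 3, 4, 5, tzinfo=UTC())
    blob = json.dumps({'data': original}, default=serialize_patch)
    restored = json.loads(blob, object_hook=deserialize)['data']
    return original == restored  # expected to be True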
|
{
"content_hash": "c4dc8a179691ab227ebfe2a3989cce00",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 80,
"avg_line_length": 36.318897637795274,
"alnum_prop": 0.5835230352303523,
"repo_name": "glomex/gcdt",
"id": "a147c02e39db6da52cefedf14969968507ad090e",
"size": "9249",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gcdt_testtools/placebo_awsclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "368"
},
{
"name": "Groovy",
"bytes": "7494"
},
{
"name": "HTML",
"bytes": "102"
},
{
"name": "JavaScript",
"bytes": "1723"
},
{
"name": "Python",
"bytes": "509123"
},
{
"name": "Shell",
"bytes": "10192"
},
{
"name": "Smarty",
"bytes": "271"
}
],
"symlink_target": ""
}
|
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = tornado.locale.get("es_LA")
print user_locale.translate("Sign out")
`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
print message % {"list": user_locale.list(people)}
The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.
Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""
from __future__ import absolute_import, division, print_function, with_statement
import csv
import datetime
import os
import re
from tornado import escape
from tornado.log import gen_log
from tornado.util import u
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
def get(*locale_codes):
"""Returns the closest match for the given locale codes.
We iterate over all given locale codes in order. If we have a tight
or a loose match for the code (e.g., "en" for "en_US"), we return
the locale. Otherwise we move to the next code in the list.
By default we return ``en_US`` if no translations are found for any of
the specified locales. You can change the default locale with
`set_default_locale()`.
"""
return Locale.get_closest(*locale_codes)
def set_default_locale(code):
"""Sets the default locale.
The default locale is assumed to be the language used for all strings
in the system. The translations loaded from disk are mappings from
the default locale to the destination locale. Consequently, you don't
need to create a translation file for the default locale.
"""
global _default_locale
global _supported_locales
_default_locale = code
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory):
"""Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders
(e.g., ``My name is %(name)s``) and their associated translations.
The directory should have translation files of the form ``LOCALE.csv``,
e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
translation, and an optional plural indicator. Plural indicators should
be one of "plural" or "singular". A given string can have both singular
and plural forms. For example ``%(name)s liked this`` may have a
different verb conjugation depending on whether %(name)s is one
name or a list of names. There should be two rows in the CSV file for
that string, one with plural indicator "singular", and one "plural".
For strings with no verbs that would change on translation, simply
use "unknown" or the empty string (or don't include the column at all).
The file is read using the `csv` module in the default "excel" dialect.
In this format there should not be spaces after the commas.
Example translation ``es_LA.csv``::
"I love you","Te amo"
"%(name)s liked this","A %(name)s les gustó esto","plural"
"%(name)s liked this","A %(name)s le gustó esto","singular"
"""
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
if not path.endswith(".csv"):
continue
locale, extension = path.split(".")
if not re.match("[a-z]+(_[A-Z]+)?$", locale):
gen_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
full_path = os.path.join(directory, path)
try:
# python 3: csv.reader requires a file open in text mode.
# Force utf8 to avoid dependence on $LANG environment variable.
f = open(full_path, "r", encoding="utf-8")
except TypeError:
# python 2: files return byte strings, which are decoded below.
f = open(full_path, "r")
_translations[locale] = {}
for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2:
continue
row = [escape.to_unicode(c).strip() for c in row]
english, translation = row[:2]
if len(row) > 2:
plural = row[2] or "unknown"
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
gen_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
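# Example only (not part of the original module): a minimal sketch of loading
# CSV translations and translating a string. The ``directory`` argument is
# hypothetical and must contain files such as es_LA.csv in the format above.
def _example_load_and_translate(directory, code="es_LA"):
    load_translations(directory)
    user_locale = get(code)
    return user_locale.translate("Sign out")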
def load_gettext_translations(directory, domain):
"""Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
    Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
"""
import gettext
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith('.'):
continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)):
continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception as e:
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
"""Returns a list of all the supported locale codes."""
return _supported_locales
class Locale(object):
"""Object representing a locale.
After calling one of `load_translations` or `load_gettext_translations`,
call `get` or `get_closest` to get a Locale object.
"""
@classmethod
def get_closest(cls, *locale_codes):
"""Returns the closest match for the given locale code."""
for code in locale_codes:
if not code:
continue
code = code.replace("-", "_")
parts = code.split("_")
if len(parts) > 2:
continue
elif len(parts) == 2:
code = parts[0].lower() + "_" + parts[1].upper()
if code in _supported_locales:
return cls.get(code)
if parts[0].lower() in _supported_locales:
return cls.get(parts[0].lower())
return cls.get(_default_locale)
@classmethod
def get(cls, code):
"""Returns the Locale for the given locale code.
If it is not supported, we raise an exception.
"""
if not hasattr(cls, "_cache"):
cls._cache = {}
if code not in cls._cache:
assert code in _supported_locales
translations = _translations.get(code, None)
if translations is None:
locale = CSVLocale(code, {})
elif _use_gettext:
locale = GettextLocale(code, translations)
else:
locale = CSVLocale(code, translations)
cls._cache[code] = locale
return cls._cache[code]
def __init__(self, code, translations):
self.code = code
self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
self.rtl = True
break
self.translations = translations
# Initialize strings for date formatting
_ = self.translate
self._months = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
self._weekdays = [
_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
_("Friday"), _("Saturday"), _("Sunday")]
def translate(self, message, plural_message=None, count=None):
"""Returns the translation for the given message for this locale.
If ``plural_message`` is given, you must also provide
``count``. We return ``plural_message`` when ``count != 1``,
and we return the singular form for the given message when
``count == 1``.
"""
raise NotImplementedError()
def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
"""
if self.code.startswith("ru"):
relative = False
if type(date) in (int, long, float):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
                # Due to clock skew, some things may appear slightly
                # in the future. Round timestamps in the immediate
                # future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
_ = self.translate
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return _("1 second ago", "%(seconds)d seconds ago",
seconds) % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return _("1 minute ago", "%(minutes)d minutes ago",
minutes) % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return _("1 hour ago", "%(hours)d hours ago",
hours) % {"hours": hours}
if days == 0:
format = _("%(time)s")
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = _("yesterday") if shorter else \
_("yesterday at %(time)s")
elif days < 5:
format = _("%(weekday)s") if shorter else \
_("%(weekday)s at %(time)s")
elif days < 334: # 11mo, since confusing for same month last year
format = _("%(month_name)s %(day)s") if shorter else \
_("%(month_name)s %(day)s at %(time)s")
if format is None:
format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
_("%(month_name)s %(day)s, %(year)s at %(time)s")
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
if tfhour_clock:
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
(u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
local_date.hour % 12 or 12, local_date.minute,
("am", "pm")[local_date.hour >= 12])
return format % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
def format_day(self, date, gmt_offset=0, dow=True):
"""Formats the given date as a day of week.
Example: "Monday, January 22". You can remove the day of week with
``dow=False``.
"""
local_date = date - datetime.timedelta(minutes=gmt_offset)
_ = self.translate
if dow:
return _("%(weekday)s, %(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
}
else:
return _("%(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"day": str(local_date.day),
}
def list(self, parts):
"""Returns a comma-separated list for the given list of parts.
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
of size 1.
"""
_ = self.translate
if len(parts) == 0:
return ""
if len(parts) == 1:
return parts[0]
comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
}
def friendly_number(self, value):
"""Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"):
return str(value)
value = str(value)
parts = []
while value:
parts.append(value[-3:])
value = value[:-3]
return ",".join(reversed(parts))
class CSVLocale(Locale):
"""Locale implementation using tornado's CSV translation format."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
class GettextLocale(Locale):
"""Locale implementation using the `gettext` module."""
def __init__(self, code, translations):
try:
# python 2
self.ngettext = translations.ungettext
self.gettext = translations.ugettext
except AttributeError:
# python 3
self.ngettext = translations.ngettext
self.gettext = translations.gettext
# self.gettext must exist before __init__ is called, since it
# calls into self.translate
super(GettextLocale, self).__init__(code, translations)
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
return self.ngettext(message, plural_message, count)
else:
return self.gettext(message)
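# Example only (not part of the original module): with no translations loaded,
# Locale.get("en_US") falls back to a CSVLocale that returns strings
# unchanged, which is enough to demonstrate the formatting helpers above.
def _example_formatting_helpers():
    locale = Locale.get("en_US")
    assert locale.friendly_number(1234567) == "1,234,567"
    assert locale.list(["Alice", "Bob", "Carol"]) == "Alice, Bob and Carol"
    return locale.format_day(datetime.datetime.utcnow())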
LOCALE_NAMES = {
"af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
"am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')},
"ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")},
"bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")},
"bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")},
"bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
"ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")},
"cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")},
"cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
"da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
"de_DE": {"name_en": u("German"), "name": u("Deutsch")},
"el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")},
"en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
"en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
"es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")},
"es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")},
"et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
"eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
"fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")},
"fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
"fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")},
"fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")},
"ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
"gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
"he_IL": {"name_en": u("Hebrew"), "name": u("\u05e2\u05d1\u05e8\u05d9\u05ea")},
"hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")},
"hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
"hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
"id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
"is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")},
"it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
"ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")},
"ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")},
"lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")},
"lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")},
"mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")},
"ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")},
"ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
"nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")},
"nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
"nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
"pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")},
"pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
"pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")},
"pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")},
"ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")},
"ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")},
"sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")},
"sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")},
"sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
"sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")},
"sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
"sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
"ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")},
"te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")},
"th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")},
"tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
"tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")},
"uk_UA": {"name_en": u("Ukraini "), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")},
"vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")},
"zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")},
"zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")},
}
|
{
"content_hash": "2555e62968163c94bcdb4303d5c47105",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 117,
"avg_line_length": 43.104838709677416,
"alnum_prop": 0.5652478952291862,
"repo_name": "interactiveinstitute/watthappened",
"id": "66e9ff6d8a149e316cb1acf3ada43d11b9efea3b",
"size": "22001",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "python_modules/tornado/locale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "379166"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "56669"
},
{
"name": "Python",
"bytes": "1349810"
},
{
"name": "Shell",
"bytes": "6707"
}
],
"symlink_target": ""
}
|
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
import warnings
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, FieldError
from django.forms.fields import Field, ChoiceField
from django.forms.forms import DeclarativeFieldsMetaclass, BaseForm
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput, CheckboxSelectMultiple)
from django.utils.encoding import smart_text, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext, string_concat
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or not f.name in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in opts.many_to_many + opts.virtual_fields:
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
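# Example only (not part of the original module): the deferred-save pattern
# that save_instance enables. ``form`` is assumed to be a valid, bound
# ModelForm whose model has an ``owner`` attribute and m2m fields.
def _example_deferred_save(form, owner):
    obj = form.save(commit=False)  # build the instance without touching the DB
    obj.owner = owner              # fill in data that was not on the form
    obj.save()                     # now persist the instance
    form.save_m2m()                # flush the deferred many-to-many data
    return obj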
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in opts.concrete_fields + opts.virtual_fields + opts.many_to_many:
if not getattr(f, 'editable', False):
continue
if fields and not f.name in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
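# Example only (not part of the original module): seeding an unbound form with
# the current values of an existing instance. ``instance`` and ``form_class``
# are assumed to be a saved model instance and a compatible form class.
def _example_initial_from_instance(instance, form_class):
    initial = model_to_dict(instance)
    return form_class(initial=initial)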
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
    Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(opts.concrete_fields + sortable_virtual_fields + opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields is not None and not f.name in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
# This should be some kind of assertion error once deprecation
# cycle is complete.
warnings.warn("Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is deprecated - form %s "
"needs updating" % name,
DeprecationWarning, stacklevel=2)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset'):
limit_choices_to = formfield.limit_choices_to
if limit_choices_to is not None:
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
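# Declaring a ModelForm subclass is driven entirely by the inner Meta class
# consumed by ModelFormMetaclass above. Illustrative sketch only; ``Author``
# is a hypothetical model:
#
#     class AuthorForm(ModelForm):
#         class Meta:
#             model = Author
#             fields = ['name', 'birth_date']   # or '__all__'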
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
# The ModelFormMetaclass will trigger a similar warning/error, but this will
# be difficult to debug for code that needs updating, so we produce the
# warning here too.
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
warnings.warn("Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is deprecated",
DeprecationWarning, stacklevel=2)
    # Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
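# Equivalent factory-style sketch (example only; ``Author`` is a hypothetical
# model):
#
#     AuthorForm = modelform_factory(Author, fields=['name', 'birth_date'])
#     form = AuthorForm(instance=some_author)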
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = dict((o.pk, o) for o in self.get_queryset())
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) get_prep_value.
"""
while field.rel is not None:
field = field.rel.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and not None in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
if commit:
obj.delete()
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoFields have editable set to
        # True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
pk_value = form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a FormSet class for the given Django model class.
"""
# modelform_factory will produce the same warning/error, but that will be
# difficult to debug for code that needs upgrading, so we produce the
# warning here too. This logic is reproducing logic inside
# modelform_factory, but it can be removed once the deprecation cycle is
# complete, since the validation exception will produce a helpful
# stacktrace.
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
warnings.warn("Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is deprecated",
DeprecationWarning, stacklevel=2)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts, error_messages=error_messages)
FormSet = formset_factory(form, formset, extra=extra, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_max=validate_max)
FormSet.model = model
return FormSet
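# Illustrative sketch (example only; ``Author`` is a hypothetical model):
#
#     AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2,
#                                          can_delete=True)
#     formset = AuthorFormSet(queryset=Author.objects.all())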
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
setattr(form.instance, self.fk.get_attname(), self.instance.pk)
return form
@classmethod
def get_default_prefix(cls):
from django.db.models.fields.related import RelatedObject
return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+', '')
def save_new(self, form, commit=True):
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.rel.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.rel.to != parent_model and
fk.rel.to not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s.%'."
% (fk_name, parent_model._meta.app_label, parent_model._meta.object_name))
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s.%s' has no field named '%s'."
% (model._meta.app_label, model._meta.object_name, fk_name))
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.rel.to == parent_model
or f.rel.to in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s.%s' has no ForeignKey to '%s.%s'."
% (model._meta.app_label, model._meta.object_name, parent_model._meta.app_label, parent_model._meta.object_name))
else:
raise ValueError(
"'%s.%s' has more than one ForeignKey to '%s.%s'."
% (model._meta.app_label, model._meta.object_name, parent_model._meta.app_label, parent_model._meta.object_name))
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
'widgets': widgets,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
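# Illustrative usage sketch (not part of Django itself; Author and Book are
# hypothetical models where Book has a ForeignKey to Author):
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=('title',), extra=1)
#     formset = BookFormSet(instance=some_author)
#     if formset.is_valid():
#         formset.save()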
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def _has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
if self.field.cache_choices:
if self.field.choice_cache is None:
self.field.choice_cache = [
self.choice(obj) for obj in self.queryset.all()
]
for choice in self.field.choice_cache:
yield choice
else:
for obj in self.queryset.all():
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------", cache_choices=False,
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
self.cache_choices = cache_choices
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.choice_cache = None
self.to_field_name = to_field_name
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the QuerySetIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def _has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
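# Illustrative sketch (assumption, not part of Django): label_from_instance is
# typically customised by subclassing, e.g. for a hypothetical Author model
# with name and email fields:
#
#     class AuthorChoiceField(ModelChoiceField):
#         def label_from_instance(self, obj):
#             return "%s (%s)" % (obj.name, obj.email)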
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, cache_choices=False, required=True,
widget=None, label=None, initial=None,
help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
cache_choices, required, widget, label, initial, help_text,
*args, **kwargs)
# Remove this in Django 1.8
if isinstance(self.widget, SelectMultiple) and not isinstance(self.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def to_python(self, value):
if not value:
return []
to_py = super(ModelMultipleChoiceField, self).to_python
return [to_py(val) for val in value]
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
key = self.to_field_name or 'pk'
for pk in value:
try:
self.queryset.filter(**{key: pk})
except ValueError:
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
|
{
"content_hash": "1d50d65e8fda9d8d9539d2a440b24354",
"timestamp": "",
"source": "github",
"line_count": 1263,
"max_line_length": 129,
"avg_line_length": 42.358669833729216,
"alnum_prop": 0.5898801846763491,
"repo_name": "zedr/django",
"id": "a0b47e64b48f92d5301f46a5f26d6eea238356a4",
"size": "53499",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/forms/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52958"
},
{
"name": "JavaScript",
"bytes": "102431"
},
{
"name": "Python",
"bytes": "9522070"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
import linkr # flake8: noqa: F401
import util.templating
from test.backend.test_case import LinkrTestCase
from uri.link import LinkAddURI
class TestTemplating(LinkrTestCase):
_multiprocess_can_split_ = True
def test_get_config(self):
self.assertIsNotNone(util.templating.get_config()['config'])
def test_get_uri_path(self):
uri = util.templating.get_uri_path()['uri']
full_uri = util.templating.get_uri_path()['full_uri']
self.assertEqual(uri('link', 'LinkAddURI'), LinkAddURI.uri())
self.assertEqual(full_uri('link', 'LinkAddURI'), LinkAddURI.full_uri())
def test_get_all_uris(self):
uri = util.templating.get_uri_path()['uri']
all_uris = util.templating.get_all_uris()['all_uris']()
self.assertGreater(len(all_uris), 0)
for uri_module in all_uris:
for uri_class in all_uris[uri_module]:
self.assertIsNotNone(uri(uri_module, uri_class))
|
{
"content_hash": "3326575439ab0a5bdafa9da783505a16",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 34.392857142857146,
"alnum_prop": 0.6552440290758048,
"repo_name": "LINKIWI/linkr",
"id": "0bd980731136842f8d5214135dc162ffd0f8fe8c",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/backend/test_util/test_templating.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15753"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "Groovy",
"bytes": "1908"
},
{
"name": "HTML",
"bytes": "1043"
},
{
"name": "JavaScript",
"bytes": "230542"
},
{
"name": "Python",
"bytes": "191755"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from utils.DiffTimer import DiffTimer
###############################################################################################################
class Progress(object):
    """Console progress reporter that prints dots (and periodic count/time summaries) as items are processed."""
header = u''
dot_point = 10
line_point = 1000
point = 0
upper_bound = 0
prev = 0
percent_mode = False
timer = None
fulltimer = None
def __init__(self, _header, _dot_point=0, _line_point=0, _percent_mode=False):
self.reset(_header, _dot_point, _line_point, _percent_mode)
def reset(self, _header, _dot_point=0, _line_point=0, _percent_mode=False):
self.header = _header
self.set_dotpoint(_dot_point)
self.set_linepoint(_line_point)
self.point = 0
self.timer = DiffTimer()
self.fulltimer = DiffTimer()
self.prev = 1
self.percent_mode = _percent_mode
def set_header(self, _header):
self.header = _header
return self
def set_upperbound(self, _max):
self.upper_bound = _max
return self
def set_point(self, _point):
self.point = _point
return self
def set_dotpoint(self, _dot_point):
self.dot_point = _dot_point if _dot_point > 0 else 1
return self
def set_linepoint(self, _line_point):
self.line_point = _line_point if _line_point > 0 else 1
return self
def start(self):
print(u'%s'%self.header, end=u'')
self.prev = 1
self.timer.set()
self.fulltimer.set()
def _percent(self):
div = int ((float(self.point) / self.upper_bound) * 100)
if div >= 100: div=100
for i in range(self.prev,div):
if i==0: continue
elif i%self.line_point==0: print(u',', end=u'')
elif i%self.dot_point==0:print(u'.', end=u'')
self.prev = div
def check(self, _msg=None):
self.point += 1
# work with percent
if self.percent_mode is True:
self._percent()
return
if (self.point % self.dot_point) == 0:
print(u'.', end=u'')
if (self.point % self.line_point) == 0:
text = u'%s'%(u'{:,}'.format(self.point))
if self.upper_bound >0:
text += u'/%s'%(u'{:,}'.format(self.upper_bound))
text += u' (time:%s'%self.timer.diff_auto()
if _msg is not None:
text += u' %s' %_msg
text += u')'
print(text)
print(u'%s'%self.header, end=u'')
def done(self, _msg=None):
text = u'Done. (size:%s'%(u'{:,}'.format(self.point))
text += u' time:%s'% self.fulltimer.diff_auto()
if _msg is not None:
text += u' %s)'%_msg
else:
text += u')'
print(text)
|
{
"content_hash": "b6ae712936e17a99ed3ebfb02d62dacd",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 111,
"avg_line_length": 23.274509803921568,
"alnum_prop": 0.5939342881213142,
"repo_name": "irblsensitivity/irblsensitivity",
"id": "e9e7fea1a262d24e92c704df7d78c675a16164bc",
"size": "2397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/utils/Progress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1768144"
},
{
"name": "Python",
"bytes": "374811"
},
{
"name": "Shell",
"bytes": "2451"
}
],
"symlink_target": ""
}
|
"""Persistent identifier fetchers."""
from __future__ import absolute_import
from invenio_pidstore.fetchers import FetchedPID
from .providers import DepositProvider
def deposit_fetcher(record_uuid, data):
"""Fetch a deposit identifier.
:param record_uuid: Record UUID.
:param data: Record content.
:returns: A :class:`invenio_pidstore.fetchers.FetchedPID` that contains
data['_deposit']['id'] as pid_value.
"""
return FetchedPID(
provider=DepositProvider,
pid_type=DepositProvider.pid_type,
pid_value=str(data['_deposit']['id']),
)
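# Illustrative usage sketch (the record payload below is made up, shaped only
# to satisfy the fetcher):
#
#     pid = deposit_fetcher(record_uuid=None, data={'_deposit': {'id': 1}})
#     assert pid.pid_value == '1'
#     assert pid.pid_type == DepositProvider.pid_type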
|
{
"content_hash": "eb03e7322c8a7992f0a49dd6b60a35a9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.6839464882943144,
"repo_name": "inveniosoftware/invenio-deposit",
"id": "42f7d60bfc2150e51bf677875008c7ab9d4cbf5c",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_deposit/fetchers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13828"
},
{
"name": "JavaScript",
"bytes": "595"
},
{
"name": "Python",
"bytes": "149048"
},
{
"name": "Shell",
"bytes": "474"
}
],
"symlink_target": ""
}
|
import os
import math
import numpy as np
def read_terra_hst(filename, header_only=False,
rshl_in=None, header_in=None, nr_in=None):
"""Read an optionally gzip compressed hst file from TERRA
Returns numpy arrays containing data from the formatted
hst (history) file written by TERRA. All data is parsed
but currently only a subset is returned. Currently returned
data is:
nr: number of radial layers (integer)
header: a list of strings of header information
iter_num: a numpy array of iteration numbers corresponding to each
stored step (not all time steps are stored)
    time: a numpy array of the time (in yr) of each recorded step
rshl: a numpy array of radii for the rn+1 radial layers
    tav: a (rshl, nsteps) array of layer average temperatures for
each layer at each recorded time step
htop:
hbot:
hrad:
heat:
The optional argument "header_only" allows just the file header
information to be extracted.
The optional arguments header_in rshl_in and nr_in are used
    together for a restart, where the header must be extracted
from the first stage of the run.
"""
# Deal with compressed files.
if (os.path.splitext(filename)[1] == '.gz'):
import gzip
f = gzip.open(filename, 'rb')
else:
f = open(filename, 'r')
# Empty lists for data storage. Note that
# these are named as per the TERRA variables
# that are written (except with _something
# added when the variable means something to
# python.
header = []
propr = []
rshl = []
iter_num = []
time = []
tstep = []
step = []
fnrm = []
rnrm = []
divnorm_cutdiv = []
unrm = []
ekin = []
htop = []
hbot = []
hrad = []
heat = []
hshr = []
hvol = []
hnet = []
tnrm = []
rat = []
rnu = []
tbalc_sctar = []
tav = []
# Simple state machine to read the data (as charaters)
# into the lists. Better to do the conversion to numpy
# arrays in one go afterwards.
header_num = 7
# If we have a header we need to skip some steps
if ((nr_in is not None) and (header_in is not None) and
(rshl_in is not None)):
nr = nr_in
header = header_in
running_block_lines = int(math.ceil((20.0 + float(nr) + 1.0)
/ 6.0))
running_block_lines_tb = running_block_lines
rshl = rshl_in
header_num = 0
for line in f:
if header_num == 7:
# First line - just nr
nr = int(line)
# For counting lines in the timesteps and header.
header_block_lines = 4 + int(math.ceil((float(nr)+1.0)/6.0))
header_block_lines_tb = header_block_lines
running_block_lines = int(math.ceil((20.0 + float(nr) + 1.0)
/ 6.0))
running_block_lines_tb = running_block_lines
header_num = 6
elif header_num >= 3:
# Four lines of header text
header.append(line)
header_num = header_num - 1
elif header_num == 2:
# First block of data - embedded in the header
if header_block_lines_tb > header_block_lines - 4:
# The propr array (over 4 lines)
propr.extend(line.split())
elif header_block_lines_tb > 0:
rshl.extend(line.split())
header_block_lines_tb = header_block_lines_tb - 1
if header_block_lines_tb == 0:
header_num = header_num - 1
elif header_num == 1:
# tscale
tscale = float(line)
header_num = header_num - 1
elif header_num == 0:
# Now into the main blocks of data
if header_only:
# We are only looking for header info
break
if running_block_lines_tb == running_block_lines:
# First line of a new block
these_temps = []
words = line.split()
iter_num.append(words[0])
time.append(words[1])
tstep.append(words[2])
step.append(words[3])
fnrm.append(words[4])
rnrm.append(words[5])
elif running_block_lines_tb == running_block_lines - 1:
words = line.split()
divnorm_cutdiv.append(words[0])
unrm.append(words[1])
ekin.append(words[2])
htop.append(words[3])
hbot.append(words[4])
hrad.append(words[5])
elif running_block_lines_tb == running_block_lines - 2:
words = line.split()
heat.append(words[0])
hshr.append(words[1])
hvol.append(words[2])
hnet.append(words[3])
tnrm.append(words[4])
rat.append(words[5])
elif running_block_lines_tb == running_block_lines - 3:
words = line.split()
rnu.append(words[0])
tbalc_sctar.append(words[1])
these_temps.extend(words[2:])
else:
# Some more temps
these_temps.extend(line.split())
running_block_lines_tb = running_block_lines_tb - 1
if running_block_lines_tb == 0:
running_block_lines_tb = running_block_lines
tav.append(these_temps)
f.close()
if header_only:
rshl = np.array(rshl).astype(np.float)
iter_num = None
time = None
tav = None
htop = None
hbot = None
hrad = None
heat = None
else:
# Convert into Numpy arrays (and ignore the stuff we
# don't need for now)
iter_num = np.array(iter_num).astype(np.float)
time = np.array(time).astype(np.float)
rshl = np.array(rshl).astype(np.float)
tav = np.array(tav).astype(np.float).T
htop = np.array(htop).astype(np.float)
hbot = np.array(hbot).astype(np.float)
hrad = np.array(hrad).astype(np.float)
heat = np.array(heat).astype(np.float)
return nr, header, iter_num, time, rshl, tav, htop, hbot, hrad, heat
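# Illustrative usage sketch (the file name here is hypothetical):
#
#     nr, header, iters, time, rshl, tav, htop, hbot, hrad, heat = \
#         read_terra_hst('run01.hst.gz')
#     plot_layertemp_iter(iters, rshl, tav, filename='layers.png')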
def plot_layertemp_iter(iter_num, rshl, tav, filename=None):
"""Create a visulaisation of the time evolution of the layer temperature"""
import matplotlib
if filename is not None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(tav, extent=[iter_num.min(),iter_num.max(),
rshl.min(),rshl.max()],
interpolation='nearest', aspect='auto')
ax.set_ylabel('Radius (m)')
ax.set_xlabel('Timestep number')
cb = fig.colorbar(im)
cb.set_label('Temperature (K)')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
if __name__ == "__main__":
import argparse
import terraheat
parser = argparse.ArgumentParser(description=
'Generate a graphs from a TERRA hst file.')
parser.add_argument('hstfile', help='TERRA hst file')
parser.add_argument('-o', '--outfile', help='Ouput graph to a file')
parser.add_argument('--heatflux', help='Generate heatflux graph',
action='store_true')
parser.add_argument('-r', '--restart',
help='File name of first stage of restarted run')
args = parser.parse_args()
if args.restart is not None:
# A restart job. The header information lives somewhere else!
nr, header, iter_num, time, rshl, tav, htop, hbot, hrad, heat = \
read_terra_hst(args.restart, header_only=True)
nr, header, iter_num, time, rshl, tav, htop, hbot, hrad, heat = \
read_terra_hst(args.hstfile, header_in=header, nr_in=nr,
rshl_in=rshl)
else:
nr, header, iter_num, time, rshl, tav, htop, hbot, hrad, heat = \
read_terra_hst(args.hstfile)
for line in header:
        print(line.rstrip('\r\n').strip())
if args.heatflux:
terraheat.plot_heat(time, htop, hbot, hrad, heat, filename=args.outfile)
else:
plot_layertemp_iter(iter_num, rshl, tav, filename=args.outfile)
|
{
"content_hash": "d783eefdc9d8c74bc49d0e28ac09b411",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 80,
"avg_line_length": 35.18257261410788,
"alnum_prop": 0.5498289892676023,
"repo_name": "andreww/theia_tools",
"id": "851ac5ec90729cc23ef1152b0ff0b08b6707b569",
"size": "8502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terrahst.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49499"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from iswift.folderlist.models import company_tree
from iswift.folderlist.models import folder_tree
from iswift.folderlist.models import user_storage
admin.site.register(folder_tree)
admin.site.register(company_tree)
admin.site.register(user_storage)
|
{
"content_hash": "2722212a5cbd7413688590dccf4cbf5f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 28.6,
"alnum_prop": 0.8356643356643356,
"repo_name": "gotostack/iSwift",
"id": "d179ce2cf7200fffe4f4d809854624d5b928cc16",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iswift/folderlist/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5577"
},
{
"name": "Python",
"bytes": "17233"
},
{
"name": "Shell",
"bytes": "15911"
}
],
"symlink_target": ""
}
|
import torch
from torch.distributions import Normal, Categorical
class MixtureNormal(object):
def __init__(self, pi, loc, scale):
super(MixtureNormal, self).__init__()
self.pi = pi
self.loc = loc
self.scale = scale
self.normal_pd = Normal(self.loc, self.scale)
self.pi_pd = Categorical(self.pi)
def sample(self):
with torch.no_grad():
raw_sample = self.normal_pd.sample()
index = self.pi_pd.sample().unsqueeze(-1)
return torch.gather(raw_sample, -1, index).squeeze(-1)
def log_prob(self, value):
value = value.unsqueeze(-1).expand_as(self.loc)
probs = self.normal_pd.log_prob(value).exp()
weighted_probs = self.pi * probs
sum_prob = torch.sum(weighted_probs, -1)
return sum_prob.log()
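# Illustrative usage sketch (shapes chosen arbitrarily): a batch of one
# two-component mixture, 30% N(0, 1) and 70% N(5, 0.5).
#
#     pi = torch.tensor([[0.3, 0.7]])
#     loc = torch.tensor([[0.0, 5.0]])
#     scale = torch.tensor([[1.0, 0.5]])
#     dist = MixtureNormal(pi, loc, scale)
#     x = dist.sample()        # shape (1,)
#     lp = dist.log_prob(x)    # shape (1,)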
|
{
"content_hash": "a4535381d0e7632e359249290d657c06",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 32.07692307692308,
"alnum_prop": 0.5959232613908872,
"repo_name": "qsheeeeen/Self-Driving-Car",
"id": "2db88e1cd7f79adb2be260a6ccc1b519e3e372ef",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_toolbox/distributions/mixture_normal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37465"
}
],
"symlink_target": ""
}
|
from . import core as ac
from . import web_api
from .arg_parser import parse_args
def main():
args = parse_args()
ac.dbInit()
if args.webui:
web_api.start()
if args.version:
print("Anicolle Server: v0.2.3")
|
{
"content_hash": "64de3348dcce1054e4d3f68b15dbd059",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 20.083333333333332,
"alnum_prop": 0.6141078838174274,
"repo_name": "chienius/anicolle",
"id": "8df7ed79b39fb9e696b3173c3c275bbce825cd2f",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anicolle/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16612"
}
],
"symlink_target": ""
}
|
import unittest
import pipeline_xml as pxml
import version
class TestXML(unittest.TestCase):
def test_simplest_xml(self):
p = pxml.Pipeline()
p.source = pxml.Reader('foo')
xml = p.xml()
self.assertEqual(xml.getroot().attrib['version'], version.as_string())
self.assertEqual(xml.find('Reader').attrib['type'],
'readers.foo')
def test_writer(self):
xml = pxml.Writer('bar').xml()
self.assertEqual(xml.tag, 'Writer')
self.assertEqual(xml.attrib['type'], 'writers.bar')
def test_pipeline_version(self):
xml = pxml.Pipeline('custom version').xml()
self.assertEqual(xml.getroot().attrib['version'], 'custom version')
def test_filter(self):
xml = pxml.Filter('foo').xml()
self.assertEqual(xml.tag, 'Filter')
self.assertEqual(xml.attrib['type'], 'filters.foo')
def test_multifilter(self):
xml = pxml.MultiFilter('foo').xml()
self.assertEqual(xml.tag, 'MultiFilter')
self.assertEqual(xml.attrib['type'], 'filters.foo')
def test_writer_source(self):
writer = pxml.Writer('foo')
writer.source = pxml.Reader('bar')
xml = writer.xml()
self.assert_(xml.find('Reader') is not None)
def test_multifilter_source(self):
mfilter = pxml.MultiFilter('multi')
mfilter.source = [pxml.Reader('foo'), pxml.Reader('bar')]
xml = mfilter.xml()
self.assertEqual(len(xml.findall('Reader')), 2)
def test_reader_source_error(self):
reader = pxml.Reader('foo')
otherreader = pxml.Reader('bar')
self.assertRaises(ValueError, setattr, reader, 'source', otherreader)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0051491890f54178ae26f74983164a68",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 32.648148148148145,
"alnum_prop": 0.6063528077141237,
"repo_name": "DougFirErickson/PDAL",
"id": "db8d19dd416fdd815879ff1270f2947e25f1fb52",
"size": "1763",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pdal/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7294"
},
{
"name": "C",
"bytes": "13415"
},
{
"name": "C++",
"bytes": "2242551"
},
{
"name": "CMake",
"bytes": "131090"
},
{
"name": "Python",
"bytes": "3791"
},
{
"name": "SQLPL",
"bytes": "844"
},
{
"name": "Shell",
"bytes": "20238"
}
],
"symlink_target": ""
}
|
from django.db import models
from .utils.network.client import NetworkClient
from .utils.network.errors import NetworkClientError
class NetworkLocation(models.Model):
"""
``NetworkLocation`` stores information about a network address through which an instance of Kolibri can be accessed,
which can be used to sync content or data.
"""
base_url = models.CharField(max_length=100)
application = models.CharField(max_length=32, blank=True)
kolibri_version = models.CharField(max_length=100, blank=True)
instance_id = models.CharField(max_length=32, blank=True)
device_name = models.CharField(max_length=100, blank=True)
operating_system = models.CharField(max_length=32, blank=True)
added = models.DateTimeField(auto_now_add=True)
last_accessed = models.DateTimeField(auto_now=True)
@property
def available(self):
try:
info = NetworkClient(base_url=self.base_url).info
self.application = info.get("application", self.application) or ""
self.kolibri_version = info.get("kolibri_version", self.kolibri_version) or ""
self.device_name = self.device_name or info.get("device_name") or ""
self.instance_id = info.get("instance_id", self.instance_id) or ""
self.operating_system = info.get("operating_system", self.operating_system) or ""
self.save()
return True
except NetworkClientError:
return False
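# Illustrative usage sketch (the address below is made up): probing a stored
# location refreshes its metadata as a side effect of the `available` property.
#
#     location = NetworkLocation.objects.create(base_url='http://192.168.1.5:8080/')
#     if location.available:
#         print(location.device_name, location.kolibri_version)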
|
{
"content_hash": "7362f41f610070d301c454a550ae225e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 120,
"avg_line_length": 41.138888888888886,
"alnum_prop": 0.6806212018906145,
"repo_name": "DXCanas/kolibri",
"id": "7d01fd6addfbe834ae09249b213a42da8098f2e7",
"size": "1481",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/discovery/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "32872"
},
{
"name": "Dockerfile",
"bytes": "4332"
},
{
"name": "Gherkin",
"bytes": "115979"
},
{
"name": "HTML",
"bytes": "14251"
},
{
"name": "JavaScript",
"bytes": "890295"
},
{
"name": "Makefile",
"bytes": "9885"
},
{
"name": "Python",
"bytes": "1363204"
},
{
"name": "Shell",
"bytes": "10407"
},
{
"name": "Vue",
"bytes": "944905"
}
],
"symlink_target": ""
}
|
"""
Module used to perform integration testing on lostromos, by attempting multiple create/update/delete commands via
kubectl.
"""
import os
import requests
import signal
import subprocess
from time import sleep
from unittest import TestCase
_LOSTROMOS_EXE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "lostromos")
_TEST_DATA_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
_LOSTROMOS_CONFIGURATION_FILE = os.path.join(_TEST_DATA_DIRECTORY, "config.yaml")
_CUSTOM_RESOURCE_DEFINION_FILE = os.path.join(_TEST_DATA_DIRECTORY, "crd.yml")
_THINGS_CUSTOM_RESOURCE_FILE = os.path.join(_TEST_DATA_DIRECTORY, "cr_things.yml")
_THINGS_FILTERED_CUSTOM_RESOURCE_FILE = os.path.join(_TEST_DATA_DIRECTORY, "cr_things_filter.yml")
_THINGS_FILTERED_UPDATE_CUSTOM_RESOURCE_FILE = os.path.join(_TEST_DATA_DIRECTORY, "cr_things_filter_update.yml")
_NEMO_CUSTOM_RESOURCE_FILE = os.path.join(_TEST_DATA_DIRECTORY, "cr_nemo.yml")
_NEMO_UPDATE_CUSTOM_RESOURCE_FILE = os.path.join(_TEST_DATA_DIRECTORY, "cr_nemo_update.yml")
class Kubectl(object):
"""
Class used to interact with kubectl and return data.
"""
def __run_command(self, command, filepath, raise_error):
"""
        Run a command via kubectl. Raises an error if the command fails and raise_error is True.
:param command: The command to run.
:param filepath: The path to the file to use for the command.
:param raise_error: Whether to raise the error should one occur. If False any error is swallowed.
"""
try:
subprocess.check_call([
"kubectl",
command,
"-f",
filepath
])
except subprocess.CalledProcessError as error:
if raise_error:
raise error
def apply(self, filepath):
"""
Run an apply with the given filepath. Will raise an error if the command fails.
:param filepath: The file to be used for the given apply.
"""
self.__run_command("apply", filepath, True)
def delete(self, filepath, raise_error=False):
"""
        Run a delete with the given filepath. Only raises an error if raise_error is True.
        :param filepath: The file to be used for the given delete.
        :param raise_error: Whether to raise any error instead of silently swallowing it.
"""
self.__run_command("delete", filepath, raise_error)
class Helm(object):
"""
Class used to interact with helm and return data
"""
def delete(self, release_name):
"""
Delete a named helm release
"""
subprocess.call(
[
"helm",
"delete",
"--purge",
release_name
]
)
def init(self):
"""
Run a helm init to ensure there is a working tiller
:return:
"""
subprocess.check_call(
[
"helm",
"init"
]
)
def status(self, release_name):
"""
Return the output of helm status
:param release_name: The name of the release to get
:return: The output of command as bytes
"""
try:
output = subprocess.check_output(
[
"helm",
"status",
release_name
]
)
return output
except subprocess.CalledProcessError as error:
return error.output
class HelmIntegrationTest(TestCase):
"""
    Class used to perform Lostromos integration testing with helm against a minikube environment. Uses kubectl to
    manipulate the kubernetes system and the helm CLI to inspect and clean up releases.
"""
def setUp(self):
"""
Ensure the custom resource definition exists, and set up Helm.
"""
self.__kubectl = Kubectl()
# Set up the tiller and expose it via a nodeport service
self.__helm = Helm()
self.__helm.init()
self.__kubectl.apply(_TEST_DATA_DIRECTORY + "/tiller_nodeport_service.yml")
self.__kubectl.apply(_CUSTOM_RESOURCE_DEFINION_FILE)
self.__kubectl.delete(_NEMO_CUSTOM_RESOURCE_FILE)
self.__minikube_ip = subprocess.check_output(["minikube", "ip"]).strip().decode('utf-8')
def runTest(self):
"""
Run test using Lostromos with a helm controller.
"""
print("Starting Lostromos with helm controller")
self.__lostromos_process = subprocess.Popen(
[
_LOSTROMOS_EXE,
"start",
"--config",
_TEST_DATA_DIRECTORY + "/helm/wait-config.yaml",
"--helm-tiller",
self.__minikube_ip + ":32664",
],
)
print("Started Lostromos with PID: {}".format(self.__lostromos_process.pid))
# Sleep for a bit until the tiller is available
sleep(15)
self.__kubectl.apply(_NEMO_CUSTOM_RESOURCE_FILE)
self.__wait_for_helm_to_fail(15)
def tearDown(self):
"""
Kill the lostromos process if it was created.
"""
self.__kubectl.delete(_CUSTOM_RESOURCE_DEFINION_FILE)
self.__helm.delete("lostromos-nemo")
if self.__lostromos_process:
self.__lostromos_process.send_signal(signal.SIGINT)
def __wait_for_helm_to_fail(self, timeout):
"""
Wait for the helm timeout to be reached and verify the release is marked as failed
:return:
"""
seconds_to_sleep = 1
# Check that the release wasn't immediately marked as failed or successful
output = self.__helm.status("lostromos-nemo")
self.assertNotIn(
"STATUS: FAILED",
output.decode("utf-8"),
"Helm release is FAILED but did not wait for the timeout"
)
self.assertNotIn("STATUS: DEPLOYED", output.decode("utf-8"), "Helm release is DEPLOYED but should be FAILED")
while timeout > 0:
try:
output = self.__helm.status("lostromos-nemo")
self.assertIn("STATUS: FAILED", output.decode("utf-8"))
return
except AssertionError:
sleep(1)
timeout -= seconds_to_sleep
raise AssertionError("Helm release not marked as failed")
class TemplateIntegrationTestWithFiltering(TestCase):
"""
Class used to perform Lostromos integration testing against a minikube environment. Uses kubectl to manipulate the
kubernetes system.
"""
def setUp(self):
"""
Ensure the custom resource definition exists, and set up the status and metrics url.
"""
self.__kubectl = Kubectl()
# Ensure the CRD is there and there are no characters, for a clean starting point
self.__kubectl.apply(_CUSTOM_RESOURCE_DEFINION_FILE)
self.__kubectl.delete(_THINGS_CUSTOM_RESOURCE_FILE)
self.__kubectl.delete(_THINGS_FILTERED_CUSTOM_RESOURCE_FILE)
self.__kubectl.delete(_THINGS_FILTERED_UPDATE_CUSTOM_RESOURCE_FILE)
self.__kubectl.delete(_NEMO_CUSTOM_RESOURCE_FILE)
self.__kubectl.delete(_NEMO_UPDATE_CUSTOM_RESOURCE_FILE)
self.__lostromos_process = None
self.__status_url = "http://localhost:8080/status"
self.__metrics_url = "http://localhost:8080/metrics"
def runTest(self):
"""
Ensure Lostromos is functioning as expected. Does the following steps.
1. Ensures we see thing1 and thing2 as existing on the system.
2. Add the nemo custom resource and see that Lostromos sees it as created.
3. Modify the nemo custom resource and see that Lostromos sees it as updated.
4. Delete both sets of custom resources and see that Lostromos picks them up as deleted.
"""
self.__lostromos_process = subprocess.Popen(
[
_LOSTROMOS_EXE,
"start",
"--nop",
"--config",
_LOSTROMOS_CONFIGURATION_FILE,
]
)
print("Started Lostromos with PID: {}".format(self.__lostromos_process.pid))
self.__wait_for_lostromos_start()
self.__kubectl.apply(_THINGS_CUSTOM_RESOURCE_FILE)
self.__check_metrics(2, 2, 2, 0, 0)
self.__kubectl.apply(_NEMO_CUSTOM_RESOURCE_FILE)
self.__check_metrics(3, 3, 3, 0, 0)
self.__kubectl.apply(_NEMO_UPDATE_CUSTOM_RESOURCE_FILE)
self.__check_metrics(4, 3, 3, 0, 1)
self.__kubectl.delete(_THINGS_CUSTOM_RESOURCE_FILE, True)
self.__check_metrics(6, 1, 3, 2, 1)
self.__kubectl.delete(_NEMO_CUSTOM_RESOURCE_FILE, True)
self.__check_metrics(7, 0, 3, 3, 1)
self.__lostromos_process.kill()
self.__lostromos_process = subprocess.Popen(
[
_LOSTROMOS_EXE,
"start",
"--nop",
"--config",
_LOSTROMOS_CONFIGURATION_FILE,
"--crd-filter",
"io.nicolerenee.lostromosApplied",
]
)
print("Started Lostromos with PID: {}".format(self.__lostromos_process.pid))
self.__wait_for_lostromos_start()
self.__kubectl.apply(_THINGS_FILTERED_CUSTOM_RESOURCE_FILE)
self.__check_metrics(2, 2, 2, 0, 0)
self.__kubectl.apply(_THINGS_FILTERED_UPDATE_CUSTOM_RESOURCE_FILE)
self.__check_metrics(5, 2, 3, 1, 1)
self.__kubectl.delete(_THINGS_FILTERED_UPDATE_CUSTOM_RESOURCE_FILE)
self.__check_metrics(7, 0, 3, 3, 1)
def tearDown(self):
"""
Kill the lostromos process if it was created.
"""
self.__kubectl.delete(_CUSTOM_RESOURCE_DEFINION_FILE)
if self.__lostromos_process:
self.__lostromos_process.send_signal(signal.SIGINT)
def __check_metrics(self, events, managed, created, deleted, updated):
"""
Check the metrics output to ensure that what we are expecting has occurred. Will wait up to 10 seconds looking
for the expected amount of events to have occurred. If the events haven't occurred, then an assertionError will
be raised. If the events occurred, we will check the stats for the managed/created/deleted/updated resources.
:param events: Number of events we are expecting to have happened.
:param managed: Number of resources we expect Lostromos to be managing.
:param created: Number of resources we expect Lostromos to have created.
:param deleted: Number of resources we expect Lostromos to have deleted.
:param updated: Number of resources we expect Lostromos to have updated.
"""
metrics = []
attempts = 10
while attempts > 0:
metrics_response = requests.get(self.__metrics_url)
metrics_response.raise_for_status()
metrics = metrics_response.text.split("\n")
if "releases_events_total {}".format(events) not in metrics:
sleep(1)
attempts -= 1
else:
self.assertIn("releases_total {}".format(managed), metrics)
self.assertIn("releases_create_total {}".format(created), metrics)
self.assertIn("releases_delete_total {}".format(deleted), metrics)
self.assertIn("releases_update_total {}".format(updated), metrics)
return
raise AssertionError("Failed to see the expected number of events. {}".format(metrics))
def __wait_for_lostromos_start(self):
"""
Wait for Lostromos to start up, then return.
"""
# 15 seconds is probably more than we need, but the main use of these tests will be to run in TravisCI, and
# since we don't control that infrastructure it makes sense to inflate the value a bit. An extra 10 seconds
# should cause no harm, but help out in cases where the Travis servers are overwhelmed.
seconds_to_wait = 15
seconds_to_sleep = 1
while seconds_to_wait > 0:
            try:
                status_response = requests.get(self.__status_url)
                status_response.raise_for_status()
                self.assertTrue(status_response.json()["success"])
                return
            except requests.exceptions.ConnectionError:
                sleep(seconds_to_sleep)
                seconds_to_wait -= seconds_to_sleep
        raise AssertionError("Failed to start Lostromos.")
|
{
"content_hash": "3667c4dd9edf4fc19d9119547f984be2",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 119,
"avg_line_length": 39.13354037267081,
"alnum_prop": 0.5951908578684232,
"repo_name": "wpdrush21/lostromos",
"id": "16eef073c529af29170af6da5e416b572ebdb1ad",
"size": "13187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/scripts/integration_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "89147"
},
{
"name": "Makefile",
"bytes": "2866"
},
{
"name": "Python",
"bytes": "13187"
},
{
"name": "Shell",
"bytes": "1372"
},
{
"name": "Smarty",
"bytes": "1032"
}
],
"symlink_target": ""
}
|
"""
sqlalchemy-geonames
-------------------
"""
from __future__ import print_function
from setuptools import setup, find_packages
appname = 'sqlalchemy-geonames'
pkgname = appname.lower().replace('-', '_')
metadata_relpath = '{}/metadata.py'.format(pkgname)
# Get package metadata. We use exec here instead of importing the
# package directly, so we can avoid any potential import errors.
with open(metadata_relpath) as fh:
metadata = {}
exec(fh.read(), globals(), metadata)
setup(
name=appname,
version=metadata['__version__'],
description='',
long_description=__doc__,
packages=find_packages(exclude=('sqlalchemy_geonames.tests', )),
install_requires=[
'GeoAlchemy2',
'ipdb',
'progressbar2',
'psycopg2',
'requests',
'SQLAlchemy',
],
entry_points={
'console_scripts': {
'sqlageonames = sqlalchemy_geonames.bin.sqlageonames:main',
},
},
extras_require={
'test': {
'coverage>=4.2',
'flake8>=3.0.4',
'pytest>=3.0.3',
'sqlalchemy-utils>=0.32.9',
},
},
author='Jacob Magnusson',
author_email='m@jacobian.se',
url='https://github.com/jmagnusson/sqlalchemy-geonames',
license='BSD',
platforms=['unix', 'macos'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
],
)
|
{
"content_hash": "7757f6c2cf33900e80c757b9512168a3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 71,
"avg_line_length": 26.442622950819672,
"alnum_prop": 0.5778053316800992,
"repo_name": "jmagnusson/sqlalchemy-geonames",
"id": "6428b22f633b9107b0aaf1d13846a582e100807c",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "43028"
},
{
"name": "Shell",
"bytes": "244"
}
],
"symlink_target": ""
}
|
from base import SqoopClient
from connection import Connection
from connector import Connector
from framework import Framework
from job import Job
from submission import Submission
|
{
"content_hash": "42098aef0d3fd0e7ba31a4e9d76e629e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.8674033149171271,
"repo_name": "yongshengwang/builthue",
"id": "3b3495c4313db667ea2b4139f7ad72b644b15076",
"size": "951",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/sqoop/src/sqoop/client/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10774013"
},
{
"name": "C++",
"bytes": "184593"
},
{
"name": "CSS",
"bytes": "655282"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2418037"
},
{
"name": "Makefile",
"bytes": "86977"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "282"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "29990389"
},
{
"name": "Shell",
"bytes": "38643"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "99710"
},
{
"name": "XSLT",
"bytes": "367778"
}
],
"symlink_target": ""
}
|
from AlgorithmImports import *
### <summary>
### In this algorithm, we fetch a list of tickers with corresponding dates from a file on Dropbox.
### We then create a fine fundamental universe which contains those symbols on their respective dates.
### </summary>
### <meta name="tag" content="download" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="custom data" />
class DropboxCoarseFineAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2019, 9, 23) # Set Start Date
self.SetEndDate(2019, 9, 30) # Set End Date
self.SetCash(100000) # Set Strategy Cash
self.AddUniverse(self.SelectCoarse, self.SelectFine)
self.universeData = None
self.nextUpdate = datetime(1, 1, 1) # Minimum datetime
self.url = "https://www.dropbox.com/s/x2sb9gaiicc6hm3/tickers_with_dates.csv?dl=1"
def OnEndOfDay(self):
for security in self.ActiveSecurities.Values:
self.Debug(f"{self.Time.date()} {security.Symbol.Value} with Market Cap: ${security.Fundamentals.MarketCap}")
def SelectCoarse(self, coarse):
return self.GetSymbols()
def SelectFine(self, fine):
symbols = self.GetSymbols()
# Return symbols from our list which have a market capitalization of at least 10B
return [f.Symbol for f in fine if f.MarketCap > 1e10 and f.Symbol in symbols]
def GetSymbols(self):
# In live trading update every 12 hours
if self.LiveMode:
if self.Time < self.nextUpdate:
# Return today's row
return self.universeData[self.Time.date()]
# When updating set the new reset time.
self.nextUpdate = self.Time + timedelta(hours=12)
self.universeData = self.Parse(self.url)
# In backtest load once if not set, then just use the dates.
if self.universeData is None:
self.universeData = self.Parse(self.url)
# Check if contains the row we need
if self.Time.date() not in self.universeData:
return Universe.Unchanged
return self.universeData[self.Time.date()]
def Parse(self, url):
# Download file from url as string
file = self.Download(url).split("\n")
        # Remove formatting characters
data = [x.replace("\r", "").replace(" ", "") for x in file]
        # Split data by date and symbol
split_data = [x.split(",") for x in data]
# Dictionary to hold list of active symbols for each date, keyed by date
symbolsByDate = {}
# Parse data into dictionary
for arr in split_data:
date = datetime.strptime(arr[0], "%Y%m%d").date()
symbols = [Symbol.Create(ticker, SecurityType.Equity, Market.USA) for ticker in arr[1:]]
symbolsByDate[date] = symbols
return symbolsByDate
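    # Illustrative sketch of the expected file layout (tickers made up): one
    # row per date, a YYYYMMDD date followed by the tickers active on that
    # date, e.g.
    #
    #     20190923,AAPL,MSFT,GOOG
    #     20190924,IBM,KO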
def OnSecuritiesChanged(self, changes):
self.Log(f"Added Securities: {[security.Symbol.Value for security in changes.AddedSecurities]}")
|
{
"content_hash": "f2385092101a97b38c30d9d539de2028",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 121,
"avg_line_length": 40.139240506329116,
"alnum_prop": 0.6127404604225797,
"repo_name": "JKarathiya/Lean",
"id": "c308adb9f90eb8f992abcb65e2b39da23b1ee992",
"size": "3859",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Algorithm.Python/DropboxCoarseFineAlgorithm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "411"
},
{
"name": "C#",
"bytes": "23776443"
},
{
"name": "CSS",
"bytes": "10299"
},
{
"name": "Dockerfile",
"bytes": "1172"
},
{
"name": "HTML",
"bytes": "15714"
},
{
"name": "Jupyter Notebook",
"bytes": "32187"
},
{
"name": "Python",
"bytes": "1222583"
},
{
"name": "Shell",
"bytes": "3408"
}
],
"symlink_target": ""
}
|
from share import ShareAnalysisBase
from share2 import ShareAnalysis2
from ..core.corrections import TurbulenceCorrection
from ..core.turbine import Relaxation
from ..core.analysis import Analysis
from ..configuration.inner_range_configuration import InnerRangeDimension
from ..core.path_builder import PathBuilder
from ..core.status import Status
class ShareAnalysis3(ShareAnalysis2):
INCLUDE_DENSITY_IN_INNER_RANGE = False
DENSITY_RANGE_WIDTH = 0.10
def get_interpolation_mode(self):
return "Marmander (Cubic Hermite)"
def should_apply_density_correction_to_baseline(self):
if ShareAnalysis3.INCLUDE_DENSITY_IN_INNER_RANGE:
return False
else:
return True
def calculate_corrections(self):
ShareAnalysis2.calculate_corrections(self)
self.calculate_augmented_turbulence_correction_with_relaxation()
def set_pdm_path(self, filename):
if self.inner_range_id is None:
raise Exception('Cannot set range specific PDM path as inner range is undefined.')
filename = filename.replace('RANGE', self.inner_range_id)
Status.add("Using matrix {0}".format(filename))
pdm_path = PathBuilder.get_path(filename, folder_relative_to_root='Data')
self.specified_power_deviation_matrix.absolute_path = pdm_path
def calculate_pdm_corrections(self):
self.calculate_pdm_based('HypothesisMatrix_2D_Share3_RANGE.xml')
self.calculate_pdm_based('HypothesisMatrix_3D_Share3_RANGE.xml')
def calculate_augmented_turbulence_correction_with_relaxation(self):
self.powerCurve.update_zero_ti(Relaxation(0.7))
Status.add("Relaxed Zero-TI Curve")
for i in range(len(self.powerCurve.zeroTurbulencePowerCurve.wind_speeds)):
Status.add("{0} {1}".format(self.powerCurve.zeroTurbulencePowerCurve.wind_speeds[i],
self.powerCurve.zeroTurbulencePowerCurve.powers[i]), verbosity=2)
correction = TurbulenceCorrection(self.dataFrame,
self.baseline,
self.hubTurbulence,
self.normalisedWS,
self.powerCurve,
augment=True,
relaxed=True)
self.register_correction(correction)
self.powerCurve.revert_zero_ti()
def set_inner_range(self, inner_range_id):
ShareAnalysis2.set_inner_range(self, inner_range_id)
if ShareAnalysis3.INCLUDE_DENSITY_IN_INNER_RANGE:
range_half_width = 0.5 * ShareAnalysis3.DENSITY_RANGE_WIDTH
self.inner_range_dimensions.append(InnerRangeDimension("Density",
Analysis.STANDARD_DENSITY - range_half_width,
Analysis.STANDARD_DENSITY + range_half_width))
|
{
"content_hash": "d3e72820fe6b588d2f5e7949d11154cb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 113,
"avg_line_length": 39.45454545454545,
"alnum_prop": 0.6214614878209348,
"repo_name": "peterdougstuart/PCWG",
"id": "a4782c2ce2ee93d3a90564d833d7536d6a7c6bea",
"size": "3039",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pcwg/share/share3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2475"
},
{
"name": "Python",
"bytes": "921759"
}
],
"symlink_target": ""
}
|
"""Example of a HTTP request handler that supports requests via a HTTP proxy."""
#
# In order to run this sample, you will need to have a proxy available to
# relay your requests to Splunk. One way to do this is to run the tiny-proxy.py
# script included in this directory and then run this script using whatever
# port you bound tiny-proxy to, eg:
#
# > python tiny-proxy.py -p 8080
# > python handler_proxy.py --proxy=localhost:8080
#
from pprint import pprint
from StringIO import StringIO
import sys, os
import ssl
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import urllib2
import splunklib.client as client
try:
import utils
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
RULES = {
"proxy": {
'flags': ["--proxy"],
'default': "localhost:8080",
'help': "Use proxy on given <host[:port]> (default localhost:8080)",
}
}
def request(url, message, **kwargs):
method = message['method'].lower()
data = message.get('body', "") if method == 'post' else None
headers = dict(message.get('headers', []))
req = urllib2.Request(url, data, headers)
try:
response = urllib2.urlopen(req)
except urllib2.URLError, response:
# If running Python 2.7.9+, disable SSL certificate validation and try again
if sys.version_info >= (2, 7, 9):
response = urllib2.urlopen(req, context=ssl._create_unverified_context())
else:
raise
except urllib2.HTTPError, response:
pass # Propagate HTTP errors via the returned response message
return {
'status': response.code,
'reason': response.msg,
'headers': response.info().dict,
'body': StringIO(response.read())
}
def handler(proxy):
proxy_handler = urllib2.ProxyHandler({'http': proxy, 'https': proxy})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)
return request
opts = utils.parse(sys.argv[1:], RULES, ".splunkrc")
proxy = opts.kwargs['proxy']
try:
service = client.connect(handler=handler(proxy), **opts.kwargs)
pprint([app.name for app in service.apps])
except urllib2.URLError as e:
if e.reason.errno == 1 and sys.version_info < (2, 6, 3):
# There is a bug in Python < 2.6.3 that does not allow proxies with
# HTTPS. You can read more at: http://bugs.python.org/issue1424152
pass
else:
raise
|
{
"content_hash": "39a9b2e4ee404072c75dd9f4227af836",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 85,
"avg_line_length": 33.60526315789474,
"alnum_prop": 0.6503523884103367,
"repo_name": "sullivanmatt/splunk-sdk-python",
"id": "aa15db88dc33a23a445102849186336ffe5a372a",
"size": "3160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/handlers/handler_proxy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "727032"
}
],
"symlink_target": ""
}
|
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
# Mapping from function to the new name of the function
self.function_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_fft2d": "tf.fft2d",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
}
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.nn.sigmoid_cross_entropy_with_logits": [
"logits", "labels", "name"]
}
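    # Illustrative effect of the reorder + rename tables above: a positional
    # call such as ``tf.concat(1, tensors)`` would be rewritten to
    # ``tf.concat(axis=1, values=tensors)`` -- the old positional order
    # supplies the keyword names, which function_keyword_renames then maps
    # (e.g. "concat_dim" -> "axis").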
# Specially handled functions.
self.function_handle = {"tf.reverse": self._reverse_handler}
@staticmethod
def _reverse_handler(file_edit_recorder, node):
# TODO(aselle): Could check for a literal list of bools and try to convert
# them to indices.
comment = ("ERROR: tf.reverse has had its argument semantics changed\n"
"significantly the converter cannot detect this reliably, so you"
"need to inspect this usage manually.\n")
file_edit_recorder.add(comment,
node.lineno,
node.col_offset,
"tf.reverse",
"tf.reverse",
error="tf.reverse requires manual check.")
class FileEditTuple(collections.namedtuple(
"FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset within the line where the edit begins (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
# all edits are lists of chars
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
    # Iterate over each line that has edits
for line, edits in self._line_to_edit.items():
offset = 0
      # Sort by column so that edits are processed in order, which keeps the
      # indexing adjustments cumulative for changes that alter the string
      # length.
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
      # Make tilde buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
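# A minimal usage sketch of FileEditRecorder (the file name and edit are made
# up for illustration):
#
#   recorder = FileEditRecorder("example.py")
#   recorder.add("Renamed function", line=1, start=0, old="tf.mul",
#                new="tf.multiply")
#   new_text, report, errors = recorder.process(["tf.mul(a, b)\n"])
#
# ``process`` applies the recorded edits to the given lines and returns the
# rewritten text, a human-readable report, and any accumulated errors.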
class TensorFlowCallVisitor(ast.NodeVisitor):
"""AST Visitor that finds TensorFlow Function calls.
Updates function calls from old API version to new API version.
"""
def __init__(self, filename, lines):
self._filename = filename
self._file_edit = FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = APIChangeSpec()
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
This is necessary mainly because ListComp's location reporting reports
the next token after the list comprehension list opening.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
    find_open = re.compile(r"^\s*(\[).*$")
    find_string_chars = re.compile(r"['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
        # Reverse the preceding text and use a regular expression to search it
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
    # Most other nodes return proper locations (the `with` statement notably
    # does not), but it is not possible to use that in an argument.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name and full_name.startswith("tf."):
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if (argval_lineno is not None and argval_col_offset is not None):
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if self._lines[argval_lineno - 1][key_start:key_end] == argkey + "=":
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith("tf."):
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class TensorFlowCodeUpgrader(object):
"""Class that handles upgrading a set of Python files to TensorFlow 1.0."""
def __init__(self):
pass
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = TensorFlowCallVisitor(in_filename, lines)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to upgrade it manually.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base
Returns:
      A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
return file_count, report, tree_errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = TensorFlowCodeUpgrader()
report_text = None
report_filename = args.report_filename
  files_processed = 0
  errors = []
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
|
{
"content_hash": "477dd667ede56f3b2a6bb30a7080c40d",
"timestamp": "",
"source": "github",
"line_count": 668,
"max_line_length": 81,
"avg_line_length": 37.01347305389221,
"alnum_prop": 0.598948432760364,
"repo_name": "HKUST-SING/tensorflow",
"id": "39ba22c44727a686028aea57b8981497212fa674",
"size": "25414",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/tools/compatibility/tf_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175403"
},
{
"name": "C++",
"bytes": "21786680"
},
{
"name": "CMake",
"bytes": "130702"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "558790"
},
{
"name": "Java",
"bytes": "279510"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36991"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "200763"
},
{
"name": "Python",
"bytes": "17932974"
},
{
"name": "Shell",
"bytes": "324690"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
}
|
import re
from rpython.rlib import debug
from rpython.jit.tool.oparser import pure_parse
from rpython.jit.metainterp import logger
from rpython.jit.metainterp.typesystem import llhelper
from StringIO import StringIO
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.metainterp.history import AbstractDescr, JitCellToken, BasicFailDescr, BasicFinalDescr
from rpython.jit.backend.model import AbstractCPU
class Descr(AbstractDescr):
final_descr = False
def capturing(func, *args, **kwds):
log_stream = StringIO()
class MyDebugLog:
def debug_print(self, *args):
for arg in args:
print >> log_stream, arg,
print >> log_stream
def debug_start(self, *args):
pass
def debug_stop(self, *args):
pass
try:
debug._log = MyDebugLog()
func(*args, **kwds)
finally:
debug._log = None
return log_stream.getvalue()
class Logger(logger.Logger):
def log_loop(self, loop, namespace={}, ops_offset=None, name=''):
self.namespace = namespace
return capturing(logger.Logger.log_loop, self,
loop.inputargs, loop.operations, ops_offset=ops_offset,
name=name)
def _make_log_operations(self1, memo):
class LogOperations(logger.LogOperations):
def repr_of_descr(self, descr):
for k, v in self1.namespace.items():
if v == descr:
return k
return descr.repr_of_descr()
logops = LogOperations(self1.metainterp_sd, self1.guard_number, memo)
self1.logops = logops
return logops
class TestLogger(object):
ts = llhelper
def make_metainterp_sd(self):
class FakeJitDriver(object):
class warmstate(object):
get_location_str = staticmethod(lambda args: "dupa")
class FakeMetaInterpSd:
cpu = AbstractCPU()
cpu.ts = self.ts
jitdrivers_sd = [FakeJitDriver()]
def get_name_from_address(self, addr):
return 'Name'
return FakeMetaInterpSd()
def reparse(self, inp, namespace=None, check_equal=True):
""" parse loop once, then log it and parse again.
Checks that we get the same thing.
"""
if namespace is None:
namespace = {}
loop = pure_parse(inp, namespace=namespace)
logger = Logger(self.make_metainterp_sd())
output = logger.log_loop(loop, namespace)
oloop = pure_parse(output, namespace=namespace)
if check_equal:
remap = {}
for box1, box2 in zip(loop.inputargs, oloop.inputargs):
assert box1.__class__ == box2.__class__
remap[box2] = box1
equaloplists(loop.operations, oloop.operations, remap=remap)
return logger, loop, oloop
def test_simple(self):
inp = '''
[i0, i1, i2, p3, p4, p5]
i6 = int_add(i1, i2)
i8 = int_add(i6, 3)
jump(i0, i8, i6, p3, p4, p5)
'''
self.reparse(inp)
def test_descr(self):
inp = '''
[p0]
setfield_gc(p0, 3, descr=somedescr)
'''
somedescr = Descr()
self.reparse(inp, namespace=locals())
def test_guard(self):
inp = '''
[i0]
i1 = int_add(i0, 1)
guard_true(i0) [i0, i1]
finish(i1)
'''
self.reparse(inp)
def test_guard_not_invalidated(self):
inp = '''
[]
guard_not_invalidated(descr=descr) []
finish(descr=finaldescr)
'''
loop = pure_parse(inp, namespace={'descr': Descr(),
'finaldescr': BasicFinalDescr()})
logger = Logger(self.make_metainterp_sd())
output = logger.log_loop(loop, {'descr': Descr()})
assert 'guard_not_invalidated(descr=' in output
def test_guard_w_hole(self):
inp = '''
[i0]
i1 = int_add(i0, 1)
guard_true(i0) [i0, None, i1]
finish(i1)
'''
self.reparse(inp)
def test_debug_merge_point(self):
inp = '''
[]
debug_merge_point(0, 0, 0)
'''
_, loop, oloop = self.reparse(inp, check_equal=False)
assert loop.operations[0].getarg(1).getint() == 0
assert loop.operations[0].getarg(2).getint() == 0
assert oloop.operations[0].getarg(2)._get_str() == "dupa"
def test_jit_debug(self):
inp = '''
[]
jit_debug('foobar', -1, 5)
'''
_, loop, oloop = self.reparse(inp)
assert loop.operations[0].getarg(0)._get_str() == "foobar"
assert loop.operations[0].getarg(1).getint() == -1
assert oloop.operations[0].getarg(0)._get_str() == "foobar"
assert oloop.operations[0].getarg(1).getint() == -1
def test_floats(self):
inp = '''
[f0]
f1 = float_add(3.5, f0)
'''
_, loop, oloop = self.reparse(inp)
remap = {}
for box1, box2 in zip(loop.inputargs, oloop.inputargs):
assert box1.__class__ == box2.__class__
remap[box2] = box1
equaloplists(loop.operations, oloop.operations, remap=remap)
def test_jump(self):
namespace = {'target': JitCellToken()}
namespace['target'].number = 3
inp = '''
[i0]
jump(i0, descr=target)
'''
loop = pure_parse(inp, namespace=namespace)
logger = Logger(self.make_metainterp_sd())
output = logger.log_loop(loop)
assert output.splitlines()[-1] == "jump(i0, descr=<Loop3>)"
pure_parse(output)
def test_guard_descr(self):
namespace = {'fdescr': BasicFailDescr()}
inp = '''
[i0]
guard_true(i0, descr=fdescr) [i0]
'''
loop = pure_parse(inp, namespace=namespace)
logger = Logger(self.make_metainterp_sd(), guard_number=True)
output = logger.log_loop(loop)
assert re.match("guard_true\(i0, descr=<Guard0x[\da-f]+>\) \[i0\]", output.splitlines()[-1])
pure_parse(output)
logger = Logger(self.make_metainterp_sd(), guard_number=False)
output = logger.log_loop(loop)
lastline = output.splitlines()[-1]
assert lastline.startswith("guard_true(i0, descr=<")
assert not lastline.startswith("guard_true(i0, descr=<Guard")
def test_intro_loop(self):
bare_logger = logger.Logger(self.make_metainterp_sd())
output = capturing(bare_logger.log_loop, [], [], 1, "foo")
assert output.splitlines()[0] == "# Loop 1 () : foo with 0 ops"
pure_parse(output)
def test_intro_bridge(self):
bare_logger = logger.Logger(self.make_metainterp_sd())
output = capturing(bare_logger.log_bridge, [], [], 3)
assert re.match("# bridge out of Guard 0x[\da-f]+ with 0 ops",
output.splitlines()[0])
pure_parse(output)
def test_repr_single_op(self):
inp = '''
[i0, i1, i2, p3, p4, p5]
i6 = int_add(i1, i2)
i8 = int_add(i6, 3)
jump(i0, i8, i6, p3, p4, p5)
'''
logger, loop, _ = self.reparse(inp)
op = loop.operations[1]
assert logger.logops.repr_of_resop(op) == "i8 = int_add(i6, 3)"
def test_ops_offset(self):
inp = '''
[i0]
i1 = int_add(i0, 1)
i2 = int_mul(i1, 2)
jump(i2)
'''
loop = pure_parse(inp)
ops = loop.operations
ops_offset = {
ops[0]: 10,
ops[2]: 30,
None: 40
}
logger = Logger(self.make_metainterp_sd())
output = logger.log_loop(loop, ops_offset=ops_offset, name="foo")
assert output.strip() == """
# Loop 0 (foo) : noopt with 3 ops
[i0]
+10: i2 = int_add(i0, 1)
i4 = int_mul(i2, 2)
+30: jump(i4)
+40: --end of the loop--
""".strip()
|
{
"content_hash": "338f40c65cc55520d0934d24f8e96ddf",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 103,
"avg_line_length": 32.91769547325103,
"alnum_prop": 0.5488186023252907,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "c787f452a273aa1f6be5df58980cbfe7a2492ad7",
"size": "8000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/jit/metainterp/test/test_logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
}
|
import numpy as np
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk4 import FK4NoETerms
from .fk5 import FK5
from .utils import EQUINOX_B1950, EQUINOX_J2000
# FK5 to/from FK4 ------------------->
# B1950->J2000 matrix from Murray 1989 A&A 218,325 eqn 28
_B1950_TO_J2000_M = np.array(
[[0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
[0.0111814832391717, 0.9999374848933135, -0.0000271625947142],
[0.0048590037723143, -0.0000271702937440, 0.9999881946023742]])
_FK4_CORR = np.array(
[[-0.0026455262, -1.1539918689, +2.1111346190],
[+1.1540628161, -0.0129042997, +0.0236021478],
[-2.1112979048, -0.0056024448, +0.0102587734]]) * 1.e-6
def _fk4_B_matrix(obstime):
"""
This is a correction term in the FK4 transformations because FK4 is a
rotating system - see Murray 89 eqn 29
"""
# Note this is *julian century*, not besselian
T = (obstime.jyear - 1950.) / 100.
if getattr(T, 'shape', ()):
# Ensure we broadcast possibly arrays of times properly.
T.shape += (1, 1)
return _B1950_TO_J2000_M + _FK4_CORR * T
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK5)
def fk4_no_e_to_fk5(fk4noecoord, fk5frame):
# Correction terms for FK4 being a rotating system
B = _fk4_B_matrix(fk4noecoord.obstime)
    # construct both precession matrices - if the equinoxes are B1950 and
    # J2000, these are just identity matrices
pmat1 = fk4noecoord._precession_matrix(fk4noecoord.equinox, EQUINOX_B1950)
pmat2 = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return pmat2 @ B @ pmat1
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK4NoETerms)
def fk5_to_fk4_no_e(fk5coord, fk4noeframe):
    # Get transposed version of the rotating correction terms... so with the
    # transpose this takes us from FK5/J2000 to FK4/B1950
B = matrix_transpose(_fk4_B_matrix(fk4noeframe.obstime))
    # construct both precession matrices - if the equinoxes are B1950 and
    # J2000, these are just identity matrices
pmat1 = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
pmat2 = fk4noeframe._precession_matrix(EQUINOX_B1950, fk4noeframe.equinox)
return pmat2 @ B @ pmat1
|
{
"content_hash": "e4d457b0bf7d0640b7329c8a22610fe5",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 40.698412698412696,
"alnum_prop": 0.7250390015600624,
"repo_name": "lpsinger/astropy",
"id": "169eced23a83cc24cfad6735570ea31b23e5646c",
"size": "2630",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/coordinates/builtin_frames/fk4_fk5_transforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from .video_out import VideoOut
from .television import Television
|
{
"content_hash": "2917a6067438dc898d0d47d831381508",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 34,
"avg_line_length": 33.5,
"alnum_prop": 0.835820895522388,
"repo_name": "Hexadorsimal/pynes",
"id": "624bfa18c2f935ae5a8585e96429f96c57a32f20",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nes/video/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42305"
}
],
"symlink_target": ""
}
|
"""
this script extracts commit info.
the commits are memorized only if one ore more java files are commited.
the log file is extracted by the following command:
git log --pretty=format:"commit:%H%nauthor:%an%ndate:%at" --name-only > log_(project_name).txt
this script is called by auto_top.sh
input: log_(project_name).txt
output: (project_name).csv
(project_name)_tc.csv
(project_name)_all.csv
"""
import re
import sys
import csv
import os.path
import os
import datetime
def csv_writer_tc(pjt_name, commit_hash, files):
f = open(pjt_name + '_tc.csv', 'a')
csvWriter = csv.writer(f, lineterminator="\n")
    size = os.path.getsize(pjt_name + '_tc.csv')
if size == 0:
title = ["Commit_hash", "File"]
csvWriter.writerow(title)
file_list = []
file_list.append(commit_hash)
for file in files:
if file[1] == "production":
continue
file_list.append(file[0])
csvWriter.writerow(file_list)
f.close()
return
def csv_writer(pjt_name, commit_hash, date, author, files):
f_pc = open(pjt_name + '.csv', 'a')
csvWriter_pc = csv.writer(f_pc, lineterminator="\n")
size = os.path.getsize(pjt_name + '.csv')
if size == 0:
title = ["Date", "Commit_hash", "Author", "File", "Attribution"]
csvWriter_pc.writerow(title)
f_all = open(pjt_name + '_all.csv', 'a')
csvWriter_all = csv.writer(f_all, lineterminator="\n")
size = os.path.getsize(pjt_name + '_all.csv')
if size == 0:
title = ["Date", "Commit_hash", "Author", "File", "Attribution"]
csvWriter_all.writerow(title)
for file in files:
file_list = []
file_list.append(commit_hash)
file_list.append(date)
file_list.append(author)
file_list.append(file[0])
file_list.append(file[1])
if file[1] == "test":
csvWriter_all.writerow(file_list)
continue
csvWriter_pc.writerow(file_list)
csvWriter_all.writerow(file_list)
f_pc.close()
f_all.close()
return
def log_scraper(pjt_name):
f = open("log_"+pjt_name+".txt", 'r')
line = f.readline()
tc_count = 0
while line:
#while not line.startswith("author:"):
while line == "\n":
line = f.readline()
if not line:
break
if line.startswith("commit:"):
commit_hash = line.split("commit:")[-1]
commit_hash = commit_hash.replace("\n", "")
line = f.readline()
if line.startswith("author:"):
author = line.split("author:")[-1]
author = author.replace("\n", "")
line = f.readline()
if line.startswith("date:"):
utime = line.split("date:")[-1]
utime = utime.replace("\n", "")
date = str(datetime.datetime.fromtimestamp(int(utime)))
date = date.split(" ")[0]
date = date.replace("-", "/")
line = f.readline()
file_list = []
while (not line == "\n") and (line):
#file_path = line.replace("\n", "")
#file_name = file_path.split("/")[-1]
file_name = line.replace("\n", "")
source_code_name = file_name.split("/")[-1]
if "/src/test/java/" in file_name and source_code_name.endswith(".java"):
file_attribution = "test"
elif source_code_name.endswith(".java"):
file_attribution = "production"
else:
line = f.readline()
continue
file_info = []
file_info.append(file_name)
file_info.append(file_attribution)
file_list.append(file_info)
line = f.readline()
if len(file_list) > 0:
csv_writer(pjt_name, commit_hash, date, author, file_list)
for file_info in file_list:
if file_info[1] == "test":
csv_writer_tc(pjt_name, commit_hash, file_list)
tc_count += 1
break
line = f.readline()
f.close()
print str(tc_count) + " commits contain TC modification."
return
if __name__ == "__main__":
argvs = sys.argv
pjt_name = argvs[1]
log_scraper(pjt_name)
|
{
"content_hash": "b737eda6e68f8c3c6575728e92571c64",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 95,
"avg_line_length": 29.129032258064516,
"alnum_prop": 0.6439645625692137,
"repo_name": "hideshis/scripts_for_research",
"id": "712b07958301fe0d2dce49858533f95d3a68d604",
"size": "3636",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "change_history_view/change_history_view_import_and_naming_convention/log_scraper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14628"
},
{
"name": "HTML",
"bytes": "9246561"
},
{
"name": "Java",
"bytes": "5739"
},
{
"name": "JavaScript",
"bytes": "14576"
},
{
"name": "PostScript",
"bytes": "510108"
},
{
"name": "Python",
"bytes": "312380"
},
{
"name": "R",
"bytes": "5553"
},
{
"name": "Shell",
"bytes": "5927"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
def gen_uuid(apps, schema_editor):
WooeyJob = apps.get_model('wooey', 'WooeyJob')
for obj in WooeyJob.objects.all():
obj.uuid = uuid.uuid4()
obj.save()
class Migration(migrations.Migration):
dependencies = [
('wooey', '0012_wooeyjob_uuid'),
]
operations = [
# Set the uuids for existing records
migrations.RunPython(gen_uuid),
]
|
{
"content_hash": "9d2f3f624a0d55e2bbfea9fdb38196a5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 21.434782608695652,
"alnum_prop": 0.6369168356997972,
"repo_name": "waytai/Wooey",
"id": "5b94230501eb4fab999886b6c123abbd721125cf",
"size": "517",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wooey/migrations/0013_wooeyjob_uuid_populate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1924"
},
{
"name": "HTML",
"bytes": "72919"
},
{
"name": "JavaScript",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "359"
},
{
"name": "Python",
"bytes": "186980"
}
],
"symlink_target": ""
}
|
import logging
import time
try:
from statsd import statsd
statsd_installed = True
except ImportError:
statsd_installed = False
from worker import executors
from common import config
log = logging.getLogger(__name__)
def bind(t):
def f(cls):
executors.bindings[t] = cls
return cls
return f
class Executor(object):
def __init__(self):
self.script_file = None
self.stats_connected = False
self.host = config.get("stats.host", "localhost")
self.port = config.get("stats.port", 8125)
def prepare(self, script_file):
self.script_file = script_file
def run(self, task):
if not self.script_file:
log.error("Failed to run executor, script is not set")
return
start = time.time()
res = self.execute(task)
elapsed = time.time() - start
self.report("gauge", elapsed, scenario=task.scenario_id)
return res
def execute(self, task):
raise NotImplementedError()
def report(self, metric_type, value, **kwargs):
if not statsd_installed:
return
if not self.stats_connected:
statsd.connect(self.host, self.port)
self.stats_connected = True
key = "spike.test"
tags = ["%s:%s" % (k, v) for k, v in kwargs.iteritems()]
if "postfix" in kwargs:
key = ".".join([key, kwargs["postfix"]])
del kwargs["postfix"]
if metric_type == "counter":
statsd.increment(key, value, tags=tags)
elif metric_type == "gauge":
statsd.gauge(key, value, tags=tags)
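# Illustrative executor registration (the "shell" key and class are made up):
#
#   @bind("shell")
#   class ShellExecutor(Executor):
#       def execute(self, task):
#           # run self.script_file for the given task and return its result
#           ...
#
# ``bind`` stores the class in ``executors.bindings`` under the given key so
# the worker can look it up by task type; ``run`` then times ``execute`` and
# reports the elapsed time as a gauge tagged with the scenario id.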
|
{
"content_hash": "5cdceba0ae4c79ce012484451e487a37",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 66,
"avg_line_length": 25.184615384615384,
"alnum_prop": 0.5821624923640807,
"repo_name": "Unix4ever/spike",
"id": "6bbed08cfa7064d72ddeea90d1183b8ecb0c1091",
"size": "1637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker/executors/executor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31788"
}
],
"symlink_target": ""
}
|
'''
:mod:`ophyd.controls.cas.pv` - CAS process variables
====================================================
.. module:: ophyd.controls.cas.pv
:synopsis: Epics process variables used in the channel access server, :class:`caServer`
'''
from __future__ import print_function
import time
import threading
import logging
import numpy as np
from pcaspy import cas
from ...utils.errors import (AlarmError, MajorAlarmError, MinorAlarmError)
from ...utils.errors import alarms
from ...utils.epics_pvs import record_field
from .server import caServer
from .errors import (casAsyncCompletion, casAsyncRunning, casError, casSuccess,
casUndefinedValueError)
logger = logging.getLogger(__name__)
class Limits(object):
'''Control and display limits for Epics PVs
Parameters
----------
lolim : float
The low control limit to which the value is clamped
hilim : float
The high control limit to which the value is clamped
hihi : float
The high-high alarm limit
lolo : float
The low-low alarm limit
high : float
The high alarm limit
low : float
The low alarm limit
Attributes
----------
lolim : float
The low control limit to which the value is clamped
hilim : float
The high control limit to which the value is clamped
hihi : float
The high-high alarm limit
lolo : float
The low-low alarm limit
high : float
The high alarm limit
low : float
The low alarm limit
'''
def __init__(self,
lolim=0.0,
hilim=0.0,
hihi=0.0,
lolo=0.0,
high=0.0,
low=0.0):
self.lolim = float(lolim)
self.hilim = float(hilim)
self.hihi = float(hihi)
self.lolo = float(lolo)
self.high = float(high)
self.low = float(low)
def check_alarm(self, value):
"""Raise an exception if an alarm would be set with the given value
Raises
------
AlarmError (MinorAlarmError, MajorAlarmError)
"""
lolo = self.lolo
low = self.low
high = self.high
hihi = self.hihi
if lolo < hihi:
if value >= hihi:
raise MajorAlarmError('%s >= %s' % (value, hihi),
alarm=alarms.HIHI_ALARM)
elif value <= lolo:
raise MajorAlarmError('%s <= %s' % (value, lolo),
alarm=alarms.LOLO_ALARM)
if low < high:
if value >= high:
raise MinorAlarmError('%s >= %s' % (value, high),
alarm=alarms.HIGH_ALARM)
elif value <= low:
raise MinorAlarmError('%s <= %s' % (value, low),
alarm=alarms.LOW_ALARM)
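# A small illustrative check (values are made up):
#
#   lims = Limits(low=0.0, high=10.0, lolo=-5.0, hihi=15.0)
#   lims.check_alarm(5.0)    # within limits, returns quietly
#   lims.check_alarm(12.0)   # raises MinorAlarmError (HIGH_ALARM)
#   lims.check_alarm(20.0)   # raises MajorAlarmError (HIHI_ALARM)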
class CasPV(cas.casPV):
'''Channel access server process variable
Parameters
----------
name : str
The PV name (should not include server prefix)
value :
The initial value, also used to guess the CA type
count : int, optional
The number of elements in the array (must be >= len(value))
type_ : , optional
Override the default type detected from `value`
precision : int, optional
The precision clients should use for display
units : str, optional
The engineering units of the pv
limits : Limits or sequence, optional
Limit information (high, low, etc. See :class:`Limits`)
scan : float, optional
The rate at which to call scan()
asg : , optional
Access security group information (TODO)
minor_states : sequence, optional
For enums, the minor alarm states
major_states : sequence, optional
For enums, the major alarm states
written_cb : callable, optional
A callback called when the value is written to via channel access. This
overrides the default `written_to` method.
scan_cb : callable, optional
A callback called when the scan event happens -- when the PV should have
its value updated. This overrides the default `scan` method.
server : caServer, optional
The channel access server to attach to
Attributes
----------
minor_states : list
For enum types, the list of values which cause a MinorAlarm
major_states : list
For enum types, the list of values which cause a MajorAlarm
'''
def __init__(self, name, value,
count=0,
type_=None,
precision=1,
units='',
limits=None,
scan=0.0,
asg=None,
minor_states=[],
major_states=[],
server=None,
written_cb=None,
scan_cb=None,
):
# TODO: asg
if written_cb is None:
written_cb = self.written_to
elif not callable(written_cb):
raise ValueError('written_cb is not callable')
if scan_cb is None:
scan_cb = self.scan
elif not callable(scan_cb):
raise ValueError('scan_cb is not callable')
# PV type defaults to type(value)
if type_ is None:
type_ = type(value)
elif value is not None:
value = type_(value)
if server is not None:
name = server._strip_prefix(name)
self._name = str(name)
self._ca_type = caServer.type_map.get(type_, type_)
self._precision = precision
self._units = str(units)
self._scan_rate = float(scan)
self.scan = scan_cb
self._written_cb = written_cb
self._count = 0
count = max(count, 0)
if limits is None:
self.limits = Limits()
elif isinstance(limits, dict):
self.limits = Limits(**limits)
else:
# TODO: Don't copy so limits can easily be
# updated for a group?
self.limits = limits
self._server = None
self._value = value
self._enums = []
self._alarm = AlarmError.severity
self._severity = AlarmError.severity
self._updating = False
if count == 0 and self._ca_type in caServer.numerical_types:
alarm_fcn = self._check_numerical
elif self._ca_type in caServer.enum_types:
if type_ is bool:
self._enums = ['False', 'True']
self._value = self._enums[bool(value)]
else:
self._enums = list(self._value)
self._value = self._value[0]
if np.array(self._value).dtype.type != np.string_:
raise ValueError('Enum list item types should be strings (specify an np.array'
' as the value if you wanted a waveform)')
alarm_fcn = self._check_enum
self.minor_states = list(minor_states)
self.major_states = list(major_states)
elif self._ca_type in caServer.string_types:
alarm_fcn = self._check_string
elif count > 0 or (type_ is np.ndarray and isinstance(value, np.ndarray)):
try:
self._ca_type = caServer.type_map[value.dtype.type]
except KeyError:
raise ValueError('Unhandled numpy array type %s' % value.dtype)
value = value.flatten()
count = int(count)
if count <= 0:
self._count = value.size
else:
self._count = count
            alarm_fcn = lambda value: None
if self._count < value.size:
raise ValueError('Initial value too large for specified size')
elif self._count > value.size:
self._value = np.zeros(self._count, dtype=value.dtype)
self._value[:value.size] = value
else:
self._value = value.copy()
else:
raise ValueError('Unhandled PV type "%s"' % type_)
self._check_alarm = alarm_fcn
self.touch()
cas.casPV.__init__(self)
if self._scan_rate > 0.0:
            self.thread = threading.Thread(target=self._scan_loop)
self.thread.daemon = True
self.thread.start()
if server is not None:
server.add_pv(self)
@property
def full_pvname(self):
'''The full PV name, including the server prefix'''
if self._server is None:
raise ValueError('PV not yet added to a server (%s)' % self._name)
else:
return ''.join((self._server.prefix, self._name))
@property
def server(self):
'''The server the channel access PV is managed by'''
return self._server
def __getitem__(self, idx):
if self._count <= 0:
raise IndexError('(%d) Not an array' % idx)
else:
return self._value[idx]
def __setitem__(self, idx, value):
self._value[idx] = value
self.value = self._value
def stop(self):
'''Stop the scan loop'''
self._updating = False
def scan(self):
'''Called at every `scan` second intervals
Override this or specify scan_cb in the initializer.
'''
pass
def _scan_loop(self):
if self._scan_rate <= 0.0:
return
self._updating = True
while self._updating:
try:
self.scan()
except:
self._updating = False
raise
time.sleep(self._scan_rate)
def touch(self):
'''Update the timestamp and alarm status (without changing the value)'''
self._timestamp = cas.epicsTimeStamp()
self._status, self._severity = self.check_alarm()
@property
def name(self):
'''The PV name'''
return self._name
@property
def alarm(self):
'''Current alarm status'''
return self._alarm
@property
def count(self):
'''Array size'''
return self._count
@property
def severity(self):
'''Current alarm severity'''
return self._severity
def check_alarm(self, value=None):
'''Check a value against this PV's alarm settings'''
if value is None:
value = self._value
try:
self._check_alarm(value)
except (MinorAlarmError, MajorAlarmError) as ex:
return (ex.alarm, ex.severity)
return (alarms.NO_ALARM, 0)
def _check_string(self, value):
'''Alarm checking for string PVs'''
pass
def _check_numerical(self, value):
'''Alarm checking for numerical PVs'''
self.limits.check_alarm(value)
def _check_enum(self, value):
'''Alarm checking for enums'''
if isinstance(value, int):
value = self._enums[value]
if value in self.major_states:
raise MajorAlarmError('%s' % value,
alarm=alarms.STATE_ALARM)
elif value in self.minor_states:
raise MinorAlarmError('%s' % value,
alarm=alarms.STATE_ALARM)
def _gdd_to_dict(self, gdd):
'''Take a gdd value and dump the important parts into a dictionary'''
timestamp = cas.epicsTimeStamp()
gdd.getTimeStamp(timestamp)
value = gdd.get()
status, severity = self.check_alarm(value)
return dict(timestamp=timestamp,
value=value,
status=status,
severity=severity)
def _get_value(self):
return self._value
def _set_value(self, value, timestamp=None):
if isinstance(value, cas.gdd):
info = self._gdd_to_dict(value)
self._timestamp = info['timestamp']
self._value = info['value']
self._status = info['status']
self._severity = info['severity']
else:
gdd = cas.gdd()
gdd.setPrimType(self._ca_type)
if timestamp is None:
timestamp = cas.epicsTimeStamp()
self._timestamp = timestamp
self._value = value
self._status, self._severity = self.check_alarm()
self._gdd_set_value(gdd)
# Notify clients of the update
self.postEvent(gdd)
value = property(_get_value, _set_value)
def resize(self, count=None, value=None):
'''Resize an array PV, optionally specifying a new value
If `count` is not specified, the size of `value` is used
'''
# TODO this works on server side, pyepics doesn't handle it
# well though
raise NotImplementedError
if self._count <= 0:
raise ValueError('Cannot resize a scalar PV')
elif count is None or count <= 0:
if value is None:
raise ValueError('Must specify count or value')
count = value.size
if value is not None:
value = value.copy().flatten()
else:
value = self._value.copy()
value.resize(count)
self._count = count
# Set the value and post the event
self.value = value
def written_to(self, timestamp=None, value=None,
status=None, severity=None):
'''Default callback for when the PV is written to
Raises
------
casAsyncCompletion (when asynchronous completion is desired)
'''
pass
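    # Illustrative asynchronous-completion pattern (names are hypothetical):
    #
    #   def my_written_cb(timestamp=None, value=None, status=None, severity=None):
    #       start_hardware_move(value)    # kick off a slow action
    #       raise casAsyncCompletion()    # tell the server the write is pending
    #
    #   pv = CasPV('motor', 0.0, written_cb=my_written_cb)
    #   # ... later, when the action finishes, call pv.async_done()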
def get(self, **kwargs):
'''Get the current value
(acts like an epics.PV, otherwise just use pv.value)
'''
return self.value
def put(self, value, **kwargs):
'''Set the current value
(acts like an epics.PV, otherwise just use pv.value = value)
'''
self.value = value
def process(self, wait=True):
'''Cause the written-to callback to be fired'''
try:
ret = self._written_cb(timestamp=self._timestamp,
value=self._value,
status=self._status,
severity=self._severity)
except casAsyncCompletion:
while wait and self.hasAsyncWrite():
time.sleep(0.01)
ret = self.value
return ret
def write(self, context, value):
'''The PV was written to over channel access
(internal function, override `written_to` instead)
'''
if self._written_cb is not None:
try:
info = self._gdd_to_dict(value)
self._written_cb(**info)
except casAsyncCompletion as ex:
if self.hasAsyncWrite():
return casAsyncRunning.ret
else:
self.startAsyncWrite(context)
return ex.ret
except casError as ex:
return ex.ret
except Exception as ex:
logger.debug('written_cb failed: (%s) %s' % (ex.__class__.__name__, ex),
exc_info=ex)
# TODO: no error for rejected values?
return casSuccess.ret
self.value = value
return casSuccess.ret
def async_done(self, ret=casSuccess.ret):
'''Indicate to the server that the asynchronous write has completed'''
if self.hasAsyncWrite():
self.endAsyncWrite(ret)
def writeNotify(self, context, value):
'''An asynchronous write attempt was made
(internal function)
'''
if self.hasAsyncWrite():
# Another async task currently running
return casAsyncRunning.ret
return self.write(context, value)
def _gdd_set_value(self, gdd):
'''Update a gdd instance with the current value and alarm/severity'''
if gdd.primitiveType() == cas.aitEnumInvalid:
gdd.setPrimType(self._ca_type)
if self._value is None:
raise casUndefinedValueError()
gdd.put(self._value)
gdd.setStatSevr(self._alarm, self._severity)
gdd.setTimeStamp(self._timestamp)
# TODO can't get around writing these.
# underlying swigged C++ code needs to be modified.
def _gdd_function(fcn, **kwargs):
def wrapped(self, gdd):
'''Internal pcaspy function; do not use'''
try:
ret = fcn(self, gdd, **kwargs)
except casError as ex:
logger.debug('caserror %s' % ex, exc_info=ex)
return ex.ret
except Exception as ex:
logger.debug('gdd failed %s' % ex, exc_info=ex)
return casUndefinedValueError.ret
if ret is None:
return casSuccess.ret
return ret
return wrapped
def _gdd_attr(self, gdd, attr=''):
'''Set the gdd value to (some attribute of this instance)'''
try:
value = getattr(self, attr)
except:
pass
else:
gdd.put(value)
def _gdd_lim(self, gdd, attr=''):
'''Set the gdd value to (some part of the limits)'''
try:
value = getattr(self.limits, attr)
except:
pass
else:
gdd.put(value)
getValue = _gdd_function(_gdd_set_value)
getPrecision = _gdd_function(_gdd_attr, attr='_precision')
getHighLimit = _gdd_function(_gdd_lim, attr='hilim')
getLowLimit = _gdd_function(_gdd_lim, attr='lolim')
getHighAlarmLimit = _gdd_function(_gdd_lim, attr='hihi')
getLowAlarmLimit = _gdd_function(_gdd_lim, attr='lolo')
getHighWarnLimit = _gdd_function(_gdd_lim, attr='high')
getLowWarnLimit = _gdd_function(_gdd_lim, attr='low')
getUnits = _gdd_function(_gdd_attr, attr='_units')
getEnums = _gdd_function(_gdd_attr, attr='_enums')
def bestExternalType(self):
'''Internal pcaspy function; do not use'''
return self._ca_type
def maxDimension(self):
'''Internal pcaspy function; do not use'''
if self._count >= 1:
return 1
else:
return 0
def maxBound(self, dims):
'''Internal pcaspy function; do not use'''
return self._count
def __repr__(self):
return 'CasPV({0.name!r}, value={0.value!r}, ' \
'alarm={0.alarm}, severity={0.severity})'.format(self)
class CasRecord(CasPV):
'''A channel access server record
Starts out with just a VAL field. Additional fields can be added
dynamically.
Keyword arguments are passed through to the base class, CasPV
Parameters
----------
name : str
The record prefix
val_field
The default value for the value field
rtype : str, optional
The record type to use
desc : str, optional
The description field value
Attributes
----------
fields : dict
Field name to CasPV instance
'''
def __init__(self, name, val_field, rtype=None,
desc='',
**kwargs):
assert '.' not in name, 'Record name cannot have periods'
CasPV.__init__(self, name, val_field, **kwargs)
self.fields = {}
self.add_field('VAL', None, pv=self)
if rtype is not None:
self.add_field('RTYP', str(rtype))
if desc is not None:
self.add_field('DESC', str(desc))
def field_pvname(self, field):
return record_field(self.name, field)
def __getitem__(self, field):
return self.fields[field]
def __setitem__(self, field, value):
self.fields[field].value = value
def add_field(self, field, value, pv=None, **kwargs):
field = field.upper()
if field in self.fields:
raise ValueError('Field already exists')
if pv is None:
field_pv = self.field_pvname(field)
kwargs.pop('server', '')
pv = CasPV(field_pv, value, **kwargs)
self.fields[field] = pv
def __repr__(self):
return '{0}({1.name!r}, value={1.value!r}, alarm={1.alarm}, ' \
'severity={1.severity})'.format(self.__class__.__name__, self)
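# Illustrative CasRecord usage (record name and fields are made up; assumes a
# running caServer so value updates can be posted to clients):
#
#   rec = CasRecord('motor1', 0.0, rtype='ai', desc='example record')
#   rec.add_field('EGU', 'mm')
#   rec['VAL'] = 1.5            # updates the record's value field
#   print(rec['EGU'].value)     # -> 'mm'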
|
{
"content_hash": "e61ec79e060172166a8d67d97446cbb7",
"timestamp": "",
"source": "github",
"line_count": 678,
"max_line_length": 94,
"avg_line_length": 30.169616519174042,
"alnum_prop": 0.5435834759227572,
"repo_name": "ericdill/ophyd",
"id": "ffafc0122384ea233ec3feb6f1c862b8e496eac3",
"size": "20471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ophyd/controls/cas/pv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "530245"
},
{
"name": "Shell",
"bytes": "892"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import logging
import six
import threading
import time
class RefCount(object):
"""Thread-safe counter"""
def __init__(self, count=1):
self._lock = threading.Lock()
self._count = count
def incr(self):
with self._lock:
self._count += 1
return self._count
def decr(self):
with self._lock:
self._count -= 1
return self._count
def await_until(func, timeout=5.0):
"""Polls for func() to return True"""
end_time = time.time() + timeout
while time.time() < end_time and not func():
time.sleep(0.01)
def stop_loop_when(loop, cond_func, timeout=5.0):
"""
Registers a periodic callback that stops the loop when cond_func() == True.
Compatible with both Tornado and asyncio.
"""
if cond_func() or timeout <= 0.0:
loop.stop()
return
timeout -= 0.1
loop.call_later(0.1, stop_loop_when, loop, cond_func, timeout)
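# Minimal usage sketch (assumes an asyncio event loop named ``loop`` and a
# ``spans`` list that the code under test appends to):
#
#   stop_loop_when(loop, lambda: len(spans) >= 2, timeout=5.0)
#   loop.run_forever()   # returns once the condition holds or the timeout expires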
def get_logger(name):
"""Returns a logger with log level set to INFO"""
logging.basicConfig(level=logging.INFO)
return logging.getLogger(name)
def get_one_by_tag(spans, key, value):
"""Return a single Span with a tag value/key from a list,
errors if more than one is found."""
found = []
for span in spans:
if span.tags.get(key) == value:
found.append(span)
if len(found) > 1:
raise RuntimeError('Too many values')
return found[0] if len(found) > 0 else None
def get_one_by_operation_name(spans, name):
"""Return a single Span with a name from a list,
errors if more than one is found."""
found = []
for span in spans:
if span.operation_name == name:
found.append(span)
if len(found) > 1:
raise RuntimeError('Too many values')
return found[0] if len(found) > 0 else None
def get_tags_count(span, prefix):
"""Returns the tag count with the given prefix from a Span"""
test_keys = set()
for key in six.iterkeys(span.tags):
if key.startswith(prefix):
test_keys.add(key)
return len(test_keys)
|
{
"content_hash": "7674ced327fdbc2da9a1f8bd64477615",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 24.556818181818183,
"alnum_prop": 0.6057380842202684,
"repo_name": "opentracing/opentracing-python",
"id": "e591a656711b8a83bea16d2749d3c089dca814b9",
"size": "2161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testbed/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2598"
},
{
"name": "Python",
"bytes": "243543"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
import json
import requests
def ExtractJSONAPNS(fileloc):
file = open(fileloc, 'r')
extractedJSON = json.JSONDecoder().decode(file.read())
file.close()
apndict = dict()
apncount = 0
''' for geojson:
for key in extractedJSON["features"]:
properties = key['properties']
parcel = properties['apn']
apn = parcel.strip('JA')
if len(apn) is 17:
apndict[parcel] = '{0}-{1}-{2}-{3}-{4}-{5}-{6}-{7}'.format(apn[0:2], apn[2:5], apn[5:7], apn[7:9], apn[9:11], apn[11], apn[12:14], apn[14:len(apn)])
apncount += 1
'''
for key in extractedJSON:
parcel = key['apn']
apn = parcel.strip('JA')
        if len(apn) == 17:
apndict[parcel] = '{0}-{1}-{2}-{3}-{4}-{5}-{6}-{7}'.format(apn[0:2], apn[2:5], apn[5:7], apn[7:9], apn[9:11], apn[11], apn[12:14], apn[14:len(apn)])
apncount += 1
del extractedJSON[:]
print("Total APNS Found: " + str(apncount))
return apndict
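# Expected ``apns.json`` layout (values are illustrative):
#
#   [{"apn": "JA12345678901234567"}, ...]
#
# Each 17-character APN (after stripping the leading "JA") is reformatted into
# the dashed parcel id used by the county property-report URL, e.g.
# "12-345-67-89-01-2-34-567".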
def ExtractHTMLTree(URL):
page = requests.get(URL)
return BeautifulSoup(page.text, 'lxml')
def saveToFiles(parcels):
JSONString = json.JSONEncoder().encode(parcels)
print("Writing to file: ParcelData.json")
file = open('ParcelData.json', 'w')
file.write(JSONString)
file.close()
#######TAX INFORMATION#######
def ScrapeTaxInfo(html):
table = html.find("table", attrs={"id":"mTabGroup_Values_mValues_mGrid_RealDataGrid"})
    if table is None:
return None
rows = table.find_all("tr")
years = [td.get_text() for td in rows[0].find_all("td")]
marketvalues = [td.get_text() for td in rows[1].find_all("td")]
taxablevalues = [td.get_text() for td in rows[2].find_all("td")]
assessvalues = [td.get_text() for td in rows[3].find_all("td")]
yeardict = dict()
count = 1
for year in years[1:len(years)]:
values = dict()
values[str(marketvalues[0]).strip()] = int(marketvalues[count].replace(',', ''))
values[str(taxablevalues[0]).strip()] = int(taxablevalues[count].replace(',', ''))
values[str(assessvalues[0]).strip()] = int(assessvalues[count].replace(',', ''))
yeardict[str(year)] = values
count += 1
return yeardict
########EXEMPTION INFORMATION######
def ScrapeExemptions(html):
table = html.find("table", attrs={"id":"mTabGroup_Exemptions_mActiveExemptions_mGrid_RealDataGrid"})
if table:
rows = table.find_all("tr")
exemptions = []
if len(rows) > 0:
for row in rows:
exemptions.append(str(row.get_text().strip()))
return exemptions
else:
return ["NA"]
##########INCENTIVE INFORMATION#######
def ScrapeIncentives(html):
lx = html.find_all("span", attrs={"style":"margin-left:20px;"})
lm = lx[1].find_all("span")
incentivesDict = dict()
firstelem = True
for element in lm[0:6]:
key = ""
value = "null"
if firstelem:
key = "Capital Improvement Project"
firstelem = False
for a in element:
if str(type(a)) == "<class 'bs4.element.NavigableString'>":
a = a.strip()
if a != "":
value = str(a)
elif str(type(a)) == "<class 'bs4.element.Tag'>":
key = str(a.get_text()).strip().strip(':')
incentivesDict[key] = value
return incentivesDict
def main():
baseURL = "http://maps.jacksongov.org/PropertyReport/PropertyReport.cfm?pid="
apns = ExtractJSONAPNS("apns.json")
parceldict = dict()
for key, value in apns.iteritems():
parcelinfo = dict()
html = ExtractHTMLTree(baseURL + value)
parcelinfo["Property Values"] = ScrapeTaxInfo(html)
if parcelinfo["Property Values"] is None:
parcelinfo["Exemptions"] = None
parcelinfo["Incentives"] = None
else:
parcelinfo["Exemptions"] = ScrapeExemptions(html)
parcelinfo["Incentives"] = ScrapeIncentives(html)
parceldict[key] = parcelinfo
saveToFiles(parceldict)
if __name__ == "__main__":
main()
|
{
"content_hash": "1f0d859d42e9dd5420178255e72e446c",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 151,
"avg_line_length": 23.738853503184714,
"alnum_prop": 0.6458277434934263,
"repo_name": "starvagrant/address-api",
"id": "ccb187fcd87d95be8aad9abfb1613117c17a84c5",
"size": "3727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/countyscraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "950"
},
{
"name": "PHP",
"bytes": "170333"
},
{
"name": "Python",
"bytes": "3727"
},
{
"name": "Shell",
"bytes": "1558"
}
],
"symlink_target": ""
}
|
import operator
from django.contrib.admin.views.decorators import staff_member_required
from django.core.urlresolvers import reverse
from django.db import models
from django.shortcuts import redirect, get_object_or_404
from django.template import RequestContext
from django.views.generic import ListView, CreateView, RedirectView
from sorl.thumbnail.shortcuts import get_thumbnail
from .models import Image, ImageSet
class BrowseImages(ListView):
model = Image
template_name = 'images/admin_browse.html'
paginate_by = 32
def get_queryset(self):
fields = ['title', 'summary']
query = self.request.GET.get('q')
images = super(BrowseImages, self).get_queryset()
if not query:
return images
outer_q = []
for token in query.split():
inner_q = []
for field in fields:
inner_q.append(models.Q(**{field + '__icontains': token}))
outer_q.append(reduce(operator.or_, inner_q))
return images.filter(reduce(operator.and_, outer_q))
class UploadImage(CreateView):
model = Image
template_name = 'images/admin_upload.html'
def get_success_url(self):
return reverse('admin:images_admin_insert',
kwargs={'pk': self.object.id})
class RenderThumbnail(RedirectView):
permanent = False
def get_redirect_url(self, **kwargs):
image = get_object_or_404(Image, pk=self.kwargs.get('pk'))
geometry = self.kwargs.get('geometry')
if not geometry:
self.url = image.image.url
else:
self.url = get_thumbnail(image.image, geometry).url
return super(RenderThumbnail, self).get_redirect_url(**kwargs)
class BrowseImageSets(ListView):
model = ImageSet
template_name = 'images/admin_browse_imageset.html'
paginate_by = 10
def get_queryset(self):
fields = ['title', 'summary']
query = self.request.GET.get('q')
isets = super(BrowseImageSets, self).get_queryset()
if not query:
return isets
outer_q = []
for token in query.split():
inner_q = []
for field in fields:
inner_q.append(models.Q(**{field + '__icontains': token}))
outer_q.append(reduce(operator.or_, inner_q))
return isets.filter(reduce(operator.and_, outer_q))
|
{
"content_hash": "1e26e888db9744296816b2bc0caa0ae9",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 74,
"avg_line_length": 29.28048780487805,
"alnum_prop": 0.6259891711786756,
"repo_name": "armstrong/armstrong.apps.images",
"id": "36dfe540b14ff9a91438a58daa52c619bde73423",
"size": "2401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armstrong/apps/images/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "715"
},
{
"name": "Python",
"bytes": "34929"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
}
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which in
turn motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
|
{
"content_hash": "8fef616e5307ed120363ad080ea4fd1b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 40.07,
"alnum_prop": 0.6231594709258798,
"repo_name": "idlead/scikit-learn",
"id": "beeafc1bfbb9869713836ec0b523f9345b18bb6c",
"size": "4007",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "examples/decomposition/plot_sparse_coding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394788"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6245914"
},
{
"name": "Shell",
"bytes": "5112"
}
],
"symlink_target": ""
}
|
'''
Management of APT/YUM package repos
===================================
Package repositories for APT-based and YUM-based distros can be managed with
these states. Here is some example SLS:
.. code-block:: yaml
base:
pkgrepo.managed:
- humanname: CentOS-$releasever - Base
- mirrorlist: http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
- comments:
- '#http://mirror.centos.org/centos/$releasever/os/$basearch/'
- gpgcheck: 1
- gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
.. code-block:: yaml
base:
pkgrepo.managed:
- humanname: Logstash PPA
- name: deb http://ppa.launchpad.net/wolfnet/logstash/ubuntu precise main
- dist: precise
- file: /etc/apt/sources.list.d/logstash.list
- keyid: 28B04E4A
- keyserver: keyserver.ubuntu.com
- require_in:
- pkg: logstash
pkg.latest:
- name: logstash
- refresh: True
.. code-block:: yaml
base:
pkgrepo.managed:
- humanname: deb-multimedia
- name: deb http://www.deb-multimedia.org stable main
- file: /etc/apt/sources.list.d/deb-multimedia.list
- key_url: salt://deb-multimedia/files/marillat.pub
.. code-block:: yaml
base:
pkgrepo.managed:
- humanname: Google Chrome
- name: deb http://dl.google.com/linux/chrome/deb/ stable main
- dist: stable
- file: /etc/apt/sources.list.d/chrome-browser.list
- require_in:
- pkg: google-chrome-stable
- gpgcheck: 1
- key_url: https://dl-ssl.google.com/linux/linux_signing_key.pub
.. code-block:: yaml
base:
pkgrepo.managed:
- ppa: wolfnet/logstash
pkg.latest:
- name: logstash
- refresh: True
.. _bug: https://bugs.launchpad.net/ubuntu/+source/software-properties/+bug/1249080
.. note::
On Ubuntu systems, the ``python-software-properties`` package should be
installed for better support of PPA repositories. To check if this package
is installed, run ``dpkg -l python-software-properties``.
Also, some Ubuntu releases have a bug_ in their
``python-software-properties`` package, a missing dependency on pycurl, so
``python-pycurl`` will need to be manually installed if it is not present
once ``python-software-properties`` is installed.
    On Ubuntu and Debian systems, the ``python-apt`` package is required to
    be installed. To check if this package is installed, run
    ``dpkg -l python-apt``. ``python-apt`` will need to be manually installed
    if it is not present.
'''
from __future__ import absolute_import
# Import python libs
import sys
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.modules.aptpkg import _strip_uri
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
def __virtual__():
'''
Only load if modifying repos is available for this package type
'''
return 'pkg.mod_repo' in __salt__
def managed(name, **kwargs):
'''
This function manages the configuration on a system that points to the
repositories for the system's package manager.
name
The name of the package repo, as it would be referred to when running
the regular package manager commands.
For yum-based systems, take note of the following configuration values:
humanname
On yum-based systems, this is stored as the "name" value in the .repo
file in /etc/yum.repos.d/. On yum-based systems, this is required.
baseurl
On yum-based systems, baseurl refers to a direct URL to be used for
this yum repo.
One of baseurl or mirrorlist is required.
mirrorlist
        On yum-based systems, a URL pointing to a collection of baseurls to
        choose from.
One of baseurl or mirrorlist is required.
comments
Sometimes you want to supply additional information, but not as
enabled configuration. Anything supplied for this list will be saved
in the repo configuration with a comment marker (#) in front.
Additional configuration values, such as gpgkey or gpgcheck, are used
verbatim to update the options for the yum repo in question.
For apt-based systems, take note of the following configuration values:
ppa
On Ubuntu, you can take advantage of Personal Package Archives on
Launchpad simply by specifying the user and archive name. The keyid
will be queried from launchpad and everything else is set
automatically. You can override any of the below settings by simply
setting them as you would normally. For example:
.. code-block:: yaml
logstash-ppa:
pkgrepo.managed:
- ppa: wolfnet/logstash
ppa_auth
For Ubuntu PPAs there can be private PPAs that require authentication
to access. For these PPAs the username/password can be passed as an
HTTP Basic style username/password combination.
.. code-block:: yaml
logstash-ppa:
pkgrepo.managed:
- ppa: wolfnet/logstash
- ppa_auth: username:password
name
On apt-based systems this must be the complete entry as it would be
seen in the sources.list file. This can have a limited subset of
components (i.e. 'main') which can be added/modified with the
"comps" option.
.. code-block:: yaml
precise-repo:
pkgrepo.managed:
- name: deb http://us.archive.ubuntu.com/ubuntu precise main
disabled
Toggles whether or not the repo is used for resolving dependencies
and/or installing packages.
comps
On apt-based systems, comps dictate the types of packages to be
installed from the repository (e.g. main, nonfree, ...). For
purposes of this, comps should be a comma-separated list.
file
The filename for the .list that the repository is configured in.
It is important to include the full-path AND make sure it is in
a directory that APT will look in when handling packages
dist
This dictates the release of the distro the packages should be built
for. (e.g. unstable). This option is rarely needed.
keyid
The KeyID of the GPG key to install. This option also requires
the ``keyserver`` option to be set.
keyserver
This is the name of the keyserver to retrieve gpg keys from. The
``keyid`` option must also be set for this option to work.
key_url
URL to retrieve a GPG key from. Allows the usage of ``http://``,
``https://`` as well as ``salt://``.
.. note::
Use either ``keyid``/``keyserver`` or ``key_url``, but not both.
consolidate
If set to true, this will consolidate all sources definitions to
the sources.list file, cleanup the now unused files, consolidate
components (e.g. main) for the same URI, type, and architecture
to a single line, and finally remove comments from the sources.list
file. The consolidate will run every time the state is processed. The
option only needs to be set on one repo managed by salt to take effect.
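        For example (the repo name and URL below are purely illustrative):
        .. code-block:: yaml
            example-repo:
              pkgrepo.managed:
                - name: deb http://example.com/ubuntu precise main
                - consolidate: True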
clean_file
        If set to true, the repo file is emptied before the repo is
        configured. This is dangerous if multiple sources are kept in the
        same file.
.. versionadded:: 2015.8.0
refresh_db
If set to false this will skip refreshing the apt package database on
debian based systems.
require_in
Set this to a list of pkg.installed or pkg.latest to trigger the
running of apt-get update prior to attempting to install these
packages. Setting a require in the pkg will not work for this.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
repo = {}
# pkg.mod_repo has conflicting kwargs, so move 'em around
if 'name' in kwargs:
if 'ppa' in kwargs:
ret['result'] = False
ret['comment'] = 'You may not use both the "name" argument ' \
'and the "ppa" argument.'
return ret
kwargs['repo'] = kwargs['name']
if 'ppa' in kwargs and __grains__['os'] in ('Ubuntu', 'Mint'):
# overload the name/repo value for PPAs cleanly
# this allows us to have one code-path for PPAs
repo_name = 'ppa:{0}'.format(kwargs['ppa'])
kwargs['repo'] = repo_name
if 'repo' not in kwargs:
kwargs['repo'] = name
if 'humanname' in kwargs:
kwargs['name'] = kwargs['humanname']
if kwargs.pop('enabled', None):
kwargs['disabled'] = False
salt.utils.warn_until(
'Boron',
'The `enabled` argument has been deprecated in favor of '
'`disabled`.'
)
for kwarg in _STATE_INTERNAL_KEYWORDS:
kwargs.pop(kwarg, None)
try:
repo = __salt__['pkg.get_repo'](
kwargs['repo'],
ppa_auth=kwargs.get('ppa_auth', None)
)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = \
'Failed to configure repo {0!r}: {1}'.format(name, exc)
return ret
    # This is because of how apt-sources works. It pushes distro logic
    # out of the state itself and into a module where it makes more sense
    # to live. Most package providers will simply return the data as
    # provided, since it doesn't require any "specialized" massaging.
if 'pkg.expand_repo_def' in __salt__:
sanitizedkwargs = __salt__['pkg.expand_repo_def'](kwargs)
else:
sanitizedkwargs = kwargs
if __grains__['os_family'] == 'Debian':
kwargs['repo'] = _strip_uri(kwargs['repo'])
if repo:
notset = False
for kwarg in sanitizedkwargs:
if kwarg == 'repo':
pass
elif kwarg not in repo:
notset = True
elif kwarg == 'comps':
if sorted(sanitizedkwargs[kwarg]) != sorted(repo[kwarg]):
notset = True
elif kwarg == 'line' and __grains__['os_family'] == 'Debian':
# split the line and sort everything after the URL
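                # e.g. (hypothetical lines) 'deb http://example.com/debian stable main contrib'
                # and 'deb http://example.com/debian stable contrib main' compare equal once
                # the component list after the URI and dist is sorted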
sanitizedsplit = sanitizedkwargs[kwarg].split()
sanitizedsplit[3:] = sorted(sanitizedsplit[3:])
reposplit = repo[kwarg].split()
reposplit[3:] = sorted(reposplit[3:])
if sanitizedsplit != reposplit:
notset = True
else:
if str(sanitizedkwargs[kwarg]) != str(repo[kwarg]):
notset = True
if notset is False:
ret['result'] = True
ret['comment'] = ('Package repo {0!r} already configured'
.format(name))
return ret
if __opts__['test']:
ret['comment'] = ('Package repo {0!r} will be configured. This may '
'cause pkg states to behave differently than stated '
'if this action is repeated without test=True, due '
'to the differences in the configured repositories.'
.format(name))
return ret
# empty file before configure
if kwargs.get('clean_file', False):
salt.utils.fopen(kwargs['file'], 'w').close()
try:
if __grains__['os_family'] == 'Debian':
__salt__['pkg.mod_repo'](saltenv=__env__, **kwargs)
else:
__salt__['pkg.mod_repo'](**kwargs)
except Exception as exc:
# This is another way to pass information back from the mod_repo
# function.
ret['result'] = False
ret['comment'] = \
'Failed to configure repo {0!r}: {1}'.format(name, exc)
return ret
try:
repodict = __salt__['pkg.get_repo'](
kwargs['repo'], ppa_auth=kwargs.get('ppa_auth', None)
)
if repo:
for kwarg in sanitizedkwargs:
if repodict.get(kwarg) != repo.get(kwarg):
change = {'new': repodict[kwarg],
'old': repo.get(kwarg)}
ret['changes'][kwarg] = change
else:
ret['changes'] = {'repo': kwargs['repo']}
ret['result'] = True
ret['comment'] = 'Configured package repo {0!r}'.format(name)
except Exception as exc:
ret['result'] = False
ret['comment'] = \
'Failed to confirm config of repo {0!r}: {1}'.format(name, exc)
# Clear cache of available packages, if present, since changes to the
# repositories may change the packages that are available.
if ret['changes']:
sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('pkg._avail', None)
return ret
def absent(name, **kwargs):
'''
This function deletes the specified repo on the system, if it exists. It
is essentially a wrapper around pkg.del_repo.
name
The name of the package repo, as it would be referred to when running
the regular package manager commands.
**UBUNTU-SPECIFIC OPTIONS**
ppa
On Ubuntu, you can take advantage of Personal Package Archives on
Launchpad simply by specifying the user and archive name.
.. code-block:: yaml
logstash-ppa:
pkgrepo.absent:
- ppa: wolfnet/logstash
ppa_auth
For Ubuntu PPAs there can be private PPAs that require authentication
to access. For these PPAs the username/password can be specified. This
is required for matching if the name format uses the ``ppa:`` specifier
and is private (requires username/password to access, which is encoded
in the URI).
.. code-block:: yaml
logstash-ppa:
pkgrepo.absent:
- ppa: wolfnet/logstash
- ppa_auth: username:password
keyid
If passed, then the GPG key corresponding to the passed KeyID will also
be removed.
keyid_ppa : False
If set to ``True``, the GPG key's ID will be looked up from
ppa.launchpad.net and removed, and the ``keyid`` argument will be
ignored.
.. note::
This option will be disregarded unless the ``ppa`` argument is
present.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
repo = {}
if 'ppa' in kwargs and __grains__['os'] in ('Ubuntu', 'Mint'):
name = kwargs.pop('ppa')
if not name.startswith('ppa:'):
name = 'ppa:' + name
remove_key = any(kwargs.get(x) is not None
for x in ('keyid', 'keyid_ppa'))
if remove_key and 'pkg.del_repo_key' not in __salt__:
ret['result'] = False
ret['comment'] = \
'Repo key management is not implemented for this platform'
return ret
try:
repo = __salt__['pkg.get_repo'](
name, ppa_auth=kwargs.get('ppa_auth', None)
)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = \
'Failed to configure repo {0!r}: {1}'.format(name, exc)
return ret
if not repo:
ret['comment'] = 'Package repo {0} is absent'.format(name)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = ('Package repo {0!r} will be removed. This may '
'cause pkg states to behave differently than stated '
'if this action is repeated without test=True, due '
'to the differences in the configured repositories.'
.format(name))
return ret
try:
__salt__['pkg.del_repo'](repo=name, **kwargs)
except (CommandExecutionError, SaltInvocationError) as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
repos = __salt__['pkg.list_repos']()
if name not in repos:
ret['changes']['repo'] = name
ret['comment'] = 'Removed repo {0}'.format(name)
if not remove_key:
ret['result'] = True
else:
try:
removed_keyid = __salt__['pkg.del_repo_key'](name, **kwargs)
except (CommandExecutionError, SaltInvocationError) as exc:
ret['result'] = False
ret['comment'] += ', but failed to remove key: {0}'.format(exc)
else:
ret['result'] = True
ret['changes']['keyid'] = removed_keyid
ret['comment'] += ', and keyid {0}'.format(removed_keyid)
else:
ret['result'] = False
ret['comment'] = 'Failed to remove repo {0}'.format(name)
# Clear cache of available packages, if present, since changes to the
# repositories may change the packages that are available.
if ret['changes']:
sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('pkg._avail', None)
return ret
|
{
"content_hash": "17f5002dd72c4c61d1e43f6261c0b09d",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 94,
"avg_line_length": 34.811623246492985,
"alnum_prop": 0.5894306602958954,
"repo_name": "smallyear/linuxLearn",
"id": "4238eee4e1851ecfbe01bc16770436f4139796fb",
"size": "17395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/salt/states/pkgrepo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "35"
},
{
"name": "HTML",
"bytes": "23373"
},
{
"name": "JavaScript",
"bytes": "510"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "12800734"
},
{
"name": "Shell",
"bytes": "240576"
}
],
"symlink_target": ""
}
|
"""Tests for the SHA-1 hasher implementation."""
import unittest
from plaso.analyzers.hashers import sha1
from tests import test_lib as shared_test_lib
from tests.analyzers.hashers import test_lib
class SHA1Test(test_lib.HasherTestCase):
"""Tests the SHA-1 hasher."""
@shared_test_lib.skipUnlessHasTestFile([u'empty_file'])
def testFileHashMatchesEmptyFile(self):
"""Tests that hasher matches the hash of an empty file."""
expected_sha1 = u'da39a3ee5e6b4b0d3255bfef95601890afd80709'
hasher = sha1.SHA1Hasher()
self._AssertTestPathStringDigestMatch(
hasher, [u'empty_file'], expected_sha1)
hasher = sha1.SHA1Hasher()
self._AssertTestPathBinaryDigestMatch(
hasher, [u'empty_file'], expected_sha1.decode(u'hex'))
@shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd'])
def testFileHashMatchesKnownFile(self):
"""Tests that hasher matches the hash of an empty file."""
expected_sha1 = u'd9f264323004fd9518c0474967f80421e60e9813'
hasher = sha1.SHA1Hasher()
self._AssertTestPathStringDigestMatch(hasher, [u'ímynd.dd'], expected_sha1)
hasher = sha1.SHA1Hasher()
self._AssertTestPathBinaryDigestMatch(
hasher, [u'ímynd.dd'], expected_sha1.decode(u'hex'))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "b14381849f4b2bca6decf9671c44de78",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 31.341463414634145,
"alnum_prop": 0.7214007782101167,
"repo_name": "dc3-plaso/plaso",
"id": "5cae85a8fc352a8f13d9ecb2123ebb614863e36a",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/analyzers/hashers/sha1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
}
|
import os
from ingenico.connect.sdk.factory import Factory
from ingenico.connect.sdk.merchant.tokens.delete_token_params import DeleteTokenParams
class DeleteTokenExample(object):
def example(self):
with self.__get_client() as client:
query = DeleteTokenParams()
query.mandate_cancel_date = "20150102"
client.merchant("merchantId").tokens().delete("tokenId", query)
def __get_client(self):
api_key_id = os.getenv("connect.api.apiKeyId", "someKey")
secret_api_key = os.getenv("connect.api.secretApiKey", "someSecret")
configuration_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../example_configuration.ini'))
return Factory.create_client_from_file(configuration_file_name=configuration_file_name,
api_key_id=api_key_id, secret_api_key=secret_api_key)
|
{
"content_hash": "31bd67b34ddc41aec3730c8a146ebd5c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 100,
"avg_line_length": 44.40909090909091,
"alnum_prop": 0.6161719549641761,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "3b421fc4b5fde8ea7be1a149ab32375a1526efe5",
"size": "1104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/merchant/tokens/delete_token_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from colorful.fields import RGBColorField
class CellCountInstance(models.Model):
TISSUE_TYPE = (
('Blood film', 'Blood film'),
('Bone marrow', 'Bone marrow'))
user = models.ForeignKey(User)
datetime_submitted = models.DateTimeField(auto_now_add=True)
datetime_updated = models.DateTimeField(auto_now=True)
tissue_type = models.CharField(max_length=25, choices=TISSUE_TYPE)
overall_comment = models.TextField(blank=True)
def __unicode__(self):
return u'Count ID #%s' %(self.id)
def myeloid_erythroid_ratio(self):
"""Returns M/E ratio for a given count"""
if not self.erythroid_cellcount():
return 'Unable to calculate, erythroid count = 0'
else:
return round((float(self.myeloid_cellcount())/float(self.erythroid_cellcount())), 2)
def total_cellcount(self):
"""Returns a total count of all cells in count"""
total = 0
for count in self.cellcount_set.all():
total = total + count.get_total_count()
return total
def myeloid_cellcount(self):
"""Returns a total count of all myeloid cells in count"""
total = 0
for count in self.cellcount_set.filter(Q(cell__machine_name='blasts') |
Q(cell__machine_name='neutrophils') |
Q(cell__machine_name='band_forms') |
Q(cell__machine_name='myelocytes') |
Q(cell__machine_name='promyelocytes') |
Q(cell__machine_name='basophils') |
Q(cell__machine_name='eosinophils')):
total = total + count.get_total_count()
return total
def erythroid_cellcount(self):
"""Returns a total count of all erythroid cells in count"""
erythroid_count = self.cellcount_set.get(cell__machine_name='erythroid')
total = erythroid_count.get_total_count()
return total
class BoneMarrowBackground(models.Model):
CELLULARITY_CHOICES = (('Hypo', 'Hypo'),
('Normal', 'Normal'),
('Hyper', 'Hyper'),
('Acellular', 'Acellular'))
BM_PARTICULATE = (('No particles', 'No particles'),
('Few particles', 'Few particles'),
('Adequate particles', 'Adequate particles'))
BM_HAEMODILUTION =(('Mild', 'Mild'),
('Moderate', 'Moderate'),
('Severe', 'Severe'),)
BM_EASE_OF_ASPIRATION = (('Dry', 'Dry'),
('Easy', 'Easy'),
('Moderate', 'Moderate'),
('Hard', 'Hard'),
('Indeterminate', 'Indeterminate'))
cell_count_instance = models.OneToOneField(CellCountInstance)
trail_cellularity = models.CharField(max_length=50,
choices=CELLULARITY_CHOICES)
particle_cellularity = models.CharField(max_length=50,
choices=CELLULARITY_CHOICES)
particulate = models.CharField(max_length=50,
choices=BM_PARTICULATE)
haemodilution = models.CharField(max_length=50,
choices=BM_HAEMODILUTION)
site = models.CharField(max_length=50)
ease_of_aspiration = models.CharField(max_length=50,
choices=BM_EASE_OF_ASPIRATION)
class CellType(models.Model):
readable_name = models.CharField(max_length=50)
# TODO Use a slugfield
machine_name = models.CharField(max_length=50, unique=True)
comment = models.TextField(blank=True)
visualisation_colour = RGBColorField(blank=True)
def __unicode__(self):
return self.readable_name
class CellCount(models.Model):
cell_count_instance = models.ForeignKey(CellCountInstance)
cell = models.ForeignKey(CellType)
normal_count = models.IntegerField(default=0)
abnormal_count = models.IntegerField(default=0)
comment = models.TextField(blank=True)
def get_total_count(self):
return self.normal_count + self.abnormal_count
def percentage(self):
total = self.cell_count_instance.total_cellcount()
if total != 0:
return round((100 * float(self.normal_count+self.abnormal_count)/float(total)))
else:
return 0
class ErythropoiesisFindings(models.Model):
cell_count_instance = models.OneToOneField(CellCountInstance)
no_dysplasia = models.BooleanField(default=True)
nuclear_asynchrony = models.BooleanField(default=False)
multinucleated_forms = models.BooleanField(default=False)
ragged_haemoglobinisation = models.BooleanField(default=False)
megaloblastic_change = models.BooleanField(default=False)
comment = models.TextField(blank=True)
def get_dysplasia(self):
if self.no_dysplasia:
return None
else:
return [x[0] for x in [('Nuclear asynchrony', self.nuclear_asynchrony),
('Multinucleated forms', self.multinucleated_forms),
('Ragged haemoglobinisation', self.ragged_haemoglobinisation),
('Megaloblastic change', self.megaloblastic_change)]
if x[1]]
class GranulopoiesisFindings(models.Model):
cell_count_instance = models.OneToOneField(CellCountInstance)
no_dysplasia = models.BooleanField(default=True)
hypogranular = models.BooleanField(default=False)
pelger = models.BooleanField(default=False)
nuclear_atypia = models.BooleanField(default=False)
dohle_bodies = models.BooleanField(default=False)
comment = models.TextField(blank=True)
def get_dysplasia(self):
if self.no_dysplasia:
return None
else:
return [x[0] for x in [('Hypogranular', self.hypogranular),
('Pelger', self.pelger),
('Nuclear atypia', self.nuclear_atypia),
('Dohle bodies', self.dohle_bodies)]
if x[1]]
class MegakaryocyteFeatures(models.Model):
MEGAKARYOCYTE_RELATIVE_COUNT = (('Absent', 'Absent'),
('Reduced', 'Reduced'),
('Normal', 'Normal'),
('Increased', 'Increased'))
cell_count_instance = models.OneToOneField(CellCountInstance)
relative_count = models.CharField(max_length=50,
choices=MEGAKARYOCYTE_RELATIVE_COUNT)
no_dysplasia = models.BooleanField(default=True)
hypolobulated = models.BooleanField(default=False)
fragmented = models.BooleanField(default=False)
micromegakaryocytes = models.BooleanField(default=False)
comment = models.TextField(blank=True)
def get_dysplasia(self):
if self.no_dysplasia:
return None
else:
return [x[0] for x in [('Hypolobulated', self.hypolobulated),
('Fragmented', self.fragmented),
('Micromegakaryocytes', self.micromegakaryocytes)]
if x[1]]
class IronStain(models.Model):
ABSENT = 0
GRADE_1 = 1
GRADE_2 = 2
GRADE_3 = 3
GRADE_4 = 4
IRON_STAIN_GRADE = ((ABSENT, 'Absent'),
(GRADE_1, 'Grade 1'),
(GRADE_2, 'Grade 2'),
(GRADE_3, 'Grade 3'),
(GRADE_4, 'Grade 4'))
cell_count_instance = models.OneToOneField(CellCountInstance)
stain_performed = models.BooleanField()
iron_content = models.IntegerField(choices=IRON_STAIN_GRADE, blank=True, null=True)
ringed_sideroblasts = models.NullBooleanField(blank=True, null=True)
comment = models.TextField(blank=True)
class CellImage(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
file = models.ImageField(upload_to= "cell_images")
celltype = models.ForeignKey(CellType)
thumbnail_left = models.IntegerField()
thumbnail_top = models.IntegerField()
thumbnail_width = models.IntegerField()
def similar_cells(self):
groups = self.similarlookinggroup_set.all()
similarcells = []
for group in groups:
for image in group.cell_image.all():
similarcells.append(image)
return similarcells
def __unicode__(self):
return self.title
class SimilarLookingGroup(models.Model):
name = models.CharField(max_length=100)
cell_image = models.ManyToManyField("CellImage")
def __unicode__(self):
return self.name
|
{
"content_hash": "ea302940577aca716f76e7697670a665",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 97,
"avg_line_length": 42.825471698113205,
"alnum_prop": 0.5804604031280978,
"repo_name": "haematologic/cellcountr",
"id": "3163e4c643373c3975234df4395943ce1596e540",
"size": "9079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cellcounter/main/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22151"
},
{
"name": "Python",
"bytes": "107429"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le masque <nv_ident_salle>."""
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
class NvIdent(Masque):
"""Masque <nv_ident_salle>.
On attend un identifiant de salle en paramètre, sous la forme 'picte:1'.
Cet identifiant doit être nouveau (non utilisé).
"""
nom = "nv_ident_salle"
nom_complet = "nouvel identifiant de salle"
def init(self):
"""Initialisation des attributs"""
self.identifiant = ""
self.zone = ""
self.mnemonic = ""
def repartir(self, personnage, masques, commande):
"""Répartition du masque."""
ident = liste_vers_chaine(commande)
if not ident:
raise ErreurValidation(
"Précisez un identifiant de salle.")
ident = ident.split(" ")[0].lower()
self.a_interpreter = ident
commande[:] = commande[len(ident):]
masques.append(self)
return True
def valider(self, personnage, dic_masques):
"""Validation du masque"""
Masque.valider(self, personnage, dic_masques)
ident = self.a_interpreter
try:
zone, mnemonic = ident.split(":")
except ValueError:
raise ErreurValidation(
"|err|L'identifiant '{}' n'est pas valide.|ff|".format(ident))
if ident in type(self).importeur.salle:
raise ErreurValidation(
"|err|Cet identifiant est déjà utilisé.|ff|")
self.identifiant = ident
self.zone = zone
self.mnemonic = mnemonic
return True
|
{
"content_hash": "0625c0194e550c37b73a139e9bf5c168",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 31.526315789473685,
"alnum_prop": 0.5904284919309961,
"repo_name": "stormi/tsunami",
"id": "817e563fcd1aca8d395d4ccc94149dea881c01aa",
"size": "3370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/salle/masques/nv_ident/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
"""
This file tests a couple of distinct examples from
'Aircraft Structures,' by Peery. These cases have
known results, and the output from SectionProperties
is compared for accuracy. These examples represent a
more rigourous 'proof' against a 'real' problem.
Only results that have values in the reference material
are tested here.
BibTeX Entry for reference:
@Book{Peery,
title = {Aircraft Structures},
author = {David J. Peery},
organization = {Pensylvania State University},
publisher = {McGraw-Hill Book Company},
year = {1950},
edition = {First},
ISBN = {978-0486485805}
}
"""
import pytest
import pytest_check as check
from typing import Tuple
from sectionproperties.pre.library import nastran_sections
from sectionproperties.analysis.section import Section
## Classes
class Z_Section:
"""
This is basically just a fixture for testing purposes.
It's called by the actual pytest fixtures to generate
the Z-sections for analysis.
    We use a class here just so that load application lives in a method,
    the fixtures themselves stay simple, and multiple Z-sections can share
    the same base setup.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift, m, name):
# Setup the analysis, and calculate properties
base_geom = nastran_sections.nastran_zed(DIM1, DIM2, DIM3, DIM4)
self.geom = base_geom.shift_section(*shift)
self.geom = self.geom.create_mesh(mesh_sizes=[m])
self.xsect = Section(self.geom)
self.xsect.calculate_geometric_properties()
# This plotting code was just for verifying the section offsets.
# ax = self.xsect.plot_centroids(pause=False, render=False)
# ax.grid(1, which='both', linestyle=':')
# fig = ax.get_figure()
# fig.savefig(f'{name}_geom.png')
def apply_load(self, v):
"""
        This method applies the supplied load to the section.
v is a list-like with the first entry being Mxx, and
second entry Myy.
"""
self.xsect.calculate_warping_properties()
self.stress = self.xsect.calculate_stress(Mxx=v[0], Myy=v[1])
## Utility
def get_node(nodes, coord) -> Tuple[int, tuple]:
"""
This function will loop over the node list provided,
finding the index of the coordinates you want.
Returns the index in the nodes list, and the coords.
"""
for index, var in enumerate(nodes):
if all(var == coord):
return index, var
else:
continue
raise ValueError(f"No node found with coordinates: {coord}")
## Fixtures
@pytest.fixture
def PeeryEx6_2_1():
"""
Example 1 in Sec. 6.2 (Symmetric Bending)
This is a symmetric I-section with no lateral supports,
undergoing pure unidirectional cantilever bending.
Note that units here are **inches**, to match the text.
"""
name = "Peery_6.2.1"
geom = nastran_sections.nastran_i(6, 3, 3, 1, 1, 1)
geom = geom.shift_section(0, -3)
geom = geom.create_mesh([0.25])
xsect = Section(geom)
xsect.calculate_geometric_properties()
# This plotting code was just for verifying the section offsets.
# ax = xsect.plot_centroids(pause=False, render=False)
# ax.grid(1, which='both', linestyle=':')
# fig = ax.get_figure()
# fig.savefig(f'{name}_geom.png')
return geom, xsect
@pytest.fixture
def PeeryEx7_2_1():
"""
Example 1 in Sec. 7.2. (Unsymmetric Bending)
This is an unsymmetric Z-section with no lateral supports.
Note that units here are **inches**, to match the text.
"""
return Z_Section(
DIM1=4, DIM2=2, DIM3=8, DIM4=12, shift=[-5, -6], m=0.25, name="Peery_7.2.1"
)
## Tests
def test_symmetric_ixx(PeeryEx6_2_1):
# Directly from the example, we know that
# the 2nd moment of inertia resisting bending is.
_geom, xsect = PeeryEx6_2_1
check.almost_equal(xsect.section_props.ixx_g, 43.3, rel=1e-3)
def test_symmetric_fb(PeeryEx6_2_1):
"Max bending stress on the section."
_geom, xsect = PeeryEx6_2_1
# Defined in the text
moment = 8e5
y = 3
I = xsect.section_props.ixx_g
xsect.calculate_warping_properties()
stress = xsect.calculate_stress(Mxx=moment)
# The number quoted in the book. (Peery rounds this to the hundreds)
# 55400 = 55427.3
perfect_result = 55427.3
# The number from the textbook equation
computed_result = moment * y / I
check.almost_equal(perfect_result, computed_result, rel=1e-3)
# The max stress, computed through FEA on our mesh.
numerical_result = max(stress.get_stress()[0]["sig_zz"])
check.almost_equal(numerical_result, perfect_result, rel=1e-3)
def test_unsymmetric_ixx(PeeryEx7_2_1):
# Directly from the example, we know what
# the section properties should be.
xsect = PeeryEx7_2_1.xsect
check.almost_equal(xsect.section_props.ixx_g, 693.3, rel=1e-3)
def test_unsymmetric_iyy(PeeryEx7_2_1):
# Directly from the example, we know what
# the section properties should be.
xsect = PeeryEx7_2_1.xsect
check.almost_equal(xsect.section_props.iyy_g, 173.3, rel=1e-3)
def test_unsymmetric_ixy(PeeryEx7_2_1):
# Directly from the example, we know what
# the section properties should be.
xsect = PeeryEx7_2_1.xsect
check.almost_equal(xsect.section_props.ixy_g, -240, rel=1e-3)
def test_unsymmetric_i11(PeeryEx7_2_1):
# Directly from the example, we know what
# the section properties should be.
xsect = PeeryEx7_2_1.xsect
check.almost_equal(xsect.section_props.i11_c, 787, rel=1e-3)
def test_unsymmetric_i22(PeeryEx7_2_1):
# Directly from the example, we know what
# the section properties should be.
xsect = PeeryEx7_2_1.xsect
check.almost_equal(xsect.section_props.i22_c, 79.5, rel=1e-3)
def test_fb_C(PeeryEx7_2_1):
"""Check the stress at point C."""
# Load from the text
v = [-1e5, 1e4]
# Coordinates of point C
C = (1, 6)
# The answer in the example
# For this point, Peery rounds to the tens place,
# thus -2380 is the exact number written in the book
# but -2384 is the answer computed from his values.
perfect_result = -2384
# The simplified textbook equation
text_result = round(-494 * 1 + -315 * 6)
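    # Where -494 and -315 come from: with the gross section properties
    # computed above (Ixx = 693.3 in^4, Iyy = 173.3 in^4, Ixy = -240 in^4)
    # and the applied moments Mxx = -1e5 in-lb, Myy = 1e4 in-lb, a
    # generalized flexure formula of the form
    #   sig_zz = -x * (Myy*Ixx + Mxx*Ixy) / (Ixx*Iyy - Ixy**2)
    #            + y * (Mxx*Iyy + Myy*Ixy) / (Ixx*Iyy - Ixy**2)
    # evaluates to roughly -494*x - 315*y, matching the coefficients used in
    # this test and the B and A tests below. Sign conventions vary between
    # texts, so treat this as a consistency check, not Peery's exact
    # derivation.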
nodes = PeeryEx7_2_1.xsect.mesh_nodes
    assert len(nodes) > 0
index, _ = get_node(nodes, C)
_ = PeeryEx7_2_1.apply_load(v)
computed_result = PeeryEx7_2_1.stress.get_stress()[0]["sig_zz"][index]
check.almost_equal(text_result, perfect_result)
check.almost_equal(computed_result, perfect_result, rel=1e-3)
def test_fb_B(PeeryEx7_2_1):
"""Check the stress at point B."""
# Load from the text
v = [-1e5, 1e4]
# Coordinates of point B
B = (-5, 6)
# The answer in the example
perfect_result = 580
    # The simplified textbook equation
text_result = round(-494 * -5 + -315 * 6)
nodes = PeeryEx7_2_1.xsect.mesh_nodes
index, _ = get_node(nodes, B)
_ = PeeryEx7_2_1.apply_load(v)
computed_result = PeeryEx7_2_1.stress.get_stress()[0]["sig_zz"][index]
check.almost_equal(text_result, perfect_result)
check.almost_equal(computed_result, perfect_result, rel=1e-3)
def test_fb_A(PeeryEx7_2_1):
"""Check the stress at point A."""
# Load from the text
v = [-1e5, 1e4]
# Coordinates of point A
A = (-5, 4)
# The answer in the example
perfect_result = 1210
# The simplified textbook equation
text_result = round(-494 * -5 + -315 * 4)
nodes = PeeryEx7_2_1.xsect.mesh_nodes
index, _ = get_node(nodes, A)
_ = PeeryEx7_2_1.apply_load(v)
computed_result = PeeryEx7_2_1.stress.get_stress()[0]["sig_zz"][index]
check.almost_equal(text_result, perfect_result)
check.almost_equal(computed_result, perfect_result, rel=1e-3)
|
{
"content_hash": "12ed5896a88ce311f95efd68d695ae4f",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 83,
"avg_line_length": 32.38429752066116,
"alnum_prop": 0.6603292076049508,
"repo_name": "robbievanleeuwen/section-properties",
"id": "91cb9a220e9e6d1bf9320ba5195e7a569766b57e",
"size": "7837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sectionproperties/tests/test_peery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "565737"
}
],
"symlink_target": ""
}
|
"""
Tests that dialects are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import csv
from io import StringIO
import pytest
from pandas.errors import ParserWarning
from pandas import DataFrame
import pandas.util.testing as tm
@pytest.fixture
def custom_dialect():
dialect_name = "weird"
dialect_kwargs = dict(
doublequote=False,
escapechar="~",
delimiter=":",
skipinitialspace=False,
quotechar="~",
quoting=3,
)
return dialect_name, dialect_kwargs
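# The kwargs above mirror csv.Dialect attributes (quoting=3 is csv.QUOTE_NONE).
# tm.with_csv_dialect, used in the tests below, registers the dialect with
# Python's csv module for the duration of the block, roughly equivalent to the
# following sketch (not the actual pandas implementation):
#
#     csv.register_dialect(dialect_name, **dialect_kwargs)
#     try:
#         ...  # run read_csv with dialect=dialect_name
#     finally:
#         csv.unregister_dialect(dialect_name)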
def test_dialect(all_parsers):
parser = all_parsers
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = parser.read_csv(StringIO(data), dialect=dia)
data = """\
label1,label2,label3
index1,a,c,e
index2,b,d,f
"""
exp = parser.read_csv(StringIO(data))
exp.replace("a", '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(all_parsers):
dialect_name = "mydialect"
parser = all_parsers
data = """\
fruit:vegetable
apple:broccoli
pear:tomato
"""
exp = DataFrame({"fruit": ["apple", "pear"], "vegetable": ["broccoli", "tomato"]})
with tm.with_csv_dialect(dialect_name, delimiter=":"):
df = parser.read_csv(StringIO(data), dialect=dialect_name)
tm.assert_frame_equal(df, exp)
def test_invalid_dialect(all_parsers):
class InvalidDialect:
pass
data = "a\n1"
parser = all_parsers
msg = "Invalid dialect"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), dialect=InvalidDialect)
@pytest.mark.parametrize(
"arg",
[None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"],
)
@pytest.mark.parametrize("value", ["dialect", "default", "other"])
def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, value):
# see gh-23761.
dialect_name, dialect_kwargs = custom_dialect
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
data = "a:b\n1:2"
warning_klass = None
kwds = dict()
# arg=None tests when we pass in the dialect without any other arguments.
if arg is not None:
if "value" == "dialect": # No conflict --> no warning.
kwds[arg] = dialect_kwargs[arg]
elif "value" == "default": # Default --> no warning.
from pandas.io.parsers import _parser_defaults
kwds[arg] = _parser_defaults[arg]
else: # Non-default + conflict with dialect --> warning.
warning_klass = ParserWarning
kwds[arg] = "blah"
with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
with tm.assert_produces_warning(warning_klass):
result = parser.read_csv(StringIO(data), dialect=dialect_name, **kwds)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,warning_klass",
[
(dict(sep=","), None), # sep is default --> sep_override=True
(dict(sep="."), ParserWarning), # sep isn't default --> sep_override=False
(dict(delimiter=":"), None), # No conflict
(dict(delimiter=None), None), # Default arguments --> sep_override=True
(dict(delimiter=","), ParserWarning), # Conflict
(dict(delimiter="."), ParserWarning), # Conflict
],
ids=[
"sep-override-true",
"sep-override-false",
"delimiter-no-conflict",
"delimiter-default-arg",
"delimiter-conflict",
"delimiter-conflict2",
],
)
def test_dialect_conflict_delimiter(all_parsers, custom_dialect, kwargs, warning_klass):
# see gh-23761.
dialect_name, dialect_kwargs = custom_dialect
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
data = "a:b\n1:2"
with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
with tm.assert_produces_warning(warning_klass):
result = parser.read_csv(StringIO(data), dialect=dialect_name, **kwargs)
tm.assert_frame_equal(result, expected)
|
{
"content_hash": "faa2879be90c9ebe7f474f9980025e1d",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 88,
"avg_line_length": 28.430555555555557,
"alnum_prop": 0.6277479237909135,
"repo_name": "toobaz/pandas",
"id": "dc10352bc64601eed125711fbaedd0a9ff1b02ed",
"size": "4094",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/io/parser/test_dialect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""indavant URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic.base import RedirectView
handler404 = 'news.views.page_not_found'
admin.autodiscover()
urlpatterns = patterns('',
url(r'^about/', include('about.urls')),
url(r'^insydiaadministrative/', include(admin.site.urls)),
url(r'^auth/', include('loginsys.urls')),
url(r'^beta/', include('beta_test.urls')),
url(r'^favourite/', include('favourite.urls')),
url(r'^news/', include('news.urls')),
url(r'^notify/', include('notify.urls')),
url(r'^profile/', include('userprofile.urls')),
url(r'^search/', include('search.urls')),
url(r'^pref/', include('loginsys.urls')),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
url(r'^c/', include('loginsys.urls')),
url(r'^rss/', include('rss.urls')),
url(r'^reports/', include('reports.urls')),
# url(r'^closet/check_email/email=(?P<email>\w+)$', 'news.views.check_email_subs'),
# url(r'^closet/subs/$', 'news.views.closet_subscribe'),
# url(r'^closet/(?P<lang>\w+)/$', 'news.views.render_close_page'),
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/img/favicons/favicon.ico')),
url(r'^robots\.txt$', 'news.views.render_robots'),
(r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.MEDIA_ROOT}),
url(r'^$', 'news.views.main_page_load'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
{
"content_hash": "f37164a4c52ed1aa5505d79b33b0f447",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 98,
"avg_line_length": 47.17391304347826,
"alnum_prop": 0.667741935483871,
"repo_name": "eprivalov/sendec",
"id": "eafe67969fa1f05ae22dabc1117427ff1151e63f",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indavant/urls.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "453604"
},
{
"name": "HTML",
"bytes": "3542936"
},
{
"name": "JavaScript",
"bytes": "1202382"
},
{
"name": "Python",
"bytes": "361326"
}
],
"symlink_target": ""
}
|
"""
WSGI config for django_web_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_web_app.settings")
application = get_wsgi_application()
|
{
"content_hash": "d0d857e51253da794aaf270545949739",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.3125,
"alnum_prop": 0.7679012345679013,
"repo_name": "cloudfoundry/python-buildpack",
"id": "ea38ccd21f12ac8eded39d1cfdf6026cfc7087cc",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixtures/django_web_app/django_web_app/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "172616"
},
{
"name": "Procfile",
"bytes": "708"
},
{
"name": "Python",
"bytes": "22684"
},
{
"name": "Shell",
"bytes": "17191"
}
],
"symlink_target": ""
}
|
from tests import TestMinion
class TestExample(TestMinion):
def test_func(self, __salt__):
assert __salt__['example.func']()
    def test_util_func(self, __salt__):
assert __salt__['example.util_func']() == "I'm helping"
|
{
"content_hash": "1d21749baba07b9320d4b73ceff06150",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 63,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.602510460251046,
"repo_name": "yagnik/saltstack-template",
"id": "00a98a75c071d0e0f358aa8fedccf90461032a8f",
"size": "239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/_modules/test_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2461"
},
{
"name": "Makefile",
"bytes": "614"
},
{
"name": "Python",
"bytes": "19892"
},
{
"name": "SaltStack",
"bytes": "1476"
}
],
"symlink_target": ""
}
|
from genty import genty, genty_dataset
from app.master.build_request_handler import BuildRequestHandler
from test.framework.base_unit_test_case import BaseUnitTestCase
@genty
class TestBuildRequestHandler(BaseUnitTestCase):
@genty_dataset(
no_subjobs=([], True),
one_subjob=(['some subjob'], False),
)
def test_prepare_build_async_calls_finish_only_if_no_subjobs(self, subjobs, build_finish_called):
mock_project_lock = self.patch('threading.Lock').return_value
build_scheduler_mock = self.patch('app.master.build_scheduler.BuildScheduler').return_value
build_request_handler = BuildRequestHandler(build_scheduler_mock)
build_mock = self.patch('app.master.build.Build').return_value
build_mock.has_error = False
build_mock.all_subjobs.return_value = subjobs
build_request_handler._prepare_build_async(build_mock, mock_project_lock)
if build_finish_called:
build_mock.finish.assert_called_once_with()
else:
self.assertFalse(build_mock.finish.called)
|
{
"content_hash": "a385fcfe33dc34f906d4b428328a4fc4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 101,
"avg_line_length": 41.42307692307692,
"alnum_prop": 0.7056638811513464,
"repo_name": "Medium/ClusterRunner",
"id": "cf23f715f242da322aef282984e8f520fd40b3d3",
"size": "1077",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/master/test_build_request_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "66"
},
{
"name": "Makefile",
"bytes": "1860"
},
{
"name": "PowerShell",
"bytes": "1467"
},
{
"name": "Python",
"bytes": "678935"
},
{
"name": "Shell",
"bytes": "545"
}
],
"symlink_target": ""
}
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
"""
# BaseEstimator interface
def __init__(self, steps):
self.named_steps = dict(steps)
names, estimators = zip(*steps)
if len(self.named_steps) != len(steps):
raise ValueError("Names provided are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(zip(names, estimators))
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
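# Illustrative usage sketch (an editor's addition, not part of the original
# module); it assumes scikit-learn's StandardScaler and LogisticRegression are
# importable and uses placeholder data. It shows the two conventions described
# in the class docstring: nested parameter setting with the 'step__param'
# syntax and access to fitted steps through ``named_steps``.
#
#     from sklearn.preprocessing import StandardScaler
#     from sklearn.linear_model import LogisticRegression
#     pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
#     pipe.set_params(clf__C=10.0)      # '__' routes the value to the 'clf' step
#     pipe.fit(X_train, y_train)        # X_train / y_train are placeholder data
#     pipe.named_steps['scale'].mean_   # fitted attribute of a single step
#     y_pred = pipe.predict(X_test)     # transforms X_test, then predicts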
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
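# Editor's note (not in the original source): when the same estimator type
# appears more than once, _name_estimators suffixes "-<index>" so the names
# stay unique; a type that appears only once keeps its bare lowercased class
# name, e.g.
#     _name_estimators([StandardScaler(), StandardScaler(), GaussianNB()])
#     # -> [('standardscaler-1', ...), ('standardscaler-2', ...), ('gaussiannb', ...)]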
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
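# Editor's note (a hedged sketch, not in the original source): because the
# step names produced by _name_estimators are predictable, nested parameters
# can still be set on an auto-named pipeline, e.g. (assuming StandardScaler
# and GaussianNB are imported as in the docstring above)
#     make_pipeline(StandardScaler(), GaussianNB()).set_params(
#         standardscaler__with_mean=False)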
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
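# Illustrative usage sketch (an editor's addition, not part of the original
# module); it assumes PCA and SelectKBest are importable and uses placeholder
# data. Each transformer sees the full input; its output block is optionally
# scaled by transformer_weights before the blocks are stacked side by side.
#
#     from sklearn.decomposition import PCA
#     from sklearn.feature_selection import SelectKBest
#     union = FeatureUnion([('pca', PCA(n_components=2)),
#                           ('kbest', SelectKBest(k=1))],
#                          transformer_weights={'pca': 10.0})
#     X_features = union.fit_transform(X, y)  # 2 scaled PCA columns + 1 selected column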
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
|
{
"content_hash": "58163b424bd2038164211b6c531d1d75",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 89,
"avg_line_length": 36.90095238095238,
"alnum_prop": 0.5959841015846797,
"repo_name": "mblondel/scikit-learn",
"id": "86d0e624cb54b58f58817c0d8d3eddfb0f5a9fd3",
"size": "19373",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "sklearn/pipeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18609258"
},
{
"name": "C++",
"bytes": "1810938"
},
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5778355"
},
{
"name": "Shell",
"bytes": "5968"
}
],
"symlink_target": ""
}
|
from catch_lured_pokemon import CatchLuredPokemon
from catch_visible_pokemon import CatchVisiblePokemon
from evolve_pokemon import EvolvePokemon
from incubate_eggs import IncubateEggs
from move_to_fort import MoveToFort
from move_to_map_pokemon import MoveToMapPokemon
from nickname_pokemon import NicknamePokemon
from pokemon_catch_worker import PokemonCatchWorker
from pokemon_optimizer import PokemonOptimizer
from transfer_pokemon import TransferPokemon
from recycle_items import RecycleItems
from spin_fort import SpinFort
from handle_soft_ban import HandleSoftBan
from follow_path import FollowPath
from follow_spiral import FollowSpiral
from collect_level_up_reward import CollectLevelUpReward
from follow_cluster import FollowCluster
from sleep_schedule import SleepSchedule
from update_live_stats import UpdateLiveStats
|
{
"content_hash": "dede2803569f407489b7efbedf351217",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 56,
"avg_line_length": 43.63157894736842,
"alnum_prop": 0.873341375150784,
"repo_name": "AbelIngrand/PokemonGo-Bot",
"id": "538673cfe7f38754704a5885fdde08f83bebaf8f",
"size": "854",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pokemongo_bot/cell_workers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8011"
},
{
"name": "Protocol Buffer",
"bytes": "43769"
},
{
"name": "Python",
"bytes": "354773"
},
{
"name": "Shell",
"bytes": "3845"
}
],
"symlink_target": ""
}
|