code
stringlengths 1
199k
|
|---|
import os, concurrent.futures
import const, rwlogging, dataloader
from pool import Pool
from rwlogging import log
# Select the strategy implementation at import time from configuration.
# Each module is expected to expose a compatible runStrategy(prices).
if const.STRATEGY_TYPE == 1:
    import maStrategy as strategy
elif const.STRATEGY_TYPE == 2:
    import patternStrategy as strategy
elif const.STRATEGY_TYPE == 3:
    import vmaStrategy as strategy

# Directory containing this script.
path = os.path.dirname(__file__)
def processStrategy(stock):
    """Run the configured strategy for a single stock.

    Intended as a worker for ProcessPoolExecutor.map.

    :param stock: mapping with at least 'id' and 'file' keys.
    :return: the strategy's result object, or None on any failure so
        one bad stock does not abort the whole batch.
    """
    try:
        if const.REFRESH_DATA:
            dataloader.downloadStockData(stock)
        prices = dataloader.importData(4, stock['file'])
        const.currentSecId = stock['id']
        return strategy.runStrategy(prices)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and made worker processes unkillable.
        return None
if __name__ == "__main__":
    rwlogging.clearLog()
    # SOURCE_TYPE < 3: run the strategy once over a single data file.
    if const.SOURCE_TYPE < 3:
        prices = dataloader.importData(const.SOURCE_TYPE, const.DATA_FILE)
        strategy.runStrategy(prices)
    # SOURCE_TYPE == 3: only refresh local data files for every stock.
    if const.SOURCE_TYPE == 3:
        stocks = dataloader.importStockList(const.LIST_FILE)
        for stock in stocks:
            dataloader.downloadStockData(stock)
    # SOURCE_TYPE == 4: run the strategy per stock in a process pool and
    # aggregate the results into one final pool.
    if const.SOURCE_TYPE == 4:
        stocks = dataloader.importStockList(const.LIST_FILE)
        finalPool = Pool(100)
        with concurrent.futures.ProcessPoolExecutor(max_workers=const.MAX_PROCESS_NUM) as executor:
            trs = executor.map(processStrategy, stocks)
            for tr in trs:
                try:
                    if tr: finalPool.estimate(tr)
                # NOTE(review): bare except silently drops failed
                # results — consider narrowing to Exception and logging.
                except:
                    pass
        const.currentSecId = 'FINAL'
        finalPool.showStrategies()
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters ``Result.group_name`` to a CharField with max_length=400."""

    dependencies = [
        ('fileuploads', '0025_result_group_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='result',
            name='group_name',
            field=models.CharField(max_length=400),
        ),
    ]
|
"""Searches for albums in the MusicBrainz database.
"""
from __future__ import division, absolute_import, print_function
import musicbrainzngs
import re
import traceback
from six.moves.urllib.parse import urljoin
from beets import logging
import beets.autotag.hooks
import beets
from beets import util
from beets import config
import six
# MusicBrainz ID of the special "Various Artists" artist.
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'

# Prefer HTTPS when the Python build supports SNI.
if util.SNI_SUPPORTED:
    BASE_URL = 'https://musicbrainz.org/'
else:
    BASE_URL = 'http://musicbrainz.org/'

# Recording titles that should never be matched as tracks.
SKIPPED_TRACKS = ['[data track]']

# Maps beets field names to MusicBrainz search keys.
FIELDS_TO_MB_KEYS = {
    'catalognum': 'catno',
    'country': 'country',
    'label': 'label',
    'media': 'format',
    'year': 'date',
}

# Identify ourselves to the MusicBrainz web service.
musicbrainzngs.set_useragent('beets', beets.__version__,
                             'https://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException):
    """Raised when communication with the MusicBrainz web service fails.

    The `query` attribute holds the parameter that was passed to the
    failing action and may be of any type.
    """

    def __init__(self, reason, verb, query, tb=None):
        self.query = query
        # Collapse low-level web-service errors into one readable reason.
        if isinstance(reason, musicbrainzngs.WebServiceError):
            reason = u'MusicBrainz not reachable'
        super(MusicBrainzAPIError, self).__init__(reason, verb, tb)

    def get_message(self):
        """Return a human-readable description of the failure."""
        details = (self._reasonstr(), self.verb, repr(self.query))
        return u'%s in %s with query %s' % details
# Shared logger for this module.
log = logging.getLogger('beets')

# Related entities to request when fetching a full release.
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
                    'labels', 'artist-credits', 'aliases',
                    'recording-level-rels', 'work-rels',
                    'work-level-rels', 'artist-rels']
# Entities to request when browsing recordings of oversized releases.
BROWSE_INCLUDES = ['artist-credits', 'work-rels',
                   'artist-rels', 'recording-rels', 'release-rels']
if "work-level-rels" in musicbrainzngs.VALID_BROWSE_INCLUDES['recording']:
    BROWSE_INCLUDES.append("work-level-rels")
# Page size for browse_recordings, and the track count above which a
# release must be fetched in browse chunks.
BROWSE_CHUNKSIZE = 100
BROWSE_MAXTRACKS = 500
# Entities to request when fetching a single recording.
TRACK_INCLUDES = ['artists', 'aliases']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
    TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
if 'genres' in musicbrainzngs.VALID_INCLUDES['recording']:
    RELEASE_INCLUDES += ['genres']
def track_url(trackid):
    """Return the MusicBrainz website URL for a recording ID."""
    relative = 'recording/' + trackid
    return urljoin(BASE_URL, relative)
def album_url(albumid):
    """Return the MusicBrainz website URL for a release ID."""
    relative = 'release/' + albumid
    return urljoin(BASE_URL, relative)
def configure():
    """Set up the python-musicbrainz-ngs module according to settings
    from the beets configuration. This should be called at startup.
    """
    mb_config = config['musicbrainz']
    musicbrainzngs.set_hostname(mb_config['host'].as_str())
    musicbrainzngs.set_rate_limit(
        mb_config['ratelimit_interval'].as_number(),
        mb_config['ratelimit'].get(int),
    )
def _preferred_alias(aliases):
    """Pick the user's preferred alias from an artist's alias list.

    Returns the first primary alias matching one of the configured
    import languages, or None when nothing matches (or there are no
    aliases at all).
    """
    if not aliases:
        return None
    # Ignore aliases without a locale.
    localized = [alias for alias in aliases if 'locale' in alias]
    # Configured locales are searched in preference order.
    for locale in config['import']['languages'].as_str_seq():
        for alias in localized:
            if alias['locale'] == locale and 'primary' in alias:
                return alias
    return None
def _preferred_release_event(release):
    """Select the user's preferred release event for a release.

    Returns a (country, release_date) tuple for the first event whose
    country appears in the configured preference list, falling back to
    the release's top-level country and date.
    """
    preferred = config['match']['preferred']['countries'].as_str_seq()
    for code in preferred:
        for event in release.get('release-event-list', {}):
            try:
                # An event without an area, country codes, or a date is
                # skipped via the KeyError below.
                if code in event['area']['iso-3166-1-code-list']:
                    return code, event['date']
            except KeyError:
                pass
    return release.get('country'), release.get('date')
def _flatten_artist_credit(credit):
    """Flatten an ``artist-credit`` block into joined name strings.

    Returns a triple of joined strings: (canonical names, sort names,
    credited names).
    """
    names = []
    sort_names = []
    credit_names = []
    for entry in credit:
        if isinstance(entry, six.string_types):
            # A join phrase: appears verbatim in all three strings.
            names.append(entry)
            sort_names.append(entry)
            credit_names.append(entry)
            continue
        # An artist.
        artist = entry['artist']
        alias = _preferred_alias(artist.get('alias-list', ()))
        # Canonical name, preferring a localized alias when available.
        name = alias['alias'] if alias else artist['name']
        names.append(name)
        # Sort name: alias sort name, then the artist's sort name, then
        # the canonical name as a last resort.
        if alias:
            sort_names.append(alias['sort-name'])
        else:
            sort_names.append(artist.get('sort-name', name))
        # Credited name as printed on the release, if present.
        credit_names.append(entry.get('name', name))
    return (
        ''.join(names),
        ''.join(sort_names),
        ''.join(credit_names),
    )
def track_info(recording, index=None, medium=None, medium_index=None,
               medium_total=None):
    """Translates a MusicBrainz recording result dictionary into a beets
    ``TrackInfo`` object. Three parameters are optional and are used
    only for tracks that appear on releases (non-singletons): ``index``,
    the overall track number; ``medium``, the disc number;
    ``medium_index``, the track's index on its medium; ``medium_total``,
    the number of tracks on the medium. Each number is a 1-based index.
    """
    info = beets.autotag.hooks.TrackInfo(
        title=recording['title'],
        track_id=recording['id'],
        index=index,
        medium=medium,
        medium_index=medium_index,
        medium_total=medium_total,
        data_source=u'MusicBrainz',
        data_url=track_url(recording['id']),
    )
    if recording.get('artist-credit'):
        # Get the artist names.
        info.artist, info.artist_sort, info.artist_credit = \
            _flatten_artist_credit(recording['artist-credit'])
        # Get the ID and sort name of the first artist.
        artist = recording['artist-credit'][0]['artist']
        info.artist_id = artist['id']
    if recording.get('length'):
        # MusicBrainz reports lengths in milliseconds.
        info.length = int(recording['length']) / (1000.0)
    lyricist = []
    composer = []
    composer_sort = []
    for work_relation in recording.get('work-relation-list', ()):
        if work_relation['type'] != 'performance':
            continue
        info.work = work_relation['work']['title']
        info.mb_workid = work_relation['work']['id']
        if 'disambiguation' in work_relation['work']:
            info.work_disambig = work_relation['work']['disambiguation']
        for artist_relation in work_relation['work'].get(
                'artist-relation-list', ()):
            if 'type' in artist_relation:
                # Renamed from `type`, which shadowed the builtin.
                relation_type = artist_relation['type']
                if relation_type == 'lyricist':
                    lyricist.append(artist_relation['artist']['name'])
                elif relation_type == 'composer':
                    composer.append(artist_relation['artist']['name'])
                    composer_sort.append(
                        artist_relation['artist']['sort-name'])
    if lyricist:
        info.lyricist = u', '.join(lyricist)
    if composer:
        info.composer = u', '.join(composer)
        info.composer_sort = u', '.join(composer_sort)
    arranger = []
    for artist_relation in recording.get('artist-relation-list', ()):
        if 'type' in artist_relation:
            relation_type = artist_relation['type']
            if relation_type == 'arranger':
                arranger.append(artist_relation['artist']['name'])
    if arranger:
        info.arranger = u', '.join(arranger)
    info.decode()
    return info
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
if original:
key = 'original_' + key
setattr(info, key, date_num)
def album_info(release):
    """Takes a MusicBrainz release result dictionary and returns a beets
    AlbumInfo object containing the interesting data about that release.
    """
    # Get artist name using join phrases.
    artist_name, artist_sort_name, artist_credit_name = \
        _flatten_artist_credit(release['artist-credit'])
    ntracks = sum(len(m['track-list']) for m in release['medium-list'])
    # The MusicBrainz API omits 'artist-relation-list' and 'work-relation-list'
    # when the release has more than 500 tracks. So we use browse_recordings
    # on chunks of tracks to recover the same information in this case.
    if ntracks > BROWSE_MAXTRACKS:
        log.debug(u'Album {} has too many tracks', release['id'])
        recording_list = []
        for i in range(0, ntracks, BROWSE_CHUNKSIZE):
            log.debug(u'Retrieving tracks starting at {}', i)
            recording_list.extend(musicbrainzngs.browse_recordings(
                release=release['id'], limit=BROWSE_CHUNKSIZE,
                includes=BROWSE_INCLUDES,
                offset=i)['recording-list'])
        # Re-attach the fully browsed recording data to each track.
        track_map = {r['id']: r for r in recording_list}
        for medium in release['medium-list']:
            for recording in medium['track-list']:
                recording_info = track_map[recording['recording']['id']]
                recording['recording'] = recording_info
    # Basic info.
    track_infos = []
    index = 0
    for medium in release['medium-list']:
        disctitle = medium.get('title')
        format = medium.get('format')
        # Skip entire media the user has configured to ignore.
        if format in config['match']['ignored_media'].as_str_seq():
            continue
        all_tracks = medium['track-list']
        if ('data-track-list' in medium
                and not config['match']['ignore_data_tracks']):
            all_tracks += medium['data-track-list']
        # NOTE: counted before the pregap track is inserted below.
        track_count = len(all_tracks)
        if 'pregap' in medium:
            all_tracks.insert(0, medium['pregap'])
        for track in all_tracks:
            # Skip placeholder data tracks.
            if ('title' in track['recording'] and
                    track['recording']['title'] in SKIPPED_TRACKS):
                continue
            # Optionally skip video tracks.
            if ('video' in track['recording'] and
                    track['recording']['video'] == 'true' and
                    config['match']['ignore_video_tracks']):
                continue
            # Basic information from the recording.
            index += 1
            ti = track_info(
                track['recording'],
                index,
                int(medium['position']),
                int(track['position']),
                track_count,
            )
            ti.release_track_id = track['id']
            ti.disctitle = disctitle
            ti.media = format
            ti.track_alt = track['number']
            # Prefer track data, where present, over recording data.
            if track.get('title'):
                ti.title = track['title']
            if track.get('artist-credit'):
                # Get the artist names.
                ti.artist, ti.artist_sort, ti.artist_credit = \
                    _flatten_artist_credit(track['artist-credit'])
                ti.artist_id = track['artist-credit'][0]['artist']['id']
            if track.get('length'):
                # Track lengths are reported in milliseconds.
                ti.length = int(track['length']) / (1000.0)
            track_infos.append(ti)
    info = beets.autotag.hooks.AlbumInfo(
        album=release['title'],
        album_id=release['id'],
        artist=artist_name,
        artist_id=release['artist-credit'][0]['artist']['id'],
        tracks=track_infos,
        mediums=len(release['medium-list']),
        artist_sort=artist_sort_name,
        artist_credit=artist_credit_name,
        data_source=u'MusicBrainz',
        data_url=album_url(release['id']),
    )
    # Various Artists releases get the configured VA display name.
    info.va = info.artist_id == VARIOUS_ARTISTS_ID
    if info.va:
        info.artist = config['va_name'].as_str()
    info.asin = release.get('asin')
    info.releasegroup_id = release['release-group']['id']
    info.albumstatus = release.get('status')
    # Get the disambiguation strings at the release and release group level.
    if release['release-group'].get('disambiguation'):
        info.releasegroupdisambig = \
            release['release-group'].get('disambiguation')
    if release.get('disambiguation'):
        info.albumdisambig = release.get('disambiguation')
    # Get the "classic" Release type. This data comes from a legacy API
    # feature before MusicBrainz supported multiple release types.
    if 'type' in release['release-group']:
        reltype = release['release-group']['type']
        if reltype:
            info.albumtype = reltype.lower()
    # Log the new-style "primary" and "secondary" release types.
    # Eventually, we'd like to actually store this data, but we just log
    # it for now to help understand the differences.
    if 'primary-type' in release['release-group']:
        rel_primarytype = release['release-group']['primary-type']
        if rel_primarytype:
            log.debug('primary MB release type: ' + rel_primarytype.lower())
    if 'secondary-type-list' in release['release-group']:
        if release['release-group']['secondary-type-list']:
            log.debug('secondary MB release type(s): ' + ', '.join(
                [secondarytype.lower() for secondarytype in
                 release['release-group']['secondary-type-list']]))
    # Release events.
    info.country, release_date = _preferred_release_event(release)
    release_group_date = release['release-group'].get('first-release-date')
    if not release_date:
        # Fall back if release-specific date is not available.
        release_date = release_group_date
    _set_date_str(info, release_date, False)
    _set_date_str(info, release_group_date, True)
    # Label name.
    if release.get('label-info-list'):
        label_info = release['label-info-list'][0]
        if label_info.get('label'):
            label = label_info['label']['name']
            if label != '[no label]':
                info.label = label
        info.catalognum = label_info.get('catalog-number')
    # Text representation data.
    if release.get('text-representation'):
        rep = release['text-representation']
        info.script = rep.get('script')
        info.language = rep.get('language')
    # Media (format).
    if release['medium-list']:
        first_medium = release['medium-list'][0]
        info.media = first_medium.get('format')
    # Genres, joined with ';', when enabled in the configuration.
    genres = release.get('genre-list')
    if config['musicbrainz']['genres'] and genres:
        info.genre = ';'.join(g['name'] for g in genres)
    info.decode()
    return info
def match_album(artist, album, tracks=None, extra_tags=None):
    """Searches for a single album ("release" in MusicBrainz parlance)
    and returns an iterator over AlbumInfo objects. May raise a
    MusicBrainzAPIError.

    The query consists of an artist name, an album name, and,
    optionally, a number of tracks on the album and any other extra tags.
    """
    # Build search criteria.
    criteria = {'release': album.lower().strip()}
    if artist is not None:
        criteria['artist'] = artist.lower().strip()
    else:
        # Various Artists search.
        criteria['arid'] = VARIOUS_ARTISTS_ID
    if tracks is not None:
        criteria['tracks'] = six.text_type(tracks)
    # Additional search cues from existing metadata.
    if extra_tags:
        for tag in extra_tags:
            key = FIELDS_TO_MB_KEYS[tag]
            value = six.text_type(extra_tags.get(tag, '')).lower().strip()
            if key == 'catno':
                # Catalog numbers are compared without spaces.
                value = value.replace(u' ', '')
            if value:
                criteria[key] = value
    # Abort if we have no search terms.
    if not any(criteria.values()):
        return
    try:
        log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
        res = musicbrainzngs.search_releases(
            limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, 'release search', criteria,
                                  traceback.format_exc())
    for release in res['release-list']:
        # The search result is missing some data (namely, the tracks),
        # so we just use the ID and fetch the rest of the information.
        albuminfo = album_for_id(release['id'])
        if albuminfo is not None:
            yield albuminfo
def match_track(artist, title):
    """Searches for a single track and returns an iterable of TrackInfo
    objects. May raise a MusicBrainzAPIError.
    """
    criteria = {
        'artist': artist.lower().strip(),
        'recording': title.lower().strip(),
    }
    # Nothing to search for.
    if not any(criteria.values()):
        return
    limit = config['musicbrainz']['searchlimit'].get(int)
    try:
        response = musicbrainzngs.search_recordings(limit=limit, **criteria)
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, 'recording search', criteria,
                                  traceback.format_exc())
    for recording in response['recording-list']:
        yield track_info(recording)
def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
def album_for_id(releaseid):
    """Fetches an album by its MusicBrainz ID and returns an AlbumInfo
    object or None if the album is not found. May raise a
    MusicBrainzAPIError.
    """
    log.debug(u'Requesting MusicBrainz release {}', releaseid)
    albumid = _parse_id(releaseid)
    if albumid is None:
        log.debug(u'Invalid MBID ({0}).', releaseid)
        return None
    try:
        res = musicbrainzngs.get_release_by_id(albumid, RELEASE_INCLUDES)
    except musicbrainzngs.ResponseError:
        # The release does not exist (or a similar HTTP-level failure).
        log.debug(u'Album ID match failed.')
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, u'get release by ID', albumid,
                                  traceback.format_exc())
    return album_info(res['release'])
def track_for_id(releaseid):
    """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
    or None if no track is found. May raise a MusicBrainzAPIError.
    """
    trackid = _parse_id(releaseid)
    if trackid is None:
        log.debug(u'Invalid MBID ({0}).', releaseid)
        return None
    try:
        res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
    except musicbrainzngs.ResponseError:
        # The recording does not exist (or a similar HTTP-level failure).
        log.debug(u'Track ID match failed.')
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, u'get recording by ID', trackid,
                                  traceback.format_exc())
    return track_info(res['recording'])
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from le_utils.constants import format_presets
from kolibri.core.content import hooks as content_hooks
from kolibri.plugins import KolibriPluginBase
from kolibri.plugins.hooks import register_hook
from kolibri.utils.conf import OPTIONS
class HTML5AppPlugin(KolibriPluginBase):
    """Kolibri plugin for rendering HTML5 app content."""
    # NOTE(review): presumably names this plugin's options module per
    # KolibriPluginBase convention — verify against the base class.
    kolibri_options = "options"
@register_hook
class HTML5AppAsset(content_hooks.ContentRendererHook):
    """Content renderer hook handling the HTML5 (zip) format preset."""
    bundle_id = "main"
    # Content presets this renderer handles.
    presets = (format_presets.HTML5_ZIP,)

    @property
    def plugin_data(self):
        # Expose the configured HTML5 sandbox tokens; presumably
        # consumed by the frontend renderer — verify.
        return {"html5_sandbox_tokens": OPTIONS["HTML5"]["SANDBOX"]}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the ``Document`` model: an auto primary key plus a file
    field whose uploads go to the ``images`` directory."""

    dependencies = [
        ('smush', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('docfile', models.FileField(upload_to='images')),
            ],
        ),
    ]
|
import socket
import getpass
from threading import Thread
"""This module taces care of the server-specific logic."""
class ClientConnection:
    """Base connection to the server; subclasses implement
    `read_from_server` for either chat or file transfer."""

    def __init__(self, server_ip, client_ip, callbacks):
        # `callbacks` is consumed by subclasses; the base class only
        # records the endpoint addresses.
        self.server_ip = server_ip
        self.client_ip = client_ip

    def start(self):
        """Open the TCP connection and hand control to the reader."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.clientsocket = sock
        sock.connect((self.server_ip, 8087))
        self.read_from_server()
class ChatClient(ClientConnection):
    """Takes care of initializing the client for chat."""

    def __init__(self, server_ip, client_ip, callbacks):
        ClientConnection.__init__(self, server_ip, client_ip, callbacks)
        # callbacks: (ready_for_chat, on_send, on_receive) callables.
        self.ready_for_chat, self.on_send, self.on_receive = callbacks

    def read_from_server(self):
        # Receive on a background daemon thread so the process can exit
        # even while the thread blocks in recv().
        self.thread = Thread(target=self.printer)
        self.thread.daemon = True
        self.thread.start()
        # Hand the caller a function it can use to send messages.
        self.ready_for_chat(self.send_message)

    def send_message(self, message):
        # encode() uses the UTF-8 default.
        self.clientsocket.send(message.encode())
        self.on_send(message)

    def printer(self):
        # Endless receive loop. NOTE(review): an empty recv() (peer
        # closed) is ignored rather than ending the loop — confirm
        # this busy-looping on disconnect is intended.
        while True:
            buf = self.clientsocket.recv(128)
            if len(buf) > 0:
                user = getpass.getuser()
                message = buf.decode()
                self.on_receive(user, self.client_ip, message)
class TransferClient(ClientConnection):
    """Client endpoint for a file transfer."""

    def __init__(self, server_ip, client_ip, callbacks):
        ClientConnection.__init__(self, server_ip, client_ip, callbacks)
        # callbacks: (transfer_send, transfer_receive) callables.
        self.transfer_send, self.transfer_receive = callbacks

    def read_from_server(self):
        """Forward received chunks to the callback until EOF, then close."""
        while True:
            data = self.clientsocket.recv(128)
            if not data:
                break
            self.transfer_receive(data)
        self.clientsocket.close()
|
import dork_compose.plugin
class Plugin(dork_compose.plugin.Plugin):
    """dork-compose plugin that injects a `dork_tracker` service into
    the composed configuration."""

    def preprocess_config(self, config):
        # Append an idle alpine container (`tail -f /dev/null`) whose
        # labels record the dork source/project/instance, read from the
        # plugin environment.
        config.services.append({
            'name': 'dork_tracker',
            'image': 'alpine:3.4',
            'command': 'tail -f /dev/null',
            'volumes': [],
            'labels': {
                'dork.tracker': '',
                'dork.tracker.source': self.env.get('DORK_SOURCE'),
                'dork.tracker.project': self.env.get('DORK_PROJECT'),
                'dork.tracker.instance': self.env.get('DORK_INSTANCE'),
            }
        })
|
import os
import sys
if __name__ == "__main__":
    # Default to the project settings module unless the environment
    # already names one, then delegate to Django's command line.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "del_proj.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
"""Relative Components Analysis (RCA)
RCA learns a full rank Mahalanobis distance metric based on a
weighted sum of in-class covariance matrices.
It applies a global linear transformation to assign large weights to
relevant dimensions and low weights to irrelevant dimensions.
Those relevant dimensions are estimated using "chunklets",
subsets of points that are known to belong to the same class.
'Learning distance functions using equivalence relations', ICML 2003
'Learning a Mahalanobis metric from equivalence constraints', JMLR 2005
"""
from __future__ import absolute_import
import numpy as np
import warnings
from six.moves import xrange
from sklearn import decomposition
from sklearn.base import TransformerMixin
from .base_metric import MahalanobisMixin
from .constraints import Constraints
def _chunk_mean_centering(data, chunks):
    """Center every chunklet of `data` on its own mean.

    `chunks[i] == -1` marks point i as unassigned; other values are
    chunklet labels. Returns the boolean mask of assigned points and the
    corresponding mean-centered rows.
    """
    num_chunks = chunks.max() + 1
    chunk_mask = chunks != -1
    # Fancy indexing copies, so the centering below does not touch `data`.
    chunk_data = data[chunk_mask]
    chunk_labels = chunks[chunk_mask]
    for label in range(num_chunks):
        selector = chunk_labels == label
        chunk_data[selector] -= chunk_data[selector].mean(axis=0)
    return chunk_mask, chunk_data
class RCA(MahalanobisMixin, TransformerMixin):
    """Relevant Components Analysis (RCA)

    Attributes
    ----------
    transformer_ : `numpy.ndarray`, shape=(num_dims, n_features)
        The learned linear transformation ``L``.
    """

    def __init__(self, num_dims=None, pca_comps=None, preprocessor=None):
        """Initialize the learner.

        Parameters
        ----------
        num_dims : int, optional
            embedding dimension (default: original dimension of data)
        pca_comps : int, float, None or string
            Number of components to keep during PCA preprocessing.
            If None (default), does not perform PCA.
            If ``0 < pca_comps < 1``, it is used as
            the minimum explained variance ratio.
            See sklearn.decomposition.PCA for more details.
        preprocessor : array-like, shape=(n_samples, n_features) or callable
            The preprocessor to call to get tuples from indices. If array-like,
            tuples will be formed like this: X[indices].
        """
        self.num_dims = num_dims
        self.pca_comps = pca_comps
        super(RCA, self).__init__(preprocessor)

    def _check_dimension(self, rank, X):
        """Validate `num_dims` against the data, warn on a rank-deficient
        inner covariance, and return the embedding dimension to use.
        """
        d = X.shape[1]
        if rank < d:
            warnings.warn('The inner covariance matrix is not invertible, '
                          'so the transformation matrix may contain Nan values. '
                          'You should adjust pca_comps to remove noise and '
                          'redundant information.')
        if self.num_dims is None:
            dim = d
        elif self.num_dims <= 0:
            raise ValueError('Invalid embedding dimension: must be greater than 0.')
        elif self.num_dims > d:
            # Requested dimension too large: clamp to d and warn.
            dim = d
            warnings.warn('num_dims (%d) must be smaller than '
                          'the data dimension (%d)' % (self.num_dims, d))
        else:
            dim = self.num_dims
        return dim

    def fit(self, X, chunks):
        """Learn the RCA model.

        Parameters
        ----------
        X : (n x d) data matrix
            Each row corresponds to a single instance
        chunks : (n,) array of ints
            When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
            When ``chunks[i] == j``, point i belongs to chunklet j.
        """
        X = self._prepare_inputs(X, ensure_min_samples=2)
        # PCA projection to remove noise and redundant information.
        if self.pca_comps is not None:
            pca = decomposition.PCA(n_components=self.pca_comps)
            X_t = pca.fit_transform(X)
            M_pca = pca.components_
        else:
            X_t = X - X.mean(axis=0)
            M_pca = None
        chunks = np.asanyarray(chunks, dtype=int)
        chunk_mask, chunked_data = _chunk_mean_centering(X_t, chunks)
        # Pooled within-chunklet covariance (biased estimator).
        inner_cov = np.atleast_2d(np.cov(chunked_data, rowvar=0, bias=1))
        dim = self._check_dimension(np.linalg.matrix_rank(inner_cov), X_t)
        # Fisher Linear Discriminant projection
        if dim < X_t.shape[1]:
            total_cov = np.cov(X_t[chunk_mask], rowvar=0)
            # Solve total_cov @ tmp = inner_cov in the least-squares sense.
            tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
            vals, vecs = np.linalg.eig(tmp)
            # Keep the eigenvectors with the smallest eigenvalues.
            inds = np.argsort(vals)[:dim]
            A = vecs[:, inds]
            inner_cov = np.atleast_2d(A.T.dot(inner_cov).dot(A))
            self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
        else:
            self.transformer_ = _inv_sqrtm(inner_cov).T
        # Map the transformation back to the original feature space when
        # PCA preprocessing was applied.
        if M_pca is not None:
            self.transformer_ = np.atleast_2d(self.transformer_.dot(M_pca))
        return self
def _inv_sqrtm(x):
'''Computes x^(-1/2)'''
vals, vecs = np.linalg.eigh(x)
return (vecs / np.sqrt(vals)).dot(vecs.T)
class RCA_Supervised(RCA):
    """Supervised version of Relevant Components Analysis (RCA)

    Attributes
    ----------
    transformer_ : `numpy.ndarray`, shape=(num_dims, n_features)
        The learned linear transformation ``L``.
    """

    def __init__(self, num_dims=None, pca_comps=None, num_chunks=100,
                 chunk_size=2, preprocessor=None):
        """Initialize the supervised version of `RCA`.

        `RCA_Supervised` creates chunks of similar points by first sampling a
        class, taking `chunk_size` elements in it, and repeating the process
        `num_chunks` times.

        Parameters
        ----------
        num_dims : int, optional
            embedding dimension (default: original dimension of data)
        num_chunks: int, optional
            number of chunklets to sample
        chunk_size: int, optional
            number of points per chunklet
        preprocessor : array-like, shape=(n_samples, n_features) or callable
            The preprocessor to call to get tuples from indices. If array-like,
            tuples will be formed like this: X[indices].
        """
        RCA.__init__(self, num_dims=num_dims, pca_comps=pca_comps,
                     preprocessor=preprocessor)
        self.num_chunks = num_chunks
        self.chunk_size = chunk_size

    def fit(self, X, y, random_state=np.random):
        """Create constraints from labels and learn the RCA model.

        Uses the `num_chunks` and `chunk_size` specified in the constructor.

        Parameters
        ----------
        X : (n x d) data matrix
            each row corresponds to a single instance
        y : (n) data labels
        random_state : a random.seed object to fix the random_state if needed.
        """
        X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
        chunks = Constraints(y).chunks(num_chunks=self.num_chunks,
                                       chunk_size=self.chunk_size,
                                       random_state=random_state)
        return RCA.fit(self, X, chunks)
|
"""Module containing class `MetadataImporter`."""
from collections import defaultdict
import datetime
import logging
from django.db import transaction
from vesper.command.command import CommandSyntaxError
from vesper.django.app.models import (
AnnotationConstraint, AnnotationInfo, Device, DeviceConnection,
DeviceInput, DeviceModel, DeviceModelInput, DeviceModelOutput,
DeviceOutput, Job, Processor, Station, StationDevice, TagInfo)
import vesper.command.command_utils as command_utils
import vesper.util.time_utils as time_utils
import vesper.util.yaml_utils as yaml_utils
class MetadataImporter:
"""
Importer for metadata including stations, devices, etc.
The data to be archived are in the `metadata` command argument.
The value of the argument is a mapping from string keys like `'stations'`
and `'devices'` to collections of mappings, with each mapping in the
collection describing the fields of one archive object.
"""
extension_name = 'Metadata Importer'
def __init__(self, args):
    # `metadata` maps keys like 'stations' and 'devices' to collections
    # of field mappings; it is a required command argument.
    self.metadata = command_utils.get_required_arg('metadata', args)
def execute(self, job_info):
    """Import all metadata inside a single database transaction."""
    self._logger = logging.getLogger()
    try:
        # One atomic transaction: either everything is imported or the
        # database is left untouched.
        with transaction.atomic():
            self._add_stations()
            self._add_device_models()
            self._add_devices()
            self._add_station_devices()
            self._add_detectors()
            self._add_classifiers()
            self._add_annotation_constraints(job_info)
            self._add_annotations(job_info)
            self._add_tags(job_info)
    except Exception:
        self._logger.error(
            'Metadata import failed with an exception. Database '
            'has been restored to its state before the import. See '
            'below for exception traceback.')
        # Re-raise so the caller sees the traceback; the transaction
        # above has already rolled back.
        raise
    return True
def _add_stations(self):
    """Create a `Station` row for each station description, if any."""
    stations_data = self.metadata.get('stations')
    if stations_data is not None:
        for data in stations_data:
            name = _get_required(data, 'name', 'station')
            self._logger.info('Adding station "{}"...'.format(name))
            # Only `description` is optional.
            description = data.get('description', '')
            latitude = _get_required(data, 'latitude', 'station')
            longitude = _get_required(data, 'longitude', 'station')
            elevation = _get_required(data, 'elevation', 'station')
            time_zone = _get_required(data, 'time_zone', 'station')
            Station.objects.create(
                name=name,
                description=description,
                latitude=latitude,
                longitude=longitude,
                elevation=elevation,
                time_zone=time_zone)
def _add_device_models(self):
    """Create device models along with their input and output ports."""
    device_models_data = self.metadata.get('device_models')
    if device_models_data is not None:
        for data in device_models_data:
            model = self._add_device_model(data)
            self._add_ports(model, data, 'input', DeviceModelInput)
            self._add_ports(model, data, 'output', DeviceModelOutput)
def _add_device_model(self, data):
    """Create one `DeviceModel` from its field mapping and return it."""
    name = _get_required(data, 'name', 'device model')
    self._logger.info('Adding device model "{}"...'.format(name))
    # `type_` avoids shadowing the builtin; the model field is `type`.
    type_ = _get_required(data, 'type', 'device model')
    manufacturer = _get_required(data, 'manufacturer', 'device model')
    model = _get_required(data, 'model', 'device model')
    description = data.get('description', '')
    # NOTE: `model` is reused here — first the model-name string, then
    # the created DeviceModel object.
    model = DeviceModel.objects.create(
        name=name,
        type=type_,
        manufacturer=manufacturer,
        model=model,
        description=description
    )
    return model
def _add_ports(self, model, data, port_type, port_class):
    """Create the ports of one type for a device model.

    `port_type` is 'input' or 'output'; `port_class` is the matching
    `DeviceModelInput` or `DeviceModelOutput` class.
    """
    port_data = self._get_port_data(data, port_type)
    for local_name, channel_num in port_data:
        self._logger.info(
            'Adding device model "{}" {} "{}"...'.format(
                model.name, port_type, local_name))
        port_class.objects.create(
            model=model,
            local_name=local_name,
            channel_num=channel_num)
def _get_port_data(self, data, port_type):
    """Return (local_name, channel_num) pairs for ports of one type.

    Names come from an explicit list under e.g. `inputs`, or are
    generated from a `num_inputs` count: "Input" for a single port,
    "Input 0", "Input 1", ... for several.
    """
    names = data.get(port_type + 's')
    if names is None:
        count = data.get('num_{}s'.format(port_type), 0)
        capitalized = port_type.capitalize()
        if count == 0:
            names = []
        elif count == 1:
            names = [capitalized]
        else:
            names = ['{} {}'.format(capitalized, index)
                     for index in range(count)]
    return [(name, index) for index, name in enumerate(names)]
def _add_devices(self):
    """Create devices, plus their inputs/outputs from their models."""
    devices_data = self.metadata.get('devices')
    if devices_data is not None:
        # Look up device models by name once for all devices.
        models = _create_objects_dict(DeviceModel)
        for data in devices_data:
            device = self._add_device(data, models)
            self._add_device_inputs(device)
            self._add_device_outputs(device)
def _add_device(self, data, models):
    """Create one `Device`; `models` maps model names to objects."""
    name = _get_required(data, 'name', 'device')
    self._logger.info('Adding device "{}"...'.format(name))
    model = self._get_device_model(data, models)
    serial_number = _get_required(data, 'serial_number', 'device')
    description = data.get('description', '')
    return Device.objects.create(
        name=name,
        model=model,
        serial_number=serial_number,
        description=description)
def _get_device_model(self, data, models):
    """Resolve a device's model by name, or raise `CommandSyntaxError`."""
    name = _get_required(data, 'model', 'device')
    try:
        return models[name]
    except KeyError:
        raise CommandSyntaxError(
            'Unrecognized device model name "{}".'.format(name))
def _add_device_inputs(self, device):
    """Create a `DeviceInput` for each input of the device's model."""
    for model_input in device.model.inputs.all():
        self._logger.info(
            'Adding device "{}" input "{}"...'.format(
                device.name, model_input.local_name))
        DeviceInput.objects.create(
            device=device,
            model_input=model_input)
def _add_device_outputs(self, device):
    """Create a `DeviceOutput` for each output of the device's model."""
    for model_output in device.model.outputs.all():
        self._logger.info(
            'Adding device "{}" output "{}"...'.format(
                device.name, model_output.local_name))
        DeviceOutput.objects.create(
            device=device,
            model_output=model_output)
def _add_station_devices(self):
    """Install devices at stations and wire up their connections.

    Each `station_devices` metadata item names a station, a deployment
    interval, the devices deployed there, and the output -> input
    connections among those devices during that interval.
    """
    station_devices_data = self.metadata.get('station_devices')
    if station_devices_data is not None:
        # Lookup tables keyed by both name and long_name.
        devices = _create_objects_dict(Device)
        inputs = _create_objects_dict(DeviceInput)
        outputs = _create_objects_dict(DeviceOutput)
        for data in station_devices_data:
            station = self._get_station(data)
            data_name = 'station devices array'
            # Metadata times are station-local; convert to UTC.
            start_time = self._get_time(
                data, 'start_time', station, data_name)
            end_time = self._get_time(
                data, 'end_time', station, data_name)
            device_names = _get_required(data, 'devices', data_name)
            station_devices = []
            for name in device_names:
                device = self._get_device(name, devices)
                self._add_station_device(
                    station, device, start_time, end_time)
                station_devices.append(device)
            # Shorthand port names (model name instead of device name)
            # are only valid for models with a single device here.
            shorthand_inputs, shorthand_outputs = \
                _get_shorthand_ports(station_devices)
            connections = _get_required(data, 'connections', data_name)
            for connection in connections:
                output = self._get_port(
                    connection, 'output', shorthand_outputs, outputs)
                input_ = self._get_port(
                    connection, 'input', shorthand_inputs, inputs)
                self._add_connection(
                    station, output, input_, start_time, end_time)
def _get_station(self, data):
    """Return the `Station` named in `data`, raising on unknown names."""
    station_name = _get_required(data, 'station', 'station devices item')
    try:
        station = Station.objects.get(name=station_name)
    except Station.DoesNotExist:
        raise CommandSyntaxError(
            'Unrecognized station "{}".'.format(station_name))
    return station
def _get_time(self, data, key, station, data_name):
    """Read a station-local time from ``data[key]`` and convert it to UTC.

    YAML parses bare dates as `datetime.date`; those are promoted to
    midnight of that day before conversion.
    """
    dt = _get_required(data, key, data_name)
    # `datetime.datetime` is a subclass of `datetime.date`, so check for
    # it explicitly: the old unqualified isinstance() check rebuilt full
    # timestamps from (year, month, day) and silently dropped their
    # time-of-day component.
    if isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime):
        dt = datetime.datetime(dt.year, dt.month, dt.day)
    return station.local_to_utc(dt)
def _get_device(self, name, devices):
try:
return devices[name]
except KeyError:
raise CommandSyntaxError('Unrecognized device "{}".'.format(name))
def _add_station_device(self, station, device, start_time, end_time):
    """Record that `device` is deployed at `station` for the given interval."""
    # Fixed the unbalanced quote in the log message (was: `to {}"...`).
    self._logger.info(
        'Adding station "{}" device "{}" from {} to {}...'.format(
            station.name, device.name, str(start_time), str(end_time)))
    StationDevice.objects.create(
        station=station,
        device=device,
        start_time=start_time,
        end_time=end_time)
def _get_port(self, connection, port_type, shorthand_ports, ports):
    """Resolve one connection endpoint name to a port object.

    Shorthand names (model-based) are tried first, then full names.
    Raises `CommandSyntaxError` if neither mapping knows the name.
    """
    name = _get_required(connection, port_type, 'device connection')
    port = shorthand_ports.get(name)
    if port is None:
        port = ports.get(name)
    if port is None:
        raise CommandSyntaxError(
            'Unrecognized device {} "{}".'.format(port_type, name))
    # The old control flow only returned the port on one of the two
    # successful lookup paths, yielding None on the other.
    return port
def _add_connection(self, station, output, input_, start_time, end_time):
    """Record a device output -> input connection for the given period.

    NOTE(review): `station` is only used for logging; the created
    `DeviceConnection` carries no station reference -- confirm the model
    intentionally has none.
    """
    self._logger.info((
        'Adding station "{}" device connection "{} -> {} '
        'from {} to {}"...').format(
            station.name, output.name, input_.name,
            str(start_time), str(end_time)))
    DeviceConnection.objects.create(
        output=output,
        input=input_,
        start_time=start_time,
        end_time=end_time)
def _add_detectors(self):
    """Create `Processor` rows of type 'Detector' from `detectors` metadata."""
    self._add_processors('detectors', 'detector', 'Detector')
def _add_processors(self, data_key, log_type_name, db_type_name):
    """Create `Processor` rows of type `db_type_name` from the `data_key`
    metadata section, logging each one as `log_type_name`."""
    processors_data = self.metadata.get(data_key)
    if processors_data is None:
        return
    for data in processors_data:
        name = _get_required(data, 'name', log_type_name)
        self._logger.info('Adding {} "{}"...'.format(log_type_name, name))
        Processor.objects.create(
            name=name,
            type=db_type_name,
            description=data.get('description', ''))
def _add_classifiers(self):
    """Create `Processor` rows of type 'Classifier' from `classifiers` metadata."""
    self._add_processors('classifiers', 'classifier', 'Classifier')
def _add_annotation_constraints(self, job_info):
    """Create an `AnnotationConstraint` for each metadata entry.

    The full metadata entry is stored as YAML text so the constraint's
    exact specification is preserved. The importing job is recorded as
    the creator.
    """
    constraints_data = self.metadata.get('annotation_constraints')
    if constraints_data is not None:
        for data in constraints_data:
            name = _get_required(data, 'name', 'annotation constraint')
            self._logger.info(
                'Adding annotation constraint "{}"...'.format(name))
            description = data.get('description', '')
            # Persist the entire entry verbatim, as YAML.
            text = yaml_utils.dump(data)
            creation_time = time_utils.get_utc_now()
            creating_user = None  # created by a job, not a user
            creating_job = Job.objects.get(id=job_info.job_id)
            AnnotationConstraint.objects.create(
                name=name,
                description=description,
                text=text,
                creation_time=creation_time,
                creating_user=creating_user,
                creating_job=creating_job)
def _add_annotations(self, job_info):
    """Create an `AnnotationInfo` for each `annotations` metadata entry.

    Annotations default to type 'String' and may reference a previously
    created constraint by name. The importing job is recorded as the
    creator.
    """
    annotations_data = self.metadata.get('annotations')
    if annotations_data is not None:
        for data in annotations_data:
            name = _get_required(data, 'name', 'annotation')
            self._logger.info('Adding annotation "{}"...'.format(name))
            description = data.get('description', '')
            type_ = data.get('type', 'String')
            constraint = self._get_annotation_constraint(data)
            creation_time = time_utils.get_utc_now()
            creating_user = None  # created by a job, not a user
            creating_job = Job.objects.get(id=job_info.job_id)
            AnnotationInfo.objects.create(
                name=name,
                description=description,
                type=type_,
                constraint=constraint,
                creation_time=creation_time,
                creating_user=creating_user,
                creating_job=creating_job)
def _get_annotation_constraint(self, data):
try:
name = data['constraint']
except KeyError:
return None
else:
return AnnotationConstraint.objects.get(name=name)
def _add_tags(self, job_info):
    """Create a `TagInfo` row for each entry in the `tags` metadata."""
    tags_data = self.metadata.get('tags')
    if tags_data is None:
        return
    for data in tags_data:
        name = _get_required(data, 'name', 'tag')
        self._logger.info('Adding tag "{}"...'.format(name))
        TagInfo.objects.create(
            name=name,
            description=data.get('description', ''),
            creation_time=time_utils.get_utc_now(),
            creating_user=None,
            creating_job=Job.objects.get(id=job_info.job_id))
def _get_required(data, key, data_name):
try:
return data[key]
except KeyError:
raise CommandSyntaxError(
'{} missing required item "{}".'.format(
data_name.capitalize(), key))
def _create_objects_dict(cls):
objects = {}
for obj in cls.objects.all():
objects[obj.name] = obj
objects[obj.long_name] = obj
return objects
def _get_shorthand_ports(devices):
    """Build shorthand-name -> port maps for the given devices.

    A shorthand port name uses the model name in place of the device
    name, so it is only unambiguous when exactly one of `devices` has
    that model; ports of such devices are the only entries included.
    """
    # Group the devices by their model name.
    model_devices = defaultdict(set)
    for device in devices:
        model_devices[device.model.name].add(device)
    shorthand_inputs = {}
    shorthand_outputs = {}
    for model_name, model_group in model_devices.items():
        if len(model_group) != 1:
            continue
        for device in model_group:
            _add_shorthand_ports(
                shorthand_inputs, device.inputs.all(), model_name)
            _add_shorthand_ports(
                shorthand_outputs, device.outputs.all(), model_name)
    return shorthand_inputs, shorthand_outputs
def _add_shorthand_ports(shorthand_ports, ports, model_name):
for port in ports:
name = '{} {}'.format(model_name, port.local_name)
shorthand_ports[name] = port
|
from django import forms
from tumblelog.fields import ImageURLField
# Extra form fields for each tumblelog post type: maps a post type to the
# Django form fields used to collect its data. The optional 'text' field
# carries free-form commentary for every type.
POST_TYPES = {
    'photo': {'url': forms.URLField(label='Source URL', required=False),
              'photo': ImageURLField(label='Photo URL'),
              'text': forms.CharField(required=False, widget=forms.Textarea),},
    'video': {'html': forms.CharField(widget=forms.Textarea),
              'text': forms.CharField(widget=forms.Textarea,required=False),},
    'link': {'url': forms.URLField(label='Link'),
             'title': forms.CharField(required=False),
             'text': forms.CharField(widget=forms.Textarea,required=False),},
    'quote': {'url': forms.URLField(label='Source URL', required=False),
              'source': forms.CharField(required=False, label='Source Name'),
              'body': forms.CharField(widget=forms.Textarea),
              'text': forms.CharField(widget=forms.Textarea,required=False),}
}
|
from feature_extraction import feature_extraction_driver
from model_training import training_driver
from util.log import Log
import sys
class CommandEnum:
    """Command-line flags accepted by this entry point."""
    PRE_PROCESSING = "-pre"    # run feature extraction (pre-processing)
    POST_PROCESSING = "-post"  # run feature extraction (post-processing)
    TRAIN_FACTS = '-train'     # train the model on extracted facts
class Command:

    @staticmethod
    def execute(command_list):
        """
        Executes machine learning with command line

        Example Usage:
        1) python3 main.py -pre 10000
        2) python3 main.py -post
        3) python3 main.py -train

        :param command_list: Command line arguments (argv-style; the flag
            is expected at index 1)
        :return: True if a recognized command was dispatched, else False
        """
        # Guard against being invoked without a command, which previously
        # raised an IndexError instead of reporting the problem.
        if len(command_list) < 2:
            Log.write("Command not recognized: no command given")
            return False
        checkpoint = command_list[1]
        if checkpoint == CommandEnum.PRE_PROCESSING:
            feature_extraction_driver.run(command_list[1:])
        elif checkpoint == CommandEnum.POST_PROCESSING:
            # NOTE(review): -post currently invokes the same driver with
            # the same arguments as -pre -- confirm this is intended.
            feature_extraction_driver.run(command_list[1:])
        elif checkpoint == CommandEnum.TRAIN_FACTS:
            training_driver.run(command_list[2:])
        else:
            Log.write("Command not recognized: " + command_list[1])
            return False
        return True
# Script entry point: dispatch on the command-line flag in argv[1].
if __name__ == "__main__":
    Command.execute(sys.argv)
|
import sys
import bluetooth._bluetooth as bluez
import ble
def parse_events(sock, desired_event, loop_count=10):
    """Read up to `loop_count` HCI packets from `sock` and return the first
    parsed packet whose event id (or LE sub-event id) equals
    `desired_event`. Returns None if no match arrives in time."""
    # Configure the socket to receive every HCI event type.
    event_filter = bluez.hci_filter_new()
    bluez.hci_filter_all_events(event_filter)
    bluez.hci_filter_set_ptype(event_filter, bluez.HCI_EVENT_PKT)
    sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, event_filter)
    for _ in range(loop_count):
        packet = ble.hci_le_parse_response_packet(sock.recv(255))
        if packet["bluetooth_event_id"] == desired_event:
            return packet
        if ("bluetooth_le_subevent_id" in packet
                and packet["bluetooth_le_subevent_id"] == desired_event):
            return packet
# Script body (Python 2 print syntax): connect to the BLE peer named on
# the command line, read its supported LE features, then disconnect.
dev_id = 0
try:
    sock = bluez.hci_open_dev(dev_id)
except:
    # NOTE(review): bare except hides the actual failure (missing adapter
    # vs. insufficient permissions) -- consider narrowing.
    print "Error accessing bluetooth device", dev_id
    sys.exit(1)
if len(sys.argv) < 2:
    print "Please provide a bluetooth device address."
    sys.exit(1)
# Initiate an LE connection using a random own address.
ble.hci_le_connect(sock, sys.argv[1],
                   own_bdaddr_type=ble.constants.LE_RANDOM_ADDRESS)
result = parse_events(sock, ble.constants.EVT_LE_CONN_COMPLETE)
handle = result["handle"]
# Query the remote device's supported LE features over the new connection.
ble.hci_le_read_remote_used_features(sock, handle)
result = parse_events(
    sock, ble.constants.EVT_LE_READ_REMOTE_USED_FEATURES_COMPLETE)
ble.hci_disconnect(sock, handle)
parse_events(sock, bluez.EVT_DISCONN_COMPLETE)
print "Features used for %s: %s" % (sys.argv[1], result["features"])
|
import typing

# Resolve the importlib.metadata API: the typing-only branch keeps type
# checkers on the stdlib module, while at runtime we fall back to the
# importlib_metadata backport when the stdlib module is unavailable
# (Python < 3.8).
if typing.TYPE_CHECKING:  # pragma: no cover
    import importlib.metadata as importlib_metadata
else:
    try:
        import importlib.metadata as importlib_metadata
    except ImportError:
        import importlib_metadata

# Version of the installed distribution providing the top-level package.
__version__ = importlib_metadata.version(__name__.split(".", 1)[0])

__all__ = ["__version__"]
|
"""Function to counts on the fly the spike in sequences given as a parameter in the yaml file"""
import os
import sys
import pandas as pd
import bcbio.pipeline.datadict as dd
from bcbio.utils import (file_exists, safe_makedir, is_gzipped, partition, rbind)
import bcbio.utils as utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.pipeline import config_utils
from bcbio.rnaseq import sailfish
from bcbio.log import logger
def counts_spikein(data):
    """Quantify spike-in sequences for one sample with Salmon.

    Returns `data` unchanged when no spike-in FASTA is configured;
    otherwise indexes the FASTA, quantifies the sample's reads against
    it, and records the quant file via `dd.set_spikein_counts`.
    """
    data = utils.to_single_data(data)
    samplename = dd.get_sample_name(data)
    work_dir = dd.get_work_dir(data)
    salmon_dir = os.path.join(work_dir, "spikein", samplename)
    fasta_file = dd.get_spikein_fasta(data)
    if not fasta_file:
        return data
    files = dd.get_input_sequence_files(data)
    # Two files means paired-end reads; otherwise treat as single-end.
    if len(files) == 2:
        fq1, fq2 = files
    else:
        fq1, fq2 = files[0], None
    assert file_exists(fasta_file), "%s was not found, exiting." % fasta_file
    # Small RNA reads are short, so index with a shorter k-mer.
    kmer = 31 if not dd.get_analysis(data).lower() == "smallrna-seq" else 15
    fasta_index = _index_spikein(fasta_file, salmon_dir, data, kmer)
    out_file = _salmon_quant_reads(fq1, fq2, salmon_dir, fasta_index, data)
    data = dd.set_spikein_counts(data, out_file)
    return data
def _salmon_quant_reads(fq1, fq2, salmon_dir, index, data):
    """Run `salmon quant` on one sample's reads against `index`.

    `fq2` may be None for single-end data. Returns the path to quant.sf,
    reusing an existing result when present.
    """
    # Removed an unused `samplename` local and a duplicated `num_cores`
    # assignment from the original.
    quant_dir = os.path.join(salmon_dir, "quant")
    safe_makedir(salmon_dir)
    out_file = os.path.join(quant_dir, "quant.sf")
    if file_exists(out_file):
        return out_file
    num_cores = dd.get_num_cores(data)
    salmon = config_utils.get_program("salmon", dd.get_config(data))
    cmd = ("{salmon} quant -l A -i {index} -p {num_cores} "
           "-o {tx_out_dir} ")
    # Decompress on the fly with process substitution so salmon always
    # reads plain FASTQ.
    fq1_cmd = "<(cat {fq1})" if not is_gzipped(fq1) else "<(gzip -cd {fq1})"
    fq1_cmd = fq1_cmd.format(fq1=fq1)
    if not fq2:
        cmd += " -r {fq1_cmd} "
    else:
        fq2_cmd = "<(cat {fq2})" if not is_gzipped(fq2) else "<(gzip -cd {fq2})"
        fq2_cmd = fq2_cmd.format(fq2=fq2)
        cmd += " -1 {fq1_cmd} -2 {fq2_cmd} "
    with file_transaction(data, quant_dir) as tx_out_dir:
        message = ("Quantifying transcripts in %s and %s with Salmon."
                   % (fq1, fq2))
        do.run(cmd.format(**locals()), message, None)
    return out_file
def _index_spikein(fasta, out_dir, data, kmer=31):
    """Build (or reuse) a Salmon index for the spike-in FASTA.

    Returns the index directory. versionInfo.json serves as the
    "index already built" marker.
    """
    out_dir = safe_makedir(os.path.join(out_dir, "index"))
    salmon = config_utils.get_program("salmon", dd.get_config(data))
    num_cores = dd.get_num_cores(data)
    out_file = os.path.join(out_dir, "versionInfo.json")
    if file_exists(out_file):
        return out_dir
    with file_transaction(out_dir) as tx_out_dir:
        cmd = "{salmon} index -k {kmer} -p {num_cores} -i {tx_out_dir} -t {fasta}"
        message = "Creating Salmon index for {fasta}."
        do.run(cmd.format(**locals()), message.format(**locals()), None)
    return out_dir
def combine_spikein(samples):
    """Merge per-sample spike-in quant files into one tidy table.

    Returns `samples` untouched when no sample has spike-in counts;
    otherwise writes (or reuses) a combined spikein.sf and points each
    sample's spike-in counts at it.
    """
    work_dir = dd.get_in_samples(samples, dd.get_work_dir)
    sailfish_dir = os.path.join(work_dir, "spikein")
    dont_combine, to_combine = partition(dd.get_spikein_counts,
                                         dd.sample_data_iterator(samples), True)
    if not to_combine:
        return samples
    tidy_file = os.path.join(sailfish_dir, "spikein.sf")
    if not file_exists(tidy_file):
        logger.info("Combining count files into %s." % tidy_file)
        df = pd.DataFrame()
        for data in to_combine:
            sailfish_file = dd.get_spikein_counts(data)
            samplename = dd.get_sample_name(data)
            new_df = sailfish._sailfish_expression_parser(sailfish_file, samplename)
            if df.empty:
                df = new_df
            else:
                df = rbind([df, new_df])
        df["id"] = df.index
        # some versions of the transcript annotations can have duplicated entries
        df = df.drop_duplicates(["id", "sample"])
        with file_transaction(tidy_file) as tx_out_file:
            df.to_csv(tx_out_file, sep="\t", index_label="name")
        logger.info("Finished combining count files into %s." % tidy_file)
    # NOTE(review): this also assigns the combined file to samples that
    # had no spike-in counts of their own -- confirm that is intended.
    updated_samples = []
    for data in dd.sample_data_iterator(samples):
        data = dd.set_spikein_counts(data, tidy_file)
        updated_samples.append([data])
    return updated_samples
|
class CpuStoppedCall(Exception):
    """Raised to stop execution: the halt instruction or an invalid opcode."""
    pass
# Registries populated by the @instruction decorator below:
# opcode -> handler function, and instruction name -> opcode.
# Opcodes are assigned sequentially starting at 110.
instruction_map = {}
instruction_names = {}
# When True, each executed instruction prints its handler's name.
DEBUG = False
def instruction(alt=None):
    """Register the decorated function as a CPU instruction.

    The instruction gets the next free opcode (starting at 110) and is
    recorded under `alt` or, failing that, the function's own name.
    """
    def register(func):
        opcode = 110 + len(instruction_map)
        instruction_map[opcode] = func
        instruction_names[alt or func.__name__] = opcode
        return func
    return register
def exception_wrapper(func):
    """Wrap an instruction so operand errors raise readable exceptions.

    TypeError (wrong operand count) and ValueError (treating an absolute
    `#` value as a mutable location) are re-raised with context attached.
    """
    def decorator(*args):
        try:
            if DEBUG:
                print("OPcode: {.__name__}".format(func))
            return func(*args)
        except TypeError as e:
            # Fixed "recieved" -> "received" in the error message.
            raise Exception("Instruction received invalid amount of arguments",
                            "expected {}, received {}".format(func.__code__.co_argcount, len(args)), e)
        except ValueError as e:  # we assume a value error is caused by attempting to treat an absolute value as a mutable object
            raise Exception(
                "Possible attempt to use absolute value (#) as mutable type", e)
    decorator.__name__ = func.__name__
    decorator.__doc__ = func.__doc__
    return decorator
class InstructionSet:
    """Container for cpu instructions

    Arguments
    ---------
    cpu: <cpu object> CPU object to use to execute commands
        Not needed if only being used to compile a program

    Functions
    ---------
    run_encoded( command, *args ): Execute a decoded instruction
    encode_name( command_name ): Return numeric ID of a instruction, returns None if non existant instruction
    """

    def __init__(self, cpu=None):  # no cpu needed for compiler
        # Snapshot the module-level registries so instances are isolated.
        self.encoded_commands = instruction_map.copy()
        self.instruction_names = instruction_names.copy()
        self.cpu = cpu

    def run_encoded(self, command, *args):
        """Run an encoded instruction

        Arguments
        ---------
        command: <int> Decoded command to execute
        *args: <str> Operands to run instruction with"""
        # Look the handler up into a separate name so the error message can
        # report the opcode that failed; the old code overwrote `command`
        # with the failed lookup's None before formatting it in.
        handler = self.encoded_commands.get(command)
        if handler:
            handler(self, *args)
        else:
            raise CpuStoppedCall(
                "Invalid command, ID was: {}. Arguments were: {}".format(command, args))

    def encode_name(self, command_name):
        """Return the numeric opcode for `command_name`, or None if unknown."""
        return self.instruction_names.get(command_name)

    @instruction()
    @exception_wrapper
    def add(self, value):
        """acc += value."""
        self.cpu.registers["acc"] += self.cpu.interpret_read_address(value)

    @instruction()
    @exception_wrapper
    def sub(self, value):
        """acc -= value."""
        self.cpu.registers["acc"] -= self.cpu.interpret_read_address(value)

    @instruction()
    @exception_wrapper
    def mul(self, value):
        """acc *= value."""
        self.cpu.registers["acc"] *= self.cpu.interpret_read_address(value)

    @instruction()
    @exception_wrapper
    def div(self, value):
        """acc /= value.

        NOTE(review): true division, so acc may become a float -- confirm
        integer division is not intended.
        """
        self.cpu.registers["acc"] /= self.cpu.interpret_read_address(value)

    @instruction()
    @exception_wrapper
    def set(self, value):
        """acc = value."""
        self.cpu.registers["acc"] = self.cpu.interpret_read_address(value)

    @instruction()
    @exception_wrapper
    def mov(self, from_loc, to_loc):
        """Copy a value into a register ("@name") or a memory address."""
        if to_loc.startswith("@") and to_loc[1:] in self.cpu.registers.registers.keys():
            self.cpu.registers[to_loc.lstrip(
                "@")] = self.cpu.interpret_read_address(from_loc)
        else:
            self.cpu.memory[self.cpu.interpret_read_address(
                to_loc)] = self.cpu.interpret_read_address(from_loc)

    @instruction()
    @exception_wrapper
    def cmp(self, a, b=0):
        """Compare a with b (default 0) and store a 6-character bitstring of
        the <, >, <=, >=, ==, != results in the "cmp" register."""
        av = self.cpu.interpret_read_address(a)
        bv = self.cpu.interpret_read_address(b) if b else 0
        functions = [
            (lambda a, b: a < b),
            (lambda a, b: a > b),
            (lambda a, b: a <= b),
            (lambda a, b: a >= b),
            (lambda a, b: a == b),
            (lambda a, b: a != b)
        ]
        self.cpu.registers["cmp"] = "".join(
            [str(1 if i(av, bv) else 0) for i in functions])

    # jumps to memory address provided, no interpreting
    def _internal_jump(self, location):
        self.cpu.registers["cur"] = location

    @instruction()
    @exception_wrapper
    def jump(self, jump):
        """Unconditional jump to an interpreted address."""
        self._internal_jump(self.cpu.interpret_read_address(jump))

    def _test_cmp(self, index):
        # Read one flag bit out of the "cmp" bitstring set by cmp().
        return int(self.cpu.registers["cmp"][index])

    @instruction()
    @exception_wrapper
    def lje(self, jump):  # less than
        if self._test_cmp(0):
            self.jump(jump)

    @instruction()
    @exception_wrapper
    def mje(self, jump):  # more than
        if self._test_cmp(1):
            self.jump(jump)

    @instruction()
    @exception_wrapper
    def leje(self, jump):  # less than equal
        if self._test_cmp(2):
            self.jump(jump)

    @instruction()
    @exception_wrapper
    def meje(self, jump):  # more than equal
        if self._test_cmp(3):
            self.jump(jump)

    @instruction()
    @exception_wrapper
    def eqje(self, jump):  # equal
        if self._test_cmp(4):
            self.jump(jump)

    @instruction()
    @exception_wrapper
    def nqje(self, jump):  # not equal
        if self._test_cmp(5):
            self.jump(jump)

    @instruction()
    @exception_wrapper
    def prntint(self, memloc):
        """Print a value as an integer (no newline) and record it on stdout."""
        val = str(self.cpu.interpret_read_address(memloc))
        print(val, end="")
        self.cpu.stdout[-1] += val

    @instruction()
    @exception_wrapper
    def prntstr(self, memloc):
        """Print a value as a character (no newline) and record it on stdout."""
        val = chr(self.cpu.interpret_read_address(memloc))
        print(val, end='')
        self.cpu.stdout[-1] += val

    @instruction()
    @exception_wrapper
    def prntnl(self):
        """Start a new line of recorded stdout.

        NOTE(review): print("\n") emits TWO newlines (print appends its
        own) -- confirm the double spacing is intended.
        """
        print("\n")
        self.cpu.stdout.append("")

    @instruction(alt="input")
    @exception_wrapper
    def inp(self, memloc):
        """Read an integer from the user into a register or memory address."""
        if memloc.startswith("@") and memloc[1:] in self.cpu.registers.registers.keys():
            self.cpu.registers[memloc.strip("@").lower()] = int(
                input("Enter number: "))
        else:
            self.cpu.memory[self.cpu.interpret_read_address(
                memloc)] = int(input("Enter number: "))
        # like anything wrong could happen here

    @instruction()
    @exception_wrapper
    def halt(self):
        """Stop the CPU."""
        raise CpuStoppedCall("CPU halt triggered")

    @instruction()
    @exception_wrapper
    def pop(self, memloc=None):
        """Pop the top of the stack, optionally into a register or address."""
        # The stack grows downward from the top of memory, so a pointer
        # above memory.size means the stack is empty.
        if self.cpu.registers["stk"] > self.cpu.memory.size:
            raise Exception("Stack underflow, attempt to pop from empty stack")
        if memloc is not None:
            if memloc.startswith("@") and memloc[1:] in self.cpu.registers.registers.keys():
                self.cpu.registers[memloc.lstrip("@")] = self.cpu.memory[
                    self.cpu.registers["stk"]]
            else:
                self.cpu.memory[self.cpu.interpret_read_address(memloc)] = self.cpu.memory[
                    self.cpu.registers["stk"]]
        self.cpu.registers["stk"] += 1  # stack descends upward

    @instruction()
    @exception_wrapper
    def push(self, value):
        """Push an interpreted value onto the stack."""
        # decrement first since last push will leave us one below
        self.cpu.registers["stk"] -= 1
        self.cpu.memory[self.cpu.registers["stk"]
                        ] = self.cpu.interpret_read_address(value)

    @instruction()
    @exception_wrapper
    def call(self, function_location, *args):
        """Push the return address and arguments, then jump to a function."""
        collected_args = [self.cpu.interpret_read_address(i) for i in args]
        # since stack position will change once we push return location
        self._push_stk_py(self.cpu.registers["cur"])
        # push return address to stack
        for i in collected_args:
            self._push_stk_py(i)  # push vars to stack
        self.jump(function_location)

    def _push_stk_py(self, value):
        # Internal (non-instruction) push used by call/ret.
        self.cpu.registers["stk"] -= 1
        self.cpu.memory[self.cpu.registers["stk"]] = value

    def _pop_stk_py(self):
        # Internal (non-instruction) pop; returns 0 on an empty stack.
        if self.cpu.registers["stk"] > self.cpu.memory.size:
            return 0
        pre = self.cpu.memory[self.cpu.registers["stk"]]
        self.cpu.registers["stk"] += 1
        return pre

    @instruction()
    @exception_wrapper
    def ret(self, retval=None):
        """Return to the caller, optionally pushing a return value first."""
        ret_loc = self._pop_stk_py()
        if retval is not None:
            self._push_stk_py(self.cpu.interpret_read_address(retval))
        self._internal_jump(ret_loc)

    @instruction()
    @exception_wrapper
    def nop(self):
        """Do nothing."""
        pass
|
import six
from blist import sorteddict
from recordtype import recordtype
from .authtree import MemoryPatriciaAuthTree
from .core import Output
from .hash import hash256
from .mixins import SerializableMixin
from .serialize import BigCompactSize, LittleInteger, VarInt
from .tools import compress_amount, decompress_amount
# Public API of this module.
__all__ = (
    'UnspentTransaction',
    'OutPoint',
    'Coin',
    'BaseValidationIndex',
    'MemoryValidationIndex',
    'ContractOutPoint',
    'ContractCoin',
    'BaseContractIndex',
    'MemoryContractIndex',
)
from .script import ScriptPickler
class UnspentTransaction(SerializableMixin, sorteddict):
    """Pruned version of core.Transaction: only retains metadata and unspent
    transaction outputs.

    Serialized format:
    - VARINT(version)
    - VARINT(code)
    - unspentness bitvector, for outputs[2] and further; least significant
      byte first
    - the non-spent, compressed TransactionOutputs
    - VARINT(height)
    - VARINT(reference_height)

    The code value consists of:
    - bit 1: outputs[0] is not spent
    - bit 2: outputs[1] is not spent
    - bit 3: outputs[2] is not spent
    - The higher bits encode N, the number of non-zero bytes in the following
      bitvector.
    - In case bit 1, bit 2 and bit 4 are all unset, they encode N-1, as
      there must be at least one non-spent output.

    Example: 0102835800816115944e077fe7c803cfa57f29b36bf87c1d358bb85e
             <><><--------------------------------------------><---->
             |  \                  |                             /
          version code          outputs[1]                  height

    - version = 1
    - code = 2 (outputs[1] is not spent, and 0 non-zero bytes of bitvector follow)
    - unspentness bitvector: as 0 non-zero bytes follow, it has length 0
    - outputs[1]: 835800816115944e077fe7c803cfa57f29b36bf87c1d35
      * 8358: compact amount representation for 60000000000 (600 BTC)
      * 00: special txout type pay-to-pubkey-hash
      * 816115944e077fe7c803cfa57f29b36bf87c1d35: address uint160
    - height = 203998

    Example: 0208044086ef97d5790061b01caab50f1b8e9c50a5057eb43c2d9563a4ee...
             <><><--><-------------------------------------------------->
             /  |   \                     |
       version code unspentness       outputs[4]

      ...bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa486af3b8668
         <----------------------------------------------><----><-->
                           |                               /     |
                      outputs[16]                      height reference_height

    - version = 2
    - code = 8: neither outputs[0], outputs[1], nor outputs[2] are unspent, 2
      (1, +1 because both bit 2 and bit 4 are unset) non-zero bitvector bytes
      follow.
    - unspentness bitvector: bits 1 (0x02) and 13 (0x2000) are set, so
      outputs[1+3] and outputs[13+3] are unspent
    - outputs[4]: 86ef97d5790061b01caab50f1b8e9c50a5057eb43c2d9563a4ee
      * 86ef97d579: compact amount representation for 234925952 (2.35 BTC)
      * 00: special txout type pay-to-pubkey-hash
      * 61b01caab50f1b8e9c50a5057eb43c2d9563a4ee: address uint160
    - outputs[16]: bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa4
      * bbd123: compact amount representation for 110397 (0.001 BTC)
      * 00: special txout type pay-to-pubkey-hash
      * 8c988f1a4a4de2161e0f50aac7f17e7f9555caa4: address uint160
    - height = 120891
    - reference_height = 1000
    """
    # We only need one script pickler, which every instance of UnspentTransaction
    # can use (there's no concurrency issues with picklers, and it needs to be
    # available to the class anyway for deserialize).
    _pickler = ScriptPickler()

    def __init__(self, *args, **kwargs):
        # Since we behave like a dictionary object, we implement the copy
        # constructor, which requires copying meta information not contained
        # within the dictionary itself.
        if args and all(hasattr(args[0], x) for x in
                        ('version', 'height', 'reference_height')):
            other = args[0]
        else:
            other = None
        # You can either specify the transaction, another UnspentTransaction
        # object, or the metadata directly. Choose one.
        a = 'transaction' in kwargs
        b = other is not None
        c = any(x in kwargs for x in ('version', 'reference_height'))
        if a + b + c >= 2:  # <-- yes, you can do this
            raise TypeError(u"instantiate by either specifying the "
                u"transaction directly, another %s, or its individual "
                u"metadata; choose one" % self.__class__.__name__)
        # Extract captured parameters from kwargs, starting with the transaction
        # because its metadata are used as the default.
        transaction = kwargs.pop('transaction', None)
        if other is None:
            other = transaction
        version = kwargs.pop('version', getattr(other, 'version', 1))
        height = kwargs.pop('height', getattr(other, 'height', 0))
        # Reference heights are added with transaction version=2, so we do
        # not extract that parameter unless version=2.
        reference_height = getattr(other, 'reference_height', 0)
        if version in (2,):
            reference_height = kwargs.pop('reference_height', reference_height)
        # Perform construction of the dictionary object (our superclass)
        super(UnspentTransaction, self).__init__(*args, **kwargs)
        # Store metadata
        self.version = version
        self.height = height
        self.reference_height = reference_height
        # Add the transaction's outputs only if outputs are not separately
        # specified (as is typically done if it is known in advance which
        # outputs are not spent at time of creation).
        if transaction is not None and not self:
            for idx,output in enumerate(transaction.outputs):
                self[idx] = output

    def serialize(self):
        """Serialize to the compact format described in the class docstring."""
        # code&0x1: outputs[0] unspent
        # code&0x2: outputs[1] unspent
        # code&0x4: outputs[2] unspent
        # code>>3: N, the minimal length of bitvector in bytes, or N-1 if
        #   outputs[0], outputs[1], and outputs[2] are all spent
        bitvector = 0
        for idx in six.iterkeys(self):
            bitvector |= 1 << idx
        if not bitvector:
            # At least one unspent output is required.
            raise TypeError()
        code = bitvector & 0x7
        bitvector >>= 3
        bitvector = LittleInteger(bitvector).serialize()
        bitvector_len = len(bitvector)
        if not code:
            # All of outputs[0..2] spent: the length field stores N-1.
            bitvector_len -= 1
        code |= bitvector_len << 3
        result = VarInt(self.version).serialize()
        result += VarInt(code).serialize()
        result += bitvector
        for output in six.itervalues(self):
            result += VarInt(compress_amount(output.amount)).serialize()
            result += self._pickler.dumps(output.contract)
        result += VarInt(self.height).serialize()
        if self.version in (2,):
            result += VarInt(self.reference_height).serialize()
        return result

    @classmethod
    def deserialize(cls, file_):
        """Inverse of serialize(); reads one record from `file_`."""
        output_class = getattr(cls, 'get_output_class', lambda:
            getattr(cls, 'output_class', Output))()
        kwargs = {}
        kwargs['version'] = VarInt.deserialize(file_)
        # See description of code, bitvector above.
        code, bitvector = VarInt.deserialize(file_), 0
        bitvector |= code & 0x7
        code >>= 3
        if not bitvector:
            # Length field stored N-1 when outputs[0..2] were all spent.
            code += 1
        if code:
            bitvector |= LittleInteger.deserialize(file_, code) << 3
        idx, items = 0, []
        while bitvector:
            if bitvector & 0x1:
                items.append(
                    (idx, output_class(
                        decompress_amount(VarInt.deserialize(file_)),
                        cls._pickler.load(file_))))
            idx, bitvector = idx + 1, bitvector >> 1
        kwargs['height'] = VarInt.deserialize(file_)
        if kwargs['version'] in (2,):
            kwargs['reference_height'] = VarInt.deserialize(file_)
        return cls(items, **kwargs)

    def __eq__(self, other):
        # Compare metadata first, as it's probably less expensive
        if any((self.height != other.height,
                self.version != other.version)):
            return False
        # reference_height only participates for version-2 records.
        if self.version in (2,) and self.reference_height != other.reference_height:
            return False
        return super(UnspentTransaction, self).__eq__(other)
    # Inequality defined in terms of __eq__.
    __ne__ = lambda a,b:not a==b

    def __repr__(self):
        return '%s%s, version=%d, height=%d, reference_height=%d)' % (
            self.__class__.__name__,
            super(UnspentTransaction, self).__repr__()[10:-1],
            self.version,
            self.height,
            self.reference_height)
# OutPoint identifies one output of a transaction by (tx hash, index).
OutPoint = recordtype('OutPoint', ['hash', 'index'])

def _serialize_outpoint(self):
    # 32-byte hash followed by the compact-size index. An index of -1
    # appears to be serialized as the maximal 5-byte compact-size
    # encoding (0xfe + 0xffffffff) as a sentinel -- see __repr__ below.
    parts = list()
    parts.append(hash256.serialize(self.hash))
    if self.index == -1:
        parts.append(b'\xfe\xff\xff\xff\xff')
    else:
        parts.append(BigCompactSize(self.index).serialize())
    return b''.join(parts)
OutPoint.serialize = _serialize_outpoint

def _deserialize_outpoint(cls, file_):
    kwargs = dict()
    kwargs['hash'] = hash256.deserialize(file_)
    kwargs['index'] = BigCompactSize.deserialize(file_)
    return cls(**kwargs)
OutPoint.deserialize = classmethod(_deserialize_outpoint)

def _repr_outpoint(self):
    # Display index 0xffffffff as -1, mirroring the serialized sentinel.
    return '%s(hash=%064x, index=%d)' % (
        self.__class__.__name__, self.hash, self.index==2**32-1 and -1 or self.index)
OutPoint.__repr__ = _repr_outpoint
# Coin carries the spendable data for one unspent output.
Coin = recordtype('Coin',
    ['version', 'amount', 'contract', 'height', 'reference_height'])
# Shared pickler instance for (de)serializing contract scripts.
Coin._pickler = ScriptPickler()

def _serialize_coin(self):
    # NOTE(review): this pickles `self.contract.serialize()`, while
    # UnspentTransaction.serialize pickles the contract object directly
    # and _deserialize_coin loads a contract with no extra deserialize
    # step -- confirm the round-trip is symmetric.
    parts = list()
    parts.append(VarInt(self.version).serialize())
    parts.append(VarInt(compress_amount(self.amount)).serialize())
    parts.append(self._pickler.dumps(self.contract.serialize()))
    parts.append(VarInt(self.height).serialize())
    # reference_height only exists for version-2 coins.
    if self.version in (2,):
        parts.append(VarInt(self.reference_height).serialize())
    return b''.join(parts)
Coin.serialize = _serialize_coin
def _deserialize_coin(cls, file_):
    # Fields are read in the order _serialize_coin writes them.
    kwargs = dict()
    kwargs['version'] = VarInt.deserialize(file_)
    kwargs['amount'] = decompress_amount(VarInt.deserialize(file_))
    kwargs['contract'] = cls._pickler.load(file_)
    kwargs['height'] = VarInt.deserialize(file_)
    if kwargs['version'] in (2,):
        kwargs['reference_height'] = VarInt.deserialize(file_)
    # NOTE(review): for version != 2 no 'reference_height' is supplied;
    # confirm the recordtype provides a default, otherwise cls(**kwargs)
    # fails for version-1 coins.
    return cls(**kwargs)
Coin.deserialize = classmethod(_deserialize_coin)
def _repr_coin(self):
    # Omit reference_height for non-version-2 coins, mirroring serialization.
    parts = list()
    parts.append('version=%d' % self.version)
    parts.append('amount=%d' % self.amount)
    parts.append('contract=%s' % repr(self.contract))
    parts.append('height=%d' % self.height)
    if self.version in (2,):
        parts.append('reference_height=%d' % self.reference_height)
    return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
Coin.__repr__ = _repr_coin
class BaseValidationIndex(object):
    """Key/value schema of the validation index: OutPoint -> Coin."""
    key_class = OutPoint
    value_class = Coin

class MemoryValidationIndex(BaseValidationIndex, MemoryPatriciaAuthTree):
    """In-memory authenticated Patricia tree keyed by outpoint."""
    pass
# ContractOutPoint keys an output by its contract script as well as its
# (tx hash, index), allowing lookups by contract.
ContractOutPoint = recordtype('ContractOutPoint', ['contract', 'hash', 'index'])
# Shared pickler instance for (de)serializing contract scripts.
ContractOutPoint._pickler = ScriptPickler()

def _serialize_contract_outpoint(self):
    # Pickled contract, then 32-byte hash, then compact-size index.
    return b''.join([self._pickler.dumps(self.contract.serialize()),
                     hash256.serialize(self.hash),
                     BigCompactSize(self.index).serialize()])
ContractOutPoint.serialize = _serialize_contract_outpoint

def _deserialize_contract_outpoint(cls, file_):
    kwargs = dict()
    kwargs['contract'] = cls._pickler.load(file_)
    kwargs['hash'] = hash256.deserialize(file_)
    kwargs['index'] = BigCompactSize.deserialize(file_)
    return cls(**kwargs)
ContractOutPoint.deserialize = classmethod(_deserialize_contract_outpoint)

def _repr_contract_outpoint(self):
    return '%s(contract=%s, hash=%064x, index=%d)' % (
        self.__class__.__name__, repr(self.contract), self.hash, self.index)
ContractOutPoint.__repr__ = _repr_contract_outpoint
# ContractCoin is the value stored in the contract index: like Coin but
# without the contract itself (which is part of the key).
ContractCoin = recordtype('ContractCoin',
    ['version', 'amount', 'height', 'reference_height'])

def _serialize_contract_coin(self):
    # Write order: version, amount, height, then (v2 only) reference_height.
    parts = list()
    parts.append(VarInt(self.version).serialize())
    parts.append(VarInt(compress_amount(self.amount)).serialize())
    parts.append(VarInt(self.height).serialize())
    if self.version in (2,):
        parts.append(VarInt(self.reference_height).serialize())
    return b''.join(parts)
ContractCoin.serialize = _serialize_contract_coin
def _deserialize_contract_coin(cls, file_):
    """Inverse of _serialize_contract_coin.

    Fields are read in the exact order the serializer writes them:
    version, amount, height[, reference_height]. (The previous code read
    height before amount, corrupting every round-trip.)
    """
    kwargs = dict()
    kwargs['version'] = VarInt.deserialize(file_)
    kwargs['amount'] = decompress_amount(VarInt.deserialize(file_))
    kwargs['height'] = VarInt.deserialize(file_)
    if kwargs['version'] in (2,):
        kwargs['reference_height'] = VarInt.deserialize(file_)
    # NOTE(review): for version != 2 no 'reference_height' is supplied;
    # confirm the recordtype provides a default.
    return cls(**kwargs)
ContractCoin.deserialize = classmethod(_deserialize_contract_coin)
def _repr_contract_coin(self):
    # Omit reference_height for non-version-2 coins, mirroring serialization.
    parts = list()
    parts.append('version=%d' % self.version)
    parts.append('amount=%d' % self.amount)
    parts.append('height=%d' % self.height)
    if self.version in (2,):
        parts.append('reference_height=%d' % self.reference_height)
    return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
ContractCoin.__repr__ = _repr_contract_coin
class BaseContractIndex(object):
    # Binds the key/value types for the contract index: ContractOutPoint
    # keys map to ContractCoin values.
    key_class = ContractOutPoint
    value_class = ContractCoin
class MemoryContractIndex(BaseContractIndex, MemoryPatriciaAuthTree):
    # In-memory variant of the contract index, backed by a Patricia
    # authentication tree.
    pass
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class RawTransactionsTest(BitcoinTestFramework):
    """Functional tests for raw-transaction RPCs, focused on multisig.

    Several sub-tests below are commented out (triple-quoted) because they
    are not yet compatible with Confidential Transactions (CT).
    """
    def __init__(self):
        super().__init__()
        # Fresh chain so block counts and balances are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 3
    def setup_network(self, split=False):
        """Start three nodes connected in a full mesh (no network split)."""
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        #connect to a local machine for debugging
        #url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
        #proxy = AuthServiceProxy(url)
        #proxy.url = url # store URL on proxy for info
        #self.nodes.append(proxy)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()
    def run_test(self):
        """Fund node2, then run the 2of2 and 2of3 multisig scenarios."""
        #return #TODO
        #prepare some coins for multiple *rawtransaction commands
        self.nodes[2].generate(1)
        self.sync_all()
        # 101 blocks so node0's first coinbase matures and is spendable.
        self.nodes[0].generate(101)
        self.sync_all()
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
        self.sync_all()
        self.nodes[0].generate(5)
        self.sync_all()
        #########################################
        # sendrawtransaction with missing input #
        #########################################
        '''inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'nValue' : "21000000"}] #won't exist
        outputs = { self.nodes[0].getnewaddress() : 4.998 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawtx = self.nodes[2].blindrawtransaction(rawtx)
        rawtx = self.nodes[2].signrawtransaction(rawtx)
        try:
            rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
        except JSONRPCException as e:
            assert("Missing inputs" in e.error['message'])
        else:
            assert(False)
        '''
        #########################
        # RAW TX MULTISIG TESTS #
        #########################
        # 2of2 test
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        mSigObjValid = self.nodes[2].validateaddress(mSigObj)
        #use balance deltas instead of absolute values
        bal = self.nodes[2].getbalance()
        # send 1.2 BTC to msig adr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
        # 2of3 test from different nodes
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr3 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        addr3Obj = self.nodes[2].validateaddress(addr3)
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
        mSigObjValid = self.nodes[2].validateaddress(mSigObj)
        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
        decTx = self.nodes[0].gettransaction(txId)
        rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
        sPK = rawTx['vout'][0]['scriptPubKey']['hex']
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        #THIS IS A INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
        assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
        vout = False
        ''' TODO: Tests are largely incompatible with CT
        for outpoint in rawTx['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break
        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "nValue" : Decimal('2.2')}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTx = self.nodes[2].blindrawtransaction(rawTx)
        rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
        rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
        self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
        rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 1000)
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
        '''
if __name__ == '__main__':
    # Entry point when run directly: BitcoinTestFramework.main() handles
    # option parsing, node startup and teardown around run_test().
    RawTransactionsTest().main()
|
from espeak import espeak
from mpd import MPDClient
def say(msg):
    """Pause MPD playback (if currently playing), then speak *msg* in French."""
    mpd = MPDClient()
    mpd.connect("localhost", 6600)
    # Only pause when something is actually playing.
    if mpd.status()['state'] == 'play':
        mpd.pause()
    mpd.close()
    mpd.disconnect()
    print(msg)
    espeak.set_voice("fr")
    espeak.synth(msg)
|
import collections
import itertools
import six
from purplex.exception import TableConflictError, StartSymbolNotReducedError
from purplex.grammar import Grammar, Production, END_OF_INPUT
from purplex.lex import Lexer
from purplex.token import Token
# Sentinel token appended after the lexer's output to mark end-of-input.
END_OF_INPUT_TOKEN = Token(END_OF_INPUT, '', '', 0, 0)
# Associativity markers and the default precedence (left-associative, level 0).
LEFT = 'left'
RIGHT = 'right'
DEFAULT_PREC = (LEFT, 0)
def attach(rule, prec_symbol=None):
    """Decorator registering *rule* as a grammar production on the function.

    Productions accumulate in a ``productions`` set on the function object,
    where the parser metaclass collects them at class-creation time.
    """
    def decorator(handler):
        if not hasattr(handler, 'productions'):
            handler.productions = set()
        handler.productions.add((Production(rule, handler), prec_symbol))
        return handler
    return decorator
def attach_list(nonterminal, singular, single=True, epsilon=False):
    """Attach the productions for a left-recursive list rule.

    Always adds ``NT : NT singular``; *single* adds the one-element base
    case and *epsilon* additionally allows an empty list.
    """
    def decorator(handler):
        rules = ['{} : {} {}'.format(nonterminal, nonterminal, singular)]
        if single:
            rules.append('{} : {}'.format(nonterminal, singular))
        if epsilon:
            rules.append('{} : '.format(nonterminal))
        for rule in rules:
            attach(rule)(handler)
        return handler
    return decorator
def attach_sep_list(nonterminal, singular, separator, epsilon=False):
    """Attach the productions for a separator-delimited list rule.

    Uses an auxiliary ``<nonterminal>_inner`` rule for the recursive part;
    *epsilon* additionally allows the whole list to be empty.
    """
    def decorator(handler):
        inner = '{}_inner'.format(nonterminal)
        rules = [
            '{} : {}'.format(nonterminal, inner),
            '{} : {} {} {}'.format(inner, inner, separator, singular),
            '{} : {}'.format(inner, singular),
        ]
        if epsilon:
            rules.append('{} : '.format(nonterminal))
        for rule in rules:
            attach(rule)(handler)
        return handler
    return decorator
class ParserBase(type):
    """Metaclass that builds LR parse tables at class-creation time.

    Collects every production registered via the attach() decorators on the
    class's methods, builds the grammar, resolves operator precedence, and
    stores the resulting ACTION/GOTO tables on the new class.
    """
    def __new__(cls, name, bases, dct):
        # Gather all productions registered on the class's functions.
        productions = set()
        for _, attr in dct.items():
            if hasattr(attr, 'productions'):
                productions |= attr.productions
        grammar = Grammar(dct['LEXER'].token_map.keys(),
                          [production for production, _ in productions],
                          start=dct['START'])
        precedence = cls.compute_precedence(grammar.terminals,
                                            productions,
                                            dct.get('PRECEDENCE') or ())
        INITIAL_STATE, ACTION, GOTO = cls.make_tables(grammar, precedence)
        dct.update({
            'grammar': grammar,
            'INITIAL_STATE': INITIAL_STATE,
            'ACTION': ACTION,
            'GOTO': GOTO,
        })
        return type.__new__(cls, name, bases, dct)
    @staticmethod
    def compute_precedence(terminals, productions, precedence_levels):
        """Computes the precedence of terminal and production.

        The precedence of a terminal is it's level in the PRECEDENCE tuple. For
        a production, the precedence is the right-most terminal (if it exists).
        The default precedence is DEFAULT_PREC - (LEFT, 0).

        Returns:
            precedence - dict[terminal | production] = (assoc, level)
        """
        precedence = collections.OrderedDict()
        for terminal in terminals:
            precedence[terminal] = DEFAULT_PREC
        # Later entries in PRECEDENCE bind looser: number levels descending
        # so the first level listed gets the highest number.
        level_precs = range(len(precedence_levels), 0, -1)
        for i, level in zip(level_precs, precedence_levels):
            assoc = level[0]
            for symbol in level[1:]:
                precedence[symbol] = (assoc, i)
        for production, prec_symbol in productions:
            if prec_symbol is None:
                # Default: a production inherits the precedence of the
                # right-most terminal in its body (if any).
                prod_terminals = [symbol for symbol in production.rhs
                                  if symbol in terminals] or [None]
                precedence[production] = precedence.get(prod_terminals[-1],
                                                        DEFAULT_PREC)
            else:
                precedence[production] = precedence.get(prec_symbol,
                                                        DEFAULT_PREC)
        return precedence
    @staticmethod
    def make_tables(grammar, precedence):
        """Generates the ACTION and GOTO tables for the grammar.

        Returns:
            action - dict[state][lookahead] = (action, ...)
            goto - dict[state][just_reduced] = new_state
        """
        ACTION = {}
        GOTO = {}
        labels = {}
        def get_label(closure):
            # Assign a stable integer id (state number) to each closure.
            if closure not in labels:
                labels[closure] = len(labels)
            return labels[closure]
        def resolve_shift_reduce(lookahead, s_action, r_action):
            # Precedence/associativity tie-break: reduce wins on strictly
            # higher precedence, or on equal precedence with left assoc.
            s_assoc, s_level = precedence[lookahead]
            r_assoc, r_level = precedence[r_action[1]]
            if s_level < r_level:
                return r_action
            elif s_level == r_level and r_assoc == LEFT:
                return r_action
            else:
                return s_action
        initial, closures, goto = grammar.closures()
        for closure in closures:
            label = get_label(closure)
            for rule in closure:
                new_action, lookahead = None, rule.lookahead
                if not rule.at_end:
                    symbol = rule.rhs[rule.pos]
                    is_terminal = symbol in grammar.terminals
                    has_goto = symbol in goto[closure]
                    if is_terminal and has_goto:
                        next_state = get_label(goto[closure][symbol])
                        new_action, lookahead = ('shift', next_state), symbol
                elif rule.production == grammar.start and rule.at_end:
                    new_action = ('accept',)
                elif rule.at_end:
                    new_action = ('reduce', rule.production)
                if new_action is None:
                    continue
                prev_action = ACTION.get((label, lookahead))
                if prev_action is None or prev_action == new_action:
                    ACTION[label, lookahead] = new_action
                else:
                    # Conflicting entries: shift/reduce is resolved by
                    # precedence; anything else is a hard grammar error.
                    types = (prev_action[0], new_action[0])
                    if types == ('shift', 'reduce'):
                        chosen = resolve_shift_reduce(lookahead,
                                                      prev_action,
                                                      new_action)
                    elif types == ('reduce', 'shift'):
                        chosen = resolve_shift_reduce(lookahead,
                                                      new_action,
                                                      prev_action)
                    else:
                        raise TableConflictError(prev_action, new_action)
                    ACTION[label, lookahead] = chosen
            for symbol in grammar.nonterminals:
                if symbol in goto[closure]:
                    GOTO[label, symbol] = get_label(goto[closure][symbol])
        return get_label(initial), ACTION, GOTO
@six.add_metaclass(ParserBase)
class Parser(object):
    """Base LR parser; subclasses supply LEXER, START and @attach'd rules."""
    LEXER = Lexer
    START = 'S'
    PRECEDENCE = ()
    # The following are overwritten by the ParserBase metaclass when a
    # subclass is created.
    grammar = None
    INITIAL_STATE = 0
    ACTION = {}
    GOTO = {}
    def parse(self, raw):
        """Parses an input string and applies the parser's grammar."""
        lexer = self.LEXER(raw)
        tokens = iter(itertools.chain(lexer, [END_OF_INPUT_TOKEN]))
        # Stack of (state, symbol, semantic value) triples.
        stack = [(self.INITIAL_STATE, '<initial>', '<begin>')]
        token = next(tokens)
        while stack:
            state, _, _ = stack[-1]
            action = self.ACTION.get((state, token.name))
            if action is None:
                # NOTE(review): this fires for ANY unexpected token, not only
                # an unreduced start symbol -- confirm the exception choice.
                raise StartSymbolNotReducedError(self.START)
            if action[0] == 'reduce':
                production = action[1]
                # Special case for epsilon rules
                if len(production):
                    args = (item[2] for item in stack[-len(production):])
                    del stack[-len(production):]
                else:
                    args = []
                prev_state, _, _ = stack[-1]
                new_state = self.GOTO[prev_state, production.lhs]
                stack.append((
                    new_state,
                    production.lhs,
                    production.func(self, *args),
                ))
            elif action[0] == 'shift':
                stack.append((action[1], token.name, token.value))
                token = next(tokens)
            elif action[0] == 'accept':
                # Semantic value of the start symbol is the parse result.
                return stack[-1][2]
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
# Directory containing this setup script.
here = path.abspath(path.dirname(__file__))
long_description = """
Edgesense
=========
This is the python library and scripts for the Edgsense social network analysis tool (see: https://github.com/Wikitalia/edgesense )
The python scripts build the network from source json files and compute all the metrics.
See https://github.com/Wikitalia/edgesense/python/README.md for more informations
"""
setup(
    name='edgesense',
    version='0.14.0',
    description='Edgesense Social Network Analysis and Visualization',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/Wikitalia/edgesense/python',
    # Author details
    author='Luca Mearelli',
    author_email='l.mearelli@spazidigitali.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    # What does your project relate to?
    keywords='edgesense sna network socialnetwork catalyst',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),
    # List run-time dependencies here. These will be installed by pip when your
    # project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
    install_requires=[
        'networkx==1.8.1',
        'python-louvain==0.3'
    ],
    # List additional groups of dependencies here (e.g. development dependencies).
    # You can install these using the following syntax, for example:
    # $ pip install -e .[dev,test]
    extras_require = {
        'dev': [],
        'test': [],
    },
    # Bundled non-code files shipped inside the package.
    package_data={
        'edgesense': ['datapackage_template.json']
    },
    data_files=[],
    # Command-line scripts generated at install time.
    entry_points={
        'console_scripts': [
            'edgesense_drupal=edgesense.drupal_script:main',
            'edgesense_build_network=edgesense.build_network:main',
            'edgesense_catalyst_server=edgesense.catalyst_server:main',
            'edgesense_parse_catalyst=edgesense.parse_catalyst:main',
            'edgesense_parse_tweets=edgesense.parse_tweets:main',
            'edgesense_parse_mailinglist=edgesense.parse_mailinglist:main',
        ],
    },
)
|
from test.helper import (
execute_add,
wait_for_processes,
)
from test.helper import command_factory
def test_remove_fails(daemon_setup):
    """Removing a nonexistent key fails."""
    # Nothing has been added yet, so key 0 cannot exist.
    response = command_factory('remove')({'keys': [0]})
    assert response['status'] == 'error'
def test_remove_running(daemon_setup):
    """Can't remove a running process."""
    # `sleep 60` keeps entry 0 running for the whole test.
    execute_add('sleep 60')
    response = command_factory('remove')({'keys': [0]})
    assert response['status'] == 'error'
def test_remove(daemon_setup):
    """Remove a queued process from the queue."""
    # Pause the daemon. Otherwise we may try to remove a running entry.
    command_factory('pause')()
    # Add entry and instantly remove it.
    execute_add('ls')
    response = command_factory('remove')({'keys': [0]})
    assert response['status'] == 'success'
    # The queue should be empty
    status = command_factory('status')()
    assert status['data'] == 'Queue is empty'
def test_remove_multiple_specific_success(daemon_setup):
    """Remove several queued entries in a single command."""
    # Pause the daemon so no entry starts running.
    command_factory('pause')()
    # Add entries
    execute_add('ls')
    execute_add('ls')
    execute_add('ls')
    # Remove two entries.
    response = command_factory('remove')({'keys': [0, 1]})
    assert response['status'] == 'success'
    # Both removed keys must be gone from the status output.
    status = command_factory('status')()
    assert 0 not in status['data']
    assert 1 not in status['data']
def test_remove_multiple_specific(daemon_setup):
    """Batch remove: every entry is removed except the running one."""
    # Pause the daemon.
    command_factory('pause')()
    # Add 4 entries to get a `failing`, `done`, `queued` and `running` entry.
    execute_add('failingtestcommand')
    execute_add('sleep 60')
    execute_add('ls')
    execute_add('ls')
    # Start 0, 1 and 2 and wait for the `failed` and `done` entry to finish.
    response = command_factory('start')({'keys': [0, 1, 2]})
    status = wait_for_processes([0, 2])
    assert status['data'][0]['status'] == 'failed'
    assert status['data'][1]['status'] == 'running'
    assert status['data'][2]['status'] == 'done'
    assert status['data'][3]['status'] == 'queued'
    # Remove all entries. The response should be an error, as we try to remove
    # a running process.
    response = command_factory('remove')({'keys': [0, 1, 2, 3]})
    assert response['status'] == 'error'
    # Despite the error, all non-running entries were removed; only the
    # running entry (1) survives.
    status = command_factory('status')()
    assert 0 not in status['data']
    assert status['data'][1]['status'] == 'running'
    assert 2 not in status['data']
    assert 3 not in status['data']
|
""" Implementacao do algoritmo de busca sentinela """
def busca_sentinela(list_to_search, value):
    """Sentinel linear search.

    Temporarily appends *value* to the end of the list as a sentinel so
    the scan loop needs no bounds check, then restores the list before
    returning.

    Args:
        list_to_search: list to scan (left unchanged on return).
        value: item to look for.

    Returns:
        The index of *value* in *list_to_search*, or -1 if it is absent.
    """
    list_to_search.append(value)  # sentinel guarantees the loop terminates
    position = 0
    while list_to_search[position] != value:
        position += 1
    list_to_search.pop()  # restore the caller's list
    # Hitting the sentinel slot (== original length) means "not found".
    return -1 if position == len(list_to_search) else position
if __name__ == "__main__":
    # Demo run: search a fixed sample list and report the result.
    sample = [1, 9, 2, 8, 7, 4, 5, 6, 4, 3, 10, 0]
    TARGET = 4
    found_at = busca_sentinela(sample, TARGET)
    print(sample)
    if found_at >= 0:
        print("Found value {} at position {}.".format(TARGET, found_at))
    else:
        print("Could not find value {}.".format(TARGET))
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a required `country` CharField to CommunityUser. Existing rows
    # are backfilled with the one-off default '' and preserve_default=False
    # drops that default from the model afterwards.
    dependencies = [
        ('download_manager', '0002_auto_20141202_0152'),
    ]
    operations = [
        migrations.AddField(
            model_name='communityuser',
            name='country',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
    ]
|
"""
Karl Persson, Mac OSX 10.8.4/Windows 8, Python 2.7.5, Pygame 1.9.2pre
Main class for Pulse game
"""
import sys
import pygame
from pygame.locals import *
import Explosion, FileManager, GameObjects, HUD, LevelManager, Menues
class PulseGame:
    """Top-level game object: owns the window, resources, managers, HUD
    and clock, and drives per-frame rendering/updating."""
    def __init__(self):
        pygame.init()
        # Initializing file manager and loading icon
        self.fileManager = FileManager.FileManager('Textures', 'Sounds')
        # Display properties
        pygame.display.set_icon(self.fileManager.icon)
        self.screen = pygame.display.set_mode((800, 600))
        pygame.display.set_caption('Pulse')
        # Setting background
        self.fileManager.loadBackground()
        self.background = self.fileManager.backgroundTexture
        # Display update
        self.cleanWindow()
        pygame.display.flip()
        # Initializing game files
        self.fileManager.loadGameFiles()
        # Initializing game objects
        self.gameObjects = GameObjects.GameObjects()
        # Initializing level manager
        self.levelManager = LevelManager.LevelManager(self.fileManager, self.gameObjects)
        # Initializing menues
        self.menues = Menues.Menues(self.fileManager, self.levelManager)
        # Initializing HUD
        self.HUD = pygame.sprite.RenderPlain()
        self.HUDObject = HUD.HUD(self.levelManager.currentLevel(), True)
        self.HUD.add(self.HUDObject)
        # Adding notifiers to gameObjects
        self.gameObjects.addNotifier(self.HUDObject)
        self.gameObjects.addNotifier(self.levelManager)
        # Initializing clock
        self.clock = pygame.time.Clock()
        # Game running. Showing menu on False.
        self.gameRunning = False
        # Starting music
        pygame.mixer.music.play(-1)
        # Showing main menu
        self.menues.main()
    # Rendering and updating game (fill old elements with background, update elements, draw new elements)
    def render(self):
        """Erase last frame's dirty areas, update state, redraw, and flush
        only the changed rects (partial update keeps the framerate up)."""
        # Adding rects to update on screen
        rects = []
        # Erasing previous balls
        # (Blitting to screen and saving rect-copies for updating)
        for sprite in self.gameObjects.allSprites:
            self.screen.blit(self.background, sprite.rect.topleft, sprite.rect)
            rects.append(sprite.rect.copy())
        # Erasing HUD
        self.screen.blit(self.background, self.HUDObject.rect.topleft, self.HUDObject.rect)
        rects.append(self.HUDObject.rect.copy())
        # Erasing menues
        for rect in self.menues.getRects():
            self.screen.blit(self.background, rect.topleft, rect)
            rects.append(rect.copy())
        # Updating stuff
        self.update()
        # Rendering
        # Sprites
        self.gameObjects.render(self.screen)
        for sprite in self.gameObjects.allSprites:
            rects.append(sprite.rect)
        # HUD
        self.HUD.draw(self.screen)
        rects.append(self.HUDObject.rect)
        # Menu
        self.menues.render(self.screen)
        for rect in self.menues.getRects():
            rects.append(rect)
        # Updating specific rects on screen, for better framerate
        pygame.display.update(rects)
    # Updating game
    def update(self):
        """Advance game state one frame and react to win/lose transitions."""
        # Updating game objects, HUD, menues etc.
        self.gameObjects.update()
        self.HUD.update()
        self.menues.update(pygame.mouse.get_pos())
        # Checking game status
        if self.levelManager.playerWon():
            self.gameRunning = False
            # opening menu if it's not already open
            if not self.menues.menuActive():
                # Checking if it's the last level
                if self.levelManager.lastLevel():
                    self.menues.gameWon()
                else:
                    self.menues.levelWon()
        elif self.levelManager.playerLost():
            self.gameRunning = False
            # Opening menu if it's not already open
            if not self.menues.menuActive():
                self.menues.levelLost()
        else:
            # If the game was previously not running hud and background will be reset
            if not self.gameRunning:
                #self.cleanWindow()
                self.HUDObject.setLevel(self.levelManager.currentLevel())
                # Making sure HUD isn't hidden
                self.HUDObject.show()
                self.gameRunning = True
    # Cleaning window (blitting background)
    def cleanWindow(self):
        """Repaint the whole window with the background texture."""
        self.screen.blit(self.background, (0,0))
if __name__ == '__main__':
    game = PulseGame()
    # Game loop: tick, render, then handle input events.
    while True:
        # 60Hz framerate
        game.clock.tick(60)
        game.render()
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
            elif event.type == MOUSEBUTTONDOWN and event.button == 1:
                if game.gameRunning:
                    # Only creating a pulse when the number of pulses left is sufficient
                    if game.levelManager.pulses > 0:
                        # Creating explosion
                        game.gameObjects.addEntity(Explosion.Explosion(game.fileManager, game.levelManager.currentLevel().scale, pygame.mouse.get_pos()))
                        game.gameObjects.updateNotifierPulses(-1)
                        game.fileManager.playPulse()
                else:
                    # Checking menu mouse over (gameRunning False means menu!)
                    game.menues.mouseClicked(pygame.mouse.get_pos())
|
import sys
import os
import errno
import paramiko
from .base import BaseBackend
from ..utils import filter_delete_filename_list
from ..six import reraise
class SFTPBackend(BaseBackend):
    """
    Backup backend that uploads files to a remote host over SFTP (SSH).
    """
    # Connection settings; all filled in by __init__.
    host = None
    port = None
    username = None
    password = None
    remote_dir = None
    transport = None
    sftp = None
    def __init__(self, host, username, password, remote_dir, port=None, keeps=None):
        """Open the SSH transport and SFTP session immediately.

        Re-raises the original paramiko exception (with traceback) if
        connecting or authenticating fails.
        """
        super(SFTPBackend, self).__init__(keeps)
        self.host = host
        self.port = port or 22
        self.username = username
        self.password = password
        self.remote_dir = remote_dir
        # This step establishes the TCP connection.
        try:
            self.transport = paramiko.Transport((self.host, self.port))
            # Login / authentication.
            self.transport.connect(username=self.username, password=self.password)
            self.sftp = paramiko.SFTPClient.from_transport(self.transport)
        except:
            # Closing the SFTP client does not close the transport's
            # connection...
            if self.sftp:
                self.sftp.close()
            # ...so when login fails, remember to close the transport too.
            # The bare except is deliberate: everything is re-raised below
            # with its original traceback.
            if self.transport:
                self.transport.close()
            t, v, tb = sys.exc_info()
            reraise(t, v, tb)
    def upload(self, file_path, category):
        """
        Upload file_path into <remote_dir>/<category>/ on the remote host,
        creating the category directory if it does not exist yet.
        """
        filename = os.path.basename(file_path)
        dst_dir = os.path.join(self.remote_dir, category)
        try:
            self.sftp.stat(dst_dir)
        except IOError as e:
            # ENOENT means the directory is missing -- create it.
            if e.errno == errno.ENOENT:
                self.sftp.mkdir(dst_dir)
            else:
                raise e
        remote_path = os.path.join(dst_dir, filename)
        self.sftp.put(file_path, remote_path)
    def clean(self, category, keeps=None):
        """Delete stale remote files in *category*.

        The files to delete are chosen by filter_delete_filename_list
        (presumably keeping the *keeps* most recent -- confirm its
        semantics in ..utils). No-op when neither *keeps* nor self.keeps
        is set.
        """
        keeps = keeps or self.keeps
        if not keeps:
            return
        dst_dir = os.path.join(self.remote_dir, category)
        delete_filename_list = filter_delete_filename_list(self.sftp.listdir(dst_dir), keeps)
        for filename in delete_filename_list:
            self.sftp.remove(os.path.join(dst_dir, filename))
|
import os
# Directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask development-server settings.
DEBUG = True
PORT = 5000
HOST = "127.0.0.1"
# SQLAlchemy settings.
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = True
# NOTE(review): database credentials are hard-coded here; consider moving
# them to environment variables instead of committing them.
SQLALCHEMY_DATABASE_URI = "postgresql://{DB_USER}:{DB_PASS}@{DB_ADDR}/{DB_NAME}".format(DB_USER="test2", DB_PASS="abc@123", DB_ADDR="127.0.0.1", DB_NAME="test_messages")
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
# Pagination defaults (presumably consumed by the API views -- confirm).
PAGINATION_PAGE_SIZE = 5
PAGINATION_PAGE_ARGUMENT_NAME = 'page'
# Test-mode settings: SERVER_NAME pins url_for() output; CSRF disabled.
TESTING = True
SERVER_NAME = '127.0.0.1:5000'
WTF_CSRF_ENABLED = False
|
from peewee import *
from .base import IS_MYSQL
from .base import IS_SQLITE
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import get_in_memory_db
from .base import requires_sqlite
# --- Model fixtures used by the foreign-key / composite-key tests below. ---
class Package(TestModel):
    barcode = CharField(unique=True)
class PackageItem(TestModel):
    title = CharField()
    # FK that targets a non-primary-key column (Package.barcode).
    package = ForeignKeyField(Package, Package.barcode, backref='items')
class Manufacturer(TestModel):
    name = CharField()
class Component(TestModel):
    name = CharField()
    manufacturer = ForeignKeyField(Manufacturer, null=True)
class Computer(TestModel):
    # Three FKs to the same model, distinguished only by backref.
    hard_drive = ForeignKeyField(Component, backref='c1')
    memory = ForeignKeyField(Component, backref='c2')
    processor = ForeignKeyField(Component, backref='c3')
class User(TestModel):
    username = CharField()
    class Meta:
        # 'user' can be a reserved word on some backends.
        table_name = 'users'
class Relationship(TestModel):
    # Self-referential many-to-many through-model between users.
    from_user = ForeignKeyField(User, backref='relationships')
    to_user = ForeignKeyField(User, backref='related_to')
class Note(TestModel):
    user = ForeignKeyField(User, backref='notes')
    content = TextField()
class CompositeKeyModel(TestModel):
    f1 = CharField()
    f2 = IntegerField()
    f3 = FloatField()
    class Meta:
        primary_key = CompositeKey('f1', 'f2')
class UserThing(TestModel):
    thing = CharField()
    user = ForeignKeyField(User, backref='things')
    class Meta:
        primary_key = CompositeKey('thing', 'user')
class Post(TestModel):
    title = CharField()
class Tag(TestModel):
    tag = CharField()
class TagPostThrough(TestModel):
    # Many-to-many through-model with a composite primary key.
    tag = ForeignKeyField(Tag, backref='posts')
    post = ForeignKeyField(Post, backref='tags')
    class Meta:
        primary_key = CompositeKey('tag', 'post')
class TagPostThroughAlt(TestModel):
    # Same shape as TagPostThrough but with a surrogate primary key.
    tag = ForeignKeyField(Tag, backref='posts_alt')
    post = ForeignKeyField(Post, backref='tags_alt')
class TestForeignKeyToNonPrimaryKey(ModelTestCase):
    """FKs referencing a non-PK column resolve by that column's value."""
    requires = [Package, PackageItem]
    def setUp(self):
        super(TestForeignKeyToNonPrimaryKey, self).setUp()
        # Two packages, each with two items titled '<barcode>-<i>'.
        for barcode in ['101', '102']:
            Package.create(barcode=barcode)
            for i in range(2):
                PackageItem.create(
                    package=barcode,
                    title='%s-%s' % (barcode, i))
    def test_fk_resolution(self):
        pi = PackageItem.get(PackageItem.title == '101-0')
        # The raw FK column stores the barcode, not the package's id.
        self.assertEqual(pi.__data__['package'], '101')
        self.assertEqual(pi.package, Package.get(Package.barcode == '101'))
    def test_select_generation(self):
        # The backref query joins on barcode and returns the right items.
        p = Package.get(Package.barcode == '101')
        self.assertEqual(
            [item.title for item in p.items.order_by(PackageItem.title)],
            ['101-0', '101-1'])
class TestMultipleForeignKey(ModelTestCase):
    """Joining the same model several times via distinct FKs and aliases."""
    requires = [Manufacturer, Component, Computer]
    # One row per computer: [hard_drive, memory, processor] component names.
    test_values = [
        ['3TB', '16GB', 'i7'],
        ['128GB', '1GB', 'ARM'],
    ]
    def setUp(self):
        super(TestMultipleForeignKey, self).setUp()
        intel = Manufacturer.create(name='Intel')
        amd = Manufacturer.create(name='AMD')
        kingston = Manufacturer.create(name='Kingston')
        for hard_drive, memory, processor in self.test_values:
            c = Computer.create(
                hard_drive=Component.create(name=hard_drive),
                memory=Component.create(name=memory, manufacturer=kingston),
                processor=Component.create(name=processor, manufacturer=intel))
        # The 2nd computer has an AMD processor.
        c.processor.manufacturer = amd
        c.processor.save()
    def test_multi_join(self):
        # One alias pair per FK so each join is independent.
        HDD = Component.alias('hdd')
        HDDMf = Manufacturer.alias('hddm')
        Memory = Component.alias('mem')
        MemoryMf = Manufacturer.alias('memm')
        Processor = Component.alias('proc')
        ProcessorMf = Manufacturer.alias('procm')
        query = (Computer
                 .select(
                     Computer,
                     HDD,
                     Memory,
                     Processor,
                     HDDMf,
                     MemoryMf,
                     ProcessorMf)
                 .join(HDD, on=(
                     Computer.hard_drive_id == HDD.id).alias('hard_drive'))
                 .join(
                     HDDMf,
                     JOIN.LEFT_OUTER,
                     on=(HDD.manufacturer_id == HDDMf.id))
                 .switch(Computer)
                 .join(Memory, on=(
                     Computer.memory_id == Memory.id).alias('memory'))
                 .join(
                     MemoryMf,
                     JOIN.LEFT_OUTER,
                     on=(Memory.manufacturer_id == MemoryMf.id))
                 .switch(Computer)
                 .join(Processor, on=(
                     Computer.processor_id == Processor.id).alias('processor'))
                 .join(
                     ProcessorMf,
                     JOIN.LEFT_OUTER,
                     on=(Processor.manufacturer_id == ProcessorMf.id))
                 .order_by(Computer.id))
        # Everything was selected up-front, so iterating the graph must not
        # issue any additional queries.
        with self.assertQueryCount(1):
            vals = []
            manufacturers = []
            for computer in query:
                components = [
                    computer.hard_drive,
                    computer.memory,
                    computer.processor]
                vals.append([component.name for component in components])
                for component in components:
                    if component.manufacturer:
                        manufacturers.append(component.manufacturer.name)
                    else:
                        manufacturers.append(None)
            self.assertEqual(vals, self.test_values)
            self.assertEqual(manufacturers, [
                None, 'Kingston', 'Intel',
                None, 'Kingston', 'AMD',
            ])
class TestMultipleForeignKeysJoining(ModelTestCase):
    """Follower/following queries through a self-referential through-model."""
    requires = [User, Relationship]
    def test_multiple_fks(self):
        a = User.create(username='a')
        b = User.create(username='b')
        c = User.create(username='c')
        # No relationships yet: both backrefs are empty.
        self.assertEqual(list(a.relationships), [])
        self.assertEqual(list(a.related_to), [])
        r_ab = Relationship.create(from_user=a, to_user=b)
        self.assertEqual(list(a.relationships), [r_ab])
        self.assertEqual(list(a.related_to), [])
        self.assertEqual(list(b.relationships), [])
        self.assertEqual(list(b.related_to), [r_ab])
        r_bc = Relationship.create(from_user=b, to_user=c)
        # "following": join on to_user, filter by from_user.
        following = User.select().join(
            Relationship, on=Relationship.to_user
        ).where(Relationship.from_user == a)
        self.assertEqual(list(following), [b])
        # "followers": join on from_user, filter by to_user.
        followers = User.select().join(
            Relationship, on=Relationship.from_user
        ).where(Relationship.to_user == a.id)
        self.assertEqual(list(followers), [])
        following = User.select().join(
            Relationship, on=Relationship.to_user
        ).where(Relationship.from_user == b.id)
        self.assertEqual(list(following), [c])
        followers = User.select().join(
            Relationship, on=Relationship.from_user
        ).where(Relationship.to_user == b.id)
        self.assertEqual(list(followers), [a])
        following = User.select().join(
            Relationship, on=Relationship.to_user
        ).where(Relationship.from_user == c.id)
        self.assertEqual(list(following), [])
        followers = User.select().join(
            Relationship, on=Relationship.from_user
        ).where(Relationship.to_user == c.id)
        self.assertEqual(list(followers), [b])
class TestCompositePrimaryKey(ModelTestCase):
    """Tests for models whose primary key is a CompositeKey."""
    requires = [Tag, Post, TagPostThrough, CompositeKeyModel, User, UserThing]

    def setUp(self):
        # Fixture: tags t1..t3 each linked to the matching post p1..p3,
        # plus an extra post "p12" tagged with both t1 and t2.
        super(TestCompositePrimaryKey, self).setUp()
        tags = [Tag.create(tag='t%d' % i) for i in range(1, 4)]
        posts = [Post.create(title='p%d' % i) for i in range(1, 4)]
        p12 = Post.create(title='p12')
        for t, p in zip(tags, posts):
            TagPostThrough.create(tag=t, post=p)
        TagPostThrough.create(tag=tags[0], post=p12)
        TagPostThrough.create(tag=tags[1], post=p12)

    def test_create_table_query(self):
        # The generated DDL must declare a two-column PRIMARY KEY and one
        # FOREIGN KEY per component column.
        query, params = TagPostThrough._schema._create_table().query()
        sql = ('CREATE TABLE IF NOT EXISTS "tag_post_through" ('
               '"tag_id" INTEGER NOT NULL, '
               '"post_id" INTEGER NOT NULL, '
               'PRIMARY KEY ("tag_id", "post_id"), '
               'FOREIGN KEY ("tag_id") REFERENCES "tag" ("id"), '
               'FOREIGN KEY ("post_id") REFERENCES "post" ("id"))')
        if IS_MYSQL:
            sql = sql.replace('"', '`')
        self.assertEqual(query, sql)

    def test_get_set_id(self):
        tpt = (TagPostThrough
               .select()
               .join(Tag)
               .switch(TagPostThrough)
               .join(Post)
               .order_by(Tag.tag, Post.title)).get()
        # Sanity check.
        self.assertEqual(tpt.tag.tag, 't1')
        self.assertEqual(tpt.post.title, 'p1')

        tag = Tag.select().where(Tag.tag == 't1').get()
        post = Post.select().where(Post.title == 'p1').get()
        # _pk on a composite-key instance is a tuple of component values.
        self.assertEqual(tpt._pk, (tag.id, post.id))

        # set_id is a no-op.
        with self.assertRaisesCtx(TypeError):
            tpt._pk = None
        self.assertEqual(tpt._pk, (tag.id, post.id))

        # Assigning a tuple of model instances rebinds the composite pk.
        t3 = Tag.get(Tag.tag == 't3')
        p3 = Post.get(Post.title == 'p3')
        tpt._pk = (t3, p3)
        self.assertEqual(tpt.tag.tag, 't3')
        self.assertEqual(tpt.post.title, 'p3')

    def test_querying(self):
        # Join through the composite-key through-model in both directions.
        posts = (Post.select()
                 .join(TagPostThrough)
                 .join(Tag)
                 .where(Tag.tag == 't1')
                 .order_by(Post.title))
        self.assertEqual([p.title for p in posts], ['p1', 'p12'])

        tags = (Tag.select()
                .join(TagPostThrough)
                .join(Post)
                .where(Post.title == 'p12')
                .order_by(Tag.tag))
        self.assertEqual([t.tag for t in tags], ['t1', 't2'])

    def test_composite_key_model(self):
        CKM = CompositeKeyModel
        values = [
            ('a', 1, 1.0),
            ('a', 2, 2.0),
            ('b', 1, 1.0),
            ('b', 2, 2.0)]
        c1, c2, c3, c4 = [
            CKM.create(f1=f1, f2=f2, f3=f3) for f1, f2, f3 in values]

        # Update a single row, giving it a new value for `f3`.
        CKM.update(f3=3.0).where((CKM.f1 == 'a') & (CKM.f2 == 2)).execute()
        c = CKM.get((CKM.f1 == 'a') & (CKM.f2 == 2))
        self.assertEqual(c.f3, 3.0)

        # Update the `f3` value and call `save()`, triggering an update.
        c3.f3 = 4.0
        c3.save()
        c = CKM.get((CKM.f1 == 'b') & (CKM.f2 == 1))
        self.assertEqual(c.f3, 4.0)

        # Only 1 row updated.
        query = CKM.select().where(CKM.f3 == 4.0)
        self.assertEqual(query.count(), 1)

        # Unfortunately this does not work since the original value of the
        # PK is lost (and hence cannot be used to update).
        c4.f1 = 'c'
        c4.save()
        self.assertRaises(
            CKM.DoesNotExist,
            lambda: CKM.get((CKM.f1 == 'c') & (CKM.f2 == 2)))

    def test_count_composite_key(self):
        CKM = CompositeKeyModel
        values = [
            ('a', 1, 1.0),
            ('a', 2, 2.0),
            ('b', 1, 1.0),
            ('b', 2, 1.0)]
        for f1, f2, f3 in values:
            CKM.create(f1=f1, f2=f2, f3=f3)

        self.assertEqual(CKM.select().count(), 4)
        # exists() works with composite-key equality expressions.
        self.assertTrue(CKM.select().where(
            (CKM.f1 == 'a') &
            (CKM.f2 == 1)).exists())
        self.assertFalse(CKM.select().where(
            (CKM.f1 == 'a') &
            (CKM.f2 == 3)).exists())

    def test_delete_instance(self):
        u1, u2 = [User.create(username='u%s' % i) for i in range(2)]
        ut1 = UserThing.create(thing='t1', user=u1)
        ut2 = UserThing.create(thing='t2', user=u1)
        ut3 = UserThing.create(thing='t1', user=u2)
        ut4 = UserThing.create(thing='t3', user=u2)

        # delete_instance() must remove exactly the one composite-keyed row
        # (u2's 't1' survives deleting u1's 't1').
        res = ut1.delete_instance()
        self.assertEqual(res, 1)
        self.assertEqual(
            [x.thing for x in UserThing.select().order_by(UserThing.thing)],
            ['t1', 't2', 't3'])

    def test_composite_key_inheritance(self):
        class Person(TestModel):
            first = TextField()
            last = TextField()

            class Meta:
                primary_key = CompositeKey('first', 'last')

        self.assertTrue(isinstance(Person._meta.primary_key, CompositeKey))
        self.assertEqual(Person._meta.primary_key.field_names,
                         ('first', 'last'))

        # Subclasses inherit the composite primary key declaration.
        class Employee(Person):
            title = TextField()

        self.assertTrue(isinstance(Employee._meta.primary_key, CompositeKey))
        self.assertEqual(Employee._meta.primary_key.field_names,
                         ('first', 'last'))

        sql = ('CREATE TABLE IF NOT EXISTS "employee" ('
               '"first" TEXT NOT NULL, "last" TEXT NOT NULL, '
               '"title" TEXT NOT NULL, PRIMARY KEY ("first", "last"))')
        if IS_MYSQL:
            sql = sql.replace('"', '`')
        self.assertEqual(Employee._schema._create_table().query(), (sql, []))
class TestForeignKeyConstraints(ModelTestCase):
    """Verify foreign-key constraint enforcement (toggled via pragma on
    SQLite; other backends enforce constraints unconditionally)."""
    requires = [User, Note]

    def setUp(self):
        super(TestForeignKeyConstraints, self).setUp()
        self.set_foreign_key_pragma(True)

    def tearDown(self):
        self.set_foreign_key_pragma(False)
        super(TestForeignKeyConstraints, self).tearDown()

    def set_foreign_key_pragma(self, enabled):
        # Only SQLite needs an explicit pragma toggle.
        if not IS_SQLITE:
            return
        self.database.foreign_keys = 'on' if enabled else 'off'

    def test_constraint_exists(self):
        # Point the FK at an id guaranteed not to exist.
        missing_id = (User.select(fn.MAX(User.id)).scalar() or 0) + 1
        with self.assertRaisesCtx(IntegrityError):
            with self.database.atomic():
                Note.create(user=missing_id, content='test')

    @requires_sqlite
    def test_disable_constraint(self):
        # With enforcement off, inserting a dangling FK succeeds.
        self.set_foreign_key_pragma(False)
        Note.create(user=0, content='test')
class FK_A(TestModel):
    # Unique non-primary-key column used as the FK target by FK_B.
    key = CharField(max_length=16, unique=True)
class FK_B(TestModel):
    # Foreign key pointing at FK_A.key rather than FK_A's primary key.
    fk_a = ForeignKeyField(FK_A, field='key')
class TestFKtoNonPKField(ModelTestCase):
    """FK_B.fk_a references FK_A.key, a unique non-primary-key column."""
    requires = [FK_A, FK_B]

    def test_fk_to_non_pk_field(self):
        a1 = FK_A.create(key='a1')
        a2 = FK_A.create(key='a2')
        b1 = FK_B.create(fk_a=a1)
        b2 = FK_B.create(fk_a=a2)

        # Filtering by related instance, raw FK value, target instance or
        # target field value must all coerce to the same column value.
        candidates = (b1.fk_a, b1.fk_a_id, a1, a1.key)
        for candidate in candidates:
            query = FK_B.select().where(FK_B.fk_a == candidate)
            self.assertSQL(query, (
                'SELECT "t1"."id", "t1"."fk_a_id" FROM "fk_b" AS "t1" '
                'WHERE ("t1"."fk_a_id" = ?)'), ['a1'])
            self.assertEqual(query.get().id, b1.id)

    def test_fk_to_non_pk_insert_update(self):
        a1 = FK_A.create(key='a1')
        b1 = FK_B.create(fk_a=a1)
        self.assertEqual(FK_B.select().where(FK_B.fk_a == a1).count(), 1)

        # Inserts may reference the FK by field or name, instance or value.
        insert_specs = (
            {FK_B.fk_a: a1},
            {'fk_a': a1},
            {FK_B.fk_a: a1.key},
            {'fk_a': a1.key})
        expected = 2
        for spec in insert_specs:
            self.assertTrue(FK_B.insert(spec).execute())
            self.assertEqual(
                FK_B.select().where(FK_B.fk_a == a1).count(), expected)
            expected += 1

        # Updates accept the same four spellings; move rows from a1 to a2.
        a2 = FK_A.create(key='a2')
        update_specs = (
            {FK_B.fk_a: a2},
            {'fk_a': a2},
            {FK_B.fk_a: a2.key},
            {'fk_a': a2.key})
        rows = list(FK_B.select().where(FK_B.fk_a == a1))
        moved = 0
        for row, spec in zip(rows[1:], update_specs):
            self.assertTrue(
                FK_B.update(spec).where(FK_B.id == row.id).execute())
            moved += 1
            self.assertEqual(
                FK_B.select().where(FK_B.fk_a == a2).count(), moved)
class TestDeferredForeignKeyIntegration(ModelTestCase):
    """DeferredForeignKey fields must be swapped for real ForeignKeyFields
    once the referenced model class is declared."""
    database = get_in_memory_db()

    def test_deferred_fk_simple(self):
        class Base(TestModel):
            class Meta:
                database = self.database

        class DFFk(Base):
            fk = DeferredForeignKey('DFPk')

        # Deferred key not bound yet.
        self.assertTrue(isinstance(DFFk.fk, DeferredForeignKey))

        # Declaring the target model triggers resolution of the deferral.
        class DFPk(Base): pass

        # Deferred key is bound correctly.
        self.assertTrue(isinstance(DFFk.fk, ForeignKeyField))
        self.assertEqual(DFFk.fk.rel_model, DFPk)
        # refs/backrefs bookkeeping is updated on both models.
        self.assertEqual(DFFk._meta.refs, {DFFk.fk: DFPk})
        self.assertEqual(DFFk._meta.backrefs, {})
        self.assertEqual(DFPk._meta.refs, {})
        self.assertEqual(DFPk._meta.backrefs, {DFFk.fk: DFFk})

        self.assertSQL(DFFk._schema._create_table(False), (
            'CREATE TABLE "df_fk" ("id" INTEGER NOT NULL PRIMARY KEY, '
            '"fk_id" INTEGER NOT NULL)'), [])

    def test_deferred_fk_as_pk(self):
        class Base(TestModel):
            class Meta:
                database = self.database

        class DFFk(Base):
            fk = DeferredForeignKey('DFPk', primary_key=True)

        # Deferred key not bound yet.
        self.assertTrue(isinstance(DFFk.fk, DeferredForeignKey))
        self.assertTrue(DFFk._meta.primary_key is DFFk.fk)

        class DFPk(Base): pass

        # Resolved and primary-key set correctly.
        self.assertTrue(isinstance(DFFk.fk, ForeignKeyField))
        self.assertTrue(DFFk._meta.primary_key is DFFk.fk)
        self.assertEqual(DFFk.fk.rel_model, DFPk)
        self.assertEqual(DFFk._meta.refs, {DFFk.fk: DFPk})
        self.assertEqual(DFFk._meta.backrefs, {})
        self.assertEqual(DFPk._meta.refs, {})
        self.assertEqual(DFPk._meta.backrefs, {DFFk.fk: DFFk})

        self.assertSQL(DFFk._schema._create_table(False), (
            'CREATE TABLE "df_fk" ("fk_id" INTEGER NOT NULL PRIMARY KEY)'), [])
|
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType, safe_unicode
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.exportimport.instruments.logger import Logger
from bika.lims.idserver import renameAfterCreation
from bika.lims.utils import tmpID
from Products.Archetypes.config import REFERENCE_CATALOG
from datetime import datetime
from DateTime import DateTime
import os
class InstrumentResultsFileParser(Logger):
    """Base class for instrument results-file parsers.

    Subclasses must implement parse(), feeding each parsed row into the
    internal raw-results dictionary through _addRawResult(). See
    getRawResults() for the expected structure of the parsed data.
    """

    def __init__(self, infile, mimetype):
        Logger.__init__(self)
        self._infile = infile        # results input file being parsed
        self._header = {}            # custom header key/values
        self._rawresults = {}        # resid -> list of result dicts
        self._mimetype = mimetype    # results file type name
        self._numline = 0            # current line number while parsing

    def getInputFile(self):
        """ Returns the results input file
        """
        return self._infile

    def parse(self):
        """ Parses the input results file and populates the rawresults dict.
            See getRawResults() method for more info about rawresults format
            Returns True if the file has been parsed successfully.
            Is highly recommended to use _addRawResult method when adding
            raw results.
            IMPORTANT: To be implemented by child classes
        """
        raise NotImplementedError

    def getAttachmentFileType(self):
        """ Returns the file type name that will be used when creating the
            AttachmentType used by the importer for saving the results file
            as an attachment in each Analysis matched.
            By default returns self.getFileMimeType()
        """
        return self.getFileMimeType()

    def getFileMimeType(self):
        """ Returns the results file type
        """
        return self._mimetype

    def getHeader(self):
        """ Returns a dictionary with custom key, values
        """
        return self._header

    def _addRawResult(self, resid, values=None, override=False):
        """ Adds a set of raw results for an object with id=resid
            resid is usually an Analysis Request ID or Worksheet's Reference
            Analysis ID. The values are a dictionary in which the keys are
            analysis service keywords and the values, another dictionary with
            the key,value results.
            The column 'DefaultResult' must be provided, because is used to
            map to the column from which the default result must be
            retrieved.
            With override=True (or a not-yet-seen resid) the values replace
            any previous rows for resid; otherwise they are appended as an
            additional row.

            Example:
            resid  = 'DU13162-001-R1'
            values = {
                'D2': {'DefaultResult': 'Final Conc',
                       'Remarks': '',
                       'Resp': '5816',
                       'ISTD Resp': '274638',
                       'Resp Ratio': '0.0212',
                       'Final Conc': '0.9145',
                       'Exp Conc': '1.9531',
                       'Accuracy': '98.19' },

                'D3': {'DefaultResult': 'Final Conc',
                       'Remarks': '',
                       'Resp': '5816',
                       'ISTD Resp': '274638',
                       'Resp Ratio': '0.0212',
                       'Final Conc': '0.9145',
                       'Exp Conc': '1.9531',
                       'Accuracy': '98.19' }
                }
        """
        # Bugfix: `values` previously defaulted to a shared mutable dict
        # literal ({}); use None and build a fresh dict per call.
        if values is None:
            values = {}
        if override or resid not in self._rawresults:
            self._rawresults[resid] = [values]
        else:
            self._rawresults[resid].append(values)

    def _emptyRawResults(self):
        """ Remove all grabbed raw results
        """
        self._rawresults = {}

    def getObjectsTotalCount(self):
        """ The total number of objects (ARs, ReferenceSamples, etc.) parsed
        """
        return len(self.getRawResults())

    def getResultsTotalCount(self):
        """ The total number of analysis results parsed
        """
        count = 0
        for rows in self.getRawResults().values():
            for row in rows:
                # Bugfix: the original added len(rows) once per row
                # (squaring the count) instead of counting each row's
                # results.
                count += len(row)
        return count

    def getAnalysesTotalCount(self):
        """ The total number of different analyses parsed
        """
        return len(self.getAnalysisKeywords())

    def getAnalysisKeywords(self):
        """ The (unordered) list of analysis service keywords found
        """
        keywords = set()
        for rows in self.getRawResults().values():
            for row in rows:
                keywords.update(row.keys())
        return list(keywords)

    def getRawResults(self):
        """ Returns a dictionary containing the parsed results data
            Each dict key is the results row ID (usually AR ID or Worksheet's
            Reference Sample ID). Each item is another dictionary, in which
            the key is a the AS Keyword.
            Inside the AS dict, the column 'DefaultResult' must be
            provided, that maps to the column from which the default
            result must be retrieved.
            If 'Remarks' column is found, it value will be set in Analysis
            Remarks field when using the default Importer.

            Example:
            raw_results['DU13162-001-R1'] = [{
                'D2': {'DefaultResult': 'Final Conc',
                       'Remarks': '',
                       'Resp': '5816',
                       'ISTD Resp': '274638',
                       'Resp Ratio': '0.0212',
                       'Final Conc': '0.9145',
                       'Exp Conc': '1.9531',
                       'Accuracy': '98.19' },

                'D3': {'DefaultResult': 'Final Conc',
                       'Remarks': '',
                       'Resp': '5816',
                       'ISTD Resp': '274638',
                       'Resp Ratio': '0.0212',
                       'Final Conc': '0.9145',
                       'Exp Conc': '1.9531',
                       'Accuracy': '98.19' }]

            in which:
            - 'DU13162-001-R1' is the Analysis Request ID,
            - 'D2' column is an analysis service keyword,
            - 'DefaultResult' column maps to the column with default result
            - 'Remarks' column with Remarks results for that Analysis
            - The rest of the dict columns are results (or additional info)
              that can be set to the analysis if needed (the default importer
              will look for them if the analysis has Interim fields).

            In the case of reference samples:
            Control/Blank:
            raw_results['QC13-0001-0002'] = {...}

            Duplicate of sample DU13162-009 (from AR DU13162-009-R1)
            raw_results['QC-DU13162-009-002'] = {...}
        """
        return self._rawresults

    def resume(self):
        """ Resumes the parse process
            Called by the Results Importer after parse() call
        """
        if len(self.getRawResults()) == 0:
            self.err("No results found")
            return False
        return True
class InstrumentCSVResultsFileParser(InstrumentResultsFileParser):
    """Line-oriented parser base for CSV results files. Subclasses
    implement _parseline()."""

    def __init__(self, infile):
        InstrumentResultsFileParser.__init__(self, infile, 'CSV')

    def parse(self):
        """Read the input file line by line, delegating each non-empty
        line to _parseline(). Returns False when a critical error is
        reported by _parseline() (signalled with -1), True otherwise.
        """
        infile = self.getInputFile()
        self.log("Parsing file ${file_name}", mapping={"file_name":infile.filename})
        # We test in import functions if the file was uploaded
        try:
            f = open(infile.name, 'rU')
            opened_here = True
        except AttributeError:
            # Uploaded file object: read it directly, don't close it here.
            f = infile
            opened_here = False
        try:
            jump = 0
            for line in f.readlines():
                self._numline += 1
                if jump == -1:
                    # Something went wrong. Finish
                    self.err("File processing finished due to critical errors")
                    return False
                if jump > 0:
                    # Jump some lines
                    jump -= 1
                    continue

                line = line.strip()
                if not line:
                    continue

                # _parseline() returns how many subsequent lines to skip,
                # or -1 on a critical error.
                jump = self._parseline(line)
        finally:
            # Bugfix: the original never closed the file it opened, leaking
            # the handle on every exit path.
            if opened_here:
                f.close()

        self.log(
            "End of file reached successfully: ${total_objects} objects, "
            "${total_analyses} analyses, ${total_results} results",
            mapping={"total_objects": self.getObjectsTotalCount(),
                     "total_analyses": self.getAnalysesTotalCount(),
                     "total_results":self.getResultsTotalCount()}
        )
        return True

    def splitLine(self, line):
        """Split a CSV line on commas, stripping whitespace per token."""
        sline = line.split(',')
        return [token.strip() for token in sline]

    def _parseline(self, line):
        """ Parses a line from the input CSV file and populates rawresults
            (look at getRawResults comment)
            returns -1 if critical error found and parser must end
            returns the number of lines to be jumped in next read. If 0, the
            parser reads the next line as usual
        """
        raise NotImplementedError
class AnalysisResultsImporter(Logger):
    """Imports the raw results produced by an InstrumentResultsFileParser
    into the matching Bika analyses (regular AR analyses, QC/reference
    analyses, or instrument calibration tests).
    """

    def __init__(self, parser, context,
                 idsearchcriteria=None,
                 override=[False, False],
                 allowed_ar_states=None,
                 allowed_analysis_states=None,
                 instrument_uid=None):
        # NOTE(review): `override` uses a mutable default list. It is only
        # read (never mutated) in this class, but callers should not mutate
        # the list they pass in.
        Logger.__init__(self)
        self._parser = parser
        self.context = context
        self._allowed_ar_states = allowed_ar_states
        self._allowed_analysis_states = allowed_analysis_states
        self._override = override
        self._idsearch = idsearchcriteria
        self._priorizedsearchcriteria = ''
        # Catalog tools used by the search helpers below.
        self.bsc = getToolByName(self.context, 'bika_setup_catalog')
        self.bac = getToolByName(self.context, 'bika_analysis_catalog')
        self.pc = getToolByName(self.context, 'portal_catalog')
        self.bc = getToolByName(self.context, 'bika_catalog')
        self.wf = getToolByName(self.context, 'portal_workflow')
        if not self._allowed_ar_states:
            self._allowed_ar_states=['sample_received',
                                     'attachment_due',
                                     'to_be_verified']
        if not self._allowed_analysis_states:
            self._allowed_analysis_states=['sampled',
                                           'sample_received',
                                           'attachment_due',
                                           'to_be_verified']
        if not self._idsearch:
            self._idsearch=['getRequestID']
        self.instrument_uid=instrument_uid

    def getParser(self):
        """ Returns the parser that will be used for the importer
        """
        return self._parser

    def getAllowedARStates(self):
        """ The allowed Analysis Request states
            The results import will only take into account the analyses
            contained inside an Analysis Request which current state is one
            from these.
        """
        return self._allowed_ar_states

    def getAllowedAnalysisStates(self):
        """ The allowed Analysis states
            The results import will only take into account the analyses
            if its current state is in the allowed analysis states.
        """
        return self._allowed_analysis_states

    def getOverride(self):
        """ If the importer must override previously entered results.
            [False, False]: The results will not be overriden
            [True, False]: The results will be overriden only if there's no
                           result entered yet,
            [True, True]: The results will be always overriden, also if the
                          parsed result is empty.
        """
        return self._override

    def getIdSearchCriteria(self):
        """ Returns the search criteria for retrieving analyses.
            Example:
            searchcriteria=['getRequestID', 'getSampleID', 'getClientSampleID']
        """
        return self._idsearch

    def getKeywordsToBeExcluded(self):
        """ Returns an array with the analysis codes/keywords to be excluded
            by the importer. By default, an empty array
        """
        return []

    def process(self):
        """Run the parser and import every parsed result into its matching
        analysis.

        Returns False when parsing failed. Otherwise, for each parsed
        object id: find (or, for calibration tests, create) the target
        analyses, write the results, attach the results file, and finally
        trigger dependent-calculation updates on the processed ARs.
        """
        self._parser.parse()
        parsed = self._parser.resume()
        self._errors = self._parser.errors
        self._warns = self._parser.warns
        self._logs = self._parser.logs
        self._priorizedsearchcriteria = ''
        if parsed == False:
            return False

        # Allowed analysis states
        allowed_ar_states_msg = [t(_(s)) for s in self.getAllowedARStates()]
        allowed_an_states_msg = [t(_(s)) for s in self.getAllowedAnalysisStates()]
        self.log("Allowed Analysis Request states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_ar_states_msg)})
        self.log("Allowed analysis states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_an_states_msg)})

        # Exclude non existing ACODEs
        acodes = []
        ancount = 0
        arprocessed = []
        instprocessed = []
        importedars = {}
        importedinsts = {}
        rawacodes = self._parser.getAnalysisKeywords()
        exclude = self.getKeywordsToBeExcluded()
        for acode in rawacodes:
            if acode in exclude or not acode:
                continue
            service = self.bsc(getKeyword=acode)
            if not service:
                self.warn('Service keyword ${analysis_keyword} not found',
                          mapping={"analysis_keyword": acode})
            else:
                acodes.append(acode)
        if len(acodes) == 0:
            self.err("Service keywords: no matches found")

        searchcriteria = self.getIdSearchCriteria();
        #self.log(_("Search criterias: %s") % (', '.join(searchcriteria)))
        for objid, results in self._parser.getRawResults().iteritems():
            # Allowed more than one result for the same sample and analysis.
            # Needed for calibration tests
            for result in results:
                analyses = self._getZODBAnalyses(objid)
                inst = None
                if len(analyses) == 0 and self.instrument_uid:
                    # No registered analyses found, but maybe we need to
                    # create them first if an instruemnt id has been set in
                    insts = self.bsc(portal_type='Instrument', UID=self.instrument_uid)
                    if len(insts) == 0:
                        # No instrument found
                        self.err("No Analysis Request with '${allowed_ar_states}' "
                                 "states found, And no QC analyses found for ${object_id}",
                                 mapping={"allowed_ar_states": ', '.join(allowed_ar_states_msg),
                                          "object_id": objid})
                        self.err("Instrument not found")
                        continue

                    inst = insts[0].getObject()

                    # Create a new ReferenceAnalysis and link it to the Instrument
                    # Here we have an objid (i.e. R01200012) and
                    # a dict with results (the key is the AS keyword).
                    # How can we create a ReferenceAnalysis if we don't know
                    # which ReferenceSample we might use?
                    # Ok. The objid HAS to be the ReferenceSample code.
                    refsample = self.bc(portal_type='ReferenceSample', id=objid)
                    if refsample and len(refsample) == 1:
                        refsample = refsample[0].getObject()

                    elif refsample and len(refsample) > 1:
                        # More than one reference sample found!
                        self.err(
                            "More than one reference sample found for '${object_id}'",
                            mapping={"object_id": objid})
                        continue

                    else:
                        # No reference sample found!
                        self.err("No Reference Sample found for ${object_id}",
                                 mapping={"object_id": objid})
                        continue

                    # For each acode, create a ReferenceAnalysis and attach it
                    # to the Reference Sample
                    service_uids = []
                    reference_type = 'b' if refsample.getBlank() == True else 'c'
                    services = self.bsc(portal_type='AnalysisService')
                    service_uids = [service.UID for service in services \
                                    if service.getObject().getKeyword() in result.keys()]
                    analyses = inst.addReferences(refsample, service_uids)

                elif len(analyses) == 0:
                    # No analyses found
                    self.err("No Analysis Request with '${allowed_ar_states}' "
                             "states neither QC analyses found for ${object_id}",
                             mapping={
                                 "allowed_ar_states":', '.join(allowed_ar_states_msg),
                                 "object_id": objid})
                    continue

                # Look for timestamp
                capturedate = result.get('DateTime',{}).get('DateTime',None)
                if capturedate:
                    del result['DateTime']
                for acode, values in result.iteritems():
                    if acode not in acodes:
                        # Analysis keyword doesn't exist
                        continue

                    ans = [analysis for analysis in analyses \
                           if analysis.getKeyword() == acode]

                    if len(ans) > 1:
                        self.err("More than one analysis found for ${object_id} and ${analysis_keyword}",
                                 mapping={"object_id": objid,
                                          "analysis_keyword": acode})
                        continue

                    elif len(ans) == 0:
                        self.err("No analyses found for ${object_id} and ${analysis_keyword}",
                                 mapping={"object_id": objid,
                                          "analysis_keyword": acode})
                        continue

                    analysis = ans[0]
                    if capturedate:
                        values['DateTime'] = capturedate
                    processed = self._process_analysis(objid, analysis, values)
                    if processed:
                        ancount += 1
                        if inst:
                            # Calibration Test (import to Instrument)
                            instprocessed.append(inst.UID())
                            importedinst = inst.title in importedinsts.keys() \
                                and importedinsts[inst.title] or []
                            if acode not in importedinst:
                                importedinst.append(acode)
                            importedinsts[inst.title] = importedinst
                        else:
                            ar = analysis.portal_type == 'Analysis' and analysis.aq_parent or None
                            if ar and ar.UID:
                                # Set AR imported info
                                arprocessed.append(ar.UID())
                                importedar = ar.getRequestID() in importedars.keys() \
                                             and importedars[ar.getRequestID()] or []
                                if acode not in importedar:
                                    importedar.append(acode)
                                importedars[ar.getRequestID()] = importedar

                        # Create the AttachmentType for mime type if not exists
                        attuid = None
                        attachmentType = self.bsc(portal_type="AttachmentType",
                                                  title=self._parser.getAttachmentFileType())
                        if len(attachmentType) == 0:
                            try:
                                folder = self.context.bika_setup.bika_attachmenttypes
                                obj = _createObjectByType("AttachmentType", folder, tmpID())
                                obj.edit(title=self._parser.getAttachmentFileType(),
                                         description="Autogenerated file type")
                                obj.unmarkCreationFlag()
                                renameAfterCreation(obj)
                                attuid = obj.UID()
                            except:
                                # NOTE(review): broad except deliberately keeps
                                # the import going when the type can't be made.
                                attuid = None
                                self.err(
                                    "Unable to create the Attachment Type ${mime_type}",
                                    mapping={
                                        "mime_type": self._parser.getFileMimeType()})
                        else:
                            attuid = attachmentType[0].UID

                        if attuid is not None:
                            try:
                                # Attach the file to the Analysis
                                wss = analysis.getBackReferences('WorksheetAnalysis')
                                if wss and len(wss) > 0:
                                    # TODO: See if we can avoid using the WS and
                                    # use the Analysis directly (useful in the
                                    # CalibrationTest case)
                                    ws = wss[0]
                                    attachment = _createObjectByType("Attachment", ws, tmpID())
                                    attachment.edit(
                                        AttachmentFile=self._parser.getInputFile(),
                                        AttachmentType=attuid,
                                        AttachmentKeys='Results, Automatic import')
                                    attachment.reindexObject()
                                    others = analysis.getAttachment()
                                    attachments = []
                                    for other in others:
                                        if other.getAttachmentFile().filename != attachment.getAttachmentFile().filename:
                                            attachments.append(other.UID())
                                    attachments.append(attachment.UID())
                                    analysis.setAttachment(attachments)
                            except:
                                # NOTE(review): attachment failures are
                                # deliberately swallowed so a failed attach
                                # does not abort the whole import.
                                # self.err(_("Unable to attach results file '${file_name}' to AR ${request_id}",
                                #            mapping={"file_name": self._parser.getInputFile().filename,
                                #                     "request_id": ar.getRequestID()}))
                                pass

        # Calculate analysis dependencies
        for aruid in arprocessed:
            ar = self.bc(portal_type='AnalysisRequest',
                         UID=aruid)
            ar = ar[0].getObject()
            analyses = ar.getAnalyses()
            for analysis in analyses:
                analysis = analysis.getObject()
                if analysis.calculateResult(True, True):
                    self.log(
                        "${request_id} calculated result for '${analysis_keyword}': '${analysis_result}'",
                        mapping={"request_id": ar.getRequestID(),
                                 "analysis_keyword": analysis.getKeyword(),
                                 "analysis_result": str(analysis.getResult())}
                    )
        # Not sure if there's any reason why ReferenceAnalyses have not
        # defined the method calculateResult...
        # Needs investigation.
        #for instuid in instprocessed:
        #    inst = self.bsc(portal_type='Instrument',UID=instuid)[0].getObject()
        #    analyses = inst.getAnalyses()
        #    for analysis in analyses:
        #        if (analysis.calculateResult(True, True)):
        #            self.log(_("%s calculated result for '%s': '%s'") %
        #                     (inst.title, analysis.getKeyword(), str(analysis.getResult())))

        # Summary log lines, per imported AR and per instrument.
        for arid, acodes in importedars.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            self.log("${request_id}: ${analysis_keywords} imported sucessfully",
                     mapping={"request_id": arid,
                              "analysis_keywords": acodesmsg})

        for instid, acodes in importedinsts.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            msg = "%s: %s %s" % (instid, ", ".join(acodesmsg), "imported sucessfully")
            self.log(msg)

        if self.instrument_uid:
            self.log(
                "Import finished successfully: ${nr_updated_ars} ARs, "
                "${nr_updated_instruments} Instruments and ${nr_updated_results} "
                "results updated",
                mapping={"nr_updated_ars": str(len(importedars)),
                         "nr_updated_instruments": str(len(importedinsts)),
                         "nr_updated_results": str(ancount)})
        else:
            self.log(
                "Import finished successfully: ${nr_updated_ars} ARs and "
                "${nr_updated_results} results updated",
                mapping={"nr_updated_ars": str(len(importedars)),
                         "nr_updated_results": str(ancount)})

    def _getObjects(self, objid, criteria, states):
        """Resolve *objid* to catalog brains using one concrete search
        criteria key. AR-based criteria ('arid', 'sid', 'csid', 'aruid')
        search bika_catalog; reference/duplicate criteria ('rgid', 'rid',
        'ruid') search bika_analysis_catalog.
        """
        #self.log("Criteria: %s %s") % (criteria, obji))
        obj = []
        if (criteria == 'arid'):
            obj = self.bc(portal_type='AnalysisRequest',
                          getRequestID=objid,
                          review_state=states)
        elif (criteria == 'sid'):
            obj = self.bc(portal_type='AnalysisRequest',
                          getSampleID=objid,
                          review_state=states)
        elif (criteria == 'csid'):
            obj = self.bc(portal_type='AnalysisRequest',
                          getClientSampleID=objid,
                          review_state=states)
        elif (criteria == 'aruid'):
            obj = self.bc(portal_type='AnalysisRequest',
                          UID=objid,
                          review_state=states)
        elif (criteria == 'rgid'):
            obj = self.bac(portal_type=['ReferenceAnalysis',
                                        'DuplicateAnalysis'],
                           getReferenceAnalysesGroupID=objid)
        elif (criteria == 'rid'):
            obj = self.bac(portal_type=['ReferenceAnalysis',
                                        'DuplicateAnalysis'], id=objid)
        elif (criteria == 'ruid'):
            obj = self.bac(portal_type=['ReferenceAnalysis',
                                        'DuplicateAnalysis'], UID=objid)
        if obj and len(obj) > 0:
            # Remember the criteria that matched, to speed up later lookups.
            self._priorizedsearchcriteria = criteria
        return obj

    def _getZODBAnalyses(self, objid):
        """ Searches for analyses from ZODB to be filled with results.
            objid can be either AR ID or Worksheet's Reference Sample IDs.
            It uses the getIdSearchCriteria() for searches
            Only analyses that matches with getAllowedAnalysisStates() will
            be returned. If not a ReferenceAnalysis, getAllowedARStates() is
            also checked.
            Returns empty array if no analyses found
        """
        ars = []
        analyses = []
        # HACK: Use always the full search workflow
        #searchcriteria = self.getIdSearchCriteria()
        searchcriteria = ['getRequestID', 'getSampleID', 'getClientSampleID']
        allowed_ar_states = self.getAllowedARStates()
        allowed_an_states = self.getAllowedAnalysisStates()
        allowed_ar_states_msg = [_(s) for s in allowed_ar_states]
        allowed_an_states_msg = [_(s) for s in allowed_an_states]

        # Acceleration of searches using priorization
        if (self._priorizedsearchcriteria in ['rgid','rid','ruid']):
            # Look from reference analyses
            analyses = self._getZODBAnalysesFromReferenceAnalyses(objid,
                                                                  self._priorizedsearchcriteria)
        if (len(analyses) == 0):
            # Look from ar and derived
            analyses = self._getZODBAnalysesFromAR(objid,
                                                   '',
                                                   searchcriteria,
                                                   allowed_ar_states)

        # Discard analyses that don't match with allowed_an_states
        # NOTE(review): this appends to the list returned by
        # getAllowedAnalysisStates(); since that is self._allowed_analysis_states,
        # repeated calls keep appending "sample_due" — confirm intended.
        allowed_an_states.append("sample_due")
        analyses = [analysis for analysis in analyses \
                    if analysis.portal_type != 'Analysis' \
                    or self.wf.getInfoFor(analysis, 'review_state') \
                    in allowed_an_states]

        if len(analyses) == 0:
            self.err(
                "No analyses '${allowed_analysis_states}' states found for ${object_id}",
                mapping={"allowed_analysis_states": ', '.join(allowed_an_states_msg),
                         "object_id": objid})

        return analyses

    def _getZODBAnalysesFromAR(self, objid, criteria, allowedsearches, arstates):
        """Find the single AR matching *objid* and return its analyses.

        With a truthy *criteria*, only that key is searched (retrying the
        full priority order when it finds nothing). Otherwise the keys
        'arid', 'sid', 'csid', 'aruid' are tried in order, limited to the
        searches enabled in *allowedsearches*. Falls back to the
        reference-analysis searches when no AR matches at all.
        """
        ars = []
        analyses = []
        if criteria:
            ars = self._getObjects(objid, criteria, arstates)
            if not ars or len(ars) == 0:
                return self._getZODBAnalysesFromAR(objid, None,
                                                   allowedsearches, arstates)
        else:
            sortorder = ['arid', 'sid', 'csid', 'aruid'];
            for crit in sortorder:
                if (crit == 'arid' and 'getRequestID' in allowedsearches) \
                    or (crit == 'sid' and 'getSampleID' in allowedsearches) \
                    or (crit == 'csid' and 'getClientSampleID' in allowedsearches) \
                    or (crit == 'aruid' and 'getRequestID' in allowedsearches):
                    ars = self._getObjects(objid, crit, arstates)
                    if ars and len(ars) > 0:
                        break

        if not ars or len(ars) == 0:
            return self._getZODBAnalysesFromReferenceAnalyses(objid, None)

        elif len(ars) > 1:
            self.err("More than one Analysis Request found for ${object_id}",
                     mapping={"object_id": objid})
            return []

        ar = ars[0].getObject()
        analyses = [analysis.getObject() for analysis in ar.getAnalyses()]

        return analyses

    def _getZODBAnalysesFromReferenceAnalyses(self, objid, criteria):
        """Find reference/duplicate analyses for *objid*.

        With a truthy *criteria*, only that key is searched; otherwise
        'rgid', 'rid' and 'ruid' are tried in order and the first
        non-empty match is returned.
        """
        analyses = []
        if criteria:
            refans = self._getObjects(objid, criteria, [])
            if len(refans) == 0:
                return []

            elif criteria == 'rgid':
                return [an.getObject() for an in refans]

            elif len(refans) == 1:
                # The search has been made using the internal identifier
                # from a Reference Analysis (id or uid). That is not usual.
                an = refans[0].getObject()
                wss = an.getBackReferences('WorksheetAnalysis')
                if wss and len(wss) > 0:
                    # A regular QC test (assigned to a Worksheet)
                    return [an,]

                elif an.getInstrument():
                    # An Internal Calibration Test
                    return [an,]

                else:
                    # Oops. This should never happen!
                    # A ReferenceAnalysis must be always assigned to
                    # a Worksheet (Regular QC) or to an Instrument
                    # (Internal Calibration Test)
                    self.err("The Reference Analysis ${object_id} has neither "
                             "instrument nor worksheet assigned",
                             mapping={"object_id":objid})
                    return []
            else:
                # This should never happen!
                # Fetching ReferenceAnalysis for its id or uid should
                # *always* return a unique result
                # NOTE(review): "${obect_id}" below is misspelled while the
                # mapping key is "object_id", so the placeholder will not be
                # interpolated in the message.
                self.err("More than one Reference Analysis found for ${obect_id}",
                         mapping={"object_id": objid})
                return []

        else:
            sortorder = ['rgid', 'rid', 'ruid'];
            for crit in sortorder:
                analyses = self._getZODBAnalysesFromReferenceAnalyses(objid, crit)
                if len(analyses) > 0:
                    return analyses

        return analyses

    def _process_analysis(self, objid, analysis, values):
        """Write the parsed *values* into *analysis*: fill matching interim
        fields, set the default result (honoring the override policy), and
        set the capture date / remarks when present.

        Returns True when a result or at least one interim value was saved.
        """
        resultsaved = False
        acode = analysis.getKeyword()
        defresultkey = values.get("DefaultResult", "")
        capturedate = None
        # Look for timestamp
        if "DateTime" in values.keys():
            try:
                dt = values.get('DateTime')
                capturedate = DateTime(datetime.strptime(dt, '%Y%m%d %H:%M:%S'))
            except:
                # An unparseable timestamp is ignored instead of aborting.
                capturedate = None
                pass
            del values['DateTime']
        interimsout = []
        interims = hasattr(analysis, 'getInterimFields') \
                   and analysis.getInterimFields() or []
        for interim in interims:
            keyword = interim['keyword']
            title = interim['title']
            # Match parsed values to interims by keyword first, then by
            # title. The explicit `== 0` checks keep legitimate zero
            # results that would otherwise be falsy.
            if values.get(keyword, '') or values.get(keyword, '') == 0:
                res = values.get(keyword)
                self.log("${request_id} result for '${analysis_keyword}:${interim_keyword}': '${result}'",
                         mapping={"request_id": objid,
                                  "analysis_keyword": acode,
                                  "interim_keyword": keyword,
                                  "result": str(res)
                                  })
                ninterim = interim
                ninterim['value'] = res
                interimsout.append(ninterim)
                resultsaved = True
            elif values.get(title, '') or values.get(title, '') == 0:
                res = values.get(title)
                self.log("%s/'%s:%s': '%s'"%(objid, acode, title, str(res)))
                ninterim = interim
                ninterim['value'] = res
                interimsout.append(ninterim)
                resultsaved = True
            else:
                interimsout.append(interim)

        if len(interimsout) > 0:
            analysis.setInterimFields(interimsout)

        if resultsaved == False and (values.get(defresultkey, '')
                                     or values.get(defresultkey, '') == 0
                                     or self._override[1] == True):
            # set the result
            res = values.get(defresultkey, '')
            # self.log("${object_id} result for '${analysis_keyword}': '${result}'",
            #          mapping={"obect_id": obid,
            #                   "analysis_keyword": acode,
            #                   "result": str(res)})
            #TODO incorporate to show import detail
            analysis.setResult(res)
            if capturedate:
                analysis.setResultCaptureDate(capturedate)
            resultsaved = True

        elif resultsaved == False:
            self.log("${request_id} result for '${analysis_keyword}': '${result}'",
                     mapping={"request_id": objid,
                              "analysis_keyword": acode,
                              "result":""
                              })

        if (resultsaved or len(interimsout) > 0) \
            and values.get('Remarks', '') \
            and analysis.portal_type == 'Analysis' \
            and (analysis.getRemarks() != '' or self._override[1] == True):
            analysis.setRemarks(values['Remarks'])

        return resultsaved or len(interimsout) > 0
|
from functools import wraps
from flask import request
def authenticate(func):
    """Decorator rejecting requests that carry no JSON body with a 401.

    NOTE(review): placeholder authentication — the only check performed is
    the presence of request.json (see the TODO below).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # TODO STEVE IMPLEMENT AUTH (Auth currently based on request.json existing)
        if not request.json:
            return "Authentication Failed", 401
        return func(*args, **kwargs)
    return wrapper
def validate(obj, *args):
    """Return a tuple of error messages, one per required key missing
    from *obj* (typically a parsed JSON payload).

    Fix: duplicated names are still checked only once, but the errors now
    come back in the order the names were given — the original iterated a
    set, so the message order was nondeterministic.
    """
    seen = set()
    errors = ()
    for required in args:
        if required in seen:
            continue  # each name is reported at most once
        seen.add(required)
        if required not in obj:
            errors += (required + " is required",)
    return errors
|
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
class UI(Frame):
    """Interactive threshold preview: displays *im* with a green overlay
    covering every pixel whose value is >= the slider setting."""

    def __init__(self, master, im, value=128):
        Frame.__init__(self, master)
        self.image = im
        self.value = value

        # Canvas sized to the image, with the image itself as backdrop.
        self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
        self.backdrop = ImageTk.PhotoImage(im)
        self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
        self.canvas.pack()

        # Threshold slider (0..255); overlay is rebuilt on release.
        scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
                      resolution=1, command=self.update_scale, length=256)
        scale.set(value)
        scale.bind("<ButtonRelease-1>", self.redraw)
        scale.pack()

        # uncomment the following line for instant feedback (might
        # be too slow on some platforms)
        # self.redraw()

    def update_scale(self, value):
        self.value = float(value)
        self.redraw()

    def redraw(self, event=None):
        # Build a bilevel ("1"-mode) mask of pixels at or above the
        # current threshold and render it as a green bitmap overlay.
        threshold = self.value
        mask = self.image.point(lambda v: v >= threshold, "1")
        self.overlay = ImageTk.BitmapImage(mask, foreground="green")

        # Swap out the previous overlay on the canvas.
        self.canvas.delete("overlay")
        self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
                                 tags="overlay")
# Script entry: thresholder <imagefile>
if len(sys.argv) != 2:
    print("Usage: thresholder file")
    sys.exit(1)

root = Tk()
im = Image.open(sys.argv[1])

# The thresholding UI operates on grayscale; convert if needed.
if im.mode != "L":
    im = im.convert("L")

UI(root, im).pack()
root.mainloop()
|
from datetime import datetime
import constants
from lib.connectwise_py.connectwise.agreement import Agreement
from lib.connectwise_py.connectwise.contact import Contact
from .connectwise import Connectwise
class Member:
    """A ConnectWise member (staff user).

    Wraps records from the `system/members` endpoint; any extra payload
    keys become instance attributes via ``__init__`` kwargs.
    """

    def __init__(self, identifier, **kwargs):
        """Build a member from an identifier plus arbitrary API fields."""
        self.officeEmail = None  # default; normally overwritten by kwargs
        self.identifier = identifier
        for kwarg in kwargs:
            setattr(self, kwarg, kwargs[kwarg])

    def __repr__(self):
        return "<Member {}>".format(self.identifier)

    @classmethod
    def fetch_active(cls):
        """Return active members (service accounts excluded), by last name."""
        conditions = ['identifier!="APIMember" and identifier!="screenconnect" and identifier!="quosal" and identifier!="labtech"']
        conditions.append('inactiveFlag=false')
        filters = {'orderBy': 'lastName asc'}
        return [cls(**member) for member in Connectwise.submit_request('system/members', conditions, filters)]

    @classmethod
    def fetch_member_by_office_email(cls, officeEmail):
        """Return the first member matching officeEmail (IndexError if none)."""
        conditions = ['officeEmail="{}"'.format(officeEmail)]
        member = Connectwise.submit_request('system/members', conditions)[0]
        return cls(**member)

    @classmethod
    def fetch_member_by_identifier(cls, identifier):
        """Return the first member with this identifier (IndexError if none)."""
        conditions = ['identifier="{}"'.format(identifier)]
        member = Connectwise.submit_request('system/members', conditions)[0]
        return cls(**member)

    @classmethod
    def fetch_all_members(cls):
        """Return all members (active and inactive), service accounts excluded."""
        conditions = ['identifier!="APIMember" and identifier!="screenconnect" and identifier!="quosal" and identifier!="labtech"']
        filters = {'orderBy': 'lastName asc'}
        return [cls(**member) for member in Connectwise.submit_request('system/members', conditions, filters)]

    @classmethod
    def fetch_by_type_name(cls, type_name):
        """
        Return members filtered by type. For example, "Salaried Employee"
        :param type_name: str: member type, e.g. "Salaried Employee"
        :return: list of Members
        """
        # NOTE(review): unlike the other fetchers this passes a plain string
        # instead of a list of conditions -- confirm submit_request accepts both.
        conditions = 'type/name="{}"'.format(type_name)
        filters = {'orderBy': 'lastName asc'}
        return [cls(**member) for member in Connectwise.submit_request('system/members', conditions, filters)]

    def hourly_cost(self, on_date='today'):
        """Hourly cost on the given 'YYYY-MM-DD' date (or 'today').

        Current-FY dates use the live `hourlyCost` attribute; earlier dates
        look up the CONSULTANT_HOURLY_COSTS table by fiscal year.
        """
        if on_date.lower() == 'today' or on_date >= Connectwise.current_fy()[0]:
            return self.hourlyCost
        on_date = datetime.strptime(on_date, '%Y-%m-%d')
        # Fiscal year runs July through June.
        if on_date.month >= 7:
            fy = '{}-{}'.format(on_date.year, on_date.year + 1)
        else:
            fy = '{}-{}'.format(on_date.year - 1, on_date.year)
        return constants.CONSULTANT_HOURLY_COSTS[self.identifier.lower()][fy]

    def daily_cost(self, on_date='today'):
        """Daily cost assuming an 8-hour day, rounded to cents."""
        return round(self.hourly_cost(on_date) * 8, 2)

    def fetch_internal_contact(self):
        """Return the Contact record matching this member's office email."""
        return Contact.fetch_by_email(self.officeEmail)

    def fetch_vacation_agreements(self, agreements=None, contacts=None):
        """Return vacation agreements belonging to this member's contact.

        :param agreements: optional pre-fetched agreements (skips a lookup)
        :param contacts: optional pre-fetched contacts (skips a lookup)
        """
        # Fixed: the originals used mutable [] defaults, which are shared
        # between calls; None sentinels preserve the same behavior safely.
        agreements = [] if agreements is None else agreements
        contacts = [] if contacts is None else contacts
        contact = None
        if len(contacts) > 0:
            contact = [c for c in contacts if self.officeEmail == c.get_email()]
            if len(contact) > 0: contact = contact[0]
        else:
            contact = self.fetch_internal_contact()
        if len(agreements) == 0:
            agreements = Agreement.fetch_vacation_agreements()
        agreements = [a for a in agreements if contact and contact.id == a.contact['id']]
        if contact:
            return [a for a in agreements if a.contact['id'] == contact.id]
        else:
            return []
|
"""Template jinja filters."""
from datetime import datetime
import copy
import hashlib
import json as json_lib
import random
import re
import jinja2
import slugify
from babel import dates as babel_dates
from babel import numbers as babel_numbers
from grow.common import json_encoder
from grow.common import urls
from grow.common import utils
from grow.templates.tags import _gettext_alias
def _deep_gettext(ctx, fields):
if isinstance(fields, dict):
new_dct = {}
for key, val in fields.items():
if isinstance(val, (dict, list, set)):
new_dct[key] = _deep_gettext(ctx, val)
elif isinstance(val, str):
new_dct[key] = _gettext_alias(ctx, val)
else:
new_dct[key] = val
return new_dct
elif isinstance(fields, (list, set)):
for i, val in enumerate(fields):
if isinstance(val, (dict, list, set)):
fields[i] = _deep_gettext(ctx, val)
elif isinstance(val, str):
fields[i] = _gettext_alias(ctx, val)
else:
fields[i] = val
return fields
@jinja2.contextfilter
def deeptrans(ctx, obj):
    """Deep translate an object.

    Returns a translated copy; ``obj`` itself is never modified."""
    # Avoid issues (related to sharing the same object across locales and
    # leaking translations from one locale to another) by copying the object
    # before it's sent to deeptrans.
    new_item = copy.deepcopy(obj)
    return _deep_gettext(ctx, new_item)
@jinja2.contextfilter
def expand_partial(_ctx, partial_name):
    """Filter for expanding partial path from name of partial.

    e.g. 'hero' -> '/partials/hero/hero.html'."""
    return '/partials/{0}/{0}.html'.format(partial_name)
@jinja2.contextfilter
def hash_value(_ctx, value, algorithm='sha'):
    """Hash the value using the algorithm.

    Unrecognized algorithm names (including the default 'sha') fall back
    to sha256."""
    digesters = {
        'md5': hashlib.md5,
        'sha1': hashlib.sha1,
        'sha224': hashlib.sha224,
        'sha384': hashlib.sha384,
        'sha512': hashlib.sha512,
    }
    encoded = value.encode('utf-8')
    digester = digesters.get(algorithm, hashlib.sha256)
    return digester(encoded).hexdigest()
@jinja2.contextfilter
def jsonify(_ctx, obj, *args, **kwargs):
    """Filter for JSON dumping an object.

    Extra args/kwargs are forwarded to json.dumps (e.g. indent=2)."""
    return json_lib.dumps(obj, cls=json_encoder.GrowJSONEncoder, *args, **kwargs)
@jinja2.contextfilter
def markdown_filter(ctx, value):
    """Filters content through the pod's markdown processor."""
    doc = ctx['doc']
    m_down = doc.pod.markdown
    try:
        # Removed a dead `if isinstance(value, str): value = value` no-op
        # (leftover from a Python 2 unicode-coercion path).
        value = value or ''  # treat None/empty as empty markdown input
        return m_down.convert(value)
    except UnicodeEncodeError:
        # NOTE(review): this fallback retries the identical conversion;
        # presumably a Python 2 remnant -- confirm before removing.
        return m_down.convert(value)
@jinja2.contextfilter
def parsedatetime_filter(_ctx, date_string, string_format):
    """Filter for parsing a datetime from a string with a strptime format."""
    return datetime.strptime(date_string, string_format)
@jinja2.contextfilter
def relative_filter(ctx, path):
    """Calculates the relative path from the current url to the given url."""
    # The current document's url is the base the path is made relative to.
    doc = ctx['doc']
    return urls.Url.create_relative_path(
        path, relative_to=doc.url.path)
@jinja2.contextfilter
def render_filter(ctx, template):
    """Creates jinja template from string and renders.

    Non-string inputs are assumed to already be template objects."""
    if isinstance(template, str):
        template = ctx.environment.from_string(template)
    return template.render(ctx)
@jinja2.contextfilter
def shuffle_filter(_ctx, seq):
    """Shuffles the list into a random order.

    Returns a shuffled copy; non-iterable input is returned unchanged."""
    try:
        result = list(seq)
        random.shuffle(result)
        return result
    except TypeError:
        # seq was not iterable; pass it through untouched.
        return seq
def regex_replace():
    """Build a template regex-replace filter backed by a per-filter cache
    of compiled patterns."""
    compiled = {}

    def regex_replace_filter(string, find, replace):
        """A template regex filter"""
        pattern = compiled.get(find)
        if pattern is None:
            # Compile once per distinct pattern and reuse.
            pattern = re.compile(find)
            compiled[find] = pattern
        return pattern.sub(replace, string)
    return regex_replace_filter
def slug_filter(pod=None):
    """Filters string to remove url unfriendly characters.

    The legacy slugify implementation is kept behind the pod feature flag
    for backwards-compatible urls."""
    use_legacy_slugify = pod and pod.is_enabled(pod.FEATURE_OLD_SLUGIFY)

    def _slug_filter(value, delimiter='-'):
        # Falsy values (None, '') pass through untouched.
        if not value:
            return value
        if use_legacy_slugify:
            return utils.slugify(value, delimiter)
        return slugify.slugify(value, separator=delimiter)
    return _slug_filter
def wrap_locale_context(func):
    """Wraps the func with the current locale.

    The wrapped babel formatter receives the current doc's locale as the
    ``locale`` kwarg unless the caller supplies one explicitly."""
    @jinja2.contextfilter
    def _locale_filter(ctx, value, *args, **kwargs):
        doc = ctx['doc']
        if not kwargs.get('locale', None):
            kwargs['locale'] = str(doc.locale)
        return func(value, *args, **kwargs)
    return _locale_filter
def create_builtin_filters(env, pod=None, locale=None):
    """Filters standard for the template rendering.

    Returns (name, filter) pairs to be registered on the environment."""
    # NOTE(review): `env` and `locale` are accepted but never used in this
    # body -- presumably kept for call-site compatibility; confirm.
    return (
        ('currency', wrap_locale_context(babel_numbers.format_currency)),
        ('date', wrap_locale_context(babel_dates.format_date)),
        ('datetime', wrap_locale_context(babel_dates.format_datetime)),
        ('decimal', wrap_locale_context(babel_numbers.format_decimal)),
        ('deeptrans', deeptrans),
        ('expand_partial', expand_partial),
        ('hash', hash_value),
        ('jsonify', jsonify),
        ('markdown', markdown_filter),
        ('number', wrap_locale_context(babel_numbers.format_number)),
        ('percent', wrap_locale_context(babel_numbers.format_percent)),
        ('relative', relative_filter),
        ('re_replace', regex_replace()),
        ('render', render_filter),
        ('shuffle', shuffle_filter),
        ('slug', slug_filter(pod=pod)),
        ('time', wrap_locale_context(babel_dates.format_time)),
    )
|
import os
# Read once at import time; set ACHOO_DEBUG=local for local development.
ENV = os.environ.get('ACHOO_DEBUG')


class Config(object):
    """Application configuration flags."""
    MSG = '(beta)'
    VERSION = '0.1'
    # The comparison already yields a bool; `True if ... else False` was redundant.
    DEBUG = ENV == 'local'
|
import os, os.path as op
import glob
from nipype.utils.filemanip import split_filename
# Collect the volumes produced by an earlier "maths" preprocessing step.
list_to_reg = glob.glob("p0*_maths.nii")
for in_file in list_to_reg:
    path, name, ext = split_filename(in_file)
    # Write the result to the cwd with an `_rl` suffix.
    outfile = op.abspath(name + '_rl' + ext)
    # Resample each volume to a 200x200x200 voxel grid with FreeSurfer's
    # mri_convert (must be on PATH).
    blah = "mri_convert %s --out_i_count 200 --out_j_count 200 --out_k_count 200 %s" % (in_file, outfile)
    print(blah)
    os.system(blah)
|
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
    """Munin plugin graphing the number of stored classifiers by kind."""

    @property
    def graph_config(self):
        # Static munin graph definition: category, title, axis and one
        # series label per classifier collection.
        return {
            'graph_category' : 'PyTune',
            'graph_title' : 'PyTune Classifiers',
            'graph_vlabel' : '# of classifiers',
            'graph_args' : '-l 0',
            'feeds.label': 'feeds',
            'authors.label': 'authors',
            'tags.label': 'tags',
            'titles.label': 'titles',
        }

    def calculate_metrics(self):
        # Imported here rather than at module level; presumably to defer
        # Django/Mongo setup until metrics are actually requested -- confirm.
        from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
        return {
            'feeds': MClassifierFeed.objects.count(),
            'authors': MClassifierAuthor.objects.count(),
            'tags': MClassifierTag.objects.count(),
            'titles': MClassifierTitle.objects.count(),
        }

if __name__ == '__main__':
    NBMuninGraph().run()
|
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
# Allow templates to be written in Jade (via pyjade's jinja extension).
app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')


@app.route('/')
def home():
    """Landing page."""
    return render_template('home.jade')


@app.route('/a')
def extra():
    """Parallax demo page."""
    return render_template('parallax.jade')


if __name__ == '__main__':
    # Debug server: local development only.
    app.run(debug=True)
|
# Auto-generated console-script shim: pin the exact distribution before
# pkg_resources resolves the `skivi` entry point.
__requires__ = 'scikit-image==0.11.3'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Run the entry point and propagate its return value as the exit status.
    sys.exit(
        load_entry_point('scikit-image==0.11.3', 'console_scripts', 'skivi')()
    )
|
"""Placeholder module, that's where the smart things happen."""
import logging
import os
import time
import re
from django import forms
from django.core.mail import send_mail
from django import template
from django.template import TemplateSyntaxError
from django.core.files.storage import default_storage
from django.forms import Textarea, ImageField, CharField, FileField
from django.forms import TextInput
from django.conf import settings as global_settings
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.utils.text import unescape_string_literal
from django.template.loader import render_to_string
from sitesngine.pages.widgets_registry import get_widget
from sitesngine.pages import settings
from sitesngine.pages.models import Content
from sitesngine.pages.widgets import ImageInput, VideoWidget, FileInput
__author__ = 'fearless' # "from birth till death"
PLACEHOLDER_ERROR = _("[Placeholder %(name)s had syntax error: %(error)s]")
logger = logging.getLogger(__name__)
def parse_placeholder(parser, token):
    """Parse the `PlaceholderNode` parameters.

    Return a tuple with the name and parameters."""
    bits = token.split_contents()
    count = len(bits)
    error_string = '%r tag requires at least one argument' % bits[0]
    if count <= 1:
        raise TemplateSyntaxError(error_string)
    try:
        # Placeholder names may be quoted string literals; unquote them.
        name = unescape_string_literal(bits[1])
    except ValueError:
        name = bits[1]
    remaining = bits[2:]
    params = {}
    # Flags that stand alone vs. options that consume the following token.
    simple_options = ['parsed', 'inherited', 'untranslated']
    param_options = ['as', 'on', 'with']
    all_options = simple_options + param_options
    while remaining:
        bit = remaining[0]
        if bit not in all_options:
            raise TemplateSyntaxError(
                "%r is not an correct option for a placeholder" % bit)
        if bit in param_options:
            if len(remaining) < 2:
                raise TemplateSyntaxError(
                    "Placeholder option '%s' need a parameter" % bit)
            if bit == 'as':
                params['as_varname'] = remaining[1]
            if bit == 'with':
                params['widget'] = remaining[1]
            if bit == 'on':
                params['page'] = remaining[1]
            remaining = remaining[2:]
        elif bit == 'parsed':
            params['parsed'] = True
            remaining = remaining[1:]
        elif bit == 'inherited':
            params['inherited'] = True
            remaining = remaining[1:]
        elif bit == 'untranslated':
            params['untranslated'] = True
            remaining = remaining[1:]
    return name, params
class PlaceholderNode(template.Node):
    """This template node is used to output and save page content and
    dynamically generate input fields in the admin.

    NOTE(review): this module uses Python 2 syntax (`unicode`,
    `except X, e`) and will not import on Python 3.

    :param name: the name of the placeholder you want to show/create
    :param page: the optional page object
    :param widget: the widget you want to use in the admin interface. Take
        a look into :mod:`pages.widgets` to see which widgets
        are available.
    :param parsed: if the ``parsed`` word is given, the content of the
        placeholder is evaluated as template code, within the current
        context.
    :param as_varname: if ``as_varname`` is defined, no value will be
        returned. A variable will be created in the context
        with the defined name.
    :param inherited: inherit content from parent's pages.
    :param untranslated: the placeholder's content is the same for
        every language.
    """
    field = CharField   # admin form field class used for this placeholder
    widget = TextInput  # default admin widget; overridable per instance

    def __init__(self, name, page=None, widget=None, parsed=False,
            as_varname=None, inherited=False, untranslated=False, has_revision=True):
        """Gather parameters for the `PlaceholderNode`.

        These values should be thread safe and don't change between calls."""
        # NOTE(review): `has_revision` is accepted but never stored or used.
        self.page = page or 'current_page'  # context variable holding the page
        self.name = name
        if widget:
            self.widget = widget
        self.parsed = parsed
        self.inherited = inherited
        self.untranslated = untranslated
        self.as_varname = as_varname
        self.found_in_block = None

    def get_widget(self, page, language, fallback=Textarea):
        """Given the name of a placeholder return a `Widget` subclass
        like Textarea or TextInput."""
        # Widget may be given as a registered name (str/unicode) or a class.
        is_str = type(self.widget) == type(str())
        is_unicode = type(self.widget) == type(unicode())
        if is_str or is_unicode:
            widget = get_widget(self.widget)
        else:
            widget = self.widget
        try:
            # Prefer widgets whose constructor accepts page/language context.
            return widget(page=page, language=language)
        except:
            pass
        return widget()

    def get_extra_data(self, data):
        """Get eventual extra data for this placeholder from the
        admin form. This method is called when the Page is
        saved in the admin and passed to the placeholder save
        method."""
        # Strip the "<placeholder-name>-" prefix off matching form keys.
        result = {}
        for key in data.keys():
            if key.startswith(self.name + '-'):
                new_key = key.replace(self.name + '-', '')
                result[new_key] = data[key]
        return result

    def get_field(self, page, language, initial=None):
        """The field that will be shown within the admin."""
        if self.parsed:
            help_text = _('Note: This field is evaluated as template code.')
        else:
            help_text = ''
        widget = self.get_widget(page, language)
        return self.field(widget=widget, initial=initial,
            help_text=help_text, required=False)

    def save(self, page, language, data, change, extra_data=None):
        """Actually save the placeholder data into the Content object."""
        # if this placeholder is untranslated, we save everything
        # in the default language
        if self.untranslated:
            language = settings.SITESNGINE_PAGE_DEFAULT_LANGUAGE
        # the page is being changed
        if change:
            # we need create a new content if revision is enabled
            if (settings.SITESNGINE_PAGE_CONTENT_REVISION and self.name
                    not in settings.SITESNGINE_PAGE_CONTENT_REVISION_EXCLUDE_LIST):
                Content.objects.create_content_if_changed(
                    page,
                    language,
                    self.name,
                    data
                )
            else:
                Content.objects.set_or_create_content(
                    page,
                    language,
                    self.name,
                    data
                )
        # the page is being added
        else:
            Content.objects.set_or_create_content(
                page,
                language,
                self.name,
                data
            )

    def get_content(self, page_obj, lang, lang_fallback=True):
        """Return this placeholder's content for `page_obj`, walking up
        ancestor pages when `inherited` is set and the page has none."""
        if self.untranslated:
            lang = settings.SITESNGINE_PAGE_DEFAULT_LANGUAGE
            lang_fallback = False
        content = Content.objects.get_content(page_obj, lang, self.name,
            lang_fallback)
        if self.inherited and not content:
            for ancestor in page_obj.get_ancestors():
                content = Content.objects.get_content(ancestor, lang,
                    self.name, lang_fallback)
                if content:
                    break
        return content

    def get_content_from_context(self, context):
        """Resolve the page from the template context and return this
        placeholder's content for the active language ('' if unavailable)."""
        if not self.page in context:
            return ''
        # current_page can be set to None
        if not context[self.page]:
            return ''
        if self.untranslated:
            lang_fallback = False
            lang = settings.SITESNGINE_PAGE_DEFAULT_LANGUAGE
        else:
            lang_fallback = True
            lang = context.get('lang', settings.SITESNGINE_PAGE_DEFAULT_LANGUAGE)
        return self.get_content(context[self.page], lang, lang_fallback)

    def get_render_content(self, context):
        """Content as rendered output; marked safe for template inclusion."""
        return mark_safe(self.get_content_from_context(context))

    def render(self, context):
        """Output the content of the `PlaceholdeNode` in the template."""
        content = self.get_render_content(context)
        if not content:
            return ''
        if self.parsed:
            try:
                # Evaluate the stored content itself as template code.
                t = template.Template(content, name=self.name)
                content = mark_safe(t.render(context))
            except TemplateSyntaxError, error:
                # Show the error inline in DEBUG; hide it in production.
                if global_settings.DEBUG:
                    content = PLACEHOLDER_ERROR % {
                        'name': self.name,
                        'error': error,
                    }
                else:
                    content = ''
        # With `as varname`, stash the content in the context instead of
        # emitting it.
        if self.as_varname is None:
            return content
        context[self.as_varname] = content
        return ''

    def __repr__(self):
        return "<Placeholder Node: %s>" % self.name
def get_filename(page, placeholder, data):
    """Build a per-page upload path for placeholder media.

    The time.time() component keeps successive uploads of the same file
    from clobbering each other."""
    filename = os.path.join(
        settings.SITESNGINE_PAGE_UPLOAD_ROOT,
        'page_' + str(page.id),
        placeholder.name + '-' + str(time.time()) + '-' + str(data)
    )
    return filename
class ImagePlaceholderNode(PlaceholderNode):
    """A `PlaceholderNode` that saves one image on disk.

    `PAGE_UPLOAD_ROOT` setting define where to save the image.
    """

    def get_field(self, page, language, initial=None):
        """Admin field: an ImageField with a preview-capable widget."""
        help_text = ""
        widget = ImageInput(page, language)
        return ImageField(
            widget=widget,
            initial=initial,
            help_text=help_text,
            required=False
        )

    def save(self, page, language, data, change, extra_data=None):
        """Store the uploaded image and persist its filename as content."""
        # A 'delete' key in the extra admin data clears the stored filename.
        if 'delete' in extra_data:
            return super(ImagePlaceholderNode, self).save(
                page,
                language,
                "",
                change
            )
        filename = ''
        if change and data:
            # the image URL is posted if not changed
            # (Python 2: unchanged values arrive as `unicode` URLs, new
            # uploads as file objects)
            if type(data) is unicode:
                return
            filename = get_filename(page, self, data)
            filename = default_storage.save(filename, data)
        return super(ImagePlaceholderNode, self).save(
            page,
            language,
            filename,
            change
        )
class FilePlaceholderNode(PlaceholderNode):
    """A `PlaceholderNode` that saves one file on disk.

    `PAGE_UPLOAD_ROOT` setting define where to save the file.
    """

    def get_field(self, page, language, initial=None):
        """Admin field: a FileField with the pages file widget."""
        help_text = ""
        widget = FileInput(page, language)
        return FileField(
            widget=widget,
            initial=initial,
            help_text=help_text,
            required=False
        )

    def save(self, page, language, data, change, extra_data=None):
        """Store the uploaded file and persist its filename as content."""
        # A 'delete' key in the extra admin data clears the stored filename.
        if 'delete' in extra_data:
            return super(FilePlaceholderNode, self).save(
                page,
                language,
                "",
                change
            )
        filename = ''
        if change and data:
            # the image URL is posted if not changed
            # (Python 2: unchanged values arrive as `unicode` URLs)
            if type(data) is unicode:
                return
            filename = get_filename(page, self, data)
            filename = default_storage.save(filename, data)
        return super(FilePlaceholderNode, self).save(
            page,
            language,
            filename,
            change
        )
class ContactForm(forms.Form):
    """Simple contact form rendered by `ContactPlaceholderNode`."""
    email = forms.EmailField(label=_('Your email'))
    subject = forms.CharField(label=_('Subject'),
        max_length=150)
    message = forms.CharField(widget=forms.Textarea(),
        label=_('Your message'))
class ContactPlaceholderNode(PlaceholderNode):
    """A contact `PlaceholderNode` example."""

    def render(self, context):
        """Render (or process, on POST) a contact form mailing site admins."""
        # NOTE(review): `content` is fetched but never used below.
        content = self.get_content_from_context(context)
        request = context.get('request', None)
        if not request:
            raise ValueError('request no available in the context.')
        if request.method == 'POST':
            form = ContactForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                # Mail goes to every address in settings.ADMINS.
                recipients = [adm[1] for adm in global_settings.ADMINS]
                try:
                    send_mail(data['subject'], data['message'],
                        data['email'], recipients, fail_silently=False)
                    return _("Your email has been sent. Thank you.")
                except:
                    # NOTE(review): bare except hides SMTP/config errors, and
                    # the message text has a typo ("as occured") -- but it is
                    # a translated string, so fixing it affects translations.
                    return _("An error as occured: your email has not been sent.")
        else:
            form = ContactForm()
        renderer = render_to_string('sitesngine/pages/contact.html', {'form': form})
        return mark_safe(renderer)
class VideoPlaceholderNode(PlaceholderNode):
    """A youtube `PlaceholderNode`, just here as an example."""
    widget = VideoWidget

    def render(self, context):
        """Render the stored video reference through the embed template."""
        content = self.get_content_from_context(context)
        if not content:
            return ''
        if content:
            # Stored format: "url\width\height" (backslash separated).
            video_url, w, h = content.split('\\')
            m = re.search('youtube\.com\/watch\?v=([^&]+)', content)
            if m:
                # Convert a watch?v= URL into the embeddable /v/ form.
                video_url = 'http://www.youtube.com/v/' + m.group(1)
            # Fall back to a default player size when unspecified.
            if not w:
                w = 425
            if not h:
                h = 344
            context = {'video_url': video_url, 'w': w, 'h': h}
            renderer = render_to_string('sitesngine/pages/embed.html', context)
            return mark_safe(renderer)
        return ''
class JsonPlaceholderNode(PlaceholderNode):
    """A `PlaceholderNode` whose stored content is JSON.

    Renders the decoded object; falls back to the raw content when the
    stored value cannot be parsed.
    """

    def get_render_content(self, context):
        import json
        content = self.get_content_from_context(context)
        try:
            return json.loads(str(content))
        except ValueError:
            # json.loads raises ValueError on malformed input (JSONDecodeError
            # subclasses it); the previous bare `except:` hid everything and
            # the log line did not say which placeholder failed.
            logger.error("Problem decoding json in placeholder %r", self.name)
            return content
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os
def dump_sibling_file(file_name):
    """Return the text of *file_name*, resolved next to this script."""
    sibling_path = os.path.join(os.path.dirname(__file__), file_name)
    with open(sibling_path, 'r') as handle:
        return handle.read()
# Package metadata for the `subdue` distribution; the long description is
# read from the sibling README.rst at build time.
setup(
    name = 'subdue',
    packages = ['subdue',
                'subdue.core',
                'subdue.core.color',
                'subdue.sub',
               ],
    version = '0.1.0',
    description = 'A framework to create commands with subcommands',
    author='Jacobo de Vera',
    author_email='devel@jacobodevera.com',
    url='https://www.github.com/jdevera/subdue',
    license='MIT',
    # The installed `subdue` launcher script.
    scripts=['scripts/subdue'],
    classifiers=[
        'Programming Language :: Python',
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Topic :: Utilities'
    ],
    long_description=dump_sibling_file('README.rst'),
    test_suite='test'
)
|
import logging
import coreapi
from django.http.response import FileResponse
from django.http.response import Http404
from rest_framework import mixins
from rest_framework import renderers
from rest_framework import viewsets
from rest_framework.filters import BaseFilterBackend
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from . import drf_helpers
from . import models
from . import serializers
from . import view_helpers
log = logging.getLogger(__name__)
class InfrastructureView(APIView):
    """
    List kinds in infrastructures.
    """
    def get(self, request, content_format=None):
        # We only support cloud infrastructures for the time being
        response = {'url': request.build_absolute_uri('clouds')}
        return Response(response)
class CloudViewSet(viewsets.ModelViewSet):
    """
    API endpoint to view and or edit cloud infrastructure info.
    """
    queryset = models.Cloud.objects.all()
    # Polymorphic: serialization varies with the concrete Cloud subclass.
    serializer_class = serializers.CloudPolymorphicSerializer
class CloudRegionViewSet(viewsets.ModelViewSet):
    """
    API endpoint to view and or edit cloud regions
    """
    queryset = models.Region.objects.all()
    serializer_class = serializers.CloudRegionPolymorphicSerializer

    def get_queryset(self):
        # Scope regions to the parent cloud from the nested route.
        return models.Region.objects.filter(cloud=self.kwargs['cloud_pk'])

    def get_object(self):
        # Lookup is by the natural `region_id`, not the DB primary key.
        return models.Region.objects.get(cloud=self.kwargs['cloud_pk'],
                                         region_id=self.kwargs["pk"])
class CloudZoneViewSet(viewsets.ModelViewSet):
    """
    API endpoint to view and or edit cloud zones
    """
    queryset = models.Zone.objects.all()
    serializer_class = serializers.CloudZoneSerializer

    def get_queryset(self):
        # Scope zones to the cloud/region pair from the nested route.
        return models.Zone.objects.filter(region__cloud=self.kwargs['cloud_pk'],
                                          region__region_id=self.kwargs['region_pk'])

    def get_object(self):
        # Lookup is by the natural `zone_id`, not the DB primary key.
        return models.Zone.objects.get(region__cloud=self.kwargs['cloud_pk'],
                                       region__region_id=self.kwargs['region_pk'],
                                       zone_id=self.kwargs["pk"])
class ComputeViewSet(drf_helpers.CustomReadOnlySingleViewSet):
    """
    List compute related urls.
    """
    # Index resource only: serializes links to the compute sub-endpoints.
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.ComputeSerializer
class ComputeRegionViewSet(drf_helpers.CustomReadOnlyModelViewSet):
    """
    List regions in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.ComputeRegionSerializer

    def list_objects(self):
        # Live query against the provider connection for this request.
        provider = view_helpers.get_cloud_provider(self)
        return provider.compute.regions.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        obj = provider.compute.regions.get(self.kwargs["pk"])
        return obj
class MachineImageViewSet(drf_helpers.CustomModelViewSet):
    """
    List machine images in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.MachineImageSerializer

    def list_objects(self):
        # Live query against the provider connection for this request.
        provider = view_helpers.get_cloud_provider(self)
        return provider.compute.images.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        obj = provider.compute.images.get(self.kwargs["pk"])
        return obj
class ComputeZoneViewSet(drf_helpers.CustomReadOnlyModelViewSet):
    """
    List zones in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.ComputeZoneSerializer

    def list_objects(self):
        provider = view_helpers.get_cloud_provider(self)
        region_pk = self.kwargs.get("compute_region_pk")
        region = provider.compute.regions.get(region_pk)
        # 404 if the parent region does not exist at the provider.
        if region:
            return region.zones
        else:
            raise Http404

    def get_object(self):
        # Linear scan of the region's zones; zone ids are the lookup key.
        return next((s for s in self.list_objects()
                     if s.id == self.kwargs["pk"]), None)
class CloudConnectionTestViewSet(mixins.CreateModelMixin,
                                 viewsets.GenericViewSet):
    """
    Authenticates given credentials against a provider
    """
    # POST-only endpoint; the serializer performs the connection test.
    serializer_class = serializers.CloudConnectionAuthSerializer
class SecurityViewSet(drf_helpers.CustomReadOnlySingleViewSet):
    """
    List security related urls.
    """
    # Index resource only: serializes links to the security sub-endpoints.
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.SecuritySerializer
class KeyPairViewSet(drf_helpers.CustomModelViewSet):
    """
    List key pairs in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.KeyPairSerializer

    def list_objects(self):
        # Live query against the provider connection for this request.
        provider = view_helpers.get_cloud_provider(self)
        return provider.security.key_pairs.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        obj = provider.security.key_pairs.get(self.kwargs["pk"])
        return obj
class VMFirewallViewSet(drf_helpers.CustomModelViewSet):
    """
    List VM firewalls in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.VMFirewallSerializer

    def list_objects(self):
        # Live query against the provider connection for this request.
        provider = view_helpers.get_cloud_provider(self)
        return provider.security.vm_firewalls.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        obj = provider.security.vm_firewalls.get(self.kwargs["pk"])
        return obj
class VMFirewallRuleViewSet(drf_helpers.CustomModelViewSet):
    """
    List VM firewall rules in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.VMFirewallRuleSerializer

    def list_objects(self):
        provider = view_helpers.get_cloud_provider(self)
        vmf_pk = self.kwargs.get("vm_firewall_pk")
        vmf = provider.security.vm_firewalls.get(vmf_pk)
        # 404 when the parent firewall does not exist.
        if vmf:
            return vmf.rules.list()
        else:
            raise Http404

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        vmf_pk = self.kwargs.get("vm_firewall_pk")
        vmf = provider.security.vm_firewalls.get(vmf_pk)
        if not vmf:
            raise Http404
        else:
            # No direct rule lookup on the provider API; scan the rule list.
            pk = self.kwargs.get("pk")
            for rule in vmf.rules.list():
                if rule.id == pk:
                    return rule
            raise Http404
class NetworkingViewSet(drf_helpers.CustomReadOnlySingleViewSet):
    """
    List networking related urls.
    """
    # Index resource only: serializes links to the networking sub-endpoints.
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.NetworkingSerializer
class NetworkViewSet(drf_helpers.CustomModelViewSet):
    """
    List networks in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.NetworkSerializer

    def list_objects(self):
        # Live query against the provider connection for this request.
        provider = view_helpers.get_cloud_provider(self)
        return provider.networking.networks.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        obj = provider.networking.networks.get(self.kwargs["pk"])
        return obj
class SubnetViewSet(drf_helpers.CustomModelViewSet):
    """
    List subnets in a given cloud network.
    """
    permission_classes = (IsAuthenticated,)

    def list_objects(self):
        # Subnets are scoped to the parent network from the nested route.
        provider = view_helpers.get_cloud_provider(self)
        return provider.networking.subnets.list(
            network=self.kwargs["network_pk"])

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        return provider.networking.subnets.get(self.kwargs["pk"])

    def get_serializer_class(self):
        # Updates use a dedicated serializer with a restricted field set.
        if self.request.method == 'PUT':
            return serializers.SubnetSerializerUpdate
        return serializers.SubnetSerializer
class GatewayViewSet(drf_helpers.CustomModelViewSet):
    """
    List internet gateways in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.GatewaySerializer

    def list_objects(self):
        provider = view_helpers.get_cloud_provider(self)
        net = provider.networking.networks.get(self.kwargs['network_pk'])
        return net.gateways.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        net = provider.networking.networks.get(self.kwargs['network_pk'])
        # NOTE: get_or_create -- retrieving the detail endpoint will create
        # a gateway on the network if none exists yet.
        obj = net.gateways.get_or_create()
        return obj
class RouterViewSet(drf_helpers.CustomModelViewSet):
    """
    List routers in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.RouterSerializer

    def list_objects(self):
        # Live query against the provider connection for this request.
        provider = view_helpers.get_cloud_provider(self)
        return provider.networking.routers.list()

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        obj = provider.networking.routers.get(self.kwargs["pk"])
        return obj
class FloatingIPViewSet(drf_helpers.CustomModelViewSet):
    """
    List user's floating IP addresses.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.FloatingIPSerializer

    def list_objects(self):
        # Only unassigned addresses are returned (in-use IPs are skipped);
        # results are plain dicts rather than provider objects.
        provider = view_helpers.get_cloud_provider(self)
        ips = []
        net = provider.networking.networks.get(self.kwargs['network_pk'])
        gateway = net.gateways.get_or_create()
        for ip in gateway.floating_ips.list():
            if not ip.in_use:
                ips.append({'id': ip.id, 'ip': ip.public_ip,
                            'state': ip.state})
        return ips
class LargeResultsSetPagination(PageNumberPagination):
    """Modify aspects of the pagination style, primarily page size."""
    page_size = 500                      # default page size
    page_size_query_param = 'page_size'  # client override, capped below
    max_page_size = 1000
class SimpleFilterBackend(BaseFilterBackend):
    """In-memory VM-type filter on min_vcpus, min_ram and name prefixes."""

    def get_schema_fields(self, view):
        # Advertise the supported query parameters in the API schema.
        return [
            coreapi.Field(
                name='min_vcpus',
                location='query',
                required=False,
                type='float'),
            coreapi.Field(
                name='min_ram',
                location='query',
                required=False,
                type='float'),
            coreapi.Field(
                name='vm_type_prefix',
                location='query',
                required=False,
                type='string'),
        ]

    def filter_queryset(self, request, queryset, view):
        min_vcpus = float(request.query_params.get('min_vcpus', 0))
        min_ram = float(request.query_params.get('min_ram', 0))
        # vm_type_prefix may be a comma-separated list of prefixes;
        # an empty value matches every name (prefix "").
        vm_type_prefix = request.query_params.get('vm_type_prefix', "")
        if vm_type_prefix:
            prefix_list = vm_type_prefix.split(",")
        else:
            prefix_list = [""]
        return [vm_type for vm_type in queryset
                if vm_type.name.startswith(tuple(prefix_list)) and
                vm_type.vcpus >= min_vcpus and
                vm_type.ram >= min_ram]
class VMTypeViewSet(drf_helpers.CustomReadOnlyModelViewSet):
    """List compute VM types in a given cloud."""
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.VMTypeSerializer
    pagination_class = LargeResultsSetPagination
    filter_backends = (SimpleFilterBackend,)
    # VM type ids may contain dots etc.; accept anything but a slash.
    lookup_value_regex = '[^/]+'

    def list_objects(self):
        provider = view_helpers.get_cloud_provider(self)
        try:
            return provider.compute.vm_types.list(limit=500)
        except Exception as exc:
            # Degrade to an empty listing rather than a 500 on provider errors.
            log.error("Exception listing vm types: %s", exc)
            return []

    def get_object(self):
        provider = view_helpers.get_cloud_provider(self)
        return provider.compute.vm_types.get(self.kwargs.get('pk'))
class InstanceViewSet(drf_helpers.CustomModelViewSet):
    """
    List compute instances in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.InstanceSerializer

    def list_objects(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.compute.instances.list()

    def get_object(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.compute.instances.get(self.kwargs["pk"])

    def perform_destroy(self, instance):
        # DRF hook: deletion is delegated to the provider object itself.
        instance.delete()
class StorageViewSet(drf_helpers.CustomReadOnlySingleViewSet):
    """
    List storage urls.
    """
    permission_classes = (IsAuthenticated,)
    # Read-only single-object viewset; the serializer renders the sub-urls.
    serializer_class = serializers.StorageSerializer
class VolumeViewSet(drf_helpers.CustomModelViewSet):
    """
    List volumes in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.VolumeSerializer

    def list_objects(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.storage.volumes.list()

    def get_object(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.storage.volumes.get(self.kwargs["pk"])
class SnapshotViewSet(drf_helpers.CustomModelViewSet):
    """
    List snapshots in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.SnapshotSerializer

    def list_objects(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.storage.snapshots.list()

    def get_object(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.storage.snapshots.get(self.kwargs["pk"])
class ObjectStoreViewSet(drf_helpers.CustomReadOnlySingleViewSet):
    """
    List object store related urls.
    """
    permission_classes = (IsAuthenticated,)
    # NOTE(review): the original docstring said "compute related urls" --
    # apparently a copy-paste from another viewset; this one serves storage.
    serializer_class = serializers.StorageSerializer
class BucketViewSet(drf_helpers.CustomModelViewSet):
    """
    List buckets in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.BucketSerializer

    def list_objects(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.storage.buckets.list()

    def get_object(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.storage.buckets.get(self.kwargs["pk"])
class BucketObjectBinaryRenderer(renderers.BaseRenderer):
    # Renderer that passes bucket object content through untouched as an
    # octet-stream instead of serializing it.
    media_type = 'application/octet-stream'
    format = 'binary'
    charset = None
    render_style = 'binary'
    def render(self, data, media_type=None, renderer_context=None):
        # Data is already raw content; return it verbatim.
        return data
class BucketObjectViewSet(drf_helpers.CustomModelViewSet):
    """
    List objects in a given cloud bucket.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.BucketObjectSerializer
    # Capture everything as a single value
    lookup_value_regex = '.*'
    renderer_classes = drf_helpers.CustomModelViewSet.renderer_classes + \
        [BucketObjectBinaryRenderer]

    def _get_bucket(self):
        """Resolve the bucket named in the URL, raising Http404 when absent.

        Shared by list_objects/get_object, which previously duplicated
        this resolution logic.
        """
        provider = view_helpers.get_cloud_provider(self)
        bucket = provider.storage.buckets.get(self.kwargs.get("bucket_pk"))
        if not bucket:
            raise Http404
        return bucket

    def list_objects(self):
        return self._get_bucket().objects.list()

    def retrieve(self, request, *args, **kwargs):
        """Return object metadata, or stream raw content when ?format=binary."""
        bucket_object = self.get_object()
        content_format = request.query_params.get('format')
        # TODO: This is a bit ugly, since ideally, only the renderer
        # should be aware of the format
        if content_format == "binary":
            response = FileResponse(
                streaming_content=bucket_object.iter_content(),
                content_type='application/octet-stream')
            response['Content-Disposition'] = ('attachment; filename="%s"'
                                               % bucket_object.name)
            return response
        serializer = self.get_serializer(bucket_object)
        return Response(serializer.data)

    def get_object(self):
        return self._get_bucket().objects.get(self.kwargs["pk"])
class CredentialsViewSet(viewsets.ModelViewSet):
    """Manage the stored cloud credentials of the current user."""
    queryset = models.Credentials.objects.all()
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.CredentialsPolymorphicSerializer

    def get_queryset(self):
        # Only ever expose credentials belonging to the requesting user.
        profile = getattr(self.request.user, 'userprofile', None)
        if profile is None:
            return models.Credentials.objects.none()
        return profile.credentials.all()

    def perform_create(self, serializer):
        if not hasattr(self.request.user, 'userprofile'):
            # Create a user profile if it does not exist
            models.UserProfile.objects.create(user=self.request.user)
        serializer.save(user_profile=self.request.user.userprofile)
class DnsViewSet(drf_helpers.CustomReadOnlySingleViewSet):
    """
    List dns urls.
    """
    permission_classes = (IsAuthenticated,)
    # Read-only single-object viewset; the serializer renders the DNS sub-urls.
    serializer_class = serializers.DnsSerializer
class DnsZoneViewSet(drf_helpers.CustomModelViewSet):
    """
    List dns zones in a given cloud.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.DnsZoneSerializer

    def list_objects(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.dns.host_zones.list()

    def get_object(self):
        cloud = view_helpers.get_cloud_provider(self)
        return cloud.dns.host_zones.get(self.kwargs["pk"])
class DnsRecordViewSet(drf_helpers.CustomModelViewSet):
    """
    List records in a given dns zone.
    """
    permission_classes = (IsAuthenticated,)
    # Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.DnsRecordSerializer
    lookup_value_regex = '[^/]+'

    def _get_dns_zone(self):
        """Resolve the DNS zone named in the URL, raising Http404 when absent.

        Shared by list_objects/get_object, which previously duplicated
        this resolution logic.
        """
        provider = view_helpers.get_cloud_provider(self)
        dns_zone = provider.dns.host_zones.get(self.kwargs.get("dns_zone_pk"))
        if not dns_zone:
            raise Http404
        return dns_zone

    def list_objects(self):
        return self._get_dns_zone().records.list()

    def get_object(self):
        return self._get_dns_zone().records.get(self.kwargs["pk"])
|
import json
from rhino import Mapper, Resource, ok
from rhino.errors import BadRequest
def read_json(f):
    """Parse the file-like object *f* as JSON.

    Raises BadRequest (HTTP 400) when the body is not valid JSON.
    """
    try:
        return json.load(f)
    except ValueError as e:
        raise BadRequest("Invalid JSON: %s" % e)
def json_api_wrapper(app):
    """Wrap *app* so JSON request bodies are parsed and JSON-serializable
    response bodies are rendered with the proper Content-Type."""
    def wrap(request, ctx):
        if request.content_type == 'application/json':
            # Install the JSON body reader before the handler runs.
            request._body_reader = read_json
        response = app(request, ctx)
        body = response.body
        if isinstance(body, (dict, list, tuple)):
            response.body = json.dumps(body)
            response.headers['Content-Type'] = 'application/json'
        return response
    return wrap
# Module-level demo state: one mutable document served at '/'.
data = {'message': 'hello, world!'}
data_resource = Resource()
@data_resource.get
def get_data(request):
    # GET / -> the current document (serialized by json_api_wrapper).
    return data
@data_resource.put
def update_data(request):
    # PUT / with a JSON body replaces the message and echoes the document.
    data['message'] = request.body['message']
    return data
app = Mapper()
app.add_wrapper(json_api_wrapper)
app.add('/', data_resource)
if __name__ == '__main__':
    app.start_server()
|
"""
WSGI config for kohrsupply project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before constructing the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kohrsupply.settings")
application = get_wsgi_application()
|
import requests
import json
import os
from time import sleep
from pprint import pprint
f = open("creds.txt")
lines = f.readlines()
f.close
token = lines[0].strip()
print "Token loaded"
last_update = 0
if os.path.isfile("offset"):
f = open("offset", 'r')
last_update = int(f.read())
f.close
print "Offset loaded"
else:
print "No offset - starting new"
url = 'https://api.telegram.org/bot%s/' % token
while True:
gotUpdate = False
get_updates = json.loads(requests.get(url + 'getUpdates').content)
for update in get_updates['result']:
if last_update < update['update_id']:
gotUpdate = True
print ""
pprint(update)
print ""
last_update = update['update_id']
requests.get(url + 'sendMessage', params=dict(chat_id=update['message']['chat']['id'], text="Received"))
print "Acknowledged"
if gotUpdate:
f = open("offset", 'w')
f.write(str(last_update))
f.close
print "Stored new offset"
sleep(3)
|
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from . import views
# Route the site root to the home view and mount the Django admin.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^admin/', include(admin.site.urls)),
]
|
from django.contrib import admin
from .models import Link
@admin.register(Link)
class LinksAdmin(admin.ModelAdmin):
    # Columns shown on the Link change-list page in the admin.
    list_display = ('slug', 'url', 'create_date')
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
# NOTE(review): `patterns()` with string view references is the pre-1.8
# Django URLconf style; it was removed in later Django versions.
urlpatterns = patterns(
    '',
    url(r'^$', 'applications.views.index'),
    url(r'^login/$', 'django.contrib.auth.views.login', {
        'template_name': 'login.html'}),
    url(r'^logout/$', 'applications.views.logout_view'),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import argparse
import numpy as np
import chainer
from chainer import cuda
from chainer import optimizers
from chainer import serializers
import alex
from mlimages.gather.imagenet import ImagenetAPI
from mlimages.label import LabelingMachine
from mlimages.training import TrainingData
from mlimages.model import ImageProperty
DATA_DIR = os.path.join(os.path.dirname(__file__), "./data/imagenet/")
IMAGES_ROOT = os.path.join(DATA_DIR, "./images")
LABEL_FILE = os.path.join(os.path.dirname(__file__), "./data/imagenet/label.txt")
LABEL_DEF_FILE = os.path.join(os.path.dirname(__file__), "./data/imagenet/label_def.txt")
MEAN_IMAGE_FILE = os.path.join(os.path.dirname(__file__), "./data/imagenet/mean_image.png")
MODEL_FILE = os.path.join(os.path.dirname(__file__), "./data/imagenet/chainer_alex.model")
IMAGE_PROP = ImageProperty(width=227, resize_by_downscale=True)
def download_imagenet(wnid, limit=-1):
    """Download ImageNet images for *wnid* into DATA_DIR and normalize the
    downloaded folder to the fixed IMAGES_ROOT path.

    Args:
        wnid: ImageNet synset id (e.g. "n02121808").
        limit: maximum number of images to fetch; -1 means no limit.
    """
    api = ImagenetAPI(data_root=DATA_DIR, limit=limit, debug=True)
    api.logger.info("start to gather the ImageNet images.")
    folders = api.gather(wnid, include_subset=True)
    # rename images root folder so the rest of the pipeline does not depend
    # on the wnid-derived name
    images_root = os.path.join(DATA_DIR, folders[0])
    os.rename(images_root, IMAGES_ROOT)
    print("Download has finished.")  # was the garbled "Down load has done."
def make_label():
    """Auto-generate LABEL_FILE and LABEL_DEF_FILE from the directory
    structure under IMAGES_ROOT."""
    machine = LabelingMachine(data_root=IMAGES_ROOT)
    # label_dir_auto writes both files as a side effect; its return value
    # was previously bound to an unused local.
    machine.label_dir_auto(label_file=LABEL_FILE, label_def_file=LABEL_DEF_FILE)
def show(limit, shuffle=True):
    """Display up to *limit* training images (default 5 when limit <= 0),
    optionally in shuffled order, printing each image path."""
    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    count = limit if limit > 0 else 5
    iterator = td.generate()
    if shuffle:
        import random
        samples = list(iterator)
        random.shuffle(samples)
        iterator = iter(samples)
    for shown, (arr, im) in enumerate(iterator, start=1):
        restored = td.data_to_image(arr, im.label, raw=True)
        print(im.path)
        restored.image.show()
        if shown >= count:
            break
def train(epoch=10, batch_size=32, gpu=False):
    """Train the AlexNet model on the prepared ImageNet data.

    Args:
        epoch: number of training epochs.
        batch_size: minibatch size.
        gpu: run on CUDA via cupy when True, otherwise on NumPy.

    The model is checkpointed to MODEL_FILE after every epoch and the
    learning rate decays by 3% per epoch.
    """
    if gpu:
        cuda.check_cuda_available()
    xp = cuda.cupy if gpu else np
    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, image_property=IMAGE_PROP)
    # make mean image
    if not os.path.isfile(MEAN_IMAGE_FILE):
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)
    else:
        td.mean_image_file = MEAN_IMAGE_FILE
    # train model (the no-op `epoch = epoch` / `batch_size = batch_size`
    # self-assignments were removed)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)
    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))
    if gpu:
        model.to_gpu()
    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)
        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))
            optimizer.update(model, x, t)
            print("loss: {0}, accuracy: {1}".format(float(model.loss.data), float(model.accuracy.data)))
        # Checkpoint after each epoch, then decay the learning rate.
        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97
def predict(limit):
    """Run the trained model over training images, printing predicted vs
    actual labels for up to *limit* images (default 5 when limit <= 0)."""
    count = limit if limit > 0 else 5
    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)
    for shown, (arr, im) in enumerate(td.generate(), start=1):
        # Wrap the single example in a batch dimension for the model.
        batch = np.ndarray((1,) + arr.shape, arr.dtype)
        batch[0] = arr
        x = chainer.Variable(np.asarray(batch), volatile="on")
        y = model.predict(x)
        best = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[best], label_def[im.label]))
        im.image.show()
        if shown >= count:
            break
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Example of Imagenet x AlexNet")
    # Join with a separator: the original "".join produced one run-together
    # string in the --help output.
    parser.add_argument("task", type=str, help="task of script. " + ", ".join([
        "g: gather images", "l: make label file", "s: show training images (shuffle data when 'ss')",
        "t: train model", "p: predict"
    ]))
    parser.add_argument("-wnid", type=str, help="imagenet id (default is cats(n02121808))", default="n02121808")
    parser.add_argument("-limit", type=int, help="g: download image limit, s,p: show/predict image limit", default=-1)
    parser.add_argument("-epoch", type=int, help="when t: epoch count", default=10)
    parser.add_argument("-batchsize", type=int, help="when t: batch size", default=32)
    parser.add_argument("-gpu", action="store_true", help="when t: use gpu")
    args = parser.parse_args()
    if args.task == "g":
        download_imagenet(args.wnid, args.limit)
    elif args.task == "l":
        print("create label data automatically.")
        make_label()
    elif args.task == "s":
        show(args.limit, shuffle=False)
    elif args.task == "ss":
        show(args.limit, shuffle=True)
    elif args.task == "t":
        train(epoch=args.epoch, batch_size=args.batchsize, gpu=args.gpu)
    elif args.task == "p":
        predict(args.limit)
|
import os
import sys
import csv
sys.setrecursionlimit(100000000)
import wave
import xml.etree.ElementTree as ET
from textgrid import TextGrid, IntervalTier
from bs4 import BeautifulSoup
from alignment.sequence import Sequence
from alignment.vocabulary import Vocabulary
from alignment.sequencealigner import SimpleScoring, GlobalSequenceAligner, StrictGlobalSequenceAligner
# Corpus layout: force-aligned TextGrids and the corresponding wav files.
base_dir = '/media/share/corpora/AudioBNC'
textgrid_dir = os.path.join(base_dir, 'textgrids')
textgrids = os.listdir(textgrid_dir)
wav_dir = os.path.join(base_dir, 'wavs')
wavs = os.listdir(wav_dir)
# Original BNC XML texts (speaker metadata + orthographic transcriptions).
bnc_xml_dir = r'/media/share/corpora/BNC/Texts'
speaker_header = ['id', 'sex', 'agegroup', 'dialect_group', 'age', 'dialect']
def load_bnc_code(code):
    """Parse the BNC XML file for text *code*.

    Returns a 3-tuple:
      speakers: {person xml:id: {sex, agegroup, dialect_group, name, age, dialect}}
      recording_data: {recording n: {date, dur, time, type, xml:id} (present keys only)}
      transcripts: {recording n: [(WORD, speaker_id), ...]} with punctuation
        dropped and clitics ('s, n't, ...) re-attached to the previous word.
    """
    path = os.path.join(bnc_xml_dir, code[0], code[:2], code + '.xml')
    with open(path, 'r', encoding='utf8') as f:
        soup = BeautifulSoup(f, 'html.parser')
    recording_data = {x['n']: {h: x[h] for h in ['date', 'dur', 'time', 'type', 'xml:id'] if h in x} for x in
                      soup.find_all('recording')}
    # print(recording_data)
    # print(soup)
    partcipant_description = soup.find('particdesc')
    if partcipant_description is None:
        speakers = {}
    else:
        n_participants = partcipant_description['n']
        people = partcipant_description.find_all('person')
        speakers = {}
        for p in people:
            # print(p)
            d = {}
            d['sex'] = p['sex']
            d['agegroup'] = p['agegroup']
            d['dialect_group'] = p['dialect']
            # Sub-elements are optional; fall back to None when missing.
            try:
                d['name'] = p.find('persname').get_text()
            except AttributeError:
                d['name'] = None
            try:
                d['age'] = p.find('age').get_text()
            except AttributeError:
                d['age'] = None
            try:
                d['dialect'] = p.find('dialect').get_text()
            except AttributeError:
                d['dialect'] = None
            speakers[p['xml:id']] = d
    # print(partcipant_description)
    # print(speakers)
    # print(soup)
    transcripts = {}
    for r in recording_data.keys():
        # Prefer the <div> for this recording; fall back to all utterances.
        d = soup.find('div', n=r)
        if d is not None:
            utts = d.find_all('u')
        else:
            utts = soup.find_all('u')
        data = []
        for u in utts:
            words = u.find_all('w')
            new_words = []
            for w in words:
                if w['c5'] == 'PUN':
                    continue
                w = w.get_text().upper().strip()
                # Merge clitics ('S, N'T, ...) into the preceding word so the
                # transcript matches the aligner's tokenization.
                if new_words and (w.startswith("'") or w == "N'T"):
                    new_words[-1] = (new_words[-1][0] +w, u['who'])
                else:
                    new_words.append((w, u['who']))
            data.extend(new_words)
        transcripts[r] = data
    # for k, v in transcripts.items():
    #     print(k)
    #     print(v)
    return speakers, recording_data, transcripts
def calc_duration(path):
    """Return the length of the wav file at *path* in seconds."""
    with wave.open(path, 'rb') as handle:
        return handle.getnframes() / float(handle.getframerate())
# Pass 1: sanity-check every wav's TextGrids (read failures, duplicated or
# overlapping time ranges) and collect per-TextGrid min/max times.
bnc_cache = {}
speakers = {}
analysis = []
read_errors = 0
dup_min_errors = 0
dup_max_errors = 0
overlap_errors = 0
for f in wavs:
    if not f.endswith('.wav'):
        continue
    path = os.path.join(wav_dir, f)
    duration = calc_duration(path)
    name, _ = os.path.splitext(f)
    print(f)
    #print(duration)
    relevant_tgs = sorted([os.path.join(textgrid_dir, x) for x in textgrids if x.startswith(name)])
    tgs = [TextGrid() for x in relevant_tgs]
    for i, t in enumerate(tgs):
        try:
            t.read(relevant_tgs[i])
        except:
            print('Error reading {}'.format(relevant_tgs[i]))
            read_errors += 1
            tgs[i] = None
            continue
    mins = [x.minTime for x in tgs if x]
    maxs = [x.maxTime for x in tgs if x]
    for i, m in enumerate(mins):
        if not m:
            # Fall back to the first tier's times when the grid-level time
            # is missing/zero.
            w = tgs[i].getFirst('word')
            p = tgs[i].getFirst('phone')
            if w.minTime is not None:
                mins[i] = w.minTime
            elif p.minTime is not None:
                mins[i] = p.minTime
            # NOTE(review): the maxs fallback below also uses minTime --
            # looks like a copy-paste bug; confirm before changing.
            if not maxs[i]:
                w = tgs[i].getFirst('word')
                p = tgs[i].getFirst('phone')
                if w.minTime is not None:
                    maxs[i] = w.minTime
                elif p.minTime is not None:
                    maxs[i] = p.minTime
    error = False
    if len(set(mins)) != len(mins):
        error = True
        print('Duplicate mins!')
        dup_min_errors += 1
    if len(set(maxs)) != len(maxs):
        error = True
        print('Duplicate maxs!')
        dup_max_errors += 1
    intervals = list(zip(mins, maxs))
    for i, interval in enumerate(intervals):
        if i != len(intervals) - 1:
            if interval[1] > intervals[i+1][0]:
                error = True
                print('overlapping intervals!')
                overlap_errors += 1
    if error:
        print(intervals)
        print(relevant_tgs)
        continue
    for tg_path in relevant_tgs:
        #print(tg_path)
        r_code, bnc_code = tg_path.split('_')[-3:-1]
        if bnc_code not in bnc_cache:
            bnc_cache[bnc_code] = load_bnc_code(bnc_code)
            speakers.update(bnc_cache[bnc_code][0])
        _, recording_data, transcripts = bnc_cache[bnc_code]
        transcript = transcripts[r_code]
        tg = TextGrid(strict=False)
        try:
            tg.read(tg_path)
        except:
            print('Error reading {}'.format(tg_path))
        #print(tg.minTime, tg.maxTime, tg.maxTime - tg.minTime)
        analysis.append([f, duration, os.path.basename(tg_path), tg.minTime, tg.maxTime])
        word_tier = tg.getFirst('word')
        #print([x.mark for x in word_tier])
        phone_tier = tg.getFirst('phone')
print ('There were {} read errors, {} duplicated mins, {} duplicated maxs, and {} overlaps.'.format(read_errors, dup_min_errors, dup_max_errors, overlap_errors))
# NOTE(review): the bare name `error` below deliberately raises NameError to
# halt the script here after the analysis pass.
error
with open(os.path.join(base_dir, 'analysis.txt'), 'w') as f:
    writer = csv.writer(f)
    writer.writerow(['wav', 'duration', 'tg', 'tg_min', 'tg_max'])
    for line in analysis:
        writer.writerow(line)
# NOTE(review): second intentional NameError barrier (see above).
error
# Pass 2: for each wav, align the aligner's word tier against the BNC
# orthographic transcript to recover per-word speakers, then write one
# combined TextGrid (word + phone tier per speaker) next to the wav.
for f in wavs:
    if not f.endswith('.wav'):
        continue
    path = os.path.join(wav_dir, f)
    duration = calc_duration(path)
    name, _ = os.path.splitext(f)
    relevant_tgs = [os.path.join(textgrid_dir, x) for x in textgrids if x.startswith(name)]
    speaker_word_tiers = {}
    speaker_phone_tiers = {}
    out_path = path.replace('.wav', '.TextGrid')
    if os.path.exists(out_path):
        print ('{} already exists, skipping.'.format(out_path))
        continue
    for tg_path in relevant_tgs:
        print(tg_path)
        r_code, bnc_code = tg_path.split('_')[-3:-1]
        # Known-bad recordings, skipped by hand.
        if bnc_code == 'KDP' and r_code == '000419':
            continue
        if bnc_code == 'KPM' and r_code == '075702':
            continue
        if bnc_code not in bnc_cache:
            bnc_cache[bnc_code] = load_bnc_code(bnc_code)
            speakers.update(bnc_cache[bnc_code][0])
        _, recording_data, transcripts = bnc_cache[bnc_code]
        transcript = transcripts[r_code]
        try:
            tg = TextGrid(strict=False)
            tg.read(tg_path)
        except Exception as e:
            print(out_path)
            print(e)
            continue
        word_tier = tg.getFirst('word')
        #print([x.mark for x in word_tier])
        phone_tier = tg.getFirst('phone')
        trans_ind = 0
        prev_oov = False
        # Globally align transcript words (a) against TextGrid words (b).
        a = Sequence([x[0] for x in transcript])
        b = Sequence([x.mark for x in word_tier])
        # Create a vocabulary and encode the sequences.
        v = Vocabulary()
        aEncoded = v.encodeSequence(a)
        bEncoded = v.encodeSequence(b)
        # Create a scoring and align the sequences using global aligner.
        scoring = SimpleScoring(2, -1)
        aligner = GlobalSequenceAligner(scoring, -2)
        score, encodeds = aligner.align(aEncoded, bEncoded, backtrace=True)
        # Iterate over optimal alignments and print them.
        for encoded in encodeds:
            alignment = v.decodeSequenceAlignment(encoded)
            #print(alignment)
            #print('Alignment score:', alignment.score)
            #print('Percent identity:', alignment.percentIdentity())
            # inds maps TextGrid word positions to transcript indices
            # ('-' where the alignment has a gap).
            trans_ind = 0
            inds = ['-']
            for x in alignment:
                if x[0] != '-':
                    inds.append(trans_ind)
                    trans_ind += 1
                else:
                    inds.append('-')
            inds.append('-')
            #print(inds)
            #print([x for x in range(len(inds))])
            #print(len(word_tier))
            # Collapse consecutive same-speaker words into turns:
            # (speaker, [start_index, end_index)).
            word_speakers = []
            cur_speaker = None
            cur_turn = [0, None]
            for j, i in enumerate(inds):
                if i == '-':
                    continue
                s = transcript[i][1]
                if cur_speaker != s:
                    if cur_turn[1] is not None:
                        word_speakers.append((cur_speaker, cur_turn))
                        cur_turn = [None, None]
                    cur_speaker = s
                if cur_turn[0] is None:
                    cur_turn[0] = j
                cur_turn[1] = j + 1
            if cur_turn[0] is not None:
                word_speakers.append((cur_speaker, cur_turn))
            #print(word_speakers)
            # Assign every TextGrid word (and its phones, by midpoint) to
            # the speaker whose turn covers it; default to the last turn.
            for i, w in enumerate(word_tier):
                for s, r in word_speakers:
                    if r[0] == r[1] and i == r[0]:
                        speaker = s
                        break
                    elif i >= r[0] and i < r[1]:
                        speaker = s
                        break
                else:
                    speaker = word_speakers[-1][0]
                if speaker not in speaker_word_tiers:
                    speaker_word_tiers[speaker] = []
                    speaker_phone_tiers[speaker] = []
                speaker_word_tiers[speaker].append(w)
                from decimal import Decimal
                # Debug trap for one specific problematic interval.
                if w.minTime == Decimal('1385.9725'):
                    print('found', w, tg_path)
                for p in phone_tier:
                    mid_point = p.minTime + (p.maxTime - p.minTime) / 2
                    if mid_point > w.minTime and mid_point < w.maxTime:
                        speaker_phone_tiers[speaker].append(p)
    new_tg = TextGrid(strict=False)
    if not speaker_word_tiers:
        print('could not find tiers for {}'.format(out_path))
        continue
    try:
        # Build one word tier and one phone tier per speaker, trimming
        # silence/OOV intervals that overlap their neighbours and clamping
        # everything to the wav duration.
        for s in sorted(speaker_word_tiers.keys()):
            w_tier = IntervalTier('{} - word'.format(s), 0, duration)
            p_tier = IntervalTier('{} - phone'.format(s), 0, duration)
            for w in sorted(speaker_word_tiers[s]):
                if len(w_tier) and w_tier[-1].mark in ['sp','{OOV}'] and w_tier[-1].maxTime > w.minTime:
                    w_tier[-1].maxTime = w.minTime
                if len(w_tier) and w.mark in ['sp','{OOV}'] and w_tier[-1].maxTime > w.minTime:
                    w.minTime = w_tier[-1].maxTime
                #print(w)
                if w.maxTime > duration:
                    w.maxTime = duration
                w_tier.addInterval(w)
            for p in sorted(speaker_phone_tiers[s]):
                if len(p_tier) and p_tier[-1].mark == 'sil' and p_tier[-1].maxTime > p.minTime:
                    p_tier[-1].maxTime = p.minTime
                if len(p_tier) and p.mark == 'sil' and p_tier[-1].maxTime > p.minTime:
                    p.minTime = p_tier[-1].maxTime
                #print(p)
                if p.maxTime > duration:
                    p.maxTime = duration
                try:
                    p_tier.addInterval(p)
                except ValueError:
                    pass
            new_tg.append(w_tier)
            new_tg.append(p_tier)
        new_tg.write(out_path)
    except Exception as e:
        print(out_path)
        print(e)
        # print(tg)
|
"""
Craft, sign and broadcast Tetcoin transactions.
Interface with Tetcoind.
"""
import os
import sys
import binascii
import json
import hashlib
import re
import time
import getpass
import decimal
import logging
import requests
from pycoin.ecdsa import generator_secp256k1, public_pair_for_secret_exponent
from pycoin.encoding import wif_to_tuple_of_secret_exponent_compressed, public_pair_to_sec, is_sec_compressed, EncodingError
from Crypto.Cipher import ARC4
from . import config, exceptions, util, blockchain
# Script opcodes used when serialising transactions.
OP_RETURN = b'\x6a'
OP_PUSHDATA1 = b'\x4c'
OP_DUP = b'\x76'
OP_HASH160 = b'\xa9'
OP_EQUALVERIFY = b'\x88'
OP_CHECKSIG = b'\xac'
OP_1 = b'\x51'
OP_2 = b'\x52'
OP_3 = b'\x53'
OP_CHECKMULTISIG = b'\xae'
# Base58 alphabet (no 0, O, I, l).
b58_digits = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
D = decimal.Decimal
# Double-SHA256, used for checksums.
dhash = lambda x: hashlib.sha256(hashlib.sha256(x).digest()).digest()
def hash160(x):
    """Return RIPEMD160(SHA256(x)) -- the standard HASH160 of *x*."""
    sha = hashlib.sha256(x).digest()
    ripemd = hashlib.new('ripemd160')
    ripemd.update(sha)
    return ripemd.digest()
def pubkey_to_pubkeyhash(pubkey):
    """Derive the Base58Check address for a raw public key."""
    hashed = hash160(pubkey)
    return base58_check_encode(binascii.hexlify(hashed).decode('utf-8'), config.ADDRESSVERSION)
def pubkeyhash_to_pubkey(pubkeyhash):
    """Recover the public key for *pubkeyhash* by scanning scriptSigs that
    spent from the address.

    Raises AddressError when no transaction reveals the key.
    """
    # TODO: convert to python-tetcoinlib.
    for tx in search_raw_transactions(pubkeyhash):
        for vin in tx['vin']:
            candidate = vin['scriptSig']['asm'].split(' ')[1]
            if pubkey_to_pubkeyhash(binascii.unhexlify(bytes(candidate, 'utf-8'))) == pubkeyhash:
                return candidate
    raise exceptions.AddressError('Public key for address ‘{}’ not published in blockchain.'.format(pubkeyhash))
def multisig_pubkeyhashes_to_pubkeys(address):
    """Rewrite an m_hash..._n multi-sig address into its pubkey form."""
    parts = address.split('_')
    required = int(parts[0])
    int(parts[-1])  # parse the trailing count, as the original did
    pubkeys = sorted(pubkeyhash_to_pubkey(h) for h in parts[1:-1])
    return '_'.join([str(required)] + pubkeys + [str(len(pubkeys))])
# Shared requests.Session for backend RPC calls; created lazily in connect().
tetcoin_rpc_session = None
def print_coin(coin):
    # Render a coin dict as a stable, human-readable one-liner.
    return 'amount: {}; txid: {}; vout: {}; confirmations: {}'.format(coin['amount'], coin['txid'], coin['vout'], coin.get('confirmations', '?')) # simplify and make deterministic
# Thin wrappers around the backend's JSON-RPC interface (see rpc() below).
def get_block_count():
    return int(rpc('getblockcount', []))
def get_block_hash(block_index):
    return rpc('getblockhash', [block_index])
def get_raw_transaction (tx_hash):
    # Verbosity 1: decoded JSON rather than raw hex.
    return rpc('getrawtransaction', [tx_hash, 1])
def get_block (block_hash):
    return rpc('getblock', [block_hash])
def get_block_hash (block_index):
    # NOTE(review): duplicate definition -- silently replaces the identical
    # get_block_hash defined a few lines above.
    return rpc('getblockhash', [block_index])
def decode_raw_transaction (unsigned_tx_hex):
    return rpc('decoderawtransaction', [unsigned_tx_hex])
def get_info():
    return rpc('getinfo', [])
def is_valid (address):
    return rpc('validateaddress', [address])['isvalid']
def is_mine (address):
    return rpc('validateaddress', [address])['ismine']
def sign_raw_transaction (unsigned_tx_hex):
    return rpc('signrawtransaction', [unsigned_tx_hex])
def send_raw_transaction (tx_hex):
    return rpc('sendrawtransaction', [tx_hex])
def get_private_key (address):
    return rpc('dumpprivkey', [address])
def search_raw_transactions (address):
    # Requires a backend with the searchrawtransactions index enabled.
    return rpc('searchrawtransactions', [address, 1, 0, 9999999])
def get_wallet ():
    # Yield (address, amount[, account]) bunches from the wallet groupings.
    for group in rpc('listaddressgroupings', []):
        for bunch in group:
            yield bunch
def get_mempool ():
    return rpc('getrawmempool', [])
def list_unspent ():
    return rpc('listunspent', [0, 999999])
def backend_check (db):
    """Check the blocktime of the last block to see if the backend Core
    daemon is running behind.

    NOTE(review): the original first statement called .format() on the
    string, so the function had no docstring at all; and its threshold
    comment said "Two hours" while the check is eight.
    """
    block_count = get_block_count()
    block_hash = get_block_hash(block_count)
    block = get_block(block_hash)
    time_behind = time.time() - block['time'] # TODO: Block times are not very reliable.
    if time_behind > 60 * 60 * 8: # Eight hours.
        raise exceptions.TetcoindError('Tetcoind is running about {} seconds behind.'.format(round(time_behind)))
def connect (url, payload, headers):
    """POST *payload* to the backend RPC *url*, retrying on connection errors.

    Returns the requests Response, or None after TRIES failed attempts.
    SSL errors are never retried.
    """
    global tetcoin_rpc_session
    if not tetcoin_rpc_session: tetcoin_rpc_session = requests.Session()
    TRIES = 12
    for i in range(TRIES):
        try:
            response = tetcoin_rpc_session.post(url, data=json.dumps(payload), headers=headers, verify=config.BACKEND_RPC_SSL_VERIFY)
            if i > 0: print('Successfully connected.', file=sys.stderr)
            return response
        except requests.exceptions.SSLError:
            # Bare raise preserves the original traceback (the old
            # `raise e` re-raised from the handler instead).
            raise
        except requests.exceptions.ConnectionError:
            logging.debug('Could not connect to Tetcoind. (Try {}/{})'.format(i+1, TRIES))
            time.sleep(5)
    return None
def wallet_unlock ():
    """Ensure the backend wallet is usable, prompting for the passphrase
    and unlocking for 60 seconds when it is encrypted."""
    getinfo = get_info()
    if 'unlocked_until' in getinfo:
        # NOTE(review): in Core, 'unlocked_until' is a unix timestamp, not a
        # remaining-seconds count -- comparing it against 60 looks suspect;
        # confirm the intended semantics before changing.
        if getinfo['unlocked_until'] >= 60:
            return True # Wallet is unlocked for at least the next 60 seconds.
        else:
            passphrase = getpass.getpass('Enter your Tetcoind[‐Qt] wallet passhrase: ')
            print('Unlocking wallet for 60 (more) seconds.')
            rpc('walletpassphrase', [passphrase, 60])
            # NOTE(review): no explicit return here -- callers receive None
            # after an unlock attempt.
    else:
        return True # Wallet is unencrypted.
def rpc (method, params):
    """Call *method* on the backend via JSON-RPC 2.0 and return its result.

    Raises TetcoindRPCError on connectivity/HTTP problems and TetcoindError
    or AddressError on RPC-level errors; transient 'Block number out of
    range.' errors are retried after a pause.
    """
    headers = {'content-type': 'application/json'}
    payload = {
        "method": method,
        "params": params,
        "jsonrpc": "2.0",
        "id": 0,
    }
    response = connect(config.BACKEND_RPC, payload, headers)
    if response is None:
        network = 'testnet' if config.TESTNET else 'mainnet'
        raise exceptions.TetcoindRPCError('Cannot communicate with {} Core. ({} is set to run on {}, is {} Core?)'.format(config.TET_NAME, config.XTN_CLIENT, network, config.TET_NAME))
    elif response.status_code not in (200, 500):
        raise exceptions.TetcoindRPCError(str(response.status_code) + ' ' + response.reason)
    # Return result, with error handling.
    response_json = response.json()
    if 'error' not in response_json.keys() or response_json['error'] is None:
        return response_json['result']
    elif response_json['error']['code'] == -5: # RPC_INVALID_ADDRESS_OR_KEY
        raise exceptions.TetcoindError('{} Is txindex enabled in {} Core?'.format(response_json['error'], config.TET_NAME))
    elif response_json['error']['code'] == -4: # Unknown private key (locked wallet?)
        # If address in wallet, attempt to unlock.
        address = params[0]
        if is_valid(address):
            if is_mine(address):
                raise exceptions.TetcoindError('Wallet is locked.')
            else: # When will this happen?
                raise exceptions.TetcoindError('Source address not in wallet.')
        else:
            raise exceptions.AddressError('Invalid address. (Multi‐signature?)')
    elif response_json['error']['code'] == -1 and response_json['error']['message'] == 'Block number out of range.':
        # Fixes two bugs: the message lives under 'error' (the original read
        # a non-existent top-level key), and the retry called
        # get_block_hash with an undefined name.
        time.sleep(10)
        return rpc(method, params) # Retry the original call.
    else:
        raise exceptions.TetcoindError('{}'.format(response_json['error']))
def validate_address(address, block_index):
    """Validate a (possibly multi-signature) address, raising AddressError
    on any problem.

    Multi-sig addresses are only allowed on testnet after
    FIRST_MULTISIG_BLOCK_TESTNET.
    """
    addresses = address.split('_')
    multisig = len(addresses) > 1
    if multisig:
        if not (config.TESTNET and block_index >= config.FIRST_MULTISIG_BLOCK_TESTNET):
            raise exceptions.AddressError('Multi‐signature addresses currently disabled on mainnet.')
        # Explicit checks instead of asserts: asserts vanish under `python -O`.
        if int(addresses[0]) not in (1, 2, 3) or int(addresses[-1]) not in (1, 2, 3):
            raise exceptions.AddressError('Invalid multi‐signature address:', address)
        addresses = addresses[1:-1]
    # Check validity by attempting to decode.
    for address in addresses:
        base58_check_decode(address, config.ADDRESSVERSION)
def base58_encode(binary):
    """Encode raw bytes as base58 (no checksum, no leading-zero padding)."""
    # Interpret the bytes as one big-endian integer.
    value = int('0x0' + binascii.hexlify(binary).decode('utf8'), 16)
    # Repeatedly divide into base58 digits, least significant first.
    encoded = []
    while value > 0:
        value, digit = divmod(value, 58)
        encoded.append(b58_digits[digit])
    encoded.reverse()
    return ''.join(encoded)
def base58_check_encode(original, version):
    """Base58Check-encode the hex string *original* under *version* byte."""
    payload = version + binascii.unhexlify(bytes(original, 'utf-8'))
    with_checksum = payload + dhash(payload)[:4]
    encoded = base58_encode(with_checksum)
    # Leading zero bytes become leading base58 '1' digits.
    zero_pad = 0
    for byte in payload:
        if byte != 0:
            break
        zero_pad += 1
    address = b58_digits[0] * zero_pad + encoded
    # Round-trip sanity check.
    if bytes(original, 'utf-8') != binascii.hexlify(base58_check_decode(address, version)):
        raise exceptions.AddressError('encoded address does not decode properly')
    return address
def base58_check_decode (s, version):
    """Decode the Base58Check string *s*, verifying its version byte and
    4-byte double-SHA256 checksum; return the payload bytes (without
    version and checksum).

    Raises InvalidBase58Error, VersionByteError or Base58ChecksumError.
    """
    # Convert the string to an integer
    n = 0
    for c in s:
        n *= 58
        if c not in b58_digits:
            raise exceptions.InvalidBase58Error('Not a valid base58 character:', c)
        digit = b58_digits.index(c)
        n += digit
    # Convert the integer to bytes
    h = '%x' % n
    if len(h) % 2:
        h = '0' + h
    res = binascii.unhexlify(h.encode('utf8'))
    # Add padding back.
    # Each leading '1' digit encodes one zero byte stripped by the integer
    # conversion above.
    pad = 0
    for c in s[:-1]:
        if c == b58_digits[0]: pad += 1
        else: break
    k = version * pad + res
    addrbyte, data, chk0 = k[0:1], k[1:-4], k[-4:]
    if addrbyte != version:
        raise exceptions.VersionByteError('incorrect version byte')
    chk1 = dhash(addrbyte + data)[:4]
    if chk0 != chk1:
        raise exceptions.Base58ChecksumError('Checksum mismatch: %r ≠ %r' % (chk0, chk1))
    return data
def var_int(i):
    """Serialise *i* as a Bitcoin-style variable-length integer (CompactSize).

    :param i: non-negative integer.
    :return: 1, 3, 5 or 9 bytes, little-endian with a size prefix as needed.
    """
    if i < 0xfd:
        prefix, width = b'', 1
    elif i <= 0xffff:
        prefix, width = b'\xfd', 2
    elif i <= 0xffffffff:
        prefix, width = b'\xfe', 4
    else:
        prefix, width = b'\xff', 8
    return prefix + i.to_bytes(width, byteorder='little')
def op_push(i):
    """Return the script opcode sequence that pushes *i* bytes onto the stack.

    :param i: number of bytes to push.
    :return: direct-push byte for i < 0x4c, otherwise OP_PUSHDATA1/2/4 plus a
        little-endian length.
    """
    if i < 0x4c:
        opcode, width = b'', 1          # Direct push of i bytes.
    elif i <= 0xff:
        opcode, width = b'\x4c', 1      # OP_PUSHDATA1
    elif i <= 0xffff:
        opcode, width = b'\x4d', 2      # OP_PUSHDATA2
    else:
        opcode, width = b'\x4e', 4      # OP_PUSHDATA4
    return opcode + i.to_bytes(width, byteorder='little')
def serialise (block_index, encoding, inputs, destination_outputs, data_output=None, change_output=None, source=None, self_public_key=None):
    """Serialise an unsigned transaction to raw bytes.

    Layout: version, var_int(#inputs), inputs, var_int(#outputs), destination
    outputs, data-carrying outputs, optional change output, locktime.

    :param block_index: current block height (gates testnet protocol changes).
    :param encoding: data-embedding scheme: 'multisig', 'opreturn' or 'pubkeyhash'.
    :param inputs: coin dicts with 'txid', 'vout' and 'scriptPubKey'.
    :param destination_outputs: (address, value) pairs; an address of the form
        'm_pubkey..._n' is treated as an m-of-n multi-signature destination.
    :param data_output: optional (data_array, value) — embedded data chunks
        and the value attached to each data output.
    :param change_output: optional (address, value) change pair.
    :param self_public_key: source public key bytes (used in data outputs).
    :return: raw unsigned transaction bytes.
    """
    s = (1).to_bytes(4, byteorder='little')                # Version
    # Number of inputs.
    s += var_int(int(len(inputs)))
    # List of Inputs.
    for i in range(len(inputs)):
        txin = inputs[i]
        s += binascii.unhexlify(bytes(txin['txid'], 'utf-8'))[::-1]         # TxOutHash
        s += txin['vout'].to_bytes(4, byteorder='little')   # TxOutIndex
        script = binascii.unhexlify(bytes(txin['scriptPubKey'], 'utf-8'))
        s += var_int(int(len(script)))                      # Script length
        s += script                                         # Script
        s += b'\xff' * 4                                    # Sequence
    # Number of outputs.
    n = 0
    n += len(destination_outputs)
    if data_output:
        data_array, value = data_output
        for data_chunk in data_array: n += 1
    else:
        data_array = []
    if change_output: n += 1
    s += var_int(n)
    # Destination output.
    for destination, value in destination_outputs:
        addresses = destination.split('_')
        s += value.to_bytes(8, byteorder='little')          # Value
        if len(addresses) > 1:
            # Unpack multi‐sig address.
            signatures_required = int(addresses[0])
            signatures_possible = int(addresses[-1])
            addresses = sorted(addresses[1:-1])
            if signatures_possible != len(addresses):
                raise exceptions.InputError('Incorrect number of public keys in multi‐signature destination.')
            # Required signatures.
            if signatures_required == 1:
                op_required = OP_1
            elif signatures_required == 2:
                op_required = OP_2
            elif signatures_required == 3:
                op_required = OP_3
            else:
                raise exceptions.InputError('Required signatures must be 1, 2 or 3.')
            # Total possible signatures.
            if len(addresses) == 1:
                op_total = OP_1
            elif len(addresses) == 2:
                op_total = OP_2
            elif len(addresses) == 3:
                op_total = OP_3
            else:
                raise exceptions.InputError('Total possible signatures must be 1, 2 or 3.')
            # Construct script.
            script = op_required                                # Required signatures
            for address in addresses:
                destination_public_key = binascii.unhexlify(address)
                script += op_push(len(destination_public_key))  # Push bytes of public key
                script += destination_public_key                # Data chunk (fake) public key
            script += op_total                                  # Total signatures
            script += OP_CHECKMULTISIG                          # OP_CHECKMULTISIG
        else:
            # Construct standard pay-to-pubkey-hash script.
            pubkeyhash = base58_check_decode(addresses[0], config.ADDRESSVERSION)
            script = OP_DUP                                     # OP_DUP
            script += OP_HASH160                                # OP_HASH160
            script += op_push(20)                               # Push 0x14 bytes
            script += pubkeyhash                                # pubKeyHash
            script += OP_EQUALVERIFY                            # OP_EQUALVERIFY
            script += OP_CHECKSIG                               # OP_CHECKSIG
        s += var_int(int(len(script)))                      # Script length
        s += script
    # Data output.
    for data_chunk in data_array:
        # NOTE(review): re-unpacks data_output on every iteration (the
        # original author marked it "DUPE"); `value` was already unpacked
        # above, and rebinding `data_array` while iterating over it looks
        # accidental — confirm before touching.
        data_array, value = data_output # DUPE
        s += value.to_bytes(8, byteorder='little')          # Value
        if config.TESTNET and block_index >= config.FIRST_MULTISIG_BLOCK_TESTNET:   # Protocol change.
            data_chunk = config.PREFIX + data_chunk
        # Initialise encryption key (once per output).
        key = ARC4.new(binascii.unhexlify(inputs[0]['txid']))  # Arbitrary, easy‐to‐find, unique key.
        if encoding == 'multisig':
            # Get data (fake) public key.
            if config.TESTNET and block_index >= config.FIRST_MULTISIG_BLOCK_TESTNET:   # Protocol change.
                pad_length = (33 * 2) - 1 - len(data_chunk)
                assert pad_length >= 0
                data_chunk = bytes([len(data_chunk)]) + data_chunk + (pad_length * b'\x00')
                data_chunk = key.encrypt(data_chunk)
                # Construct script.
                script = OP_1                                   # OP_1
                script += op_push(len(self_public_key))         # Push bytes of source public key
                script += self_public_key                       # Source public key
                script += op_push(33)                           # Push bytes of data chunk (fake) public key (1/2)
                script += data_chunk[:33]                       # (Fake) public key (1/2)
                script += op_push(33)                           # Push bytes of data chunk (fake) public key (2/2)
                script += data_chunk[33:]                       # (Fake) public key (2/2)
                script += OP_3                                  # OP_3
                script += OP_CHECKMULTISIG                      # OP_CHECKMULTISIG
            else:
                # NOTE(review): this (non-testnet) branch does not ARC4-encrypt
                # the chunk, unlike the branch above — confirm intended.
                pad_length = 33 - 1 - len(data_chunk)
                assert pad_length >= 0
                data_chunk = bytes([len(data_chunk)]) + data_chunk + (pad_length * b'\x00')
                # Construct script.
                script = OP_1                                   # OP_1
                script += op_push(len(self_public_key))         # Push bytes of source public key
                script += self_public_key                       # Source public key
                script += op_push(len(data_chunk))              # Push bytes of data chunk (fake) public key
                script += data_chunk                            # (Fake) public key
                script += OP_2                                  # OP_2
                script += OP_CHECKMULTISIG                      # OP_CHECKMULTISIG
        elif encoding == 'opreturn':
            data_chunk = key.encrypt(data_chunk)
            script = OP_RETURN                                  # OP_RETURN
            script += op_push(len(data_chunk))                  # Push bytes of data chunk (NOTE: OP_SMALLDATA?)
            script += data_chunk                                # Data
        elif encoding == 'pubkeyhash':
            pad_length = 20 - 1 - len(data_chunk)
            assert pad_length >= 0
            data_chunk = bytes([len(data_chunk)]) + data_chunk + (pad_length * b'\x00')
            data_chunk = key.encrypt(data_chunk)
            # Construct script.
            script = OP_DUP                                     # OP_DUP
            script += OP_HASH160                                # OP_HASH160
            script += op_push(20)                               # Push 0x14 bytes
            script += data_chunk                                # (Fake) pubKeyHash
            script += OP_EQUALVERIFY                            # OP_EQUALVERIFY
            script += OP_CHECKSIG                               # OP_CHECKSIG
        else:
            raise exceptions.TransactionError('Unknown encoding‐scheme.')
        s += var_int(int(len(script)))                      # Script length
        s += script
    # Change output.
    if change_output:
        address, value = change_output
        pubkeyhash = base58_check_decode(address, config.ADDRESSVERSION)
        s += value.to_bytes(8, byteorder='little')          # Value
        script = OP_DUP                                     # OP_DUP
        script += OP_HASH160                                # OP_HASH160
        script += op_push(20)                               # Push 0x14 bytes
        script += pubkeyhash                                # pubKeyHash
        script += OP_EQUALVERIFY                            # OP_EQUALVERIFY
        script += OP_CHECKSIG                               # OP_CHECKSIG
        s += var_int(int(len(script)))                      # Script length
        s += script
    s += (0).to_bytes(4, byteorder='little')                # LockTime
    return s
def input_value_weight(amount):
    """Sort weight for coin selection.

    Outputs at or below the regular dust size get weight 0 (spent first);
    above that, bigger amounts get a smaller weight (preferred).
    """
    satoshis = amount * config.UNIT
    dust_limit = config.DEFAULT_REGULAR_DUST_SIZE
    return 0 if satoshis <= dust_limit else 1 / amount
def sort_unspent_txouts(unspent, allow_unconfirmed_inputs):
    """Return UTXOs sorted deterministically and filtered by confirmation.

    Deterministic ordering (oldest first by timestamp, then vout index, then
    by input_value_weight) so multiple nodes reach consensus without being
    exactly caught up to each other. Note that searchrawtransactions does not
    support unconfirmed transactions.
    """
    # Timestamp/vout pre-sort is best-effort: skip it if coins lack 'ts'.
    try:
        unspent = sorted(unspent, key=util.sortkeypicker(['ts', 'vout']))
    except KeyError:
        pass
    # Then order by amount-based weight (dust first, then bigger is better).
    unspent = sorted(unspent, key=lambda coin: input_value_weight(coin['amount']))
    if not allow_unconfirmed_inputs:
        return [coin for coin in unspent if coin['confirmations'] > 0]
    # Hackish: keep only coins that are confirmed or were seen recently, to
    # skip outputs from slow-to-confirm transactions. Cutoff: six hours.
    try:
        cutoff = time.time() - 6 * 3600
        return [coin for coin in unspent
                if coin['confirmations'] > 0 or coin['ts'] > cutoff]
    except (KeyError, TypeError):
        return unspent
def private_key_to_public_key (private_key_wif):
    """Derive the hex-encoded SEC public key from a WIF private key (pycoin).

    :raises exceptions.AltcoinSupportError: if pycoin rejects the WIF prefix.
    """
    if config.TESTNET:
        wif_prefixes = [config.PRIVATEKEY_VERSION_TESTNET]
    else:
        wif_prefixes = [config.PRIVATEKEY_VERSION_MAINNET]
    try:
        secret_exponent, compressed = wif_to_tuple_of_secret_exponent_compressed(
            private_key_wif, allowable_wif_prefixes=wif_prefixes)
    except EncodingError:
        raise exceptions.AltcoinSupportError('pycoin: unsupported WIF prefix')
    # Compute the public point and serialise it in SEC form.
    public_pair = public_pair_for_secret_exponent(generator_secp256k1, secret_exponent)
    sec = public_pair_to_sec(public_pair, compressed=compressed)
    return binascii.hexlify(sec).decode('utf-8')
def transaction (db, tx_info, encoding='auto', fee_per_kb=config.DEFAULT_FEE_PER_KB,
                 regular_dust_size=config.DEFAULT_REGULAR_DUST_SIZE,
                 multisig_dust_size=config.DEFAULT_MULTISIG_DUST_SIZE,
                 op_return_value=config.DEFAULT_OP_RETURN_VALUE, exact_fee=None,
                 fee_provided=0, self_public_key_hex=None,
                 allow_unconfirmed_inputs=False):
    """Construct an unsigned transaction embedding *data* for *source*.

    ``tx_info`` is ``(source, destination_outputs, data)``. Chooses the data
    encoding, validates addresses and dust limits, derives the source public
    key if needed, selects coins, computes the fee, and serialises everything.

    :return: unsigned transaction as a hex string.
    :raises exceptions.TransactionError / AddressError / InputError /
        BalanceError: on invalid parameters, addresses or insufficient funds.
    """
    block_index = util.last_block(db)['block_index']
    (source, destination_outputs, data) = tx_info
    multisig_source = len(source.split('_')) > 1
    # Data encoding methods.
    if data:
        if encoding == 'auto':
            if len(data) <= 40:
                # encoding = 'opreturn'
                encoding = 'multisig'   # TETGuild isn’t mining OP_RETURN?!
            else:
                encoding = 'multisig'
        if encoding not in ('pubkeyhash', 'multisig', 'opreturn'):
            raise exceptions.TransactionError('Unknown encoding‐scheme.')
    if exact_fee and not isinstance(exact_fee, int):
        raise exceptions.TransactionError('Exact fees must be in satoshis.')
    if not isinstance(fee_provided, int):
        raise exceptions.TransactionError('Fee provided must be in satoshis.')
    # If public key is necessary for construction of (unsigned) transaction,
    # either use the public key provided, or derive it from a private key
    # retrieved from wallet.
    self_public_key = None
    if encoding in ('multisig', 'pubkeyhash') and not multisig_source:
        # If no public key was provided, derive from private key.
        if not self_public_key_hex:
            # Get private key.
            private_key_wif = get_private_key(source)
            # Derive public key.
            self_public_key_hex = private_key_to_public_key(private_key_wif)
        #convert public key hex into public key pair (sec)
        try:
            sec = binascii.unhexlify(self_public_key_hex)
            is_compressed = is_sec_compressed(sec)
            self_public_key = sec
        except (EncodingError, binascii.Error):
            raise exceptions.InputError('Invalid private key.')
    # Protocol change.
    # NOTE(review): the checked threshold (65000) does not match the error
    # message (293000) — confirm which value is correct.
    if encoding == 'pubkeyhash' and get_block_count() < 65000 and not config.TESTNET:
        raise exceptions.TransactionError('pubkeyhash encoding unsupported before block 293000')
    # Validate source and all destination addresses.
    destinations = [address for address, value in destination_outputs]
    for destination in destinations + [source]:
        if destination:
            try:
                validate_address(destination, block_index)
            except exceptions.AddressError as e:
                raise exceptions.AddressError('Invalid destination address:', destination)
    # Check that the source is in wallet.
    # NOTE(review): `encoding in ('multisig')` tests substring membership in a
    # plain string, not tuple membership; harmless for the validated encoding
    # values, but probably meant to be `('multisig',)`.
    if encoding in ('multisig') and not self_public_key and not multisig_source:
        if not is_mine(source):
            raise exceptions.AddressError('Not one of your Tetcoin addresses:', source)
    # Check that the destination output isn't a dust output.
    # Set null values to dust size.
    new_destination_outputs = []
    for address, value in destination_outputs:
        if encoding == 'multisig':
            if value == None: value = multisig_dust_size
            if not value >= multisig_dust_size:
                raise exceptions.TransactionError('Destination output is below the dust target value.')
        else:
            if value == None: value = regular_dust_size
            if not value >= regular_dust_size:
                raise exceptions.TransactionError('Destination output is below the dust target value.')
        new_destination_outputs.append((address, value))
    destination_outputs = new_destination_outputs
    # Divide data into chunks.
    if data:
        def chunks(l, n):
            """ Yield successive n‐sized chunks from l.
            """
            for i in range(0, len(l), n): yield l[i:i+n]
        if config.TESTNET and block_index >= config.FIRST_MULTISIG_BLOCK_TESTNET:   # Protocol change.
            if encoding == 'pubkeyhash':
                data_array = list(chunks(data, 20 - 1 - 8))  # Prefix is also a suffix here.
            elif encoding == 'multisig':
                data_array = list(chunks(data, (33 * 2) - 1 - 8))
        else:
            data = config.PREFIX + data
            if encoding == 'pubkeyhash':
                data_array = list(chunks(data + config.PREFIX, 20 - 1))  # Prefix is also a suffix here.
            elif encoding == 'multisig':
                data_array = list(chunks(data, 33 - 1))
        if encoding == 'opreturn':
            data_array = list(chunks(data, config.OP_RETURN_MAX_SIZE))
            assert len(data_array) == 1  # Only one OP_RETURN output currently supported (OP_RETURN messages should all be shorter than 40 bytes, at the moment).
    else:
        data_array = []
    # Calculate total TET to be sent.
    tet_out = 0
    if encoding == 'multisig': data_value = multisig_dust_size
    elif encoding == 'opreturn': data_value = op_return_value
    else: data_value = regular_dust_size # Pay‐to‐PubKeyHash
    tet_out = sum([data_value for data_chunk in data_array])
    tet_out += sum([value for address, value in destination_outputs])
    # Get size of outputs.
    if encoding == 'multisig': data_output_size = 81        # 71 for the data
    elif encoding == 'opreturn': data_output_size = 90      # 80 for the data
    else: data_output_size = 25 + 9                         # Pay‐to‐PubKeyHash (25 for the data?)
    outputs_size = ((25 + 9) * len(destination_outputs)) + (len(data_array) * data_output_size)
    # Get inputs.
    unspent = get_unspent_txouts(source)
    unspent = sort_unspent_txouts(unspent, allow_unconfirmed_inputs)
    logging.debug('Sorted UTXOs: {}'.format([print_coin(coin) for coin in unspent]))
    inputs, tet_in = [], 0
    change_quantity = 0
    sufficient_funds = False
    final_fee = fee_per_kb
    # Accumulate coins until inputs cover outputs plus fee, with change that
    # is either zero or above the dust limit.
    for coin in unspent:
        logging.debug('New input: {}'.format(print_coin(coin)))
        inputs.append(coin)
        tet_in += round(coin['amount'] * config.UNIT)
        # If exact fee is specified, use that. Otherwise, calculate size of tx and base fee on that (plus provide a minimum fee for selling TET).
        if exact_fee:
            final_fee = exact_fee
        else:
            size = 181 * len(inputs) + outputs_size + 10
            necessary_fee = (int(size / 1000) + 1) * fee_per_kb
            final_fee = max(fee_provided, necessary_fee)
            assert final_fee >= 1 * fee_per_kb
        # Check if good.
        change_quantity = tet_in - (tet_out + final_fee)
        logging.debug('Change quantity: {} TET'.format(change_quantity / config.UNIT))
        if change_quantity == 0 or change_quantity >= regular_dust_size: # If change is necessary, must not be a dust output.
            sufficient_funds = True
            break
    if not sufficient_funds:
        # Approximate needed change, fee by with most recently calculated quantities.
        total_tet_out = tet_out + max(change_quantity, 0) + final_fee
        raise exceptions.BalanceError('Insufficient tetcoins at address {}. (Need approximately {} {}.) To spend unconfirmed coins, use the flag `--unconfirmed`. (Unconfirmed coins cannot be spent from multi‐sig addresses.)'.format(source, total_tet_out / config.UNIT, config.TET))
    # Construct outputs.
    if data: data_output = (data_array, data_value)
    else: data_output = None
    if change_quantity:
        if multisig_source:
            change_output = (source.split('_')[1], change_quantity)
        else:
            change_output = (source, change_quantity)
    else: change_output = None
    # Replace multi‐sig addresses with multi‐sig pubkeys.
    if multisig_source:
        source = multisig_pubkeyhashes_to_pubkeys(source)
        self_public_key = binascii.unhexlify(source.split('_')[1])
    destination_outputs_new = []
    for (destination, value) in destination_outputs:
        if len(destination.split('_')) > 1:
            destination_outputs_new.append((multisig_pubkeyhashes_to_pubkeys(destination), value))
        else:
            destination_outputs_new.append((destination, value))
    if len(destination_outputs) != len(destination_outputs_new):
        raise exceptions.AddressError('Could not convert destination pubkeyhashes to pubkeys.')
    destination_outputs = destination_outputs_new
    # Serialise inputs and outputs.
    unsigned_tx = serialise(block_index, encoding, inputs, destination_outputs, data_output, change_output, source=source, self_public_key=self_public_key)
    unsigned_tx_hex = binascii.hexlify(unsigned_tx).decode('utf-8')
    return unsigned_tx_hex
def sign_tx (unsigned_tx_hex, private_key_wif=None):
    """Sign unsigned transaction serialisation.

    With a WIF private key, shells out to the (Python 2-only) ``pytettool``;
    otherwise asks the Tetcoin Core wallet (assumed unlocked) to sign.
    """
    if private_key_wif:
        # TODO: Hack! (pytetcointools is Python 2 only)
        import subprocess
        i = 0
        tx_hex = unsigned_tx_hex
        while True: # pytettool doesn’t implement `signall`
            try:
                # NOTE(review): `i` is never incremented, so every pass passes
                # index 0; the loop only ends when pytettool errors out.
                # Confirm whether this is intended.
                tx_hex = subprocess.check_output(['pytettool', 'sign', tx_hex, str(i), private_key_wif], stderr=subprocess.DEVNULL)
            except Exception as e:
                break
        if tx_hex != unsigned_tx_hex:
            signed_tx_hex = tx_hex.decode('utf-8')
            return signed_tx_hex[:-1]   # Get rid of newline.
        else:
            # Nothing was signed before the tool failed.
            raise exceptions.TransactionError('Could not sign transaction with pytettool.')
    else:   # Assume source is in wallet and wallet is unlocked.
        result = sign_raw_transaction(unsigned_tx_hex)
        if result['complete']:
            signed_tx_hex = result['hex']
        else:
            raise exceptions.TransactionError('Could not sign transaction with Tetcoin Core.')
        return signed_tx_hex
def broadcast_tx (signed_tx_hex):
    """Broadcast a signed transaction to the network via send_raw_transaction."""
    return send_raw_transaction(signed_tx_hex)
def normalize_quantity(quantity, divisible=True):
    """Convert a satoshi quantity to whole units (float, 8 decimal places).

    Indivisible quantities are returned unchanged.
    """
    if not divisible:
        return quantity
    # Decimal arithmetic with banker's rounding avoids float drift before
    # the final conversion.
    units = (D(quantity) / D(config.UNIT)).quantize(
        D('.00000000'), rounding=decimal.ROUND_HALF_EVEN)
    return float(units)
def get_tet_supply(normalize=False):
"""returns the total supply of {} (based on what Tetcoin Core says the current block height is)""".format(config.TET)
block_count = get_block_count()
blocks_remaining = block_count
total_supply = 0
reward = 128.0
while blocks_remaining > 0:
if blocks_remaining >= 210000:
blocks_remaining -= 210000
total_supply += 210000 * reward
reward /= 2
else:
total_supply += (blocks_remaining * reward)
blocks_remaining = 0
return total_supply if normalize else int(total_supply * config.UNIT)
def get_unspent_txouts(source):
    """Return a list of unspent outputs (coins) for a specific address.

    For a multi-signature source ('m_addr1_..._n') coins are reconstructed by
    scanning raw transaction history; otherwise the local wallet (or an
    external blockchain service) is queried.

    :return: list of dicts; in the multi-sig path each entry has the keys
        'amount', 'confirmations', 'scriptPubKey', 'txid' and 'vout'.
    """
    addresses = source.split('_')
    if len(addresses) > 1:
        # Multi-signature source: rebuild the coin set from raw transactions.
        outputs = []
        raw_transactions = search_raw_transactions(addresses[1])
        # Get all coins.
        for tx in raw_transactions:
            for vout in tx['vout']:
                scriptpubkey = vout['scriptPubKey']
                if scriptpubkey['type'] == 'multisig' and 'addresses' in scriptpubkey.keys():
                    # Keep the output only if every component address (between
                    # the m and n counters) appears in the script.
                    found = True
                    for address in addresses[1:-1]:
                        if not address in scriptpubkey['addresses']:
                            found = False
                    if found:
                        coin = {'amount': vout['value'],
                                'confirmations': tx['confirmations'],
                                'scriptPubKey': scriptpubkey['hex'],
                                'txid': tx['txid'],
                                'vout': vout['n']
                               }
                        outputs.append(coin)
        # Prune away spent coins.
        # NOTE(review): quadratic over the transaction history — fine for
        # small histories, potentially slow for busy addresses.
        unspent = []
        for output in outputs:
            spent = False
            for tx in raw_transactions:
                for vin in tx['vin']:
                    if (vin['txid'], vin['vout']) == (output['txid'], output['vout']):
                        spent = True
            if not spent:
                unspent.append(output)
    else:
        # TODO: remove account (and address?) fields
        if is_mine(source):
            # Wallet address: filter the wallet's unspent list to this address.
            wallet_unspent = list_unspent()
            unspent = []
            for output in wallet_unspent:
                try:
                    if output['address'] == source:
                        unspent.append(output)
                except KeyError:
                    pass
        else:
            # Foreign address: fall back to the blockchain service.
            unspent = blockchain.listunspent(source)
    return unspent
|
from argparse import ArgumentParser
from getpass import getuser, getpass
from sys import stderr
from edsudoku.server import app
from edsudoku.server.database import Base, engine, commit
from edsudoku.server.users import User, UserPermission
__author__ = 'Eli Daian <elidaian@gmail.com>'
def _parse_args():
"""
Parse command line arguments.
:return: The parsed arguments.
:rtype: :class:`argparse.Namespace`
"""
parser = ArgumentParser(description='Initialize an empty DB')
parser.add_argument('-u', '--user',
default=getuser(),
metavar='USERNAME',
dest='user',
help='Root user name')
parser.add_argument('-p', '--password',
default=None,
metavar='PASSWORD',
dest='password',
help='Root user password')
parser.add_argument('-d', '--drop-old',
action='store_true',
dest='drop',
help='Use for dropping all information in the DB')
return parser.parse_args()
def main():
    """
    Main entry point for this script.

    Creates (optionally after dropping) the DB schema and adds a root user
    with full permissions. NOTE: Python 2 only (print statements).
    """
    args = _parse_args()
    user = args.user
    # Fall back to an interactive prompt when no password was given.
    password = args.password or getpass()
    print 'Initializing DB...'
    if args.drop:
        print >> stderr, 'WARNING: All information is being dropped.'
        Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine)
    with app.app_context():
        # Create the root user with every known permission, then persist.
        User.new_user(user, password, UserPermission.PERMISSIONS).add()
        commit()
    print 'Done!'
# Run the DB initialisation when executed directly as a script.
if __name__ == '__main__':
    main()
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``mesh3d.colorbar.tickformat`` property."""
    def __init__(
        self, plotly_name="tickformat", parent_name="mesh3d.colorbar", **kwargs
    ):
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit type for this property; callers may override it
            # via kwargs.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs
        )
|
from figment import Component
import random
class Sticky(Component):
    """A difficult-to-drop item."""

    def __init__(self, stickiness=0.5):
        # Probability-like weight in [0, 1] used by roll_for_drop.
        self.stickiness = stickiness

    def to_dict(self):
        """Serialise this component's state."""
        return dict(stickiness=self.stickiness)

    def roll_for_drop(self):
        """Roll against stickiness; True when the random draw lands below it."""
        roll = random.random()
        return roll < self.stickiness
|
from unittest import TestCase
class RegressionTestCase(TestCase):
    '''
    Used for regression tests. Allows to compare files and update test data if desired.
    '''

    def _read_file(self, path):
        """Return the full text content of *path* (file closed via context manager)."""
        with open(path, 'r') as f:
            return f.read()

    def _offer_update(self, referenceFile, actualText, prompt):
        """Interactively offer to store *actualText* as the new reference data.

        Returns True when the user accepted and the reference file was written.
        """
        print(prompt)
        response = input()
        if response in ('y', 'Y'):
            with open(referenceFile, 'w') as f:
                f.write(actualText)
            return True
        return False

    def regressionTest(self, referenceFile, outputFile):
        """Compare *outputFile* against *referenceFile*.

        On a missing reference or a mismatch, print both texts and offer to
        adopt the actual output as the new reference data.
        """
        actualText = self._read_file(outputFile)
        try:
            referenceText = self._read_file(referenceFile)
        except FileNotFoundError:
            print('File not found: ' + referenceFile)
            print('Actual output:\n')
            print(actualText)
            if self._offer_update(referenceFile, actualText,
                                  '\nUse this as new test data? (y/n)'):
                return
            self.fail('No valid test data found.')
            return
        # Compare files
        if actualText != referenceText:
            print('Expected:\n')
            print(referenceText)
            print('\nActual output:\n')
            print(actualText)
            if self._offer_update(referenceFile, actualText,
                                  '\nUse this as new test data? (y/N)'):
                return
        self.assertEqual(actualText, referenceText)
|
import py.test
import threading
from Queue import Queue
from draco2.draco.context import DracoContext
class BaseTestContext(object):
    """Shared test suite for sandboxed execution contexts.

    Subclasses set ``context_class``. NOTE: this suite targets Python 2 —
    the executed snippets use ``print`` statements and the thread-safety
    helper relies on ``map`` returning a list.
    """
    # Subclasses must set this to the context class under test.
    context_class = None
    def setup_method(cls, method):
        cls.context = cls.context_class()
    def teardown_method(cls, method):
        # Required because py.test captures sys.stdout/err as well.
        cls.context._unregister_proxies()
    def test_eval(self):
        code = 'x'
        globals = { 'x': 10 }
        assert self.context.eval(code, globals=globals) == 10
    def test_run(self):
        # run() returns a (stdout, stderr) tuple.
        code = 'print x'
        globals = { 'x': 10 }
        assert self.context.run(code, globals=globals) == ('10\n', '')
    def test_streams(self):
        # stdout and stderr are captured separately.
        code = 'import sys\n'
        code += 'print >>sys.stdout, "stdout"\n'
        code += 'print >>sys.stderr, "stderr"\n'
        assert self.context.run(code) == ('stdout\n', 'stderr\n')
    def test_default(self):
        # Unknown names evaluate to the empty string by default.
        code = 'x'
        assert self.context.eval(code) == ''
    def test_default_sub(self):
        code = '\ndef func():\n    return x\nprint func()\n'
        assert self.context.run(code) == ('\n', '')
    def test_update_locals(self):
        code = 'x = 10'
        globals = {}
        locals = {}
        self.context.run(code, globals=globals, locals=locals)
        assert locals['x'] == 10
    def test_update_globals(self):
        code = 'global x\nx = 10'
        globals = {}
        locals = {}
        self.context.run(code, globals=globals, locals=locals)
        assert globals['x'] == 10
    def test_exception(self):
        # Errors raised by executed code propagate to the caller.
        code = 'syntax error'
        py.test.raises(SyntaxError, self.context.run, code)
        code = 'raise TypeError'
        py.test.raises(TypeError, self.context.run, code)
    def test_unicode_output(self):
        code = r'print u"\u20ac"'
        assert self.context.run(code) == (u'\u20ac\n', '')
    def _test_thread_safety(self, queue):
        # Worker: run a slow, chunked script and report via the queue whether
        # its captured output stayed in order.
        context = self.context_class()
        code = ['import time']
        for i in range(1000):
            code.append('print %d' % i)
            if not i % 100:
                code.append('time.sleep(0.1)')
        code = '\n'.join(code) + '\n'
        stdout, stderr = context.run(code)
        # Python 2 semantics: map() returns a list here (sliceable).
        numbers = map(int, stdout.splitlines())
        sorted = numbers[:]
        sorted.sort()
        queue.put(numbers == sorted)
    def test_thread_safety(self):
        # Run many workers concurrently; each must see only its own output.
        threads = []
        nthreads = 10
        queue = Queue(nthreads)
        for i in range(nthreads):
            threads.append(threading.Thread(target=self._test_thread_safety,
                                            args=(queue,)))
        for i in range(nthreads):
            threads[i].start()
        for i in range(nthreads):
            assert queue.get() is True
        for i in range(nthreads):
            threads[i].join()
class TestContext(BaseTestContext):
    """Run the shared context test suite against ``DracoContext``."""
    context_class = DracoContext
|
from subprocess import check_output
import re
import string
import sys
import os.path
# Build AssemblyInfo.fs from its template, substituting version numbers
# derived from the most recent git tag (`git describe`).
projectdir = sys.argv[1]
inpath = os.path.join(projectdir, "AssemblyInfo.fs.tmpl")
outpath = os.path.join(projectdir, "AssemblyInfo.fs")
# e.g. "v1.2.3-4-gabcdef" (plus "-d" when the working tree is dirty).
# Decode so the regexes below work on text on both Python 2 and 3.
tag = check_output(["git", "describe", "--dirty=-d"]).decode("utf-8")
main_match = re.match(r"v([^\-]+)(?:-([0-9a-z\-]+))?", tag)
main_version_text = main_match.group(1)
extra_text = main_match.group(2)
version_match = re.match(r"([0-9]+)\.([0-9]+)(?:\.([0-9]+))?", main_version_text)
major = version_match.group(1)
minor = version_match.group(2)
hotfix = version_match.group(3) or "0"
revision = "0"
hashref = None
if extra_text is not None:
    # extra_text is "<commits-since-tag>-<hash...>" or just "<hash>".
    parts = extra_text.split("-", 1)
    if len(parts) > 1:
        revision = parts[0]
        hashref = parts[1]
    else:
        hashref = parts[0]
# str.join works on Python 2 and 3 (string.join was Python 2 only).
version = ".".join([major, minor, hotfix, revision])
infoversion = version
if hashref is not None:
    infoversion += "-" + hashref
# Context managers guarantee both files are closed even on error.
with open(inpath, "r") as f, open(outpath, "w") as out:
    for line in f:
        line = line.replace("{VERSION}", version)
        line = line.replace("{INFOVERSION}", infoversion)
        out.write(line)
|
import argparse
import logging
from pathlib import Path
import tempfile
from swaggertosdk.SwaggerToSdkNewCLI import (
build_project,
)
from swaggertosdk.SwaggerToSdkCore import (
CONFIG_FILE,
read_config,
solve_relative_path,
extract_conf_from_readmes,
get_input_paths,
get_repo_tag_meta,
)
_LOGGER = logging.getLogger(__name__)
def generate(config_path, sdk_folder, project_pattern, readme, restapi_git_folder, autorest_bin=None):
    """Build the configured SDK projects with Autorest.

    :param config_path: path to the swagger-to-sdk JSON configuration.
    :param sdk_folder: SDK repository root (``~`` is expanded).
    :param project_pattern: optional list of project-name substrings to build.
    :param readme: optional specific readme (path or http URL); when given,
        only projects whose name references it are built.
    :param restapi_git_folder: local REST API specs clone; required when no
        readme is given.
    :param autorest_bin: optional explicit autorest executable.
    :raises ValueError: if neither a readme nor a REST API folder is given.
    """
    sdk_folder = Path(sdk_folder).expanduser()
    config = read_config(sdk_folder, config_path)
    global_conf = config["meta"]
    repotag = get_repo_tag_meta(global_conf)
    # Resolve all configured paths relative to the SDK folder.
    global_conf["autorest_options"] = solve_relative_path(global_conf.get("autorest_options", {}), sdk_folder)
    global_conf["envs"] = solve_relative_path(global_conf.get("envs", {}), sdk_folder)
    global_conf["advanced_options"] = solve_relative_path(global_conf.get("advanced_options", {}), sdk_folder)
    if restapi_git_folder:
        restapi_git_folder = Path(restapi_git_folder).expanduser()
    # Look for configuration in Readme
    if readme:
        swagger_files_in_pr = [readme]
    else:
        if not restapi_git_folder:
            raise ValueError("RestAPI folder must be set if you don't provide a readme.")
        swagger_files_in_pr = list(restapi_git_folder.glob('specification/**/readme.md'))
    _LOGGER.info(f"Readme files: {swagger_files_in_pr}")
    extract_conf_from_readmes(swagger_files_in_pr, restapi_git_folder, repotag, config)
    with tempfile.TemporaryDirectory() as temp_dir:
        for project, local_conf in config.get("projects", {}).items():
            # Filter projects: by the requested readme, or by name substrings.
            if readme:
                if str(readme) not in project:
                    _LOGGER.info("Skip project %s (readme was %s)", project, readme)
                    continue
            else:
                if project_pattern and not any(p in project for p in project_pattern):
                    _LOGGER.info("Skip project %s", project)
                    continue
            local_conf["autorest_options"] = solve_relative_path(local_conf.get("autorest_options", {}), sdk_folder)
            if readme and readme.startswith("http"):
                # Simplify here, do not support anything else than Readme.md
                absolute_markdown_path = readme
                _LOGGER.info(f"HTTP Markdown input: {absolute_markdown_path}")
            else:
                # Resolve local markdown / optional input files against the
                # REST API clone.
                markdown_relative_path, optional_relative_paths = get_input_paths(global_conf, local_conf)
                _LOGGER.info(f"Markdown input: {markdown_relative_path}")
                _LOGGER.info(f"Optional inputs: {optional_relative_paths}")
                absolute_markdown_path = None
                if markdown_relative_path:
                    absolute_markdown_path = Path(restapi_git_folder or "", markdown_relative_path).resolve()
                if optional_relative_paths:
                    local_conf.setdefault('autorest_options', {})['input-file'] = [
                        Path(restapi_git_folder or "", input_path).resolve()
                        for input_path
                        in optional_relative_paths
                    ]
            build_project(
                temp_dir,
                project,
                absolute_markdown_path,
                sdk_folder,
                global_conf,
                local_conf,
                autorest_bin
            )
def generate_main():
    """Command-line entry point: parse arguments, set up logging, run generate()."""
    cli = argparse.ArgumentParser(
        description='Build SDK using Autorest, offline version.',
        formatter_class=argparse.RawTextHelpFormatter)
    cli.add_argument('--rest-folder', '-r', dest='restapi_git_folder', default=None,
                     help='Rest API git folder. [default: %(default)s]')
    cli.add_argument('--project', '-p', dest='project', action='append',
                     help='Select a specific project. Do all by default. You can use a substring for several projects.')
    cli.add_argument('--readme', '-m', dest='readme',
                     help='Select a specific readme. Must be a path')
    cli.add_argument('--config', '-c', dest='config_path', default=CONFIG_FILE,
                     help='The JSON configuration format path [default: %(default)s]')
    cli.add_argument('--autorest', dest='autorest_bin',
                     help='Force the Autorest to be executed. Must be a executable command.')
    cli.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                     help='Verbosity in INFO mode')
    cli.add_argument('--debug', dest='debug', action='store_true',
                     help='Verbosity in DEBUG mode')
    cli.add_argument('--sdk-folder', '-s', dest='sdk_folder', default='.',
                     help='A Python SDK folder. [default: %(default)s]')
    options = cli.parse_args()
    # Configure the root logger only when some verbosity was requested.
    root_logger = logging.getLogger()
    if options.verbose or options.debug:
        logging.basicConfig()
        root_logger.setLevel(logging.DEBUG if options.debug else logging.INFO)
    generate(options.config_path,
             options.sdk_folder,
             options.project,
             options.readme,
             options.restapi_git_folder,
             options.autorest_bin)
# Allow invoking this module directly as a script.
if __name__ == "__main__":
    generate_main()
|
import json
import pytest
from pyre_extensions import none_throws
from backend.common.consts.alliance_color import AllianceColor
from backend.common.consts.comp_level import CompLevel
from backend.common.helpers.match_tiebreakers import MatchTiebreakers
from backend.common.models.alliance import MatchAlliance
from backend.common.models.match import Match
@pytest.fixture(autouse=True)
def auto_add_ndb_context(ndb_context) -> None:
    # Pull in the ndb_context fixture for every test in this module.
    pass
def test_not_elim_match() -> None:
    # A qualification match yields no tiebreak winner.
    m = Match(
        comp_level=CompLevel.QM,
    )
    assert MatchTiebreakers.tiebreak_winner(m) == ""
def test_no_breakdowns() -> None:
    # An elimination match without alliance/breakdown data yields no winner.
    m = Match(comp_level=CompLevel.SF)
    assert MatchTiebreakers.tiebreak_winner(m) == ""
def test_match_not_played() -> None:
    # Scores of -1 mark an unplayed match; no tiebreak winner either.
    m = Match(
        comp_level=CompLevel.SF,
        alliances_json=json.dumps(
            {
                AllianceColor.RED: MatchAlliance(
                    teams=["frc1", "frc2", "frc3"],
                    score=-1,
                ),
                AllianceColor.BLUE: MatchAlliance(
                    teams=["frc4", "frc5", "frc6"],
                    score=-1,
                ),
            }
        ),
    )
    assert MatchTiebreakers.tiebreak_winner(m) == ""
def test_2016_tiebreakers(test_data_importer) -> None:
    # Real 2016 championship finals match: expect red as winning alliance.
    test_data_importer.import_match(__file__, "data/2016cmp_f1m3.json")
    match: Match = none_throws(Match.get_by_id("2016cmp_f1m3"))
    assert match.winning_alliance == AllianceColor.RED
def test_2017_tiebreakers(test_data_importer) -> None:
    # 2017 district quarterfinal: expect red as winning alliance.
    test_data_importer.import_match(__file__, "data/2017dal_qf3m2.json")
    match: Match = none_throws(Match.get_by_id("2017dal_qf3m2"))
    assert match.winning_alliance == AllianceColor.RED
def test_2019_tiebreakers(test_data_importer) -> None:
    # 2019 quarterfinal: expect red as winning alliance.
    test_data_importer.import_match(__file__, "data/2019hiho_qf4m1.json")
    match: Match = none_throws(Match.get_by_id("2019hiho_qf4m1"))
    assert match.winning_alliance == AllianceColor.RED
def test_2020_tiebreakers(test_data_importer) -> None:
    # 2020 semifinal: expect blue as winning alliance.
    test_data_importer.import_match(__file__, "data/2020mndu2_sf2m2.json")
    match: Match = none_throws(Match.get_by_id("2020mndu2_sf2m2"))
    assert match.winning_alliance == AllianceColor.BLUE
|
"""Tests for the views of the ``django-user-media`` app."""
import os
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django_libs.tests.factories import UserFactory
from django_libs.tests.mixins import ViewTestMixin
from user_media.tests.factories import DummyModelFactory, UserMediaImageFactory
class CreateImageViewTestCase(ViewTestMixin, TestCase):
    """
    Tests for the ``CreateImageView`` generic view class.

    Tests the case when the view is called with a content type and object
    id (the docstring previously claimed the opposite — that case is covered
    by the "NoCtype" test case).
    """
    def setUp(self):
        self.dummy = DummyModelFactory()
        self.user = self.dummy.user
        self.other_dummy = DummyModelFactory()
    def get_view_name(self):
        return 'user_media_image_create'
    def get_view_kwargs(self):
        ctype = ContentType.objects.get_for_model(self.dummy)
        return {
            'content_type': ctype.model,
            'object_id': self.dummy.pk,
        }
    def test_view(self):
        self.should_be_callable_when_authenticated(self.user)
        # Fix: was ``test_file = test_file = os.path.join(...)`` — a
        # duplicated assignment left over from editing.
        test_file = os.path.join(
            settings.PROJECT_ROOT, 'test_media/img.png')
        # Open the PNG in binary mode ('rb'): required for Python 3 and on
        # Windows, harmless elsewhere.
        with open(test_file, 'rb') as fp:
            data = {'image': fp, }
            resp = self.client.post(self.get_url(), data=data)
        self.assertRedirects(resp, self.dummy.get_absolute_url(),
            msg_prefix=(
                'When a content object given, view should redirect to the'
                ' absolute URL of the content object.'))
        with open(test_file, 'rb') as fp:
            data = {'image': fp, 'next': '/?foo=bar'}
            resp = self.client.post(self.get_url(), data=data)
        self.assertRedirects(resp, '/?foo=bar', msg_prefix=(
            'When a content object and ``next`` in POST data is given,'
            ' view should redirect to the URL given in ``next`` and ignore'
            ' the content object absolute url.'))
        with open(test_file, 'rb') as fp:
            data = {'image': fp, }
            resp = self.client.post(self.get_url() + '?next=/', data=data)
        self.assertRedirects(resp, '/', msg_prefix=(
            'When a content object and ``next`` in GET data is given,'
            ' view should redirect to the URL given in ``next`` and ignore'
            ' the content object absolute url.'))
        resp = self.client.post(self.get_url(
            view_kwargs={'content_type': 'dummymodel', 'object_id': 999}))
        self.assertEqual(resp.status_code, 404, msg=(
            'Should raise 404 if the content object does not exist'))
        resp = self.client.post(self.get_url(
            view_kwargs={'content_type': 'foobarmodel', 'object_id': 1}))
        self.assertEqual(resp.status_code, 404, msg=(
            'Should raise 404 if the content type does not exist'))
        view_kwargs = {
            'content_type': 'dummymodel',
            'object_id': self.other_dummy.pk
        }
        resp = self.client.post(self.get_url(view_kwargs=view_kwargs))
        self.assertEqual(resp.status_code, 404, msg=(
            "Should raise 404 if the user tries to add an image to another"
            " uers's content object"))
class CreateImageViewNoCtypeTestCase(ViewTestMixin, TestCase):
    """
    Tests for the ``CreateImageView`` generic view class.

    Tests the case when the view is called without content type and object
    id.
    """
    def setUp(self):
        self.user = UserFactory()

    def get_view_name(self):
        return 'user_media_image_create_no_ctype'

    def test_view(self):
        self.should_be_callable_when_authenticated(self.user)
        # BUG FIX: removed the duplicated ``test_file = test_file =``
        # assignment; open the PNG in binary mode for a faithful upload.
        test_file = os.path.join(
            settings.PROJECT_ROOT, 'test_media/img.png')
        with open(test_file, 'rb') as fp:
            data = {'image': fp, 'next': '/?foo=bar'}
            resp = self.client.post(self.get_url(), data=data)
            self.assertRedirects(resp, '/?foo=bar', msg_prefix=(
                'When no content object given, view should redirect to the'
                ' POST data ``next`` which must be given.'))
        with open(test_file, 'rb') as fp:
            data = {'image': fp, }
            try:
                resp = self.client.post(self.get_url(), data=data)
            # BUG FIX: ``except Exception, ex`` is Python-2-only syntax and
            # ``ex.message`` was removed in Python 3; use the portable forms.
            except Exception as ex:
                self.assertTrue('No content object' in str(ex), msg=(
                    'If no content object and no ``next`` parameter given,'
                    ' view should raise an exception'))
class EditAndDeleteTestCaseMixin(object):
    """Tests that are the same for both the edit and the delete view."""
    def setUp(self):
        self.dummy = DummyModelFactory()
        self.user = self.dummy.user
        self.image = UserMediaImageFactory(user=self.user)
        self.image.content_object = self.dummy
        self.image.save()
        self.image_no_content_object = UserMediaImageFactory(user=self.user)
        self.other_image = UserMediaImageFactory()

    def test_view_with_content_object(self):
        self.should_be_callable_when_authenticated(self.user)
        resp = self.client.post(self.get_url())
        self.assertRedirects(resp, self.dummy.get_absolute_url(), msg_prefix=(
            "If the image had a content object, view should redirect to"
            " that object's absolute url"))
        self.image = UserMediaImageFactory(user=self.user)
        resp = self.client.post(self.get_url(), data={'next': '/?foo=bar'})
        self.assertRedirects(resp, '/?foo=bar', msg_prefix=(
            "If the image had a content object and ``next`` in the POST data,"
            " view should redirect to the URL given in ``next`` and ignore"
            " the content object's absolute URL"))
        self.image = UserMediaImageFactory(user=self.user)
        resp = self.client.post(self.get_url() + '?next=/')
        self.assertRedirects(resp, '/', msg_prefix=(
            "If the image had a content object and ``next`` in the GET data,"
            " view should redirect to the URL given in ``next`` and ignore"
            " the content object's absolute URL"))
        # Another user's image -> 404.
        resp = self.client.post(self.get_url(
            view_kwargs={'pk': self.other_image.pk}))
        self.assertEqual(resp.status_code, 404, msg=(
            "Should return 404 if the user tries to manipulate another user's"
            " object"))
        # Non-existing image -> 404.
        resp = self.client.post(self.get_url(view_kwargs={'pk': 999}))
        self.assertEqual(resp.status_code, 404, msg=(
            'Should return 404 if the user tries to manipulate a non existing'
            ' object'))

    def test_view_without_content_object(self):
        self.login(self.user)
        data = {'next': '/?foo=bar', }
        resp = self.client.post(self.get_url(
            view_kwargs={'pk': self.image_no_content_object.pk}), data=data)
        self.assertRedirects(resp, '/?foo=bar', msg_prefix=(
            'If the image had no content object, view should redirect to'
            ' the POST data ``next`` that must be given'))
        self.image_no_content_object = UserMediaImageFactory(user=self.user)
        try:
            resp = self.client.post(self.get_url(
                view_kwargs={'pk': self.image_no_content_object.pk}))
        # BUG FIX: ``except Exception, ex`` is Python-2-only syntax and
        # ``ex.message`` was removed in Python 3; use the portable forms.
        except Exception as ex:
            self.assertTrue('No content object' in str(ex), msg=(
                'If no content object and no ``next`` parameter given,'
                ' view should raise an exception'))
class DeleteImageViewTestCase(ViewTestMixin, EditAndDeleteTestCaseMixin,
                              TestCase):
    """Tests for the ``DeleteImageView`` generic view class.

    All behavioural tests come from ``EditAndDeleteTestCaseMixin``; this class
    only wires in the URL name and kwargs.
    """
    def get_view_name(self):
        return 'user_media_image_delete'
    def get_view_kwargs(self):
        return {'pk': self.image.pk}
class EditImageViewTestCase(ViewTestMixin, EditAndDeleteTestCaseMixin,
                            TestCase):
    """Tests for the ``EditImageView`` view class.

    Shares all behavioural tests with the delete view via the mixin.
    """
    def get_view_name(self):
        return 'user_media_image_edit'
    def get_view_kwargs(self):
        return {'pk': self.image.pk}
|
import re
import operator
debug = False  # when True, intermediate candidate/score dumps are printed below
# NOTE(review): this flag is shadowed by ``def test()`` further down, so the
# ``if test: test()`` guard at the bottom always runs the demo regardless.
test = True
def is_number(s):
    """Return True when *s* parses as a number.

    Strings containing a ``.`` are tried as floats; anything else must be a
    valid integer literal (so e.g. ``'1e5'`` is rejected, matching the
    original behaviour).
    """
    parser = float if '.' in s else int
    try:
        parser(s)
    except ValueError:
        return False
    return True
def load_stop_words(stop_word_file):
    """
    Utility function to load stop words from a file and return as a list of words.
    @param stop_word_file Path and file name of a file containing stop words.
    @return list A list of stop words.

    Lines whose first non-whitespace character is ``#`` are treated as
    comments; a single line may hold several whitespace-separated words.
    """
    stop_words = []
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original relied on the garbage collector).
    with open(stop_word_file) as handle:
        for line in handle:
            if line.strip()[0:1] != "#":
                for word in line.split():  # in case more than one per line
                    stop_words.append(word)
    return stop_words
def separate_words(text, min_word_return_size):
    """Split *text* into lowercase words longer than *min_word_return_size* chars.

    Purely numeric tokens are dropped: numbers are left inside phrases but
    should not contribute to the word scores.
    """
    splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
    cleaned = (piece.strip().lower() for piece in splitter.split(text))
    return [word for word in cleaned
            if len(word) > min_word_return_size
            and word != ''
            and not is_number(word)]
def split_sentences(text):
    """Split *text* into sentence fragments on punctuation delimiters."""
    delimiter_pattern = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
    return delimiter_pattern.split(text)
def build_stop_word_regex(stop_word_file_path):
    """Compile one case-insensitive pattern matching any stop word in the file.

    Each word is wrapped as ``\\bWORD(?![\\w-])``: the look-ahead keeps
    hyphenated continuations (e.g. matching "of" inside "of-age") intact.
    """
    stop_word_list = load_stop_words(stop_word_file_path)
    stop_word_regex_list = []
    for word in stop_word_list:
        # BUG FIX: escape the word so regex metacharacters in a stop list
        # cannot corrupt (or change the meaning of) the compiled pattern.
        word_regex = r'\b' + re.escape(word) + r'(?![\w-])'  # added look ahead for hyphen
        stop_word_regex_list.append(word_regex)
    # NOTE(review): an empty stop list yields the empty pattern, which matches
    # everywhere -- callers are assumed to provide a non-empty file.
    stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
    return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern):
    """Split each sentence on stop words; the leftover runs are candidate phrases."""
    phrase_list = []
    for sentence in sentence_list:
        without_stops = stopword_pattern.sub('|', sentence.strip())
        for raw_phrase in without_stops.split("|"):
            candidate = raw_phrase.strip().lower()
            if candidate:
                phrase_list.append(candidate)
    return phrase_list
def calculate_word_scores(phraseList):
    """Score every word as deg(w)/freq(w) over the candidate phrases.

    freq(w) counts occurrences of the word; deg(w) adds, for each occurrence,
    the number of co-occurring words in that phrase (phrase length - 1) plus
    the word's own frequency.
    """
    word_frequency = {}
    word_degree = {}
    for phrase in phraseList:
        members = separate_words(phrase, 0)
        co_occurrence = len(members) - 1
        # NOTE: an experimental cap of the degree at 3 was removed upstream.
        for word in members:
            word_frequency[word] = word_frequency.get(word, 0) + 1
            word_degree[word] = word_degree.get(word, 0) + co_occurrence
    for word, freq in word_frequency.items():
        word_degree[word] += freq
    # Word score = deg(w) / freq(w); the *1.0 keeps the division a float one
    # on Python 2 as well.
    return {word: word_degree[word] / (freq * 1.0)
            for word, freq in word_frequency.items()}
def generate_candidate_keyword_scores(phrase_list, word_score):
    """Score each candidate phrase as the sum of its member word scores."""
    keyword_candidates = {}
    for phrase in phrase_list:
        keyword_candidates[phrase] = sum(
            word_score[word] for word in separate_words(phrase, 0))
    return keyword_candidates
class Rake(object):
    """RAKE keyword extractor configured with a stop-word file."""

    def __init__(self, stop_words_path):
        self.stop_words_path = stop_words_path
        self.__stop_words_pattern = build_stop_word_regex(stop_words_path)

    def run(self, text):
        """Return (phrase, score) pairs for *text*, highest score first."""
        sentences = split_sentences(text)
        phrases = generate_candidate_keywords(sentences,
                                              self.__stop_words_pattern)
        scores = calculate_word_scores(phrases)
        candidates = generate_candidate_keyword_scores(phrases, scores)
        return sorted(candidates.items(),
                      key=operator.itemgetter(1), reverse=True)
def test():
    """Demo: run the RAKE pipeline step by step on the canonical sample text.

    Requires ``SmartStoplist.txt`` in the working directory; prints the top
    third of scored keywords, then repeats the run through the ``Rake`` class.
    """
    text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
    # Split text into sentences
    sentenceList = split_sentences(text)
    #stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
    stoppath = "SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
    stopwordpattern = build_stop_word_regex(stoppath)
    # generate candidate keywords
    phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
    # calculate individual word scores
    wordscores = calculate_word_scores(phraseList)
    # generate candidate keyword scores
    keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
    if debug: print(keywordcandidates)
    sortedKeywords = sorted(keywordcandidates.items(), key=operator.itemgetter(1), reverse=True)
    if debug: print(sortedKeywords)
    totalKeywords = len(sortedKeywords)
    if debug: print(totalKeywords)
    # Print only the best-scoring third of the candidates.
    print(sortedKeywords[0:(totalKeywords // 3)])
    # Same pipeline again, this time through the Rake class facade.
    rake = Rake("SmartStoplist.txt")
    keywords = rake.run(text)
    print(keywords)
if __name__ == '__main__':
    # BUG FIX: the original guard read ``if test: test()``, but the module
    # level boolean ``test`` is shadowed by ``def test()`` above, so the
    # condition always evaluated the (truthy) function object rather than
    # the flag.  Call the demo directly to make the real behaviour explicit.
    test()
|
import logging
import time
import pywintypes
import win32con
import win32event
import win32file
import win32security
class WinFile(object):
    """Overlapped (asynchronous) read/write wrapper around a Win32 file handle."""

    def __init__(self, filename):
        # Open for shared read/write with FILE_FLAG_OVERLAPPED so each read
        # and write can use its own OVERLAPPED structure and event handle.
        self._hfile = win32file.CreateFile(
            filename,
            win32con.GENERIC_READ | win32con.GENERIC_WRITE,
            win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
            win32security.SECURITY_ATTRIBUTES(),
            win32con.OPEN_EXISTING,
            win32con.FILE_FLAG_OVERLAPPED,
            0)
        self._read_ovrlpd = pywintypes.OVERLAPPED()
        self._read_ovrlpd.hEvent = win32event.CreateEvent(None, True, False,
                                                          None)
        self._write_ovrlpd = pywintypes.OVERLAPPED()
        self._write_ovrlpd.hEvent = win32event.CreateEvent(None, True, False,
                                                           None)

    def read(self, n):
        """Read up to *n* bytes; returns an empty buffer on failure."""
        (nr, buf) = (0, ())
        try:
            (hr, buf) = win32file.ReadFile(
                self._hfile,
                win32file.AllocateReadBuffer(n),
                self._read_ovrlpd)
            nr = win32file.GetOverlappedResult(self._hfile,
                                               self._read_ovrlpd,
                                               True)
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception so the process stays killable.
        except Exception:
            logging.debug("Exception on reading from VirtIO", exc_info=True)
            # We do sleep here to avoid constant reads spike the CPU
            time.sleep(1)
        return buf[:nr]

    def write(self, s):
        """Write *s* to the file; returns bytes written, or 0 on failure."""
        try:
            win32file.WriteFile(self._hfile, s, self._write_ovrlpd)
            return win32file.GetOverlappedResult(self._hfile,
                                                 self._write_ovrlpd,
                                                 True)
        # BUG FIX: narrowed from a bare ``except:`` (same rationale as read).
        except Exception:
            logging.debug("Exception writing to VirtIO", exc_info=True)
            # We do sleep here to avoid constant writes to spike the CPU
            time.sleep(1)
            return 0
|
from django.contrib.auth.models import Group
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
import mptt
from mptt.models import MPTTModel, TreeForeignKey
from mptt.managers import TreeManager
class CustomTreeManager(TreeManager):
    # No behaviour change; used below (see Person) to verify that the default
    # tree manager can be overridden.
    pass
@python_2_unicode_compatible
class Category(MPTTModel):
    """Simple named tree node used by the mptt test-suite."""
    name = models.CharField(max_length=50)
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')

    # BUG FIX: ``python_2_unicode_compatible`` is a *class* decorator; the
    # original applied it to the ``__str__`` method, which raises ValueError
    # on Python 2 (and is a silent no-op mistake on Python 3).  Moved it
    # onto the class.
    def __str__(self):
        return self.name

    def delete(self):
        # Pass-through override kept so tests can exercise a custom delete().
        super(Category, self).delete()
@python_2_unicode_compatible
class Genre(MPTTModel):
    """Tree node with a unique name."""
    name = models.CharField(max_length=50, unique=True)
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')

    # BUG FIX: ``python_2_unicode_compatible`` is a class decorator; applying
    # it to ``__str__`` (as the original did) raises ValueError on Python 2.
    def __str__(self):
        return self.name
class Insert(MPTTModel):
    # Bare tree model (parent link only) used by the insertion tests.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
@python_2_unicode_compatible
class MultiOrder(MPTTModel):
    """Tree node ordered on insertion by several fields (one descending)."""
    name = models.CharField(max_length=50)
    size = models.PositiveIntegerField()
    date = models.DateField()
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')

    class MPTTMeta:
        order_insertion_by = ['name', 'size', '-date']

    # BUG FIX: ``python_2_unicode_compatible`` is a class decorator; applying
    # it to ``__str__`` (as the original did) raises ValueError on Python 2.
    def __str__(self):
        return self.name
class Node(MPTTModel):
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
    # Renames every internal MPTT bookkeeping attribute, proving the names
    # are fully configurable.
    class MPTTMeta:
        left_attr = 'does'
        right_attr = 'zis'
        level_attr = 'madness'
        tree_id_attr = 'work'
@python_2_unicode_compatible
class OrderedInsertion(MPTTModel):
    """Tree node kept in name order at insertion time."""
    name = models.CharField(max_length=50)
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')

    class MPTTMeta:
        order_insertion_by = ['name']

    # BUG FIX: ``python_2_unicode_compatible`` is a class decorator; applying
    # it to ``__str__`` (as the original did) raises ValueError on Python 2.
    def __str__(self):
        return self.name
class Tree(MPTTModel):
    # Minimal tree: only the parent link, default MPTT attribute names.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class NewStyleMPTTMeta(MPTTModel):
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
    # MPTTMeta declared as a new-style (object-derived) class must also work.
    class MPTTMeta(object):
        left_attr = 'testing'
@python_2_unicode_compatible
class Person(MPTTModel):
    name = models.CharField(max_length=50)
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')

    # just testing it's actually possible to override the tree manager
    objects = models.Manager()
    my_tree_manager = CustomTreeManager()

    # BUG FIX: ``python_2_unicode_compatible`` is a class decorator; applying
    # it to ``__str__`` (as the original did) raises ValueError on Python 2.
    def __str__(self):
        return self.name
class Student(Person):
    # Multi-table inheritance from an MPTT model.
    type = models.CharField(max_length=50)
@python_2_unicode_compatible
class CustomPKName(MPTTModel):
    """Tree node with a custom primary-key column name."""
    my_id = models.AutoField(db_column='my_custom_name', primary_key=True)
    name = models.CharField(max_length=50)
    # NOTE(review): "my_cusom_parent" looks like a typo for "my_custom_parent",
    # but it is a live column name -- renaming would require a migration, so
    # it is deliberately left untouched.
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='children', db_column="my_cusom_parent")

    # BUG FIX: ``python_2_unicode_compatible`` is a class decorator; applying
    # it to ``__str__`` (as the original did) raises ValueError on Python 2.
    def __str__(self):
        return self.name
class MultiTableInheritanceA1(MPTTModel):
    # Variant A: the tree link lives on the MPTT base table ...
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class MultiTableInheritanceA2(MultiTableInheritanceA1):
    # ... and the child table adds only a plain data field.
    name = models.CharField(max_length=50)
class MultiTableInheritanceB1(MPTTModel):
    # Variant B: the MPTT base table has only a data field ...
    name = models.CharField(max_length=50)
class MultiTableInheritanceB2(MultiTableInheritanceB1):
    # ... and the tree link is declared on the child table instead.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class AbstractModel(MPTTModel):
    # Abstract MPTT base: concrete subclasses inherit the tree fields.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
    ghosts = models.CharField(max_length=50)
    class Meta:
        abstract = True
class ConcreteModel(AbstractModel):
    name = models.CharField(max_length=50)
class AbstractConcreteAbstract(ConcreteModel):
    # abstract --> concrete --> abstract
    class Meta:
        abstract = True
class ConcreteAbstractConcreteAbstract(ConcreteModel):
    # concrete --> abstract --> concrete --> abstract
    pass
class ConcreteConcrete(ConcreteModel):
    # another subclass (concrete this time) of the root concrete model
    pass
class SingleProxyModel(ConcreteModel):
    # Proxy (no new table) over the concrete MPTT model.
    class Meta:
        proxy = True
class DoubleProxyModel(SingleProxyModel):
    # Proxy of a proxy.
    class Meta:
        proxy = True
class AutoNowDateFieldModel(MPTTModel):
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
    now = models.DateTimeField(auto_now_add=True)
    class MPTTMeta:
        # Ordering on an auto_now_add field exercises insertion ordering by a
        # value that is only assigned at save time.
        order_insertion_by = ('now',)
# Attach a tree structure to Django's built-in Group model and register it
# with mptt the legacy way (mptt.register instead of subclassing MPTTModel).
TreeForeignKey(Group, blank=True, null=True).contribute_to_class(Group, 'parent')
mptt.register(Group, order_insertion_by=('name',))
|
from . import vdp_utils
import json
__author__ = '2b||!2b'
# Visa Developer Platform sandbox endpoint (not production).
BASE_URL = 'https://sandbox.api.visa.com'
def reverse_funds(S):
    """POST a fixed sample reverse-funds transaction to the VDP sandbox.

    @param S: requests-style session with auth and certificates configured.
    @return: the HTTP response object.
    """
    uri = '/visadirect/fundstransfer/v1/reversefundstransactions/'
    # Static sandbox payload, kept as JSON text (presumably pasted from the
    # API documentation) rather than rewritten as a dict literal.
    body = json.loads('''{
    "acquirerCountryCode": "608",
    "acquiringBin": "408999",
    "amount": "24.01",
    "cardAcceptor": {
        "address": {
            "country": "USA",
            "county": "San Mateo",
            "state": "CA",
            "zipCode": "94404"
        },
        "idCode": "VMT200911026070",
        "name": "Visa Inc. USA-Foster City",
        "terminalId": "365539"
    },
    "localTransactionDateTime": "2016-04-16T22:24:51",
    "originalDataElements": {
        "acquiringBin": "408999",
        "approvalCode": "20304B",
        "systemsTraceAuditNumber": "897825",
        "transmissionDateTime": "2016-04-16T22:24:51"
    },
    "pointOfServiceCapability": {
        "posTerminalEntryCapability": "2",
        "posTerminalType": "4"
    },
    "pointOfServiceData": {
        "motoECIIndicator": "0",
        "panEntryMode": "90",
        "posConditionCode": "00"
    },
    "retrievalReferenceNumber": "330000550000",
    "senderCardExpiryDate": "2015-10",
    "senderCurrencyCode": "USD",
    "senderPrimaryAccountNumber": "4895100000055127",
    "systemsTraceAuditNumber": "451050",
    "transactionIdentifier": "381228649430011"
}''')
    r = S.post(BASE_URL + uri, json=body)
    return r
def pull_funds(S):
    """POST a fixed sample pull-funds (AFT) transaction to the VDP sandbox.

    @param S: requests-style session with auth and certificates configured.
    @return: the HTTP response object.
    """
    uri = '/visadirect/fundstransfer/v1/pullfundstransactions/'
    # Static sandbox payload, kept as JSON text (presumably pasted from the
    # API documentation) rather than rewritten as a dict literal.
    body = json.loads('''{
    "acquirerCountryCode": "840",
    "acquiringBin": "408999",
    "amount": "1124.02",
    "businessApplicationId": "AA",
    "cardAcceptor": {
        "address": {
            "country": "USA",
            "county": "San Mateo",
            "state": "CA",
            "zipCode": "94404"
        },
        "idCode": "ABCD1234ABCD123",
        "name": "Visa Inc. USA-Foster City",
        "terminalId": "ABCD1234"
    },
    "cavv": "0700100038238906000013405823891061668252",
    "foreignExchangeFeeTransaction": "11.99",
    "localTransactionDateTime": "2016-04-16T14:44:04",
    "retrievalReferenceNumber": "330000550000",
    "senderCardExpiryDate": "2015-10",
    "senderCurrencyCode": "USD",
    "senderPrimaryAccountNumber": "4895142232120006",
    "surcharge": "0",
    "systemsTraceAuditNumber": "451001"
}''')
    r = S.post(BASE_URL + uri, json=body)
    return r
def push_funds(S):
    """POST a fixed sample push-funds (OCT) transaction to the VDP sandbox.

    @param S: requests-style session with auth and certificates configured.
    @return: the HTTP response object.
    """
    uri = '/visadirect/fundstransfer/v1/pushfundstransactions/'
    # Static sandbox payload, kept as JSON text (presumably pasted from the
    # API documentation) rather than rewritten as a dict literal.
    body = json.loads('''{
    "acquirerCountryCode": "840",
    "acquiringBin": "408999",
    "amount": "124.05",
    "businessApplicationId": "AA",
    "cardAcceptor": {
        "address": {
            "country": "USA",
            "county": "San Mateo",
            "state": "CA",
            "zipCode": "94404"
        },
        "idCode": "CA-IDCode-77765",
        "name": "Visa Inc. USA-Foster City",
        "terminalId": "TID-9999"
    },
    "localTransactionDateTime": "2016-04-16T21:40:04",
    "merchantCategoryCode": "6012",
    "pointOfServiceData": {
        "motoECIIndicator": "0",
        "panEntryMode": "90",
        "posConditionCode": "00"
    },
    "recipientName": "rohan",
    "recipientPrimaryAccountNumber": "4957030420210496",
    "retrievalReferenceNumber": "412770451018",
    "senderAccountNumber": "4653459515756154",
    "senderAddress": "901 Metro Center Blvd",
    "senderCity": "Foster City",
    "senderCountryCode": "124",
    "senderName": "Mohammed Qasim",
    "senderReference": "",
    "senderStateCode": "CA",
    "sourceOfFundsCode": "05",
    "systemsTraceAuditNumber": "451018",
    "transactionCurrencyCode": "USD",
    "transactionIdentifier": "381228649430015"
}''')
    r = S.post(BASE_URL + uri, json=body)
    return r
# SECURITY: these sandbox credentials are hard-coded in source.  Even for a
# sandbox, credentials should live in environment variables or a secrets
# store -- flagged for follow-up, values left unchanged.
_USER_ID = '5RR5JQ34LMHPV8ZRQYOG21U5avMv8m06-Sv04yCoWlP_xI6ag'
_PASSWORD = 'k7zV8AgB4F3q33BDJ'
_CERT = './visa_api/cert_newest.pem'
_KEY = './visa_api/key_newest.pem'


def _execute(request_fn):
    """Open an authenticated JSON session, run *request_fn*, print and return the response.

    Extracted to remove the session/header boilerplate that was triplicated
    across reverse(), push() and pull().
    """
    with vdp_utils.MSession(_USER_ID, _PASSWORD, _CERT, _KEY) as S:
        S.headers.update({'content-type': 'application/json',
                          'accept': 'application/json'})
        r = request_fn(S)
    print(r.status_code)
    print(r.content)
    return r


def reverse():
    """Run the sample reverse-funds transaction against the sandbox."""
    return _execute(reverse_funds)


def push():
    """Run the sample push-funds transaction against the sandbox."""
    return _execute(push_funds)


def pull():
    """Run the sample pull-funds transaction against the sandbox."""
    return _execute(pull_funds)
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import grey_dilation
from skimage import img_as_float
from skimage import color
from skimage import exposure
from skimage.util.dtype import dtype_limits
# Public helpers exported by this demo module.
__all__ = ['imshow_all', 'imshow_with_histogram', 'mean_filter_demo',
           'mean_filter_interactive_demo', 'plot_cdf', 'plot_histogram']
# Default to grayscale rendering for single-channel images.
plt.rcParams['image.cmap'] = 'gray'
def imshow_rgb_shifted(rgb_image, shift=100, ax=None):
    """Plot each RGB layer with an x, y shift."""
    if ax is None:
        ax = plt.gca()
    height, width, n_channels = rgb_image.shape
    offset = 0  # identical shift applied along x and y per channel
    for channel_index, channel in enumerate(iter_channels(rgb_image)):
        layer = np.zeros((height, width, n_channels), dtype=channel.dtype)
        layer[:, :, channel_index] = channel
        ax.imshow(layer,
                  extent=[offset, offset + width, offset, offset + height],
                  alpha=0.7)
        offset += shift
    # `imshow` fits the extents of the last image shown, so we need to rescale.
    ax.autoscale()
    ax.set_axis_off()
def imshow_all(*images, **kwargs):
    """ Plot a series of images side-by-side.
    Convert all images to float so that images have a common intensity range.
    Parameters
    ----------
    limits : str
        Control the intensity limits. By default, 'image' is used set the
        min/max intensities to the min/max of all images. Setting `limits` to
        'dtype' can also be used if you want to preserve the image exposure.
    titles : list of str
        Titles for subplots. If the length of titles is less than the number
        of images, empty strings are appended.
    kwargs : dict
        Additional keyword-arguments passed to `imshow`.
    """
    images = [img_as_float(img) for img in images]
    titles = kwargs.pop('titles', [])
    if len(titles) != len(images):
        titles = list(titles) + [''] * (len(images) - len(titles))
    limits = kwargs.pop('limits', 'image')
    if limits == 'image':
        kwargs.setdefault('vmin', min(img.min() for img in images))
        kwargs.setdefault('vmax', max(img.max() for img in images))
    elif limits == 'dtype':
        vmin, vmax = dtype_limits(images[0])
        kwargs.setdefault('vmin', vmin)
        kwargs.setdefault('vmax', vmax)
    nrows, ncols = kwargs.get('shape', (1, len(images)))
    size = nrows * kwargs.pop('size', 5)
    width = size * len(images)
    if nrows > 1:
        width /= nrows * 1.33
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, size))
    # BUG FIX: with a single subplot, ``plt.subplots`` returns a bare Axes
    # object (no ``.ravel``); normalise to an array so one image works too.
    axes = np.atleast_1d(axes)
    for ax, img, label in zip(axes.ravel(), images, titles):
        ax.imshow(img, **kwargs)
        ax.set_title(label)
def imshow_with_histogram(image, **kwargs):
    """ Plot an image side-by-side with its histogram.
    - Plot the image next to the histogram
    - Plot each RGB channel separately (if input is color)
    - Automatically flatten channels
    - Select reasonable bins based on the image's dtype
    See `plot_histogram` for information on how the histogram is plotted.
    """
    fig_w, fig_h = plt.rcParams['figure.figsize']
    fig, (ax_image, ax_hist) = plt.subplots(ncols=2,
                                            figsize=(2 * fig_w, fig_h))
    kwargs.setdefault('cmap', plt.cm.gray)
    ax_image.imshow(image, **kwargs)
    plot_histogram(image, ax=ax_hist)
    # pretty it up
    ax_image.set_axis_off()
    match_axes_height(ax_image, ax_hist)
    return ax_image, ax_hist
def match_axes_height(ax_src, ax_dst):
    """ Match the axes height of two axes objects.
    The height of `ax_dst` is synced to that of `ax_src`.
    """
    plt.draw()  # HACK: plot geometry isn't set until the plot is drawn
    src_box = ax_src.get_position()
    dst_box = ax_dst.get_position()
    ax_dst.set_position(
        [dst_box.xmin, src_box.ymin, dst_box.width, src_box.height])
def plot_cdf(image, ax=None):
    """Plot the cumulative distribution function of *image* intensities.

    Parameters
    ----------
    ax : matplotlib Axes, optional
        Target axes; defaults to the current axes.
    """
    # BUG FIX: the advertised ``ax=None`` default previously crashed with
    # AttributeError; fall back to the current axes as plot_histogram does.
    ax = ax if ax is not None else plt.gca()
    img_cdf, bins = exposure.cumulative_distribution(image)
    ax.plot(bins, img_cdf, 'r')
    ax.set_ylabel("Fraction of pixels below intensity")
def plot_histogram(image, ax=None, **kwargs):
    """ Plot the histogram of an image (gray-scale or RGB) on `ax`.
    Calculate histogram using `skimage.exposure.histogram` and plot as filled
    line. If an image has a 3rd dimension, assume it's RGB and plot each
    channel separately.
    """
    if ax is None:
        ax = plt.gca()
    if image.ndim == 2:
        _plot_histogram(ax, image, color='black', **kwargs)
    elif image.ndim == 3:
        # `channel` is the red, green, or blue channel of the image.
        for channel, channel_color in zip(iter_channels(image), 'rgb'):
            _plot_histogram(ax, channel, color=channel_color, **kwargs)
def _plot_histogram(ax, image, alpha=0.3, **kwargs):
    """Filled-line histogram; skimage's histogram has good int/float defaults."""
    counts, bin_centers = exposure.histogram(image)
    ax.fill_between(bin_centers, counts, alpha=alpha, **kwargs)
    ax.set_xlabel('intensity')
    ax.set_ylabel('# pixels')
def iter_channels(color_image):
    """Yield each color channel (the last axis) of *color_image* in order."""
    for channel_index in range(color_image.shape[-1]):
        yield color_image[..., channel_index]
def mean_filter_demo(image, vmax=1):
    """Return a step function that visualises a 3x3 mean filter pixel by pixel.

    The returned callable takes ``i_step`` (pixel index in row-major order),
    lazily computes all filtering steps up to it, and shows the kernel
    overlay next to the partially filtered image.
    """
    mean_factor = 1.0 / 9.0  # This assumes a 3x3 kernel.
    iter_kernel_and_subimage = iter_kernel(image)
    # Cache of (overlay, filtered) pairs so the slider can scrub backwards
    # without recomputing earlier steps.
    image_cache = []
    def mean_filter_step(i_step):
        while i_step >= len(image_cache):
            # BUG FIX: the original tested ``i_step == 0`` here, so the very
            # first call with i_step > 0 indexed an empty cache (IndexError).
            # Base the choice on the cache itself.
            filtered = image if not image_cache else image_cache[-1][1]
            filtered = filtered.copy()
            (i, j), mask, subimage = next(iter_kernel_and_subimage)
            filter_overlay = color.label2rgb(mask, image, bg_label=0,
                                             colors=('yellow', 'red'))
            # Mean of the neighbourhood replaces the centre pixel.
            filtered[i, j] = np.sum(mean_factor * subimage)
            image_cache.append((filter_overlay, filtered))
        imshow_all(*image_cache[i_step], vmax=vmax)
        plt.show()
    return mean_filter_step
def mean_filter_interactive_demo(image):
    """Hook mean_filter_demo up to an interactive step slider in a notebook."""
    # ``IPython.html.widgets`` was deprecated and removed; prefer the modern
    # standalone ``ipywidgets`` package and fall back to the legacy import.
    try:
        import ipywidgets as widgets
        slider_cls = widgets.IntSlider
    except ImportError:
        from IPython.html import widgets
        slider_cls = widgets.IntSliderWidget
    mean_filter_step = mean_filter_demo(image)
    step_slider = slider_cls(min=0, max=image.size - 1, value=0)
    widgets.interact(mean_filter_step, i_step=step_slider)
def iter_kernel(image, size=1):
    """ Yield position, kernel mask, and image for each pixel in the image.
    The kernel mask has a 2 at the center pixel and 1 around it. The actual
    width of the kernel is 2*size + 1.
    """
    footprint = 2 * size + 1
    for (row, col), _ in iter_pixels(image):
        mask = np.zeros(image.shape, dtype='int16')
        mask[row, col] = 1
        mask = grey_dilation(mask, size=footprint)  # grow the 1s to the kernel footprint
        mask[row, col] = 2
        subimage = image[bounded_slice((row, col), image.shape[:2], size=size)]
        yield (row, col), mask, subimage
def iter_pixels(image):
    """ Yield pixel position (row, column) and pixel intensity. """
    n_rows, n_cols = image.shape[:2]
    for row in range(n_rows):
        for col in range(n_cols):
            yield (row, col), image[row, col]
def bounded_slice(center, xy_max, size=1, i_min=0):
    """Per-axis slices of half-width *size* around *center*, clamped to [i_min, xy_max)."""
    return [slice(max(c - size, i_min), min(c + size + 1, upper))
            for c, upper in zip(center, xy_max)]
|
"""
https://leetcode.com/problems/judge-route-circle/
https://leetcode.com/submissions/detail/129339056/
https://leetcode.com/submissions/detail/129339394/
"""
class Solution1:
    def judgeCircle(self, moves):
        """
        :type moves: str
        :rtype: bool

        Track the (row, col) displacement move by move; the robot is back at
        the origin iff both components are zero.  Unknown characters are
        ignored, matching the original behaviour.
        """
        deltas = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)}
        row = col = 0
        for move in moves:
            d_row, d_col = deltas.get(move, (0, 0))
            row += d_row
            col += d_col
        return row == 0 and col == 0
class Solution:
    def judgeCircle(self, moves):
        """
        :type moves: str
        :rtype: bool

        The walk returns to the origin iff vertical moves cancel out and
        horizontal moves cancel out.
        """
        ups, downs = moves.count('U'), moves.count('D')
        lefts, rights = moves.count('L'), moves.count('R')
        return ups == downs and lefts == rights
import unittest
class Test(unittest.TestCase):
    """Sanity checks against the LeetCode examples."""
    def test(self):
        checker = Solution()
        self.assertEqual(checker.judgeCircle('UD'), True)
        self.assertEqual(checker.judgeCircle('LL'), False)
if __name__ == '__main__':
    # Run the unittest suite when executed as a script.
    unittest.main()
|
"""prix polynomial reversed init-value xor-out finder"""
from crcmod import mkCrcFun
from faulhaber_const import commands
import time
# Brute-force every CRC-8 parameter combination (8-bit polynomial tail,
# init value, xor-out, reversed flag) and log the ones that map the known
# command set onto distinct CRC values (i.e. collision-free).
match = open('prix.txt', 'w+')
# ``time.clock()`` was removed in Python 3.8; perf_counter is the
# recommended wall-clock replacement for interval timing.
start = time.perf_counter()
lastpos = 0
for possibility in range(0x2000000):
    polynomial = (possibility & 0xff) | 0x100  # 9-bit poly as crcmod expects
    init_value = (possibility & 0xff00) >> 8
    xor_output = (possibility & 0xff0000) >> 16
    # BUG FIX: the original read ``bool(possibility & 0x1000000) >> 17``,
    # which right-shifts the bool and is therefore *always* 0 -- reversed
    # polynomials were never tried.  Bit 24 alone selects the flag.
    revers = bool(possibility & 0x1000000)
    crc_function = mkCrcFun(polynomial, init_value, revers, xor_output)
    seen = [False] * 0x100
    collision = False
    for cmd in commands:
        crc = crc_function(bytes(cmd, 'ascii'))
        if seen[crc]:
            collision = True
            break
        seen[crc] = True
    if not collision:
        print('crc-8', possibility, 'WORKS' * 3, file=match)
    # Roughly once per second, report how many combinations were tested.
    if time.perf_counter() > start + 1:
        progress = possibility - lastpos
        lastpos = possibility
        print(progress, 'pr second')
        start = time.perf_counter()
# Close the results file explicitly so buffered matches are flushed.
match.close()
|
"""Get the branch year."""
import sys
script_dir = sys.path[0]  # directory of this script; NOTE(review): unused in the visible code
import os
import pdb
import argparse
import iris
import remove_drift_year_axis as rdya
def main(inargs):
    """Run the program.

    Loads the experiment and control cubes, determines the branch year
    (from --branch_year if given, otherwise from the experiment metadata)
    and prints the matching index on the control run's year axis.
    """
    exp_cube = iris.load(inargs.experiment_file)[0]
    cntrl_cube = iris.load(inargs.control_file)[0]
    # BUG FIX: explicit None check -- the old truthiness test would also
    # ignore a legitimately supplied branch year of 0.
    if inargs.branch_year is not None:
        branch_year = inargs.branch_year
    else:
        branch_year = rdya.get_branch_year(exp_cube)
    cntrl_years = list(cntrl_cube.coord('year').points)
    try:
        branch_index = cntrl_years.index(branch_year)
        print(f'Branch index: {branch_index}')
    except ValueError as error_message:
        # Branch year not on the control axis: show the error and the
        # available years to aid debugging.
        print(error_message)
        print(cntrl_years)
if __name__ == '__main__':
    # argument_default=SUPPRESS omits unset optionals from the namespace, but
    # --branch_year declares an explicit default so it is always present.
    parser = argparse.ArgumentParser(description=__doc__,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("experiment_file", type=str)
    parser.add_argument("control_file", type=str)
    parser.add_argument("--branch_year", type=int, default=None, help="override metadata")
    args = parser.parse_args()
    main(args)
|
"""
dedupe provides the main user interface for the library the
Dedupe class
"""
from __future__ import print_function, division
from future.utils import viewitems, viewvalues, viewkeys
import itertools
import logging
import pickle
import numpy
import multiprocessing
import random
import warnings
import copy
import os
from collections import defaultdict, OrderedDict
import simplejson as json
import rlr
import dedupe
import dedupe.sampling as sampling
import dedupe.core as core
import dedupe.training as training
import dedupe.serializer as serializer
import dedupe.predicates as predicates
import dedupe.blocking as blocking
import dedupe.clustering as clustering
from dedupe.datamodel import DataModel
logger = logging.getLogger(__name__)  # module-level logger, stdlib convention
class Matching(object):
    """
    Base Class for Record Matching Classes

    Subclasses are expected to provide ``_blockedPairs``, ``_cluster`` and
    the ``data_model``/``classifier``/``num_cores`` attributes used below.

    Public methods:
    - `__init__`
    - `thresholdBlocks`
    - `matchBlocks`
    """
    def __init__(self) :
        pass
    def thresholdBlocks(self, blocks, recall_weight=1.5): # pragma : nocover
        """
        Returns the threshold that maximizes the expected F score,
        a weighted average of precision and recall for a sample of
        blocked data.
        Keyword arguments:
        blocks -- Sequence of tuples of records, where each
                  tuple is a set of records covered by a blocking
                  predicate
        recall_weight -- Sets the tradeoff between precision and
                         recall. I.e. if you care twice as much about
                         recall as you do precision, set recall_weight
                         to 2.
        """
        # Duplicate-probability score for every blocked candidate pair.
        probability = core.scoreDuplicates(self._blockedPairs(blocks),
                                           self.data_model,
                                           self.classifier,
                                           self.num_cores)['score']
        probability.sort()
        probability = probability[::-1]  # descending order of likelihood
        # Expected number of true duplicates among the top-k pairs, for each k.
        expected_dupes = numpy.cumsum(probability)
        recall = expected_dupes / expected_dupes[-1]
        precision = expected_dupes / numpy.arange(1, len(expected_dupes) + 1)
        # Weighted F-measure; constant factors are omitted since they do not
        # change where the maximum falls.
        score = recall * precision / (recall + recall_weight ** 2 * precision)
        i = numpy.argmax(score)
        logger.info('Maximum expected recall and precision')
        logger.info('recall: %2.3f', recall[i])
        logger.info('precision: %2.3f', precision[i])
        logger.info('With threshold: %2.3f', probability[i])
        return probability[i]
    def matchBlocks(self, blocks, threshold=.5, *args, **kwargs): # pragma : no cover
        """
        Partitions blocked data and returns a list of clusters, where
        each cluster is a tuple of record ids
        Keyword arguments:
        blocks -- Sequence of tuples of records, where each
                  tuple is a set of records covered by a blocking
                  predicate
        threshold -- Number between 0 and 1 (default is .5). We will
                     only consider as duplicates record pairs as
                     duplicates if their estimated duplicate likelihood is
                     greater than the threshold.
                     Lowering the number will increase recall, raising it
                     will increase precision
        """
        # Setting the cluster threshold this ways is not principled,
        # but seems to reliably help performance
        cluster_threshold = threshold * 0.7
        candidate_records = self._blockedPairs(blocks)
        matches = core.scoreDuplicates(candidate_records,
                                       self.data_model,
                                       self.classifier,
                                       self.num_cores,
                                       threshold)
        logger.debug("matching done, begin clustering")
        clusters = self._cluster(matches,
                                 cluster_threshold, *args, **kwargs)
        # If ``matches`` is file-backed (exposes a ``filename``), delete the
        # backing file once clustering is done; in-memory results raise
        # AttributeError here and are simply skipped.
        try :
            match_file = matches.filename
            del matches
            os.remove(match_file)
        except AttributeError :
            pass
        return clusters
class DedupeMatching(Matching) :
    """
    Class for Deduplication, extends Matching.

    Use DedupeMatching when you have a dataset that can contain
    multiple references to the same entity.

    Public methods:
    - `__init__`
    - `match`
    - `threshold`
    """
    def __init__(self, *args, **kwargs) :
        super(DedupeMatching, self).__init__(*args, **kwargs)
        self._cluster = clustering.cluster
        self._linkage_type = "Dedupe"

    def match(self, data, threshold = 0.5) : # pragma : no cover
        """
        Identifies records that all refer to the same entity, returns tuples
        containing a set of record ids and a confidence score as a float between 0
        and 1. The record_ids within each set should refer to the
        same entity and the confidence score is a measure of our confidence that
        all the records in a cluster refer to the same entity.

        This method should only used for small to moderately sized datasets
        for larger data, use matchBlocks

        Arguments:
        data -- Dictionary of records, where the keys are record_ids
                and the values are dictionaries with the keys being
                field names
        threshold -- Number between 0 and 1 (default is .5). We will consider
                     records as potential duplicates if the predicted probability
                     of being a duplicate is above the threshold.
                     Lowering the number will increase recall, raising it
                     will increase precision
        """
        blocked_pairs = self._blockData(data)
        return self.matchBlocks(blocked_pairs, threshold)

    def threshold(self, data, recall_weight = 1.5) : # pragma : no cover
        """
        Returns the threshold that maximizes the expected F score,
        a weighted average of precision and recall for a sample of
        data.

        Arguments:
        data -- Dictionary of records, where the keys are record_ids
                and the values are dictionaries with the keys being
                field names
        recall_weight -- Sets the tradeoff between precision and
                         recall. I.e. if you care twice as much about
                         recall as you do precision, set recall_weight
                         to 2.
        """
        blocked_pairs = self._blockData(data)
        return self.thresholdBlocks(blocked_pairs, recall_weight)

    def _blockedPairs(self, blocks) :
        """
        Generate tuples of pairs of records from a block of records

        Arguments:
        blocks -- an iterable sequence of blocked records
        """
        # Peek at the first block to validate its structure without
        # consuming the rest of the (possibly lazy) sequence.
        block, blocks = core.peek(blocks)
        self._checkBlock(block)
        combinations = itertools.combinations
        pairs = (combinations(sorted(block), 2) for block in blocks)
        return itertools.chain.from_iterable(pairs)

    def _checkBlock(self, block) :
        # Validate that a block is a sequence of
        # (record_id, record_dict, smaller_ids_set) triples.
        if block :
            try :
                if len(block[0]) < 3 :
                    raise ValueError("Each item in a block must be a "
                                     "sequence of record_id, record, and smaller ids "
                                     "and the records also must be dictionaries")
            except :
                raise ValueError("Each item in a block must be a "
                                 "sequence of record_id, record, and smaller ids "
                                 "and the records also must be dictionaries")
            try :
                # Duck-type checks: record behaves like a dict,
                # smaller ids behave like a set.
                block[0][1].items()
                block[0][2].isdisjoint([])
            except :
                raise ValueError("The record must be a dictionary and "
                                 "smaller_ids must be a set")
            self.data_model.check(block[0][1])

    def _blockData(self, data_d):
        # Group records by the block keys the blocker emits.
        blocks = defaultdict(dict)
        self.blocker.indexAll(data_d)
        for block_key, record_id in self.blocker(viewitems(data_d)) :
            blocks[block_key][record_id] = data_d[record_id]
        self.blocker.resetIndices()
        # Keep only blocks with more than one record, and drop blocks
        # whose exact record-id set was already seen (the
        # `or seen_blocks.add(...)` trick records the set as a side
        # effect while evaluating falsy).
        seen_blocks = set()
        blocks = [records for records in viewvalues(blocks)
                  if len(records) > 1
                  and not (frozenset(records.keys()) in seen_blocks
                           or seen_blocks.add(frozenset(records.keys())))]
        for block in self._redundantFree(blocks) :
            yield block

    def _redundantFree(self, blocks) :
        """
        Redundant-free Comparisons from Kolb et al, "Dedoop:
        Efficient Deduplication with Hadoop"

        http://dbs.uni-leipzig.de/file/Dedoop.pdf
        """
        # Map each record id to the ids of all blocks containing it.
        coverage = defaultdict(list)
        for block_id, records in enumerate(blocks) :
            for record_id, record in viewitems(records) :
                coverage[record_id].append(block_id)
        for block_id, records in enumerate(blocks) :
            if block_id % 10000 == 0 :
                logger.info("%s blocks" % block_id)
            marked_records = []
            for record_id, record in viewitems(records) :
                # Earlier blocks that also contain this record; pairs
                # already covered by those blocks need not be re-compared.
                smaller_ids = {covered_id for covered_id
                               in coverage[record_id]
                               if covered_id < block_id}
                marked_records.append((record_id, record, smaller_ids))
            yield marked_records
class RecordLinkMatching(Matching) :
    """
    Class for Record Linkage, extends Matching.

    Use RecordLinkMatching when you have two datasets that you want to merge
    where each dataset, individually, contains no duplicates.

    Public methods:
    - `__init__`
    - `match`
    - `threshold`
    """
    def __init__(self, *args, **kwargs) :
        super(RecordLinkMatching, self).__init__(*args, **kwargs)
        self._cluster = clustering.greedyMatching
        self._linkage_type = "RecordLink"

    def match(self, data_1, data_2, threshold = 0.5) : # pragma : no cover
        """
        Identifies pairs of records that refer to the same entity, returns
        tuples containing a set of record ids and a confidence score as a float
        between 0 and 1. The record_ids within each set should refer to the
        same entity and the confidence score is the estimated probability that
        the records refer to the same entity.

        This method should only used for small to moderately sized datasets
        for larger data, use matchBlocks

        Arguments:
        data_1 -- Dictionary of records from first dataset, where the
                  keys are record_ids and the values are dictionaries
                  with the keys being field names
        data_2 -- Dictionary of records from second dataset, same form
                  as data_1
        threshold -- Number between 0 and 1 (default is .5). We will consider
                     records as potential duplicates if the predicted
                     probability of being a duplicate is above the threshold.
                     Lowering the number will increase recall, raising it
                     will increase precision
        """
        blocked_pairs = self._blockData(data_1, data_2)
        return self.matchBlocks(blocked_pairs, threshold)

    def threshold(self, data_1, data_2, recall_weight = 1.5) : # pragma : no cover
        """
        Returns the threshold that maximizes the expected F score,
        a weighted average of precision and recall for a sample of
        data.

        Arguments:
        data_1 -- Dictionary of records from first dataset, where the
                  keys are record_ids and the values are dictionaries
                  with the keys being field names
        data_2 -- Dictionary of records from second dataset, same form
                  as data_1
        recall_weight -- Sets the tradeoff between precision and
                         recall. I.e. if you care twice as much about
                         recall as you do precision, set recall_weight
                         to 2.
        """
        blocked_pairs = self._blockData(data_1, data_2)
        return self.thresholdBlocks(blocked_pairs, recall_weight)

    def _blockedPairs(self, blocks) :
        """
        Generate tuples of pairs of records from a block of records

        Arguments:
        blocks -- an iterable sequence of blocked records
        """
        # Peek at the first block to validate its structure without
        # consuming the rest of the (possibly lazy) sequence.
        block, blocks = core.peek(blocks)
        self._checkBlock(block)
        product = itertools.product
        # Each block is a (base, target) pair of sequences; candidate
        # pairs are their cross product.
        pairs = (product(base, target) for base, target in blocks)
        return itertools.chain.from_iterable(pairs)

    def _checkBlock(self, block) :
        # Validate that a block is a (base_sequence, target_sequence)
        # pair of sequences of (record_id, record, covered_blocks).
        if block :
            try :
                base, target = block
            except :
                raise ValueError("Each block must be a made up of two "
                                 "sequences, (base_sequence, target_sequence)")
            if base :
                if len(base[0]) < 3 :
                    raise ValueError("Each sequence must be made up of 3-tuple "
                                     "like (record_id, record, covered_blocks)")
                self.data_model.check(base[0][1])
            if target :
                if len(target[0]) < 3 :
                    raise ValueError("Each sequence must be made up of 3-tuple "
                                     "like (record_id, record, covered_blocks)")
                self.data_model.check(target[0][1])

    def _blockGenerator(self, messy_data, blocked_records) :
        # Group blocker output by messy record id; each messy record is
        # paired against every indexed record sharing one of its block keys.
        block_groups = itertools.groupby(self.blocker(viewitems(messy_data)),
                                         lambda x : x[1])
        for i, (record_id, block_keys) in enumerate(block_groups) :
            if i % 100 == 0 :
                logger.info("%s records" % i)
            A = [(record_id, messy_data[record_id], set())]
            B = {}
            for block_key, _ in block_keys :
                if block_key in blocked_records :
                    B.update(blocked_records[block_key])
            B = [(rec_id, record, set())
                 for rec_id, record
                 in B.items()]
            if B :
                yield (A, B)

    def _blockData(self, data_1, data_2) :
        # Index the second dataset by block key, then stream
        # (messy, candidates) blocks for the first dataset.
        blocked_records = defaultdict(dict)
        self.blocker.indexAll(data_2)
        for block_key, record_id in self.blocker(data_2.items()) :
            blocked_records[block_key][record_id] = data_2[record_id]
        for each in self._blockGenerator(data_1, blocked_records) :
            yield each
        self.blocker.resetIndices()
class StaticMatching(Matching) :
    """
    Class for initializing a dedupe object from a settings file, extends Matching.

    Public methods:
    - __init__
    """
    def __init__(self,
                 settings_file,
                 num_cores=None) : # pragma : no cover
        """
        Initialize from a settings file

        #### Example usage

            # initialize from a settings file
            with open('my_learned_settings', 'rb') as f:
                deduper = dedupe.StaticDedupe(f)

        #### Keyword arguments

        `settings_file`
        A file object containing settings data.

        Settings files are typically generated by saving the settings
        learned from ActiveMatching. If you need details for this
        file see the method [`writeSettings`][[api.py#writesettings]].
        """
        if num_cores is None :
            self.num_cores = multiprocessing.cpu_count()
        else :
            self.num_cores = num_cores
        try:
            # A settings file is three sequential pickles: the data
            # model, the classifier, and the learned blocking predicates
            # (the order writeSettings uses).
            self.data_model = pickle.load(settings_file)
            self.classifier = pickle.load(settings_file)
            self.predicates = pickle.load(settings_file)
        except (KeyError, AttributeError) :
            raise SettingsFileLoadingException("This settings file is not compatible with "
                                               "the current version of dedupe. This can happen "
                                               "if you have recently upgraded dedupe.")
        except :
            raise SettingsFileLoadingException("Something has gone wrong with loading the settings file. Try deleting the file")
        logger.info(self.predicates)
        self.blocker = blocking.Blocker(self.predicates)
class ActiveMatching(Matching) :
    """
    Class for training dedupe extends Matching.

    Public methods:
    - __init__
    - readTraining
    - train
    - writeSettings
    - writeTraining
    - uncertainPairs
    - markPairs
    - cleanupTraining
    """
    # NOTE(review): this string was originally placed *after* the
    # `classifier` assignment, so it was not actually the class
    # docstring; moved above so it is.

    # Default classifier, shared as a class attribute until refit.
    classifier = rlr.RegularizedLogisticRegression()

    def __init__(self,
                 variable_definition,
                 data_sample = None,
                 num_cores = None) :
        """
        Initialize from a data model and data sample.

        #### Example usage

            # initialize from a defined set of fields
            fields = [{'field' : 'Site name', 'type': 'String'},
                      {'field' : 'Address', 'type': 'String'},
                      {'field' : 'Zip', 'type': 'String', 'Has Missing':True},
                      {'field' : 'Phone', 'type': 'String', 'Has Missing':True},
                      ]
            data_sample = [
                (
                 (854, {'city': 'san francisco',
                        'address': '300 de haro st.',
                        'name': "sally's cafe & bakery",
                        'cuisine': 'american'}),
                 (855, {'city': 'san francisco',
                        'address': '1328 18th st.',
                        'name': 'san francisco bbq',
                        'cuisine': 'thai'})
                )
            ]
            deduper = dedupe.Dedupe(fields, data_sample)

        #### Additional detail

        A field definition is a list of dictionaries where each dictionary
        describes a variable to use for comparing records.

        For details about variable types, check the documentation.
        <http://dedupe.readthedocs.org>`_

        In the data_sample, each element is a tuple of two
        records. Each record is, in turn, a tuple of the record's key and
        a record dictionary.

        In in the record dictionary the keys are the names of the
        record field and values are the record values.
        """
        self.data_model = DataModel(variable_definition)
        if num_cores is None :
            self.num_cores = multiprocessing.cpu_count()
        else :
            self.num_cores = num_cores
        self.data_sample = data_sample
        if self.data_sample :
            self._checkDataSample(self.data_sample)
            self.activeLearner = training.ActiveLearning(self.data_sample,
                                                         self.data_model,
                                                         self.num_cores)
        else :
            self.data_sample = []
            self.activeLearner = None
        # Structured-array layout for labeled examples: a label string
        # (b'match'/b'distinct') plus one float32 distance per
        # data-model variable.
        training_dtype = [('label', 'S8'),
                          ('distances', 'f4',
                           (len(self.data_model), ))]
        self.training_data = numpy.zeros(0, dtype=training_dtype)
        self.training_pairs = OrderedDict({u'distinct': [],
                                           u'match': []})
        self.blocker = None

    def cleanupTraining(self) : # pragma : no cover
        '''
        Clean up data we used for training. Free up memory.
        '''
        del self.training_data
        del self.training_pairs
        del self.activeLearner
        del self.data_sample

    def readTraining(self, training_file) :
        '''
        Read training from previously built training data file object

        Arguments:
        training_file -- file object containing the training data
        '''
        logger.info('reading training from file')
        training_pairs = json.load(training_file,
                                   cls=serializer.dedupe_decoder)
        if not any(training_pairs.values()) :
            raise EmptyTrainingException("The training file seems to contain no training examples")
        for (label, examples) in training_pairs.items():
            if examples :
                self._checkRecordPairType(examples[0])
            # Freeze records so they are hashable/immutable for training.
            examples = core.freezeData(examples)
            training_pairs[label] = examples
            self.training_pairs[label].extend(examples)
        self._addTrainingData(training_pairs)
        self._trainClassifier()

    def train(self, ppc=None, uncovered_dupes=None, maximum_comparisons=1000000, recall=0.95, index_predicates=True) : # pragma : no cover
        """Keyword arguments:

        maximum_comparisons -- The maximum number of comparisons a
                               blocking rule is allowed to make.
                               Defaults to 1000000

        recall -- The proportion of true dupe pairs in our training
                  data that that we the learned blocks must cover. If
                  we lower the recall, there will be pairs of true
                  dupes that we will never directly compare.

                  recall should be a float between 0.0 and 1.0, the default
                  is 0.95

        index_predicates -- Should dedupe consider predicates that
                            rely upon indexing the data. Index predicates can
                            be slower and take susbstantial memory.
                            Defaults to True.
        """
        if ppc is not None :
            warnings.warn('`ppc` is a deprecated argument to train. Use `maximum_comparisons` to set the maximum number records a block is allowed to cover')
        if uncovered_dupes is not None :
            warnings.warn('`uncovered_dupes` is a deprecated argument to traing. Use recall to set the proportion of true pairs that the blocking rules must cover')
        self._trainClassifier()
        self._trainBlocker(maximum_comparisons,
                           recall,
                           index_predicates)

    def _trainClassifier(self) : # pragma : no cover
        # Fit the classifier on distances vs. binary match labels.
        labels = numpy.array(self.training_data['label'] == b'match',
                             dtype='i4')
        examples = self.training_data['distances']
        self.classifier.fit(examples, labels)

    def _trainBlocker(self, maximum_comparisons, recall, index_predicates) : # pragma : no cover
        # Copy so block learning can't mutate our stored training pairs.
        matches = self.training_pairs['match'][:]
        # `self.canopies` is defined by the concrete subclasses
        # (Dedupe/RecordLink).
        predicate_set = self.data_model.predicates(index_predicates,
                                                   self.canopies)
        block_learner = self._blockLearner(predicate_set)
        self.predicates = block_learner.learn(matches,
                                              maximum_comparisons,
                                              recall)
        self.blocker = blocking.Blocker(self.predicates)

    def writeSettings(self, file_obj): # pragma : no cover
        """
        Write a settings file containing the
        data model and predicates to a file object

        Keyword arguments:
        file_obj -- file object to write settings data into
        """
        # Order must match StaticMatching.__init__'s three loads.
        pickle.dump(self.data_model, file_obj)
        pickle.dump(self.classifier, file_obj)
        pickle.dump(self.predicates, file_obj)

    def writeTraining(self, file_obj): # pragma : no cover
        """
        Write to a json file that contains labeled examples

        Keyword arguments:
        file_obj -- file object to write training data to
        """
        json.dump(self.training_pairs,
                  file_obj,
                  default=serializer._to_json,
                  tuple_as_array=False,
                  ensure_ascii=True)

    def uncertainPairs(self) :
        '''
        Provides a list of the pairs of records that dedupe is most
        curious to learn if they are matches or distinct.

        Useful for user labeling.
        '''
        # With no training data yet, seed the classifier with trivial
        # examples: a record paired with itself as a match, and one
        # random sampled pair labeled distinct.
        if self.training_data.shape[0] == 0 :
            rand_int = random.randint(0, len(self.data_sample)-1)
            random_pair = self.data_sample[rand_int]
            exact_match = (random_pair[0], random_pair[0])
            self._addTrainingData({u'match':[exact_match, exact_match],
                                   u'distinct':[random_pair]})
        self._trainClassifier()
        # Estimate the proportion of matches among the labeled pairs...
        bias = len(self.training_pairs[u'match'])
        if bias :
            bias /= (bias
                     + len(self.training_pairs[u'distinct']))
        min_examples = min(len(self.training_pairs[u'match']),
                           len(self.training_pairs[u'distinct']))
        regularizer = 10
        # ...shrunk toward 0.5 when there are few examples of either class.
        bias = ((0.5 * min_examples + bias * regularizer)
                /(min_examples + regularizer))
        return self.activeLearner.uncertainPairs(self.classifier, bias)

    def markPairs(self, labeled_pairs) :
        '''
        Add a labeled pairs of record to dedupes training set and update the
        matching model

        Argument :
        labeled_pairs -- A dictionary with two keys, `match` and `distinct`
                         the values are lists that can contain pairs of records
        '''
        # Duck-type check: must be dict-like with both required keys.
        try :
            labeled_pairs.items()
            labeled_pairs[u'match']
            labeled_pairs[u'distinct']
        except :
            raise ValueError('labeled_pairs must be a dictionary with keys '
                             '"distinct" and "match"')
        if labeled_pairs[u'match'] :
            pair = labeled_pairs[u'match'][0]
            self._checkRecordPairType(pair)
        if labeled_pairs[u'distinct'] :
            pair = labeled_pairs[u'distinct'][0]
            self._checkRecordPairType(pair)
        if not labeled_pairs[u'distinct'] and not labeled_pairs[u'match'] :
            warnings.warn("Didn't return any labeled record pairs")
        for label, pairs in labeled_pairs.items() :
            self.training_pairs[label].extend(core.freezeData(pairs))
        self._addTrainingData(labeled_pairs)

    def _checkRecordPairType(self, record_pair) :
        # A record pair must be an ordered sequence of exactly two
        # dict-like records that satisfy the data model.
        try :
            record_pair[0]
        except :
            raise ValueError("The elements of data_sample must be pairs "
                             "of record_pairs (ordered sequences of length 2)")
        if len(record_pair) != 2 :
            raise ValueError("The elements of data_sample must be pairs "
                             "of record_pairs")
        try :
            record_pair[0].keys() and record_pair[1].keys()
        except :
            raise ValueError("A pair of record_pairs must be made up of two "
                             "dictionaries ")
        self.data_model.check(record_pair[0])
        self.data_model.check(record_pair[1])

    def _checkDataSample(self, data_sample) :
        try :
            len(data_sample)
        except TypeError :
            raise ValueError("data_sample must be a sequence")
        if len(data_sample) :
            self._checkRecordPairType(data_sample[0])
        else :
            warnings.warn("You submitted an empty data_sample")

    def _addTrainingData(self, labeled_pairs) :
        """
        Appends training data to the training data collection.
        """
        for label, examples in labeled_pairs.items() :
            n_examples = len(examples)
            labels = [label] * n_examples
            new_data = numpy.empty(n_examples,
                                   dtype=self.training_data.dtype)
            new_data['label'] = labels
            new_data['distances'] = self.data_model.distances(examples)
            self.training_data = numpy.append(self.training_data,
                                              new_data)

    def _loadSample(self, data_sample) :
        # Validate and install a data sample, (re)initializing the
        # active learner around it.
        self._checkDataSample(data_sample)
        self.data_sample = data_sample
        self.activeLearner = training.ActiveLearning(self.data_sample,
                                                     self.data_model,
                                                     self.num_cores)
class StaticDedupe(DedupeMatching, StaticMatching) :
    """
    Mixin Class for Static Deduplication

    Deduplication initialized from a saved settings file; no training.
    """
class Dedupe(DedupeMatching, ActiveMatching) :
    """
    Mixin Class for Active Learning Deduplication

    Public Methods
    - sample
    """
    # Passed to data_model.predicates when learning blocking rules.
    canopies = True

    def sample(self, data, sample_size=15000,
               blocked_proportion=0.5) :
        '''Draw a sample of record pairs from the dataset
        (a mix of random pairs & pairs of similar records)
        and initialize active learning with this sample

        Arguments: data -- Dictionary of records, where the keys are
        record_ids and the values are dictionaries with the keys being
        field names

        sample_size -- Size of the sample to draw

        blocked_proportion -- Proportion of the sample that will be blocked
        '''
        # Re-index the data -- see core.index; presumably maps record
        # keys to sequential integer ids (TODO confirm).
        data = core.index(data)
        # Keep a bounded random sample of records for block learning.
        self.sampled_records = Sample(data, 900)
        blocked_sample_size = int(blocked_proportion * sample_size)
        predicates = list(self.data_model.predicates(index_predicates=False,
                                                     canopies=self.canopies))
        data = sampling.randomDeque(data)
        blocked_sample_keys = sampling.dedupeBlockedSample(blocked_sample_size,
                                                           predicates,
                                                           data)
        # Top up the blocked sample with uniformly random pairs.
        random_sample_size = sample_size - len(blocked_sample_keys)
        random_sample_keys = set(dedupe.core.randomPairs(len(data),
                                                         random_sample_size))
        data = dict(data)
        data_sample = [(data[k1], data[k2])
                       for k1, k2
                       in blocked_sample_keys | random_sample_keys]
        data_sample = core.freezeData(data_sample)
        self._loadSample(data_sample)

    def _blockLearner(self, predicates) :
        # Learns blocking rules against a single record sample.
        return training.DedupeBlockLearner(predicates,
                                           self.sampled_records)
class StaticRecordLink(RecordLinkMatching, StaticMatching) :
    """
    Mixin Class for Static Record Linkage

    Record linkage initialized from a saved settings file; no training.
    """
class RecordLink(RecordLinkMatching, ActiveMatching) :
    """
    Mixin Class for Active Learning Record Linkage

    Public Methods
    - sample
    """
    # Passed to data_model.predicates when learning blocking rules.
    canopies = False

    def sample(self, data_1, data_2, sample_size=150000,
               blocked_proportion=.5) :
        '''
        Draws a random sample of combinations of records from
        the first and second datasets, and initializes active
        learning with this sample

        Arguments:

        data_1 -- Dictionary of records from first dataset, where the
                  keys are record_ids and the values are dictionaries
                  with the keys being field names

        data_2 -- Dictionary of records from second dataset, same
                  form as data_1

        sample_size -- Size of the sample to draw

        blocked_proportion -- Proportion of the sample that will be blocked
        '''
        if len(data_1) == 0:
            raise ValueError('Dictionary of records from first dataset is empty.')
        elif len(data_2) == 0:
            raise ValueError('Dictionary of records from second dataset is empty.')
        # Make data_1 the smaller dataset.
        if len(data_1) > len(data_2) :
            data_1, data_2 = data_2, data_1
        data_1 = core.index(data_1)
        self.sampled_records_1 = Sample(data_1, 500)
        # Offset data_2's ids so the two datasets share one id space.
        offset = len(data_1)
        data_2 = core.index(data_2, offset)
        self.sampled_records_2 = Sample(data_2, 500)
        blocked_sample_size = int(blocked_proportion * sample_size)
        predicates = list(self.data_model.predicates(index_predicates=False,
                                                     canopies=self.canopies))
        data_1 = sampling.randomDeque(data_1)
        data_2 = sampling.randomDeque(data_2)
        blocked_sample_keys = sampling.linkBlockedSample(blocked_sample_size,
                                                         predicates,
                                                         data_1,
                                                         data_2)
        # Top up the blocked sample with uniformly random cross pairs.
        random_sample_size = sample_size - len(blocked_sample_keys)
        random_sample_keys = dedupe.core.randomPairsMatch(len(data_1),
                                                          len(data_2),
                                                          random_sample_size)
        random_sample_keys = {(a, b + offset)
                              for a, b in random_sample_keys}
        data_1 = dict(data_1)
        data_2 = dict(data_2)
        data_sample = ((data_1[k1], data_2[k2])
                       for k1, k2
                       in blocked_sample_keys | random_sample_keys)
        data_sample = core.freezeData(data_sample)
        self._loadSample(data_sample)

    def _blockLearner(self, predicates) :
        # Learns blocking rules against samples of both datasets.
        return training.RecordLinkBlockLearner(predicates,
                                               self.sampled_records_1,
                                               self.sampled_records_2)
class GazetteerMatching(RecordLinkMatching) :
    """
    Record linkage against a persistent, incrementally indexed set of
    canonical records (see `index`/`unindex`).
    """
    def __init__(self, *args, **kwargs) :
        super(GazetteerMatching, self).__init__(*args, **kwargs)
        self._cluster = clustering.gazetteMatching
        self._linkage_type = "GazetteerMatching"
        # block_key -> {record_id : record} for the indexed canonical set.
        self.blocked_records = OrderedDict({})

    def _blockData(self, messy_data) :
        # Pair each messy record with its indexed canonical candidates.
        for each in self._blockGenerator(messy_data, self.blocked_records) :
            yield each

    def index(self, data) : # pragma : no cover
        """
        Add records to the set of canonical records to match against.

        Arguments:
        data -- Dictionary of records, keyed by record_id
        """
        self.blocker.indexAll(data)
        for block_key, record_id in self.blocker(data.items()) :
            if block_key not in self.blocked_records :
                self.blocked_records[block_key] = {}
            self.blocked_records[block_key][record_id] = data[record_id]

    def unindex(self, data) : # pragma : no cover
        """
        Remove records from the set of canonical records.

        Arguments:
        data -- Dictionary of records, keyed by record_id
        """
        # Remove the records' field values from the blocker's indices...
        for field in self.blocker.index_fields :
            self.blocker.unindex((record[field]
                                  for record
                                  in viewvalues(data)),
                                 field)
        # ...then drop them from the per-block-key record map.
        for block_key, record_id in self.blocker(viewitems(data)) :
            try :
                del self.blocked_records[block_key][record_id]
            except KeyError :
                pass

    def match(self, messy_data, threshold = 0.5, n_matches = 1) : # pragma : no cover
        """Identifies pairs of records that refer to the same entity, returns
        tuples containing a set of record ids and a confidence score as a float
        between 0 and 1. The record_ids within each set should refer to the
        same entity and the confidence score is the estimated probability that
        the records refer to the same entity.

        This method should only used for small to moderately sized datasets
        for larger data, use matchBlocks

        Arguments:
        messy_data -- Dictionary of records from messy dataset, where the
                      keys are record_ids and the values are dictionaries with
                      the keys being field names

        threshold -- Number between 0 and 1 (default is .5). We will consider
                     records as potential duplicates if the predicted
                     probability of being a duplicate is above the threshold.
                     Lowering the number will increase recall, raising it
                     will increase precision

        n_matches -- Maximum number of possible matches from the canonical
                     record set to match against each record in the messy
                     record set
        """
        blocked_pairs = self._blockData(messy_data)
        return self.matchBlocks(blocked_pairs, threshold, n_matches)
class Gazetteer(RecordLink, GazetteerMatching):
    """Gazetteer matching trained via active learning (RecordLink)."""
    pass
class StaticGazetteer(StaticRecordLink, GazetteerMatching):
    """Gazetteer matching initialized from a saved settings file."""
    pass
class EmptyTrainingException(Exception) :
    """Raised when a training file contains no training examples."""
    pass
class SettingsFileLoadingException(Exception) :
    """Raised when a settings file cannot be loaded or is incompatible."""
    pass
class Sample(dict) :
    """
    A dict holding a random sample of at most `sample_size` items of
    the mapping `d`. The size of the full mapping is preserved in
    `original_length`.
    """
    def __init__(self, d, sample_size) :
        if len(d) > sample_size :
            # Down-sample: keep `sample_size` randomly chosen keys.
            chosen = random.sample(viewkeys(d), sample_size)
            super(Sample, self).__init__({key : d[key] for key in chosen})
        else :
            # Small enough already; keep everything.
            super(Sample, self).__init__(d)
        self.original_length = len(d)
|
from Sockets.TCP_Socket_Client import SocketTCPClient
from .manager import Manager
class Client():
    """Client endpoint: owns a TCP socket and a packet Manager and
    wires them together."""

    def __init__(self, _parent=None):
        self.parent = _parent
        # Both are created later, in start().
        self.socketTCP = None
        self.manager = None

    def start(self):
        """Create and start the socket and the manager, then connect
        to the server."""
        self.setSocket()
        self.socketTCP.start()
        self.manager = Manager(self)
        self.manager.start()
        self.socketTCP.connectToServer()

    def setSocket(self):
        """Instantiate the TCP client socket."""
        self.socketTCP = SocketTCPClient()

    def sendPacket(self, _type, _data, _connection, _channel=0):
        """Hand a packet off to the manager. _channel: 0 = tcp, 1 = udp."""
        self.manager.buildPacket(_type, _data, _connection, _channel)
|
from Solver1b import Solver1b
def main():
    """Read the puzzle input and print the distance to the first
    location visited twice."""
    with open('input.txt', 'r') as input_file:
        directions_string = input_file.read().replace('\n', '')
    print(Solver1b().get_distance_to_first_repeat_visit(directions_string))


if __name__ == '__main__':
    main()
|
from bnol import workflows
# One class label per specimen column of the count table, in column order.
_tissue_counts = (('TissueA', 30), ('TissueB', 29), ('TissueC', 30), ('TissueD', 30))
specimenClasses = [label for label, count in _tissue_counts for _ in range(count)]

# Rank genes by informativeness for these specimen classes and print
# the full table.
analysis = workflows.CuffnormInformativeGenes('tests/data/genes.count_table', specimenClasses)
genes = analysis.informativeGenes(allGenes=True)
print(genes)
|
import os
import sys
import json
import shutil
import subprocess
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
    '''
    Start a node with requested rpcallowip and rpcbind parameters,
    then try to connect, and check if the set of bound addresses
    matches the expected set.

    Arguments:
    tmpdir     -- root directory for node datadirs
    allow_ips  -- list of -rpcallowip values, or None for none
    connect_to -- host[:port] the RPC client connects to
    addresses  -- list of -rpcbind values
    expected   -- list of (address, port) tuples the node should bind
    '''
    # Compare in the same hex form the OS-level query returns.
    expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
    base_args = ['-disablewallet', '-nolisten']
    if allow_ips:
        base_args += ['-rpcallowip=' + x for x in allow_ips]
    binds = ['-rpcbind='+addr for addr in addresses]
    nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
    try:
        pid = bitcoind_processes[0].pid
        assert_equal(set(get_bind_addrs(pid)), set(expected))
    finally:
        # Always shut the node down, even if the assertion failed.
        stop_nodes(nodes)
        wait_bitcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
    '''
    Start a node with rpcwallow IP, and request getinfo
    at a non-localhost IP.

    Raises if the RPC request is rejected -- callers use this to check
    that disallowed IPs are denied.
    '''
    base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
    nodes = start_nodes(1, tmpdir, [base_args])
    try:
        # connect to node through non-loopback interface
        url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
        node = AuthServiceProxy(url)
        node.getinfo()
    finally:
        node = None # make sure connection will be garbage collected and closed
        stop_nodes(nodes)
        wait_bitcoinds()
def run_test(tmpdir):
    """
    Exercise -rpcbind/-rpcallowip combinations: check which addresses
    the node binds to and that disallowed client IPs are denied.
    """
    # Due to OS-specific network stats queries, this test works only on
    # Linux. Python 2 reported 'linux2' while Python 3 reports 'linux',
    # so match any Linux platform string instead of the exact py2 value.
    assert(sys.platform.startswith('linux'))
    # find the first non-loopback interface for testing
    non_loopback_ip = None
    for name,ip in all_interfaces():
        if ip != '127.0.0.1':
            non_loopback_ip = ip
            break
    if non_loopback_ip is None:
        assert(not 'This test requires at least one non-loopback IPv4 interface')
    print("Using interface %s for testing" % non_loopback_ip)
    defaultport = rpc_port(0)
    # check default without rpcallowip (IPv4 and IPv6 localhost)
    run_bind_test(tmpdir, None, '127.0.0.1', [],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check default with rpcallowip (IPv6 any)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
        [('::0', defaultport)])
    # check only IPv4 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
        [('127.0.0.1', defaultport)])
    # check only IPv4 localhost (explicit) with alternative port
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
        [('127.0.0.1', 32171)])
    # check only IPv4 localhost (explicit) with multiple alternative ports on same host
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
        [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
    # check only IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
        [('::1', defaultport)])
    # check both IPv4 and IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check only non-loopback interface
    run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
        [(non_loopback_ip, defaultport)])
    # Check that with invalid rpcallowip, we are denied
    run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
    try:
        run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
        assert(not 'Connection not denied by rpcallowip as expected')
    except ValueError:
        pass
def main():
    """Parse command-line options, set up a test chain, run the
    rpcbind tests and clean up; exits 0 on success, 1 on failure."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave lemoncoinds and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing lemoncoind/lemoncoin-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()

    # Make the daemon/cli binaries under srcdir take precedence.
    os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']

    check_json_precision()

    success = False
    nodes = []
    try:
        print("Initializing test directory "+options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)
        run_test(options.tmpdir)
        success = True
    except AssertionError as e:
        # str(e), not e.message: BaseException.message was removed in
        # Python 3 and would raise AttributeError here.
        print("Assertion failed: "+str(e))
    except Exception as e:
        print("Unexpected exception caught during testing: "+str(e))
        traceback.print_tb(sys.exc_info()[2])
    if not options.nocleanup:
        print("Cleaning up")
        wait_bitcoinds()
        shutil.rmtree(options.tmpdir)
    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)

if __name__ == '__main__':
    main()
|
from database import db
import arrow
class Appointment(db.Model):
    """An appointment with a reminder scheduled `delta` minutes before it."""
    __tablename__ = 'appointments'

    id = db.Column(db.Integer, primary_key=True)
    # Contact details for the person to be reminded.
    name = db.Column(db.String(50), nullable=False)
    phone_number = db.Column(db.String(50), nullable=False)
    # Minutes before `time` at which the reminder fires.
    delta = db.Column(db.Integer, nullable=False)
    time = db.Column(db.DateTime, nullable=False)
    # Timezone of the appointment -- presumably an IANA name like
    # 'America/Chicago'; confirm against callers.
    timezone = db.Column(db.String(50), nullable=False)

    def __repr__(self):
        return '<Appointment %r>' % self.name

    def get_notification_time(self):
        """Return the reminder time: `delta` minutes before the
        appointment, as an arrow object."""
        appointment_time = arrow.get(self.time)
        reminder_time = appointment_time.shift(minutes=-self.delta)
        return reminder_time
|
from django.apps import AppConfig
class FormSubmissionsConfig(AppConfig):
    """Django application configuration for the form_submissions app."""
    name = 'form_submissions'
|
import getch
import os
import sys
import time
import argparse
import urllib2
import subprocess
import cv2
import numpy as np
from math import pi
import select
from train import process_image, model
# Steering angle limit (radians); predictions are clamped to +/- this value.
max_angle = pi / 4.0
# Last key read; 0 means "no key pressed yet".
key = 0
def display_img():
    """
    Fetch the name of the latest camera image, display it in an OpenCV
    window, and return its path; return "" if the image can't be read.

    Relies on module globals `fetch_last_img` (a shell command) and
    `args.data_dir` -- presumably set by the argparse block; confirm.
    """
    test = subprocess.check_output(fetch_last_img, shell=True)
    img_name = args.data_dir + "/" + test.decode("utf-8").strip()
    img = cv2.imread(img_name, 1)
    # cv2.imread returns None on failure; `img is not None` is the
    # idiomatic form of the original `type(img) != type(None)` check
    # (and avoids ambiguous truth-value issues with numpy arrays).
    if img is not None:
        cv2.destroyAllWindows()
        cv2.imshow(img_name, img)
        cv2.waitKey(1)
        return img_name
    print ("Error: couldn't get an image")
    return ""
def send_control(act_i):
    """Send the drive command at index `act_i` (into `clinks`) to the vehicle."""
    try:
        print("Sending command %s" % links[act_i])
        # os.system(clinks[act_i])
        urllib2.urlopen(clinks[act_i], timeout=1)
    # Catch Exception rather than using a bare `except:`, which would also
    # swallow KeyboardInterrupt/SystemExit and make the driver hard to stop.
    except Exception:
        print("Command %s couldn't reach a vehicle" % clinks[act_i])
def maunal_drive():
    """Read one keypress (after discarding a prefix byte) and send the
    matching drive command, if any."""
    getch.getch()  # discard the escape/prefix byte
    pressed = getch.getch()
    for idx, action in enumerate(actions):
        if pressed == action:
            send_control(idx)
            break
def auto_drive(img_name):
    """Predict a steering angle for `img_name` and choose an action index.

    Returns (clamped_angle, action_index); the index selects an entry in
    the module-level `links`/`actions` tables.
    """
    md_img, _ = process_image(img_name, None, False)
    pred_angle = model.predict(np.array([md_img]))[0][0]
    half_turn = max_angle / 2.0
    if pred_angle >= half_turn:
        act_i = 1
        pred_angle = min(pred_angle, max_angle)
    elif pred_angle <= -half_turn:
        act_i = 2
        pred_angle = max(pred_angle, -max_angle)
    else:
        act_i = 0
    # send_control(act_i)
    return pred_angle, act_i
def drive(auto):
    """Main driver loop: poll stdin for key commands, show the newest camera
    frame, and either forward manual commands or run the autopilot.

    `auto` enables autopilot; keys: ESC = manual steer (disengages autopilot),
    'a' = autopilot on, 's' = autopilot off, 'q' = quit.
    """
    ot = 0  # timestamp of the last command sent; throttles to ~1 command/sec
    wait_time = 0
    curr_auto = auto
    img_name = ""
    drive = False  # True once at least 1s has elapsed since the last command
    key = 0
    print("before thread")
    # process = threading.Thread(target=key_thread)
    # process.start()
    while True:
        # stdscr.nodelay(1)
        # Non-blocking read: drain any pending keypress from stdin.
        while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            key = sys.stdin.read(1)
            if not key:
                exit(0)
        img_name = display_img()
        # print(img_name, curr_auto, drive)
        ct = time.time()
        # key = getch.getch()
        # print("Key:", key)
        if ct - ot > 1:
            drive = True
        # ESC prefix: a manual steering key follows; autopilot hands over.
        if key == '\033':
            if auto:
                print("Autopilot disengaged")
                wait_time = 5
                auto = False
            if drive:
                maunal_drive()
                ot = ct
                drive = False
        # Exit command
        elif key == 'q':
            exit(0)
        elif key == 'a':
            auto = True
            print("Autopilot mode on!")
        elif key == 's':
            auto = False
            print("Autopilot disengaged")
        # If drive window is open and currently autopilot mode is on
        elif auto and drive and img_name:
            # st_t = time.time()
            ang, act_i = auto_drive(img_name)
            print("Prediction angle: %.2f, %s" % (ang, links[act_i]))
            # auto_drive(img_name)
            # print("time", time.time() - st_t)
            ot = ct
            drive = False
        img_name = 0
        key = 0
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Driver')
    parser.add_argument(
        '-model',
        type=str,
        default='',
        help='Path to model h5 file. Model should be on the same path.'
    )
    parser.add_argument(
        '-url',
        type=str,
        help='Url for connection. Default: http://10.10.10.112',
        default="http://10.10.10.112"
    )
    parser.add_argument(
        '-data_dir',
        type=str,
        help='Img stream directory. Default: st_data',
        default="st_data"
    )
    args = parser.parse_args()
    # Shell pipeline that names the most recent image in the stream directory.
    if os.path.exists(args.data_dir):
        fetch_last_img = "ls " + args.data_dir + " | tail -n1"
    else:
        print("Error: streaming directory %s doesn't exist" % args.data_dir)
        exit(1)
    auto = False
    # Autopilot is available only when a trained model file is supplied.
    if args.model:
        shape = (100, 100, 3)
        model = model(True, shape, args.model)
        auto = True
    err = 0
    # Key codes mapped 1:1 onto command paths below; presumably the final
    # bytes of the arrow-key escape sequences — TODO confirm.
    actions = ['A', 'D', 'C', 'B']
    links = ['/fwd', '/fwd/lt', '/fwd/rt', '/rev']
    # clinks = ['curl '+ args.url + el for el in links]
    clinks = [args.url + el for el in links]
    # curses.wrapper(drive)
    drive(auto)
|
def triangle(a, b, c):
    """Classify a triangle as equilateral, isosceles, or scalene.

    Raises TriangleError for non-positive sides or when the triangle
    inequality does not hold.
    """
    smallest, middle, largest = sorted((a, b, c))
    if smallest <= 0:
        raise TriangleError('All sides should be greater than 0')
    if largest >= smallest + middle:
        raise TriangleError('The sum of any two sides should be greater than the third one')
    return {1: 'equilateral', 2: 'isosceles', 3: 'scalene'}[len({a, b, c})]
class TriangleError(StandardError):
    """Raised for invalid triangle sides (Python 2: StandardError base)."""
    pass
|
from beetle import *
def main():
    """Boot the beetle app and drop into an interactive IPython shell."""
    beetle, ui, groot, webthread = setup()
    # `b` and `self` are deliberate aliases: locals() is passed as the IPython
    # user namespace below, so these names become available in the shell.
    b = beetle
    self = beetle
    webthread.start()
    stop = lambda: webthread.stop()  # convenience shell command
    variables = locals()
    variables.update(globals())
    IPython.start_ipython(user_ns=variables)
if __name__ == "__main__":
main()
|
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import logging
import os
import subprocess
import sys
from threading import Thread
from django.contrib.staticfiles.management.commands.runserver import \
Command as RunserverCommand
from django.core.management.base import CommandError
logger = logging.getLogger(__name__)
class Command(RunserverCommand):
    """
    Subclass the RunserverCommand from Staticfiles to optionally run webpack.
    """
    def __init__(self, *args, **kwargs):
        # The *_cleanup_closing flags record a deliberate shutdown so the
        # wait() watchers don't log an intentional termination as an error.
        self.webpack_cleanup_closing = False
        self.webpack_process = None
        self.karma_cleanup_closing = False
        self.karma_process = None
        super(Command, self).__init__(*args, **kwargs)
    def add_arguments(self, parser):
        """Add the --webpack/--karma opt-in flags on top of runserver's own."""
        parser.add_argument(
            '--webpack', action='store_true', dest='webpack', default=False,
            help='Tells Django to runserver to spawn a webpack watch subprocess.',
        )
        parser.add_argument(
            '--karma', action='store_true', dest='karma', default=False,
            help='Tells Django to runserver to spawn a karma test watch subprocess.',
        )
        super(Command, self).add_arguments(parser)
    def handle(self, *args, **options):
        """Spawn the requested watcher subprocesses, then run the server."""
        if options["webpack"]:
            self.spawn_webpack()
        if options["karma"]:
            self.spawn_karma()
        return super(Command, self).handle(*args, **options)
    def spawn_webpack(self):
        self.spawn_subprocess("webpack_process", self.start_webpack, self.kill_webpack_process)
    def spawn_karma(self):
        self.spawn_subprocess("karma_process", self.start_karma, self.kill_karma_process)
    def spawn_subprocess(self, process_name, process_start, process_kill):
        """Run `process_start` on a daemon thread once; register `process_kill` at exit.

        We're subclassing runserver, which spawns threads for its
        autoreloader with RUN_MAIN set to true; we have to check for
        this to avoid starting the watcher twice.
        """
        if not os.getenv('RUN_MAIN', False) and not getattr(self, process_name):
            subprocess_thread = Thread(target=process_start)
            subprocess_thread.daemon = True
            subprocess_thread.start()
            atexit.register(process_kill)
    def kill_webpack_process(self):
        """atexit hook: terminate the webpack watcher if it is still running."""
        # Guard against the process never having started (None) as well as one
        # that already exited. The previous `if proc and proc.returncode is not
        # None` check fell through to terminate() when the process was None
        # and raised AttributeError during interpreter shutdown.
        if self.webpack_process is None or self.webpack_process.returncode is not None:
            return
        logger.info('Closing webpack process')
        self.webpack_cleanup_closing = True
        self.webpack_process.terminate()
    def start_webpack(self):
        """Run `npm run watch` and block until it exits, logging failures."""
        logger.info('Starting webpack process from Django runserver command')
        self.webpack_process = subprocess.Popen(
            'npm run watch',
            shell=True,
            stdin=subprocess.PIPE,
            stdout=sys.stdout,
            stderr=sys.stderr)
        if self.webpack_process.poll() is not None:
            raise CommandError('Webpack process failed to start from Django runserver command')
        logger.info(
            'Django Runserver command has spawned a Webpack watcher process on pid {0}'.format(
                self.webpack_process.pid))
        self.webpack_process.wait()
        if self.webpack_process.returncode != 0 and not self.webpack_cleanup_closing:
            logger.error("Webpack process exited unexpectedly.")
    def kill_karma_process(self):
        """atexit hook: terminate the karma watcher if it is still running."""
        # Same None/already-exited guard as kill_webpack_process.
        if self.karma_process is None or self.karma_process.returncode is not None:
            return
        logger.info('Closing karma process')
        self.karma_cleanup_closing = True
        self.karma_process.terminate()
    def start_karma(self):
        """Run `npm run test-karma:watch` and block until it exits."""
        logger.info('Starting karma test watcher process from Django runserver command')
        self.karma_process = subprocess.Popen(
            'npm run test-karma:watch',
            shell=True,
            stdin=subprocess.PIPE,
            stdout=sys.stdout,
            stderr=sys.stderr)
        if self.karma_process.poll() is not None:
            raise CommandError('Karma process failed to start from Django runserver command')
        logger.info(
            'Django Runserver command has spawned a Karma test watcher process on pid {0}'.format(
                self.karma_process.pid))
        self.karma_process.wait()
        if self.karma_process.returncode != 0 and not self.karma_cleanup_closing:
            logger.error("Karma process exited unexpectedly.")
|
import sqlalchemy.types
import uuid
class UUID(sqlalchemy.types.TypeDecorator):
    """Store uuid.UUID values in a BINARY(16) column."""
    impl = sqlalchemy.types.BINARY(16)
    python_type = uuid.UUID
    def load_dialect_impl(self, dialect):
        """Use the plain BINARY(16) implementation on every dialect."""
        return dialect.type_descriptor(self.impl)
    def process_bind_param(self, value, dialect):
        """Coerce `value` to a UUID and bind its 16 raw bytes.

        Accepts a uuid.UUID, a UUID hex string, or a 16-byte value;
        None passes through unchanged.
        """
        if value is None:
            return value
        # A single isinstance check suffices; the inner re-check that used to
        # live here was always true and only obscured the conversion logic.
        if not isinstance(value, uuid.UUID):
            try:
                value = uuid.UUID(value)
            except (TypeError, ValueError):
                # Not a hex string; assume raw 16 bytes.
                value = uuid.UUID(bytes=value)
        return value.bytes
    def process_literal_param(self, value, dialect):
        # Literal rendering is intentionally unsupported (returns None).
        # NOTE(review): inline/literal binds of this type will render as
        # NULL — consider emitting a hex literal if literal binds are needed.
        pass
    def process_result_value(self, value, dialect):
        """Rebuild a uuid.UUID from the 16 bytes read from the database."""
        if value is None:
            return value
        return uuid.UUID(bytes=value)
|
from setuptools import setup, find_packages
# Packaging metadata for the BioModels export; ships the model XML and README.
setup(name='MODEL1310110014',
      version=20140916,
      description='MODEL1310110014 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/MODEL1310110014',
      maintainer='Stanley Gu',
      maintainer_url='stanleygu@gmail.com',
      packages=find_packages(),
      package_data={'': ['*.xml', 'README.md']},
     )
|
import os
# Read the bundled SBML model (shipped next to this module) into a string.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1204280035.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True when `module_name` can be imported, else False."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
# Parse the SBML string only when the optional libsbml binding is installed.
if module_exists('libsbml'):
    import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
|
import OOMP
# Build OOMP item 9581 with its lookup tags and append it to the shared
# parts list.
newPart = OOMP.oompItem(9581)
newPart.addTag("oompType", "VREG")
newPart.addTag("oompSize", "T220")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "V05")
newPart.addTag("oompIndex", "A1")
OOMP.parts.append(newPart)
|
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
    """Initial migration: creates the custom User model (PT-BR verbose names)."""
    initial = True
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Um nome curto que será usado para identificá-lo de forma única na plataforma', max_length=30, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\w.@+-]+$', 32), 'Informe um nome de usuário válido. Este valor deve conter apenas letras, números e os caracteres: @/./+/-/_ .', 'invalid')], verbose_name='Usuário')),
                ('name', models.CharField(blank=True, max_length=100, verbose_name='Nome')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='E-mail')),
                ('is_staff', models.BooleanField(default=False, verbose_name='Equipe')),
                ('is_active', models.BooleanField(default=True, verbose_name='Ativo')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='Data de Entrada')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'Usuários',
                'verbose_name': 'Usuário',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
from setuptools import setup, find_packages
# Packaging metadata for the BioModels export; ships the model XML and README.
setup(name='BIOMD0000000255',
      version=20140916,
      description='BIOMD0000000255 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000255',
      maintainer='Stanley Gu',
      maintainer_url='stanleygu@gmail.com',
      packages=find_packages(),
      package_data={'': ['*.xml', 'README.md']},
     )
|
import csv
import boto3
import datetime
import logging
import tempfile
import pandas as pd
from pandas import isnull
from sqlalchemy import func
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactcore.models.jobModels import PublishedFilesHistory, Job
from dataactcore.models.jobModels import Submission
from dataactcore.models.userModel import User # noqa
from dataactcore.models.lookups import PUBLISH_STATUS_DICT, FILE_TYPE_DICT, JOB_TYPE_DICT, FILE_TYPE_DICT_ID
from dataactcore.models.stagingModels import FlexField, CertifiedFlexField
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import insert_dataframe
logger = logging.getLogger(__name__)
FILE_LIST = [FILE_TYPE_DICT['appropriations'], FILE_TYPE_DICT['program_activity'], FILE_TYPE_DICT['award_financial']]
def copy_published_submission_flex_fields():
    """ Copy flex fields from the flex_field table to the published_flex_field table for published DABS submissions. """
    # NOTE(review): despite the docstring, the destination table here is
    # certified_flex_field — confirm which name is current in the schema.
    logger.info('Moving published flex fields')
    sess = GlobalDB.db().session
    # Column list shared by both tables, minus audit/PK columns which are
    # regenerated on insert.
    column_list = [col.key for col in FlexField.__table__.columns]
    column_list.remove('created_at')
    column_list.remove('updated_at')
    column_list.remove('flex_field_id')
    published_col_string = ', '.join(column_list)
    # Qualify submission_id with its source table to avoid ambiguity in the
    # SELECT below.
    col_string = ', '.join([col if not col == 'submission_id' else 'flex_field.' + col for col in column_list])
    # Delete the old ones so we don't have conflicts
    # (format args come from internal lookup dicts, not user input)
    sess.execute(
        """DELETE FROM certified_flex_field
            USING submission
            WHERE submission.submission_id = certified_flex_field.submission_id
                AND publish_status_id = {}
        """.format(PUBLISH_STATUS_DICT['published']))
    # Insert all flex fields from submissions in the certified (not updated) status
    sess.execute(
        """INSERT INTO certified_flex_field (created_at, updated_at, {})
            SELECT NOW() AS created_at, NOW() AS updated_at, {}
            FROM flex_field
            JOIN submission ON submission.submission_id = flex_field.submission_id
            WHERE submission.publish_status_id = {}
                AND submission.d2_submission IS FALSE
        """.format(published_col_string, col_string, PUBLISH_STATUS_DICT['published']))
    sess.commit()
    logger.info('Moved certified flex fields')
def clean_col(datum):
    """Normalize a raw cell: strip whitespace; map NaN/None/blank to None."""
    if isnull(datum):
        return None
    text = str(datum).strip()
    return text if text else None
def process_flex_data(data, flex_headers, submission_id, job_id, file_type_id):
    """ Process the file that contains flex fields and insert all flex cells into the published table

        Args:
            data: The pandas dataframe containing the file
            flex_headers: The flex fields contained in this file
            submission_id: The ID associated with the submission this file comes from
            job_id: The ID associated with the job this file comes from
            file_type_id: The ID of the file type that this is

        Returns:
            A long-format dataframe with one row per (source row, flex header)
            cell plus the shared submission/job/file-type metadata columns.
    """
    # Only use the flex columns
    data = data.rename(columns=lambda x: x.lower().strip())
    data = data[list(flex_headers)]
    if len(data.index) > 0:
        data = data.applymap(clean_col)
    # Populate row number, adding 2 to the index because the first row is always row 2 but index starts at 0
    data = data.reset_index()
    data['row_number'] = data.index + 2
    data = data.drop(['index'], axis=1)
    # Split each flex field into its own row with both content and headers while keeping the row number
    new_df = pd.melt(data, id_vars=['row_number'], value_vars=flex_headers, var_name='header', value_name='cell')
    # Filling in all the shared data for these flex fields
    now = datetime.datetime.now()
    new_df['created_at'] = now
    new_df['updated_at'] = now
    new_df['job_id'] = job_id
    new_df['submission_id'] = submission_id
    new_df['file_type_id'] = file_type_id
    return new_df
def load_updated_flex_fields():
    """ Load in flex fields from updated submissions as they were at the latest publication """
    logger.info('Moving updated flex fields')
    sess = GlobalDB.db().session
    # Get a list of all submissions with published flex fields
    published_flex_subs = sess.query(CertifiedFlexField.submission_id).distinct().all()
    # We only want to go through updated submissions without flex fields already loaded
    updated_subs = sess.query(Submission.submission_id).\
        filter(~Submission.submission_id.in_(published_flex_subs),
               Submission.d2_submission.is_(False),
               Submission.publish_status_id == PUBLISH_STATUS_DICT['updated']).all()
    # Latest publication per submission so the fields reflect the most
    # recent publish, not an intermediate one.
    published_ids = sess. \
        query(func.max(PublishedFilesHistory.publish_history_id).label('max_pub_id')). \
        filter(PublishedFilesHistory.submission_id.in_(updated_subs)). \
        group_by(PublishedFilesHistory.submission_id).cte('published_ids')
    historical_files = sess.query(PublishedFilesHistory.filename, PublishedFilesHistory.file_type_id,
                                  PublishedFilesHistory.submission_id). \
        join(published_ids, published_ids.c.max_pub_id == PublishedFilesHistory.publish_history_id).\
        filter(PublishedFilesHistory.file_type_id.in_(FILE_LIST))
    # Loop through each updated submission
    for historical_file in historical_files:
        filename = historical_file.filename
        submission_id = historical_file.submission_id
        file_type_id = historical_file.file_type_id
        # If this is a file in S3, download to a local temp file first then use temp file as local file
        if CONFIG_BROKER['use_aws']:
            # Use a closed NamedTemporaryFile to reserve a path; the previous
            # mkstemp() call left its OS-level file descriptor open, leaking
            # one fd per downloaded file (and shadowed the `file` builtin).
            tmp_file = tempfile.NamedTemporaryFile(delete=False)
            tmp_filename = tmp_file.name
            tmp_file.close()
            s3 = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
            s3.download_file(CONFIG_BROKER['certified_bucket'], filename, tmp_filename)
            filename = tmp_filename
        with open(filename) as file:
            # Get file delimiter, get an array of the header row, and reset reader to start of file
            header_line = file.readline()
            delim = '|' if header_line.count('|') != 0 else ','
            header_row = next(csv.reader([header_line], quotechar='"', dialect='excel', delimiter=delim))
            file.seek(0)
            flex_list = [header.lower() for header in header_row if header.lower().startswith('flex_')]
            # If there are no flex fields, just ignore this file, no need to go through it
            if len(flex_list) == 0:
                continue
            # Create dataframe from file
            data = pd.read_csv(file, dtype=str, delimiter=delim)
        logger.info('Moving flex fields for submission {}, {} file'.format(submission_id,
                                                                           FILE_TYPE_DICT_ID[file_type_id]))
        # Getting the job so we can get the ID
        job = sess.query(Job).filter_by(submission_id=submission_id, file_type_id=file_type_id,
                                        job_type_id=JOB_TYPE_DICT['csv_record_validation']).one()
        # Process and insert the data
        flex_data = process_flex_data(data, flex_list, submission_id, job.job_id, file_type_id)
        insert_dataframe(flex_data, CertifiedFlexField.__table__.name, sess.connection())
        sess.commit()
    logger.info('Moved updated flex fields')
def main():
    """ Load flex fields for published submissions that haven't been loaded into the published flex fields table. """
    copy_published_submission_flex_fields()
    load_updated_flex_fields()
# Script entry point: run both loaders inside the validator app context.
if __name__ == '__main__':
    configure_logging()
    with create_app().app_context():
        main()
|
# !/usr/bin/env python
import os, sys, time
import subprocess
import pickle
from wrappers import Tmux
from fonctions import *
# Launcher script: starts the three TimeLaps worker scripts inside a tmux
# session, records their PIDs in the pickled "etatScript" state file, and
# opens an SSH pane to the remote server described in "configServer".
CONFIGSERVER = os.path.join(os.path.expanduser("~"), "TimeLaps", "configServer")
# First run: generate the server config interactively if it is missing.
if not os.path.isfile(CONFIGSERVER):
    subprocess.call(["python", os.path.join(os.path.expanduser("~"), "TimeLaps", "FichierConfig.py")])
# Per-script state: [running_flag, pid].
dictEtatScript = {"CapturePhoto":[0,0], "TransfertFichier":[0,0], "ListDossierAttente":[0,0]}
with open("etatScript", "wb") as file:
    configWrite = pickle.Pickler(file)
    configWrite.dump(dictEtatScript)
etatScript = {}
# Read the state back and echo it (sanity check of the pickle round-trip).
with open("etatScript", "rb") as file:
    configRead = pickle.Unpickler(file)
    etatScript = configRead.load()
for cle,valeur in etatScript.items():
    print(cle,valeur)
# Server connection settings were dumped in this exact order by FichierConfig.
with open("configServer", "rb") as fichierConfig:
    configRead = pickle.Unpickler(fichierConfig)
    HOST = configRead.load()
    PORT = int(configRead.load())
    USER = configRead.load()
    PORTSSH = int(configRead.load())
windows = Tmux(subprocess)
windows.newWindows("TimeLaps")
# --- Pane 1: photo capture script ---
script="CapturePhoto.py"
strScript = os.path.join(os.path.expanduser("~"), "TimeLaps", script)
windows.sendKeys("python {0}".format(strScript))
time.sleep(0.5)
processPythonPid = pythonExist(strScript)
# pythonExist returns the pid (int) when the script is running.
if type(processPythonPid) == int:
    processPython = psutil.Process(processPythonPid)
    print("le script {} a été démarré : {}".format(strScript,processPythonPid))
    dictEtatScript["CapturePhoto"] = [1,processPythonPid]
else:
    print("le script {} n'a pas été démarré".format(strScript))
    sys.exit()
# --- Bottom pane: SSH to the remote server, attaching/creating tmux there ---
windows.splitWindows("v")
windows.resizePane("D", "5")
windows.sendKeys("ssh -p {0} -t {1}@{2} tmux a || ssh -p {0} -t {1}@{2} tmux".format(PORTSSH, USER, HOST))
windows.selectPane("0")
windows.splitWindows("h")
windows.resizePane("R", "10")
# --- Pane 2: file transfer script ---
script="TransfertFichier.py"
strScript = os.path.join(os.path.expanduser("~"), "TimeLaps", script)
windows.sendKeys("python {0}".format(strScript))
time.sleep(0.5)
processPythonPid = pythonExist(strScript)
if type(processPythonPid) == int:
    processPython = psutil.Process(processPythonPid)
    print("le script {} a été démarré : {}".format(strScript,processPythonPid))
    dictEtatScript["TransfertFichier"] = [1,processPythonPid]
else:
    print("le script {} n'a pas été démarré".format(strScript))
    sys.exit()
windows.splitWindows("v")
# --- Pane 3: pending-folder listing script ---
script="ListDossierAttente.py"
strScript = os.path.join(os.path.expanduser("~"), "TimeLaps", script)
windows.sendKeys("python {0}".format(strScript))
time.sleep(0.5)
processPythonPid = pythonExist(strScript)
if type(processPythonPid) == int:
    processPython = psutil.Process(processPythonPid)
    print("le script {} a été démarré : {}".format(strScript,processPythonPid))
    dictEtatScript["ListDossierAttente"] = [1,processPythonPid]
else:
    print("le script {} n'a pas été démarré".format(strScript))
    sys.exit()
windows.attachSession()
# Persist the final state (flags + pids) and echo it once more.
with open("etatScript", "wb") as file:
    configWrite = pickle.Pickler(file)
    configWrite.dump(dictEtatScript)
etatScript = {}
with open("etatScript", "rb") as file:
    configRead = pickle.Unpickler(file)
    etatScript = configRead.load()
for cle,valeur in etatScript.items():
    print(cle,valeur)
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
step = 10 # grid step
# Columns: x (easting), y (northing), z (elevation) — presumably UTM
# coordinates in metres; TODO confirm against the data source.
a = np.loadtxt('lidar.txt', delimiter=',')
x = a[:,0]
y = a[:,1]
z = a[:,2]
# Fixed grid bounds; z contour range trimmed 5 m inside the data extremes
# and rounded to the nearest 10.
xmin = 548040.0
ymin = 5129010.0
zmin = round(z.min() + 5.0, -1)
xmax = 548300.0
ymax = 5129270.0
zmax = round(z.max() - 5.0, -1)
xi = np.arange(xmin, xmax+step, step)
yi = np.arange(ymin, ymax+step, step)
xi, yi = np.meshgrid(xi, yi)
# Interpolate the scattered points onto the regular grid.
zi = griddata((x, y), z, (xi, yi), method='linear')
# 2-D contour map.
fig = plt.figure()
plt.contour(xi,yi,zi,np.arange(zmin,zmax+step, step))
plt.xlabel('xi',fontsize=16)
plt.ylabel('yi',fontsize=16)
plt.savefig('lidar_contour.png',dpi=100)
plt.close(fig)
# 3-D wireframe + contour view of the same surface.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_wireframe(xi, yi, zi)
ax.contour3D(xi, yi, zi, 10)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.savefig('lidar_3dcontour.png',dpi=100)
plt.close(fig)
# Volume of terrain above the 1000 m plane: sum of (height - 1000) per cell
# times the cell area.
vol = sum(zi[zi > 1000] - 1000.0) * step**2
print('Volume above 1000m: {:.0f} m3'.format(vol))
|
def on_square(square):
    """Grains on chessboard square `square` (1-based): doubles each square."""
    return 2 ** (square - 1)
def total_after(square):
    """Total grains on squares 1..`square`.

    The running total 1 + 2 + 4 + ... + 2**(square-1) is a geometric series
    equal to 2**square - 1, so use the closed form instead of summing a loop.
    """
    return pow(2, square) - 1
|
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np
# Load the corpus and build char<->index vocabularies. (Python 2 file.)
data = open('sonnet.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print 'data has %d characters, %d unique.' % (data_size, vocab_size)
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
# hyperparameters
hidden_size = 100 # size of hidden layer of neurons
seq_length = 25 # number of steps to unroll the RNN for
learning_rate = 1e-1
# model parameters, initialized to small random values (biases to zero)
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden n by |V|
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden n by n
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output |V| by n
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
  """
  inputs,targets are both list of integers.
  hprev is Hx1 array of initial hidden state
  returns the loss, gradients on model parameters, and last hidden state
  """
  xs, hs, ys, ps = {}, {}, {}, {}
  hs[-1] = np.copy(hprev)
  loss = 0
  # forward pass
  for t in xrange(len(inputs)):
    # as one-hot-encoding
    xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
    xs[t][inputs[t]] = 1
    # current state of h by current x and previous h
    hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
    # current y by current state h
    ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
    # normalized to be proper probabilities
    ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
    loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
  # backward pass: compute gradients going backwards
  dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
  dbh, dby = np.zeros_like(bh), np.zeros_like(by)
  dhnext = np.zeros_like(hs[0])
  for t in reversed(xrange(len(inputs))):
    dy = np.copy(ps[t])
    dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
    dWhy += np.dot(dy, hs[t].T)
    dby += dy
    dh = np.dot(Why.T, dy) + dhnext # backprop into h
    dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
    dbh += dhraw
    dWxh += np.dot(dhraw, xs[t].T)
    dWhh += np.dot(dhraw, hs[t-1].T)
    dhnext = np.dot(Whh.T, dhraw)
  for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
    np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
  return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
def sample(h, seed_ix, n):
  """
  sample a sequence of integers from the model
  h is memory state, seed_ix is seed letter for first time step
  n is the number of characters to generate; returns the sampled indices
  """
  x = np.zeros((vocab_size, 1))
  x[seed_ix] = 1
  ixes = []
  for t in xrange(n):
    # one forward step, then draw the next char from the output distribution
    h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
    y = np.dot(Why, h) + by
    p = np.exp(y) / np.sum(np.exp(y))
    ix = np.random.choice(range(vocab_size), p=p.ravel())
    # feed the sampled char back in as the next input (one-hot)
    x = np.zeros((vocab_size, 1))
    x[ix] = 1
    ixes.append(ix)
  return ixes
# Training loop: sweep the corpus in seq_length windows forever, updating
# parameters with Adagrad. n = iteration count, p = data pointer.
n, p = 0, 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
while True:
  # prepare inputs (we're sweeping from left to right in steps seq_length long)
  if p+seq_length+1 >= len(data) or n == 0:
    hprev = np.zeros((hidden_size,1)) # reset RNN memory
    p = 0 # go from start of data
  inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
  targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
  # sample from the model now and then
  if n % 100 == 0:
    sample_ix = sample(hprev, inputs[0], 200)
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    print '----\n %s \n----' % (txt, )
  # forward seq_length characters through the net and fetch gradient
  loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
  smooth_loss = smooth_loss * 0.999 + loss * 0.001
  if n % 100 == 0: print 'iter %d, loss: %f' % (n, smooth_loss) # print progress
  # perform parameter update with Adagrad
  for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                [dWxh, dWhh, dWhy, dbh, dby],
                                [mWxh, mWhh, mWhy, mbh, mby]):
    mem += dparam * dparam
    param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
  p += seq_length # move data pointer
  n += 1 # iteration counter
|
from gi.repository import Gtk
def window_delete_event(window, event):
    """Confirm before closing: quit only when the user picks "Quit".

    Returning True stops the default delete handler, so the window is torn
    down only via Gtk.main_quit(). (The unused `state` local assignment was
    removed.)
    """
    messagedialog = Gtk.MessageDialog()
    messagedialog.add_button("Do Not Quit", Gtk.ResponseType.CANCEL)
    messagedialog.add_button("Quit", Gtk.ResponseType.OK)
    messagedialog.set_markup("Quit the program?")
    if messagedialog.run() == Gtk.ResponseType.OK:
        Gtk.main_quit()
    messagedialog.destroy()
    return True
# Build the main window, wire the confirm-on-close handler, and run the loop.
window = Gtk.Window()
window.set_default_size(200, 200)
window.connect("delete-event", window_delete_event)
window.show_all()
Gtk.main()
|
import sys
from jarray import *
from java.lang import *
from java.awt import *
from java.util import *
from java.lang.reflect import *
from javax.swing.event import *
from javax.swing import *
from java.awt.event import *
from java.net import *
from java.io import *
from org.csstudio.mps.sns.tools.bricks import WindowReference
from org.csstudio.mps.sns.tools.plot import *
# Jython script: Java-style boolean/null aliases for readability below.
true = (1==1)
false = not true
null = None
# Load the window layout from the .bricks file next to this script.
sample_folder = File( sys.argv[0] ).getParentFile()
url = File( sample_folder, "test.bricks" ).toURL()
print url
window_ref = WindowReference( url, "MainWindow", ["Test Title"] )
window = window_ref.getWindow()
# Populate the magnet list view with demo entries.
magnet_list = window_ref.getView( "MagnetList" );
magnets = Vector()
magnets.add( "Dipole" )
magnets.add( "Quadrupole" )
magnets.add( "Sextupole" )
magnets.add( "Octupole" )
magnets.add( "Skew Dipole" )
magnets.add( "Skew Quadrupole" )
magnets.add( "Skew Sextupole" )
magnet_list.setListData( magnets )
# Plot one period-ish of a sine curve in the SinePlot view.
plot = window_ref.getView( "SinePlot" )
graphData = BasicGraphData();
graphData.setGraphColor( Color.BLUE );
graphData.setGraphProperty( plot.getLegendKeyString(), "Sine" );
for x in range( 100 ):
	graphData.addPoint( x, Math.sin( 0.1 * x ) );
series = Vector(1);
series.add( graphData );
plot.addGraphData( series );
# Hides the owning dialog when its Okay button is pressed.
class DialogOkayAction( ActionListener ):
	def __init__( self, dialog ):
		self.dialog = dialog
	def actionPerformed( self, event ):
		self.dialog.setVisible( false )
# Exits the whole application when the Quit button is pressed.
class QuitAction( ActionListener ):
	def actionPerformed( self, event ):
		sys.exit( 0 )
# Opens the HelloDialog (from the same .bricks layout) when Run is pressed.
class RunAction( ActionListener ):
	def actionPerformed( self, event ):
		# create the dialog box with the main window as the dialog box's owner
		dialog_ref = WindowReference( url, "HelloDialog", [window] )
		dialog = dialog_ref.getWindow()
		button = dialog_ref.getView( "OkayButton" )
		button.addActionListener( DialogOkayAction( dialog ) )
		dialog.setLocationRelativeTo( window )
		dialog.setVisible( true )
# Wire the Run/Quit buttons to their listeners and show the main window.
runButton = window_ref.getView( "RunButton" )
runButton.addActionListener( RunAction() )
quitButton = window_ref.getView( "QuitButton" )
quitButton.addActionListener( QuitAction() )
window.setVisible( true )
|
"""Proxy definitions."""
from __future__ import absolute_import, print_function
from flask import current_app
from werkzeug.local import LocalProxy
from .permissions import (CommunityAdminActionNeed,
CommunityReadActionNeed,
CommunityManageActionNeed,
CommunityCurateActionNeed)
# Action name -> lazily-resolved permission factory registered by the
# invenio-communities extension (looked up per request via current_app).
current_permission_factory = {
    "communities-admin": LocalProxy(lambda:
        current_app.extensions["invenio-communities"].admin_permission_factory),
    "communities-read": LocalProxy(lambda:
        current_app.extensions["invenio-communities"].read_permission_factory),
    "communities-manage": LocalProxy(lambda:
        current_app.extensions["invenio-communities"].manage_permission_factory),
    "communities-curate": LocalProxy(lambda:
        current_app.extensions["invenio-communities"].curate_permission_factory)
}
# Action name -> the corresponding ActionNeed class for the same actions.
needs = {
    "communities-admin": CommunityAdminActionNeed,
    "communities-read": CommunityReadActionNeed,
    "communities-manage": CommunityManageActionNeed,
    "communities-curate": CommunityCurateActionNeed
}
|
import configparser
from datetime import datetime
from os import path, sep
directory = path.dirname(path.realpath(__file__)) + sep  # folder containing config.ini
config = None  # lazily populated {section: {key: value}} cache; see load()
def get(section, key):
    """Return config[section][key], loading the config file on first use.

    Returns None when the section (or key) is absent.
    """
    global config
    if config is None:
        load()
    section_map = config.get(section)
    if section_map is None:
        return None
    return section_map.get(key)
def set(section, key, value):
    """Assign config[section][key] = value, loading the config on first use."""
    global config
    if config is None:
        load()
    section_map = config.get(section)
    if section_map is None:
        section_map = {}
        config[section] = section_map
    section_map[key] = value
def verbose(message):
    """Print and/or append `message` with a timestamp, per [bell] config flags."""
    stamped = datetime.now().strftime("[ %Y/%m/%d %H:%M:%S ]") + ' ' + message
    if get('bell', 'verbose'):
        print(stamped)
    if get('bell', 'log'):
        with open('log.txt', 'a') as log_file:
            log_file.write(stamped + '\n')
def load():
    """Parse config.ini (next to this module) into the module-level `config` dict."""
    parser = configparser.ConfigParser()
    parser.read(directory + 'config.ini')
    global config
    config = {}
    for section in parser.sections():
        section_config = {}
        # NOTE(review): `_sections` is a private ConfigParser attribute, and
        # eval() on values from a user-editable config file executes arbitrary
        # code — consider the public items() API plus ast.literal_eval.
        for key in parser._sections[section]:
            value = eval(parser._sections[section][key])
            section_config[key] = value
        config[section] = section_config
|
import json
import sys
import os
from sys import stdin, stdout
def getRequest():
    # Read a 7-char decimal length header from stdin, then parse the JSON
    # payload that follows.
    # NOTE(review): sendResponse() includes the 7 header chars in its length
    # prefix; if the peer uses the same convention, this should read
    # `length - 7` payload bytes — confirm against the host-side protocol.
    return json.loads(stdin.read(int(stdin.read(7))))
def sendResponse(response):
    """Write `response` to stdout as JSON, prefixed with a 7-digit total
    length (payload length plus the 7 header characters)."""
    payload = json.dumps(response)
    stdout.write("%07d%s" % (len(payload) + 7, payload))
    stdout.flush()
def mainLoop():
    """Handshake with the host process, then serve extract_info requests forever.

    Errors are reported to the peer inside the response messages, which is
    why the broad Exception handlers below are intentional.
    """
    info = {'type': 'info', 'status':True, 'version': '', 'exception': None}
    try:
        import youtube_dl
        options = {'quiet': True}
        ydl = youtube_dl.YoutubeDL(options)
        sendResponse(info)
    except Exception as e:
        # Import/initialization failed: tell the host, then exit non-zero.
        info['status'] = False
        info['exception'] = str(e)
        sendResponse(info)
        exit(1)
    while True:
        request = getRequest()
        if request:
            response = {'type':'request', 'status':False, 'result':None, 'exception':None}
            try:
                # Second positional arg False presumably disables downloading
                # (metadata only) — verify against the youtube_dl API.
                result = ydl.extract_info(request['url'], False)
                response['status'] = True
                response['result'] = result
            except Exception as e:
                response['exception'] = str(e)
            sendResponse(response)
if __name__ == "__main__":
    # Make the bundled youtube_dl package (shipped beside this script)
    # importable before entering the serve loop.
    ydl_lib = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'youtube_dl')
    sys.path.append(ydl_lib)
    mainLoop()
|
"""
Sub-sample the preprocessed training set into subsets based on click/non-click
per hour.
Input
------
ptrain.csv: the preprocessed training set.
num: the number of subsets. Default is 10.
Output
-------
train_1 ~ num.csv: num subsets of the training set.
"""
FILE_DIR = './data'
import os
import copy
import csv
import random
def random_partition(lines, num):
    '''Shuffle `lines` in place and split it into `num` near-equal slices.

    Returns a list of `num` lists whose concatenation is the shuffled input.
    Note: mutates `lines` via random.shuffle.
    '''
    random.shuffle(lines)
    division = len(lines) / float(num)
    # range() replaces the Python-2-only xrange(); behavior is identical on
    # Python 2 and it also works on Python 3.
    return [lines[int(round(division * i)):int(round(division * (i + 1)))]
            for i in range(num)]
def append_files(lines, num, is_header=False):
    '''Write `lines` across the `num` subset files.

    With is_header=True, (re)create each train_<i>.csv with `lines` as its
    single header row; otherwise randomly partition `lines` and append one
    partition to each existing file.
    '''
    mode = "w+" if is_header else "a"
    rows_per_file = [[lines]] * num if is_header else random_partition(lines, num)
    for idx, rows in enumerate(rows_per_file):
        target = os.path.join(FILE_DIR, 'train_' + str(idx) + '.csv')
        with open(target, mode) as f_subset:
            csv.writer(f_subset, delimiter=',').writerows(rows)
def sub_sample(filename, num=10):
    '''Get subsamples from .csv file. Default number of subsets is 10.

    Streams the file line by line, buffering all records that share the same
    hour (column index 2) and distributing each completed hour-group across
    the num subset files via append_files().
    '''
    with open(os.path.join(FILE_DIR, filename)) as f:
        is_header = 1
        lines = []  # buffer of records for the current hour
        for line in f:
            # if not header
            if not is_header:
                if lines != []:  # check if not empty
                    line_split = line.split(',')
                    # check if in same hour
                    if line_last == line_split[2]:
                        lines.append(line_split)
                    else:
                        # Hour changed: flush the buffered group.
                        # NOTE(review): the record that triggered the change is
                        # discarded (never appended to any buffer), so the
                        # first record of every new hour is lost — confirm.
                        append_files(lines, num=num)
                        lines = []
                else:
                    line_split = line.split(',')
                    lines.append(line_split)
                    line_last = line_split[2]  # hour indicator
            else:
                # header of the file: create the subset files with it
                file_header = line.split(',')
                append_files(file_header, num=num, is_header=is_header)
                is_header = 0
        # NOTE(review): the final hour-group remaining in `lines` at EOF is
        # never flushed to the subset files — confirm whether intended.
    print "subsampling SUCCEED!!"
if __name__ == "__main__":
    random.seed(3)  # fixed seed so the random partitions are reproducible
    sub_sample('ptrain.csv', num=10)
|
from gi.repository import Gtk, Notify, GObject
import logging
import time
import threading
import os
class PomodoroGui:
    """Glade-built GTK front end for the pomodoro timer.

    Wires widgets from pomodoro.glade to a PomodoroTimer and updates the UI
    from timer callbacks via GObject.idle_add, since those callbacks fire on
    the timer's worker thread.
    """

    def __init__(self):
        self.gladfile = "pomodoro.glade"
        self.builder = Gtk.Builder()
        self.builder.add_from_file(self.gladfile)
        # Auto-connect glade signal handler names to methods on this object.
        self.builder.connect_signals(self)
        self.window = self.builder.get_object("root_window")
        self.aboutdialog = self.builder.get_object("aboutdialog1")
        #self.statusbar = self.builder.get_object("statusbar1")
        #self.context_id = self.statusbar.get_context_id("status")
        # time left label
        self.time_left = self.builder.get_object("time_left_label")
        self.time_left.set_text("not running")
        # text entry box for timer duration (in minutes)
        self.timer_value = self.builder.get_object("timer_entry")
        #self.timer_value.set_text("1")
        rb1 = self.builder.get_object("rb1")
        rb1.set_active(True)
        # timer backend plus the callbacks that keep the UI in sync
        self.pomodoro_timer = PomodoroTimer()
        self.pomodoro_timer.register_running_callback(self.timer_on_off_callback)
        self.pomodoro_timer.register_time_left_callback(self.time_left_callback)
        self.window.show()

    def on_root_window_destroy(self, object, data=None):
        """Quit the GTK main loop when the main window is closed."""
        logging.debug("quit with cancel")
        Gtk.main_quit()

    def on_gtk_quit_activate(self, menuitem, data=None):
        """Quit the GTK main loop from the menu entry."""
        logging.debug("quit from menu")
        Gtk.main_quit()

    def on_gtk_about_activate(self, menuitem, data=None):
        """Show the About dialog modally, then hide it."""
        logging.debug("help about selected")
        self.response = self.aboutdialog.run()
        self.aboutdialog.hide()

    def on_button_run_clicked(self, button, data=None):
        """Start the timer using the entry value (minutes), converted to seconds."""
        timer_value = self.builder.get_object("timer_entry")
        #TODO validate input
        try:
            timer_value = float(timer_value.get_text()) * 60
        except Exception as e:
            #fixme: surface a validation error to the user instead of crashing
            raise e
        self.pomodoro_timer.start(timer_value)

    def on_button_cancel_clicked(self, button, data=None):
        """Stop the running countdown."""
        self.pomodoro_timer.running = False

    def radiobutton_toggle(self, widget):
        """Preset the duration entry (minutes) from the selected radio button."""
        rb_values = {'rb1': '25',
                     'rb2': '5',
                     'rb3': '15'}
        if widget.get_active():
            selected_button = Gtk.Buildable.get_name(widget)
            self.timer_value.set_text(rb_values[selected_button])

    # callbacks — invoked from the timer's worker thread
    def timer_on_off_callback(self, is_now_running):
        """
        disables / enables ui component based on the status of the PomodoroTimer
        """
        logging.debug("is_now_running %s" % is_now_running)
        button_run = self.builder.get_object("button_run")
        # idle_add marshals the widget update onto the GTK main loop thread.
        if(is_now_running):
            GObject.idle_add(lambda: button_run.set_sensitive(False))
        else:
            GObject.idle_add(lambda: button_run.set_sensitive(True))

    def time_left_callback(self, time_left):
        """Render remaining seconds as MM:SS on the label (via the GTK main loop)."""
        logging.debug("time_left: %s" % time_left)
        GObject.idle_add(lambda: self.time_left.set_text("%02.0f:%02.0f" % (time_left // 60, time_left % 60)))
class PomodoroTimer:
    """Countdown timer that ticks on a daemon thread and notifies observers.

    Observers subscribe via register_running_callback / register_time_left_callback
    and are invoked synchronously every time `running` or `time_left` changes.
    """

    def __init__(self):
        self._is_running = False
        self.running_callbacks = []
        self._seconds_remaining = 0
        self.time_left_callbacks = []

    def start(self, duration):
        """Begin a countdown of `duration` seconds on a background daemon thread."""
        self.time_left = duration
        worker = threading.Thread(target=self.make_notification, args=(duration,))
        worker.daemon = True
        worker.start()

    def make_notification(self, duration):
        """Tick once per second until done or cancelled, then notify the user."""
        self.running = True
        while self.time_left > 0:
            time.sleep(1)
            if not self.running:
                return False
            self.time_left -= 1
        Notify.init("pomodoro.py")
        main_text = "Pomodoro Timer"
        summary_text = "Time is up."
        notification = Notify.Notification.new(main_text, summary_text, None)
        notification.show()
        os.system("aplay --quiet static/notification01.wav")
        self.running = False

    @property
    def running(self):
        """Whether the countdown thread is currently active."""
        return self._is_running

    @running.setter
    def running(self, value):
        self._is_running = value
        for notify_observer in self.running_callbacks:
            notify_observer(self._is_running)

    def register_running_callback(self, callback):
        """
        callback(bool running): function that is called when 'running' changes its value
        """
        if callback not in self.running_callbacks:
            self.running_callbacks.append(callback)

    @property
    def time_left(self):
        """Seconds remaining in the current countdown."""
        return self._seconds_remaining

    @time_left.setter
    def time_left(self, new_time):
        self._seconds_remaining = new_time
        for notify_observer in self.time_left_callbacks:
            notify_observer(self._seconds_remaining)

    def register_time_left_callback(self, callback):
        """
        callback(float time_left): function that is called when 'time_left' changes its value
        """
        if callback not in self.time_left_callbacks:
            self.time_left_callbacks.append(callback)
if __name__ == "__main__":
    # Verbose debug logging with file/line/function context.
    FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=FORMAT)
    logger = logging.getLogger(__name__)
    # Required so the timer's worker thread can schedule GTK updates
    # via GObject.idle_add.
    GObject.threads_init()
    main = PomodoroGui()
    Gtk.main()
|
"""This module represents the Punjabi language.
.. seealso:: http://en.wikipedia.org/wiki/Punjabi_language
"""
import re
from translate.lang import common
class pa(common.Common):
    """This class represents Punjabi."""

    # Sentence-ending characters: danda (।), !, ?, and the ellipsis.
    sentenceend = "।!?…"

    # Pattern text is interpolated with sentenceend; the inline '#' notes are
    # part of the re.VERBOSE pattern string, not Python comments.
    sentencere = re.compile(r"""(?s)    # make . also match newlines
                .*?                     # anything, but match non-greedy
                [%s]                    # the puntuation for sentence ending
                \s+                     # the spacing after the puntuation
                (?=[^a-z\d])# lookahead that next part starts with
                            # caps
                """ % sentenceend, re.VERBOSE)

    # Replace Latin full stops (space- or newline-terminated) with the danda.
    puncdict = {
        ". ": "। ",
        ".\n": "।\n",
    }

    # Punjabi script has no capitalization, so capitalization checks are skipped.
    ignoretests = {
        'all': ["simplecaps", "startcaps"],
    }
|
from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db.models import get_model
from edc_crypto_fields.classes import ModelCryptor, FieldCryptor
class Command(BaseCommand):
    """Management command that encrypts model fields backed by encrypted field objects.

    Python 2 / pre-Django-1.8 style (optparse option_list, iteritems/itervalues,
    print statements). All sub-commands except --encrypt are read-only.
    """

    args = '--list-models --list-fields --check --dry-run --verify-lookup --decribe'
    help = 'Encrypt fields within any INSTALLED_APP model using an encrypted field object.'

    # The option list is assembled incrementally, one option per block.
    option_list = BaseCommand.option_list + (
        make_option('--encrypt',
                    action='store_true',
                    dest='encrypt',
                    default=False,
                    help=('Encrypts data in all models that use encryption. (DATA WILL BE CHANGED.).')),
        )
    option_list += (
        make_option('--list-models',
                    action='store_true',
                    dest='list',
                    default=False,
                    help=('Lists models using encryption. (Safe. Lists only, does not encrypt any data).')),
        )
    option_list += (
        make_option('--check',
                    action='store_true',
                    dest='check',
                    default=False,
                    help=('Checks if all instances of each model are encrypted. (checks only, does not encrypt any data).')),
        )
    option_list += (
        make_option('--list-fields',
                    action='store_true',
                    dest='list_fields',
                    default=False,
                    help=('Lists the fields in each model using encryption. (Safe. Lists only, '
                          'does not encrypt any data)..')),
        )
    option_list += (
        make_option('--dry-run',
                    action='store_true',
                    dest='dry_run',
                    default=False,
                    help=('Encrypts without saving. (Safe. Does not encrypt any data)')),
        )
    option_list += (
        make_option('--verify-lookup',
                    action='store_true',
                    dest='verify_lookup',
                    default=False,
                    help=('Verifies secrets and hashing in lookup table, bhp_crypto.models.crypt. '
                          '(Safe. Does not encrypt any data)')),
        )
    option_list += (
        make_option('--describe-plan',
                    action='store_true',
                    dest='describe',
                    default=False,
                    help=('Describes encryption plan by showing number of models, fields and '
                          'instances to be encrypted. (Safe. Does not encrypt any data)')),
        )

    def handle(self, *args, **options):
        """Dispatch to the sub-command selected by the command-line flags."""
        self.save = True
        if options['dry_run']:
            self.save = False
            self.encrypt(False)
        elif options['list']:
            self._list_encrypted_models()
        elif options['check']:
            self._check_models_encrypted()
        elif options['list_fields']:
            self._list_encrypted_fields()
        elif options['describe']:
            self.describe()
        elif options['verify_lookup']:
            self.verify_lookup()
        elif options['encrypt']:
            self.encrypt()
        else:
            raise CommandError('Unknown option, Try --help for a list of valid options')

    def encrypt(self, save=True):
        """For each app, encrypts all models with field objects that use encryption."""
        self._list_encrypted_models(count_only=True)
        self.describe()
        if not save:
            self.stdout.write('This is a dry-run, no data will be changed.\n')
        msg = 'No models to encrypt.'
        n = 0
        model_cryptor = ModelCryptor()
        all_encrypted_models = model_cryptor.get_all_encrypted_models()
        if all_encrypted_models:
            for encrypted_models in all_encrypted_models.itervalues():
                for encrypted_model in encrypted_models.itervalues():
                    self._encrypt_model(encrypted_model['model'], save)
            # NOTE(review): n is never incremented in the loop above, so this
            # message always reports 0 models encrypted — confirm intended.
            msg = 'Complete. {0} models encrypted.\n'.format(n)
        self.stdout.write(msg)
        self.stdout.flush()

    def _encrypt_model(self, model, save=True):
        """ Encrypts all instances for given model that are not yet encrypted."""
        model_cryptor = ModelCryptor()
        app_name = model._meta.app_label
        model_name = model._meta.object_name.lower()
        start = datetime.today()
        self.stdout.write('Encrypting {app_name}.{model}...'
                          'started {start}\n'.format(app_name=app_name,
                                                     model=model_name,
                                                     start=start.strftime("%H:%M:%S")))
        model_cryptor.encrypt_model(model, save)
        end = datetime.today()
        # Render elapsed wall-clock time as zero-padded H:M:S.
        hours, remainder = divmod((end - start).seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        self.stdout.write('done in {0}:{1}:{2}.\n'.format(str(hours).rjust(2, '0'),
                                                          str(minutes).rjust(2, '0'),
                                                          str(seconds).rjust(2, '0')))
        self.stdout.flush()

    def _list_encrypted_models(self, **kwargs):
        """Lists names of models that contain field objects that use encryption.

        Keyword Arguments:
            list-fields -- include for each model with the names of the fields that
                use encryption. (default: False)
            count_only -- just list the model count. (default: False)

        Returns a dict with 'models', 'fields' and 'instances' totals.
        """
        list_fields = kwargs.get('list_fields', False)
        count_only = kwargs.get('count_only', False)
        model_cryptor = ModelCryptor()
        n = 0
        field_count = 0
        instance_count_total = 0
        all_encrypted_models = model_cryptor.get_all_encrypted_models()
        for app_name, encrypted_models in all_encrypted_models.iteritems():
            for meta in encrypted_models.itervalues():
                model = meta['model']
                encrypted_fields = meta['encrypted_fields']
                field_count += len(meta['encrypted_fields'])
                n += 1
                instance_count = model.objects.all().count()
                instance_count_total += instance_count
                if not count_only:
                    self.stdout.write('{app_name}.{model}. {encrypted_fields} '
                                      'fields. ({records} records)\n'.format(app_name=app_name,
                                                                             model=model._meta.object_name.lower(),
                                                                             encrypted_fields=len(encrypted_fields),
                                                                             records=instance_count))
                    if list_fields:
                        self.stdout.write(' {encrypted_fields}\n'.format(encrypted_fields=' \n '.join(([' '.join((field.attname, '-'.join((field.algorithm, field.mode)))) for field in encrypted_fields]))))
        if not count_only:
            self.stdout.write('{0} models use encryption in {1} fields.\n'.format(n, field_count))
        return {'models': n, 'fields': field_count, 'instances': instance_count_total}

    def _list_encrypted_fields(self):
        """ Lists each model with the names of the fields that use encryption. """
        self._list_encrypted_models(list_fields=True)

    def _check_models_encrypted(self):
        """Checks the encryption status of each instance in each model. """
        model_cryptor = ModelCryptor()
        all_encrypted_models = model_cryptor.get_all_encrypted_models()
        for app_name, encrypted_models in all_encrypted_models.iteritems():
            print '\n' + app_name.upper()
            for meta in encrypted_models.itervalues():
                model = meta['model']
                model_cryptor.is_model_encrypted(model=model)

    def verify_lookup(self, **kwargs):
        """Verifies the hashes and secrets in the lookup model Crypt by decrypting the secrets,
        hashing them and comparing to the stored hashes.
        """
        print_messages = kwargs.get('print_messages', True)
        if print_messages:
            self.stdout.write('Verify secrets and hashes stored in lookup model '
                              '(bhp_crypto.models.crypt)...\n')
            self.stdout.write('Verify from newest to oldest.\n')
        n = 0
        verified = 0
        failed_hash = 0
        failed_decrypt = 0
        Crypt = get_model(*settings.CRYPT_MODEL)
        total = Crypt.objects.using('crypt').all().count()
        for instance in Crypt.objects.using('crypt').all().order_by('-modified'):
            if print_messages:
                # \r\x1b[K = carriage return + ANSI clear-line: in-place progress.
                self.stdout.write('\r\x1b[K {0} / {1} verifying...'.format(n, total))
            n += 1
            field_cryptor = FieldCryptor(instance.algorithm, instance.mode)
            try:
                # Reassemble the prefix+hash+prefix+secret string decrypt() expects.
                stored_secret = (
                    field_cryptor.cryptor.HASH_PREFIX +
                    instance.hash +
                    field_cryptor.cryptor.SECRET_PREFIX +
                    instance.secret)
                plain_text = field_cryptor.decrypt(stored_secret)
                plain_text_encrypt_decrypt = field_cryptor.decrypt(field_cryptor.encrypt(plain_text))
                if plain_text != plain_text_encrypt_decrypt:
                    self.stdout.write('pk=\'{0}\' failed on secrets comparison\n'.format(instance.id))
                    print plain_text + '\n\n'
                    print plain_text_encrypt_decrypt + '\n\n'
                    # NOTE(review): aborts the entire verification on the first
                    # round-trip mismatch (no summary printed) — confirm intended.
                    return
                test_hash = field_cryptor.get_hash(plain_text)
                if test_hash != instance.hash:
                    failed_hash += 1
                    if print_messages:
                        self.stdout.write('pk=\'{0}\' failed on hash comparison\n'.format(instance.id))
                else:
                    verified += 1
            except:
                # Bare except: any decryption error is counted, never raised.
                if print_messages:
                    self.stdout.write('pk=\'{0}\' failed on decrypt\n'.format(instance.id))
                else:
                    print 'pk=\'{0}\' failed on decrypt\n'.format(instance.id)
                failed_decrypt += 1
            del field_cryptor
        if print_messages:
            self.stdout.flush()
        msg = ('Total secrets: {0}\nVerified: {1}\nFailed decrypt: {2}\nFailed hash comparison: '
               ' {3}\nDone.').format(n, verified, failed_decrypt, failed_hash)
        if print_messages:
            self.stdout.write(msg)
        else:
            print msg

    def describe(self):
        """Prints totals of models, fields and instances pending encryption."""
        model_cryptor = ModelCryptor()
        counts = self._list_encrypted_models(count_only=True)
        all_encrypted_models = model_cryptor.get_all_encrypted_models()
        unencrypted_instances = 0
        for encrypted_models in all_encrypted_models.itervalues():
            for meta in encrypted_models.itervalues():
                unencrypted_values_set, field_name = model_cryptor.get_unencrypted_values_set(meta['model'])
                unencrypted_instances += unencrypted_values_set.count()
        counts.update({'unencrypted_instances': unencrypted_instances})
        # Estimate assumes a throughput of ~120 instances per minute.
        hours, minutes = divmod(unencrypted_instances / 120, 60)
        counts.update({'estimated_time': '{0} hour {1} minutes.'.format(hours, minutes)})
        self.stdout.write('Models: {models}\nFields: {fields}\nTotal instance: {instances}\n'
                          'Unencrypted instances {unencrypted_instances}\n'
                          'Estimated time: {estimated_time}\n'.format(**counts))
|
import re
from django import forms
from django.contrib import admin
from .models import Mirror, MirrorProtocol, MirrorUrl, MirrorRsync
class MirrorUrlForm(forms.ModelForm):
    """Admin form for MirrorUrl that normalizes the URL on validation."""
    class Meta:
        model = MirrorUrl

    def clean_url(self):
        """Return the URL stripped of surrounding whitespace, with a trailing slash."""
        # ensure we always save the URL with a trailing slash
        url = self.cleaned_data["url"].strip()
        # endswith() is safe on an empty string, unlike url[-1], which raised
        # IndexError when the stripped value was empty.
        if url.endswith('/'):
            return url
        return url + '/'
class MirrorUrlInlineAdmin(admin.TabularInline):
    """Inline editor for a mirror's URLs on the Mirror admin page."""
    model = MirrorUrl
    form = MirrorUrlForm
    extra = 3  # show three blank URL rows by default
# IPv4 dotted-quad address, each octet 0-255, with an optional /0-32 CIDR suffix.
ipv4nm_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}(/(\d|[1-2]\d|3[0-2])){0,1}$')
class IPAddressNetmaskField(forms.fields.RegexField):
    """Form field validating an IPv4 address, optionally followed by a /nn netmask."""
    default_error_messages = {
        'invalid': u'Enter a valid IPv4 address, possibly including netmask.',
    }

    def __init__(self, *args, **kwargs):
        # Pre-bind the validation regex; remaining RegexField args pass through.
        super(IPAddressNetmaskField, self).__init__(ipv4nm_re, *args, **kwargs)
class MirrorRsyncForm(forms.ModelForm):
    """Admin form for MirrorRsync with netmask-aware IP validation."""
    class Meta:
        model = MirrorRsync
    # Override the model's ip field with the netmask-capable validator above.
    ip = IPAddressNetmaskField(label='IP')
class MirrorRsyncInlineAdmin(admin.TabularInline):
    """Inline editor for a mirror's rsync IP entries on the Mirror admin page."""
    model = MirrorRsync
    form = MirrorRsyncForm
    extra = 2  # show two blank rsync rows by default
class MirrorAdminForm(forms.ModelForm):
    """Admin form for Mirror; restricts upstream choices to tier 0-1 mirrors."""
    class Meta:
        model = Mirror
    upstream = forms.ModelChoiceField(queryset=Mirror.objects.filter(tier__gte=0, tier__lte=1), required=False)
class MirrorAdmin(admin.ModelAdmin):
    """Admin for mirrors, with inline editing of their URLs and rsync entries."""
    form = MirrorAdminForm
    list_display = ('name', 'tier', 'country', 'active', 'public', 'isos', 'admin_email', 'supported_protocols')
    list_filter = ('tier', 'country', 'active', 'public')
    search_fields = ('name',)
    inlines = [
        MirrorUrlInlineAdmin,
        MirrorRsyncInlineAdmin,
    ]
class MirrorProtocolAdmin(admin.ModelAdmin):
    """Admin for mirror protocols (http, ftp, rsync, ...)."""
    list_display = ('protocol', 'is_download',)
    list_filter = ('is_download',)
# Register the model admins with the default admin site.
admin.site.register(Mirror, MirrorAdmin)
admin.site.register(MirrorProtocol, MirrorProtocolAdmin)
|
from __future__ import print_function
import sys
import warnings
import wx
import traits.etsconfig.api
traits.etsconfig.api.ETSConfig.toolkit = 'wx'
from .external import backport_chaco_viridis # noqa: F401
from .gui import frontend
def prepare_app():
    """Create the wx application and main frame.

    Returns the wx.App with its `frame` attribute set to the main window.
    """
    # bypass "iCCP: known incorrect sRGB profile":
    wx.Log.SetLogLevel(0)
    # first initialize the app to prevent errors in Windows,
    # which is checking some wx runtime variables beforehand.
    app = wx.App(False)
    # get version
    try:
        from ._version import version
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort fallback
        # without masking interpreter-level exits.
        warnings.warn("Could not determine Shape-Out version.")
        version = None
    app.frame = frontend.Frame(version)
    return app
if __name__ == "__main__":
    # get session file: the last .zmso argument wins
    session_file = None
    for arg in sys.argv:
        if arg.endswith(".zmso"):
            print("\nUsing Session "+arg)
            session_file=arg
        else:
            # NOTE(review): sys.argv[0] (the script path itself) also lands
            # here and is reported as an ignored parameter — confirm intended.
            print("Ignoring command line parameter: "+arg)
    app = prepare_app()
    app.frame.InitRun(session_file=session_file)
    app.MainLoop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.