prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
le.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
# when a task is in processing, the modify may conflict with the running task.
# postpone the modify after task finished.
logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
self._postpone_request.append(task)
return
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
if _schedule.get('cancel'):
logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
task['status'] = self.taskdb.BAD
self.update_task(task)
self.projects[task['project']].task_queue.delete(task['taskid'])
return task
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
    '''Called when a status pack is arrived'''
    try:
        process_ok = task['track']['process']['ok']
        if not self.projects[task['project']].task_queue.done(task['taskid']):
            # fix: use the module-level `logger` (was `logging.error`,
            # which bypasses this module's logger configuration)
            logger.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
            return None
    except KeyError as e:
        logger.error("Bad status pack: %s", e)
        return None

    # dispatch to success / failure handling based on the processor result
    if process_ok:
        ret = self.on_task_done(task)
    else:
        ret = self.on_task_failed(task)

    # record fetch/process timing in the 5-minute timing counters
    if task['track']['fetch'].get('time'):
        self._cnt['5m_time'].event((task['project'], 'fetch_time'),
                                   task['track']['fetch']['time'])
    if task['track']['process'].get('time'):
        self._cnt['5m_time'].event((task['project'], 'process_time'),
                                   task['track']['process'].get('time'))
    self.projects[task['project']].active_tasks.appendleft((time.time(), task))
    return ret
def on_task_done(self, task):
    '''Called when a task is done and success, called by `on_task_status`'''
    task['status'] = self.taskdb.SUCCESS
    task['lastcrawltime'] = time.time()

    if 'schedule' in task:
        if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
            # auto re-crawl: keep the task ACTIVE and schedule the next
            # run `age` seconds from now
            task['status'] = self.taskdb.ACTIVE
            next_exetime = task['schedule'].get('age')
            task['schedule']['exetime'] = time.time() + next_exetime
            self.put_task(task)
        else:
            del task['schedule']
    self.update_task(task)

    project = task['project']
    self._cnt['5m'].event((project, 'success'), +1)
    self._cnt['1h'].event((project, 'success'), +1)
    self._cnt['1d'].event((project, 'success'), +1)
    self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
    # fix: final argument was garbled ("ta | sk") in the source
    logger.info('task done %(project)s:%(taskid)s %(url)s', task)
    return task
def on_task_failed(self, task):
    '''Called when a task is failed, called by `on_task_status`'''
    if 'schedule' not in task:
        old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
        if old_task is None:
            # fix: use module `logger` with lazy %-args (was `logging.error(... % task)`)
            logger.error('unknown status pack: %s', task)
            return
        task['schedule'] = old_task.get('schedule', {})

    # fix: subscript was garbled ("task['schedule | ']") in the source
    retries = task['schedule'].get('retries', self.default_schedule['retries'])
    retried = task['schedule'].get('retried', 0)

    project_info = self.projects[task['project']]
    retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
    next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))

    if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
        # auto_recrawl tasks never fail permanently; retry no later than `age`
        next_exetime = min(next_exetime, task['schedule'].get('age'))
    else:
        if retried >= retries:
            next_exetime = -1  # retries exhausted -> fail permanently
        elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
            next_exetime = task['schedule'].get('age')

    if next_exetime < 0:
        # give up: mark the task FAILED and update the counters
        task['status'] = self.taskdb.FAILED
        task['lastcrawltime'] = time.time()
        self.update_task(task)

        project = task['project']
        self._cnt['5m'].event((project, 'failed'), +1)
        self._cnt['1h'].event((project, 'failed'), +1)
        self._cnt['1d'].event((project, 'failed'), +1)
        self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
        # fix: lazy %-args instead of eager `% task` interpolation
        logger.info('task failed %(project)s:%(taskid)s %(url)s', task)
        return task
    else:
        # schedule a retry `next_exetime` seconds from now
        task['schedule']['retried'] = retried + 1
        task['schedule']['exetime'] = time.time() + next_exetime
        task['lastcrawltime'] = time.time()
        self.update_task(task)
        self.put_task(task)

        project = task['project']
        self._cnt['5m'].event((project, 'retry'), +1)
        self._cnt['1h'].event((project, 'retry'), +1)
        self._cnt['1d'].event((project, 'retry'), +1)
        # self._cnt['all'].event((project, 'retry'), +1)
        logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
            retried, retries), task)
        return task
def on_select_task(self, task):
    '''Called when a task is selected to fetch & process'''
    logger.info('select %(project)s:%(taskid)s %(url)s', task)

    info = self.projects.get(task['project'])
    assert info, 'no such project'

    # attach project metadata so fetcher/processor can detect project updates
    task['type'] = self.TASK_PACK
    task['group'] = info.group
    task['project_md5sum'] = info.md5sum
    task['project_updatetime'] = info.updatetime

    # lazy join project.crawl_config
    crawl_config = getattr(info, 'crawl_config', None)
    if crawl_config:
        task = BaseHandler.task_join_crawl_config(task, crawl_config)

    info.active_tasks.appendleft((time.time(), task))
    self.send_task(task)
    return task
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler Mixin class for one mode
overwirted send_task method
call processor.on_task(fetcher.fetch(task)) instead of consuming queue
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
url - url or taskid, parameters will be used if in taskdb
project - can be ignored if only one project exists.
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the imp | lied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libqnotero import qt
if qt.pyqt == 5:
from PyQt5.QtCore import *
else:
from PyQt4.QtCore import *
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``Category.description``.

    NOTE: three lines of the frozen ORM snapshot were garbled by stray
    ``|`` extraction artifacts in the source; they are restored here
    (``start_date``, ``created_by`` and ``complete_apps``). No semantic
    change is intended.
    """

    def forwards(self, orm):
        # Adding field 'Category.description'
        db.add_column('submissions_category', 'description',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Category.description'
        db.delete_column('submissions_category', 'description')

    # Frozen ORM snapshot used by South to build the fake ORM for this
    # migration. Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'submissions.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'submissions.challenge': {
            'Meta': {'object_name': 'Challenge'},
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {})
        },
        'submissions.entry': {
            'Meta': {'object_name': 'Entry'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submissions.Category']", 'null': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
            'team_desciption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'team_members': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'team_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'to_market': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
            'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '255'})
        },
        'users.profile': {
            'Meta': {'object_name': 'Profile'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['submissions']
# Author: Vikram Raman
# Date: 09-12-2015
import time
# edit distance between two strings
# e(i,j) = min (1 + e(i-1,j) | 1 + e(i,j-1) | diff(i,j) + e(i-1,j-1))
def editdistance(s1, s2):
    """Return the Levenshtein edit distance between s1 and s2.

    None is treated as the empty string.  Classic O(m*n) dynamic
    programming using the recurrence
        e(i, j) = min(1 + e(i-1, j), 1 + e(i, j-1), diff(i, j) + e(i-1, j-1))
    (stray "|" extraction artifacts in the original body are removed;
    the algorithm is unchanged).
    """
    m = 0 if s1 is None else len(s1)
    n = 0 if s2 is None else len(s2)
    if m == 0:
        return n
    if n == 0:
        return m
    # table[i][j] = distance between s1[:i] and s2[:j]; row 0 / column 0
    # are the costs of inserting/deleting every character.
    table = [list(range(n + 1))]
    for i in range(1, m + 1):
        row = [i]
        for j in range(1, n + 1):
            row.append(min(1 + table[i - 1][j],          # delete from s1
                           1 + row[j - 1],               # insert into s1
                           diff(s1, s2, i, j) + table[i - 1][j - 1]))  # substitute
        table.append(row)
    return table[m][n]


def diff(s1, s2, i, j):
    """Substitution cost for s1[i-1] vs s2[j-1]: False (0) if equal, True (1) if not."""
    return s1[i - 1] != s2[j - 1]
# Demo driver (Python 2 syntax: bare ``print`` statements).
s1 = "exponential"
s2 = "polynomial"
print "s1=%s, s2=%s" % (s1,s2)
# time.clock() measures CPU/wall time depending on platform (removed in
# Python 3.8); adequate for a rough timing of the DP computation here.
start_time = time.clock()
distance=editdistance(s1, s2)
print "distance=%d" % (distance)
print("--- %s seconds ---" % (time.clock() - start_time))
print editdistance("foo", "bar")
|
#!/usr/bin/env python
# coding:utf-8
# Copyright (c) 2011, Vadim Velikodniy <vadim-velikodniy@yandex.ru>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
import httplib, json
import fotki.collection as collection
from fotki.auth import DEFAULT_AUTH
class UserException(Exception):
    """Exception class for errors while fetching user information."""
    pass
class UserNotFound(UserException):
    """Raised when the requested user does not exist ('User not found')."""
    pass
class User(object):
    """A Yandex.Fotki service user.

    Provides access to the user's photo, album and tag collections,
    given sufficient access rights.  (The ``class`` keyword was garbled
    by an extraction artifact in the source; restored here.  Docstrings
    translated from Russian.)
    """

    def __init__(self, username, auth=DEFAULT_AUTH):
        """Load the service document.

        auth -- authentication object,
        username -- user name string."""
        self._username = username
        self._auth = auth
        self._get_info()

    def _get_info(self):
        """Fetch the service document and extract the links to the photo,
        album and tag collections."""
        connection = httplib.HTTPConnection('api-fotki.yandex.ru')
        headers = {'Accept': 'application/json'}
        headers.update(self._auth.auth_header)
        api_path = '/api/users/%s/' % self._username
        try:
            connection.request('GET', api_path, headers=headers)
            response = connection.getresponse()
            data = response.read()
        finally:
            # fix: close the connection even if the request/read raises
            connection.close()
        if response.status == 200:
            self._info = json.loads(data)
        elif response.status == 404:
            raise UserNotFound('User %s not found' % self._username)
        else:
            raise UserException(response.reason)

    def _get_title(self):
        return self._info['title']

    title = property(_get_title, doc='Title')

    def photos(self):
        """Return the user's full photo collection."""
        url = self._info['collections']['photo-list']['href']
        return collection.PhotoCollection(url, self._auth)

    def albums(self):
        """Return the user's album collection."""
        url = self._info['collections']['album-list']['href']
        return collection.AlbumCollection(url, self._auth)

    def tags(self):
        """Return the user's tag collection."""
        url = self._info['collections']['tag-list']['href']
        return collection.TagCollection(url, self._auth)
|
simulation_serial import simulate_list_serial_workflow, corrupt_list_serial_workflow
from workflows.serial.pipelines.pipeline_serial import continuum_imaging_list_serial_workflow, ical_list_serial_workflow
import pprint
import time
pp = pprint.PrettyPrinter()
import logging
def init_logging():
    """Configure the root logger to append to <results_dir>/imaging-pipeline.log.

    Returns the root logger.
    NOTE(review): relies on a module-level ``results_dir`` defined in the
    truncated import section of this fragment -- confirm it is set before
    this runs.
    """
    log = logging.getLogger()
    logging.basicConfig(filename='%s/imaging-pipeline.log' % results_dir,
                        filemode='a',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.INFO)
    return log
log = init_logging()
log.info("Starting imaging-pipeline")
# In[2]:
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'Greys'
# We make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. All over parameters are determined from this number.
# In[3]:
nfreqwin=7
ntimes=5
rmax=300.0
frequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)
#ntimes=11
#rmax=300.0
#frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
#phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
phasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
bvis_list=simulate_list_serial_workflow('LOWBD2',
frequency=frequency,
channel_bandwidth=channel_bandwidth,
times=times,
phasecentre=phasecentre,
order='frequency',
rmax=rmax)
vis_list = [convert_blockvisibility_to_visibility(bv) for bv in bvis_list]
print('%d elements in vis_list' % len(vis_list))
log.debug('%d elements in vis_list' % len(vis_list))
# In[4]:
wprojection_planes=1
advice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
advice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
vis_slices = advice_low['vis_slices']
npixel=advice_high['npixels2']
cellsize=min(advice_low['cellsize'], advice_high['cellsize'])
# Now make a graph to fill with a model drawn from GLEAM
# In[ ]:
gleam_model = [create_low_test_image_from_gleam(npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=1.0,
applybeam=True)
for f, freq in enumerate(frequency)]
log.info('About to make GLEAM model')
# In[ ]:
log.info('About to run predict to get predicted visibility')
log.info('About to run predict to get predicted visibility')
start=time.time()
predicted_vislist = predict_list_serial_workflow(vis_list, gleam_model,
context='wstack', vis_slices=vis_slices)
#log.info('About to run corrupt to get corrupted visibility')
#corrupted_vislist = corrupt_list_serial_workflow(predicted_vislist, phase_error=1.0)
end=time.time()
print('predict finished in %f seconds'%(end-start),flush=True)
# Get the LSM. This is currently blank.
# In[ ]:
model_list = [create_image_from_visibility(vis_list[f],
npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"))
for f, freq in enumerate(frequency)]
# In[ ]:
start=time.time()
print('About to start invert' ,flush=True)
dirty_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
end=time.time()
print('invert finished in %f seconds'%(end-start),flush=True)
# Create and execute graphs to make the dirty image and PSF
# In[ ]:
log.info('About to run invert to get dirty image')
dirty = dirty_list[0][0]
#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
print(qa_image(dirty))
export_image_to_fits(dirty, '%s/imaging-dirty.fits'
%(results_dir))
log.info('About to run invert to get PSF')
psf = psf_list[0][0]
#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
print(qa_image(psf))
export_image_to_fits(psf, '%s/imaging-psf.fits'
%(results_dir))
# Now deconvolve using msclean
# In[ ]:
log.info('About to run deconvolve')
start=time.time()
deconvolved, _ = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
end=time.time()
print('deconvolve finished in %f seconds'%(end-start),flush=True)
#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
# In[ ]:
log.info('About to run continuum imaging')
start=time.time()
continuum_imaging_list = continuum_imaging_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
| fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
# In[ ]:
end=time.time()
print('continuum imaging finished in %f seconds'%(end-start) | ,flush=True)
deconvolved = continuum_imaging_list[0][0]
residual = continuum_imaging_list[1][0]
restored = continuum_imaging_list[2][0]
#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(deconvolved, context='Clean image - no selfcal'))
#plt.show()
#f=show_image(restored, title='Restored clean image - no selfcal',
# cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(restored, context='Restored clean image - no selfcal'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits'
|
# Copyright (C) 2011 REES Marche <http://www.reesmarche.org>
#
# This file is part of ``django-flexi-auth``.
# ``django-flexi-auth`` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# ``django-flexi-auth`` is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ``django-flexi-auth``. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from flexi_auth.models import PermissionBase
class Author(models.Model):
    # Test model: a person who writes articles/books.
    name = models.CharField(max_length=50)
    surname = models.CharField(max_length=50)
class Magazine(models.Model):
    # Test model: a magazine articles can be published to.
    name = models.CharField(max_length=50)
    printing = models.IntegerField()
class Article(models.Model):
    """Test model implementing the flexi-auth authorization API directly
    (without inheriting ``PermissionBase``, unlike ``Book`` below)."""
    title = models.CharField(max_length=50)
    body = models.TextField()
    author = models.ForeignKey(Author)
    published_to = models.ManyToManyField(Magazine)

    def __unicode__(self):
        return "An article with title '%s'" % self.title

    ##-------------- authorization API----------------##
    # table-level CREATE permission
    @classmethod
    def can_create(cls, user, context):
        # print "Now entering ``can_create`` method of model ``Article``..."
        # print "Executing check for permission 'CREATE' on model %(cls)s for user %(user)s wrt context %(ctx)s"\
        # % {'cls':cls, 'user':user, 'ctx':context}
        # Grant CREATE on "BarSite" always, on "FooSite" only for the
        # morning edition; deny in every other case (including no context).
        if context:
            website = context.get('website', None)
            edition = context.get('edition', None)
            if (website=="BarSite" or (website=="FooSite" and edition=="morning")):
                return True
        return False

    # row-level VIEW permission
    def can_view (self, user, context):
        # print "Now entering ``can_view`` method of model ``Article``..."
        # print "Executing check for permission 'VIEW' on instance %(self)s for user %(user)s wrt context %(ctx)s"\
        # % {'self':self, 'user':user, 'ctx':context}
        # Same site/edition rule as can_create, evaluated per instance.
        if context:
            website = context.get('website', None)
            edition = context.get('edition', None)
            if (website=="BarSite" or (website=="FooSite" and edition=="morning")):
                return True
        return False
    ##-------------------------------------------------##
class Book(models.Model, PermissionBase):
    """Test model implementing the flexi-auth authorization API via the
    ``PermissionBase`` mixin (compare with ``Article`` above)."""
    title = models.CharField(max_length=50)
    content = models.TextField()
    authors = models.ManyToManyField(Author)

    def __unicode__(self):
        return "A book with title '%s'" % self.title

    ##-------------- authorization API----------------##
    # table-level CREATE permission
    @classmethod
    def can_create(cls, user, context):
        # print "Now entering ``can_create`` method of model ``Book``..."
        # print "Executing check for permission 'CREATE' on model %(cls)s for user %(user)s wrt context %(ctx)s"\
        # % {'cls':cls, 'user':user, 'ctx':context}
        # Grant CREATE for Italian books, or Dutch paperbacks; deny
        # otherwise (including when no context is supplied).
        if context:
            language = context.get('language', None)
            cover = context.get('cover', None)
            if (language=="Italian" or (language=="Dutch" and cover=="Paperback")):
                return True
        return False

    # row-level VIEW permission
    def can_view (self, user, context):
        # print "Now entering ``can_view`` method of model ``Book``..."
        # print "Executing check for permission 'VIEW' on instance %(self)s for user %(user)s wrt context %(ctx)s"\
        # % {'self':self, 'user':user, 'ctx':context}
        # Same language/cover rule as can_create, evaluated per instance.
        if context:
            language = context.get('language', None)
            cover = context.get('cover', None)
            if (language=="Italian" or (language=="Dutch" and cover=="Paperback")):
                return True
        return False
    ##-------------------------------------------------##
|
"""
Deploy state manager
"""
from py_mina.utils import _AttributeDict
################################################################################
# Default state
################################################################################
# Default deploy lifecycle state.  One slot per deploy stage; ``None``
# means "not set yet".  (The ``'finalize'`` value was garbled by an
# extraction artifact -- "N | one" -- and is restored to ``None`` here.)
state = _AttributeDict({
    'pre_deploy': None,
    'deploy': None,
    'post_deploy': None,
    'finalize': None,
    'success': None,
    'on_success': None,
})
################################################################################
# Set state
################################################################################
def set(key, value):
    """Assign *value* to a predefined state *key*.

    Raises Exception if *key* is not one of the keys declared in the
    default ``state`` dict.  NOTE: deliberately shadows the builtin
    ``set``; import the ``set_state`` alias to avoid the conflict.
    """
    # idiom fix: membership test on the dict directly instead of .keys()
    if key in state:
        state[key] = value
    else:
        raise Exception('State "%s" is not defined' % key)


# Alias to prevent conflict when importing "py_mina.config" and "py_mina.state"
set_state = set
|
import logging

# Note: do not introduce unnecessary library dependencies here, e.g. gym.
# This file is imported from the tune module in order to register RLlib agents.
# (Two import lines were garbled by extraction artifacts -- "multi_agent_en | v"
# and "T | orchPolicy" -- and are restored below.)
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.tune.registry import register_trainable
def _setup_logger():
    """Attach a stream handler with a timestamped format to the
    "ray.rllib" logger and stop propagation to the root logger."""
    fmt = "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(fmt))
    rllib_logger = logging.getLogger("ray.rllib")
    rllib_logger.addHandler(stream_handler)
    rllib_logger.propagate = False
def _register_all():
    """Register every built-in, contrib and fake RLlib algorithm with Tune.

    Imports are local to the function to defer the heavy RLlib agent
    imports until registration time.
    """
    from ray.rllib.agents.trainer import Trainer, with_common_config
    from ray.rllib.agents.registry import ALGORITHMS, get_agent_class
    from ray.rllib.contrib.registry import CONTRIBUTED_ALGORITHMS

    # Register real algorithms plus the internal fake/testing trainables.
    for key in list(ALGORITHMS.keys()) + list(CONTRIBUTED_ALGORITHMS.keys(
    )) + ["__fake", "__sigmoid_fake_data", "__parameter_tuning"]:
        register_trainable(key, get_agent_class(key))

    def _see_contrib(name):
        """Returns dummy agent class warning algo is in contrib/."""

        class _SeeContrib(Trainer):
            _name = "SeeContrib"
            _default_config = with_common_config({})

            def setup(self, config):
                raise NameError(
                    "Please run `contrib/{}` instead.".format(name))

        return _SeeContrib

    # also register the aliases minus contrib/ to give a good error message
    for key in list(CONTRIBUTED_ALGORITHMS.keys()):
        assert key.startswith("contrib/")
        alias = key.split("/", 1)[1]
        register_trainable(alias, _see_contrib(alias))
# Install the RLlib logger and register all trainables at import time.
_setup_logger()
_register_all()

# Public API re-exported from the submodules imported above.
__all__ = [
    "Policy",
    "TFPolicy",
    "TorchPolicy",
    "RolloutWorker",
    "SampleBatch",
    "BaseEnv",
    "MultiAgentEnv",
    "VectorEnv",
    "ExternalEnv",
]
|
import math
def factor(n):
    """Return the prime factorization of n as a list in non-decreasing
    order, with multiplicity (e.g. factor(12) -> [2, 2, 3]).

    Returns [] for n < 2.  Fixes two bugs in the original: the loop
    condition ``d < math.sqrt(n)`` dropped the final remaining prime
    factor (factor(12) returned [2, 2], factor(4) returned []), and
    ``n = n / d`` produced floats on Python 3.
    """
    d = 2
    factors = []
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d  # integer division keeps n an int
        d += 1
    if n > 1:
        # whatever remains after trial division is prime
        factors.append(n)
    return factors
|
essible (and
often visible) to the resource owner. Since such applications
reside within the user-agent, they can make seamless use of the
user-agent capabilities when requesting authorization.
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
As a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
Unlike the authorization code grant type in which the client makes
separate requests for authorization and access token, the client
receives the access token as the result of the authorization request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device.
"""
def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
        state=None, **kwargs):
    """Prepare the implicit grant request URI.

    The client constructs the request URI by adding the following
    parameters to the query component of the authorization endpoint URI
    using the "application/x-www-form-urlencoded" format, per `Appendix B`_:

    :param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
                         and it should have been registered with the OAuth
                         provider prior to use. As described in `Section 3.1.2`_.
    :param scope: OPTIONAL. The scope of the access request as described by
                  `Section 3.3`_. These may be any string but are commonly
                  URIs or various categories such as ``videos`` or ``documents``.
    :param state: RECOMMENDED. An opaque value used by the client to maintain
                  state between the request and callback. The authorization
                  server includes this value when redirecting the user-agent back
                  to the client. The parameter SHOULD be used for preventing
                  cross-site request forgery as described in `Section 10.12`_.
    :param kwargs: Extra arguments to include in the request URI.

    In addition to supplied parameters, OAuthLib will append the ``client_id``
    that was provided in the constructor as well as the mandatory ``response_type``
    argument, set to ``token``::

        >>> from oauthlib.oauth2 import MobileApplicationClient
        >>> client = MobileApplicationClient('your_id')
        >>> client.prepare_request_uri('https://example.com')
        'https://example.com?client_id=your_id&response_type=token'
        >>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
        'https://example.com?client_id=your_id&response_type=token&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
        >>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
        'https://example.com?client_id=your_id&response_type=token&scope=profile+pictures'
        >>> client.prepare_request_uri('https://example.com', foo='bar')
        'https://example.com?client_id=your_id&response_type=token&foo=bar'

    .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
    .. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
    .. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
    .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
    .. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
    """
    # Delegates to the shared grant-URI builder with response_type='token'.
    return prepare_grant_uri(uri, self.client_id, 'token',
            redirect_uri=redirect_uri, state=state, scope=scope, **kwargs)
def parse_request_uri_response(self, uri, state=None, scope=None):
"""Parse the response URI fragment.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format:
:param uri: The callback URI that resulted from the user being redirected
back from the provider to you, the client.
:param state: The state provided in the authorization request.
:param scope: The scopes provided in the authorization request.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
**state**
If you provided the state parameter in the authorization phase, then
the provider is required to include that exact state value in the
response.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
A few example responses can be seen below::
>>> response_uri = 'https://example.com/callback#access_token=sdlfkj452&state=ss345asyht&token_type=Bearer&scope=hello+world'
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.parse_request_uri_response(response_uri)
{
'access_token': 'sdlfkj452',
'token_type': 'Bearer',
'state': 'ss345asyht',
'scope': [u'hello', u'world']
}
>>> client.parse_request_uri_response(response_uri, state='other')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/__init__.py", line 598, in parse_request_uri_response
**scope**
File "oauthlib/oauth2/rfc6749/parameters.py", line 197, in parse_implicit_response
raise ValueError("Mismatching or missing state in params.")
ValueError: Mismatching or missing state in params.
>>> client.parse_request_uri_response(response_uri, scope=['other'])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/__init__.py", line 598, in parse_request_uri_response
**scope**
File "oauthlib/oauth2/rfc6749/parameters.py", line 199, in parse_implicit_response
validate_token_parameters(params, scope)
File "oauthlib/oauth2/rfc6749/parameters.py", line 285, in validate_token_parameters
raise Warning("Scope has changed to %s." % new_scope)
Warning: Scope has changed to [u'hello', u'world'].
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 3. |
# The MIT License (MIT)
#
# Copyright (c) 2016 Frederic Guillot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTI | ON OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from cliff import app
from cliff import commandmanager
| from pbr import version as app_version
import sys
from kanboard_cli.commands import application
from kanboard_cli.commands import project
from kanboard_cli.commands import task
from kanboard_cli import client
class KanboardShell(app.App):
    """Kanboard command line shell built on top of cliff's App."""

    def __init__(self):
        super(KanboardShell, self).__init__(
            description='Kanboard Command Line Client',
            version=app_version.VersionInfo('kanboard_cli').version_string(),
            command_manager=commandmanager.CommandManager('kanboard.cli'),
            deferred_help=True)
        # Both are populated in initialize_app() once options are parsed.
        self.client = None
        self.is_super_user = True

    def build_option_parser(self, description, version, argparse_kwargs=None):
        """Extend the base option parser with Kanboard connection flags.

        :param description: application description shown in --help.
        :param version: application version string.
        :param argparse_kwargs: extra keyword args forwarded to argparse.
        :return: the configured argument parser.
        """
        parser = super(KanboardShell, self).build_option_parser(
            description, version, argparse_kwargs=argparse_kwargs)
        # One (flag, metavar, help) spec per connection option.
        option_specs = (
            ('--url', '<api url>', 'Kanboard API URL'),
            ('--username', '<api username>', 'API username'),
            ('--password', '<api password>', 'API password/token'),
            ('--auth-header', '<authentication header>',
             'API authentication header'),
        )
        for flag, metavar, help_text in option_specs:
            parser.add_argument(flag, metavar=metavar, help=help_text)
        return parser

    def initialize_app(self, argv):
        """Open the API connection and register all shell commands."""
        manager = client.ClientManager(self.options)
        self.client = manager.get_client()
        self.is_super_user = manager.is_super_user()

        # command name -> cliff command class
        registry = (
            ('app version', application.ShowVersion),
            ('app timezone', application.ShowTimezone),
            ('project show', project.ShowProject),
            ('project list', project.ListProjects),
            ('task create', task.CreateTask),
            ('task list', task.ListTasks),
        )
        for name, command_class in registry:
            self.command_manager.add_command(name, command_class)
def main(argv=None):
    """CLI entry point.

    :param argv: command line arguments without the program name.
        Defaults to ``sys.argv[1:]`` evaluated at call time; the previous
        default argument (``argv=sys.argv[1:]``) froze the argument list
        at import time, which breaks callers that mutate ``sys.argv``
        before invoking :func:`main`.
    :return: the shell's integer exit status.
    """
    if argv is None:
        argv = sys.argv[1:]
    return KanboardShell().run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
import pytest
from c | lustaar.authorize.conditions import TrueCondition
@pytest.fixture
def condition():
    """Provide a fresh TrueCondition instance for each test."""
    return TrueCondition()
class TestCall(object):
    """Tests for calling a TrueCondition."""

    def test_returns_true(self, condition):
        # The empty dict is the minimal payload; the call must be truthy.
        assert condition({})
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
"""
from .core import (InconsistentTableError,
ParameterError,
NoType, StrType, NumType, FloatType, IntType, AllType,
Column,
BaseInputter, ContinuationLinesInputter,
BaseHeader,
BaseData,
BaseOutputter, TableOutputter,
BaseReader,
BaseSplitter, DefaultSplitter, WhitespaceSplitter,
convert_numpy,
masked
)
from .basic import (Basic, BasicHeader, BasicData,
Rd | b,
Csv,
Tab,
NoHeader,
CommentedHeader)
from .fastbasic import (FastBasic,
FastCsv,
FastTab,
FastNoHeader,
FastCommented | Header,
FastRdb)
from .cds import Cds
from .ecsv import Ecsv
from .latex import Latex, AASTex, latexdicts
from .html import HTML
from .ipac import Ipac
from .daophot import Daophot
from .sextractor import SExtractor
from .fixedwidth import (FixedWidth, FixedWidthNoHeader,
FixedWidthTwoLine, FixedWidthSplitter,
FixedWidthHeader, FixedWidthData)
from .rst import RST
from .ui import (set_guess, get_reader, read, get_writer, write, get_read_trace)
from . import connect
|
import numpy as np
import matplotlib.pyplot as pl
def tvbox(box=(1,1), xcen=0, ycen=0, center=None, **kwargs):
    """
    Draw a rectangular box on an image.

    (The previous docstring said "draw a circle" and documented a
    nonexistent ``radius`` parameter; the function has always plotted
    a closed rectangle.)

    Parameters
    ----------
    box : 2-sequence
        Half-width and half-height of the box: the rectangle spans
        ``xcen +/- box[0]`` horizontally and ``ycen +/- box[1]`` vertically.
    xcen, ycen : float
        Center of the box in data coordinates.
    center : tuple, optional
        Center given in (Y, X) order; when supplied it overrides
        ``xcen``/``ycen``.
    **kwargs
        Forwarded to ``matplotlib.pyplot.plot`` (color, linewidth, ...).
    """
    if center is not None:
        # center is (Y, X): row first, column second.
        xcen = center[1]
        ycen = center[0]
    # Trace the four corners and close the loop back to the first corner.
    x = [xcen-box[0], xcen+box[0], xcen+box[0], xcen-box[0], xcen-box[0]]
    y = [ycen-box[1], ycen-box[1], ycen+box[1], ycen+box[1], ycen-box[1]]
    pl.plot(x, y, **kwargs)
|
# @file plugin.py
#
# Connect Zen Coding to Pluma.
#
# Adapted to pluma by Joao Manoel (joaomanoel7@gmail.com)
#
# Original Author Franck Marcia (franck.marcia@gmail.com)
#
import pluma, gobject, gtk, os
from zen_editor import ZenEditor
# XML UI definition merged into Pluma's menu bar: adds a "Zen Coding"
# submenu under Edit whose items are bound, by action name, to the
# entries registered in ZenCodingPlugin.activate().
zencoding_ui_str = """
<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_5">
<menu action="ZenCodingMenuAction">
<menuitem name="ZenCodingExpand" action="ZenCodingExpandAction"/>
<menuitem name="ZenCodingExpandW" action="ZenCodingExpandWAction"/>
<menuitem name="ZenCodingWrap" action="ZenCodingWrapAction"/>
<separator/>
<menuitem name="ZenCodingInward" action="ZenCodingInwardAction"/>
<menuitem name="ZenCodingOutward" action="ZenCodingOutwardAction"/>
<menuitem name="ZenCodingMerge" action="ZenCodingMergeAction"/>
<separator/>
<menuitem name="ZenCodingPrev" action="ZenCodingPrevAction"/>
<menuitem name="ZenCodingNext" action="ZenCodingNextAction"/>
<separator/>
<menuitem name="ZenCodingRemove" action="ZenCodingRemoveAction"/>
<menuitem name="ZenCodingSplit" action="ZenCodingSplitAction"/>
<menuitem name="ZenCodingComment" action="ZenCodingCommentAction"/>
</menu>
</placeholder>
</menu>
</menubar>
</ui>
"""
class ZenCodingPlugin(pluma.Plugin):
    """A Pluma plugin to implement Zen Coding's HTML and CSS shorthand expander."""

    def activate(self, window):
        """Wire the Zen Coding actions and menu into *window*'s UI."""
        # (action name, stock id, label, accelerator, tooltip, callback)
        # tuples consumed by gtk.ActionGroup.add_actions below.
        actions = [
            ('ZenCodingMenuAction', None, '_Zen Coding', None, "Zen Coding tools", None),
            ('ZenCodingExpandAction', None, '_Expand abbreviation', '<Ctrl>E', "Expand abbreviation to raw HTML/CSS", self.expand_abbreviation),
            ('ZenCodingExpandWAction', None, 'E_xpand dynamic abbreviation...', '<Ctrl><Alt>E', "Dynamically expand abbreviation as you type", self.expand_with_abbreviation),
            ('ZenCodingWrapAction', None, '_Wrap with abbreviation...', '<Ctrl><Shift>E', "Wrap with code expanded from abbreviation", self.wrap_with_abbreviation),
            ('ZenCodingInwardAction', None, 'Balance tag _inward', '<Ctrl><Alt>I', "Select inner tag's content", self.match_pair_inward),
            ('ZenCodingOutwardAction', None, 'Balance tag _outward', '<Ctrl><Alt>O', "Select outer tag's content", self.match_pair_outward),
            ('ZenCodingMergeAction', None, '_Merge lines', '<Ctrl><Alt>M', "Merge all lines of the current selection", self.merge_lines),
            ('ZenCodingPrevAction', None, '_Previous edit point', '<Alt>Left', "Place the cursor at the previous edit point", self.prev_edit_point),
            ('ZenCodingNextAction', None, '_Next edit point', '<Alt>Right', "Place the cursor at the next edit point", self.next_edit_point),
            ('ZenCodingRemoveAction', None, '_Remove tag', '<Ctrl><Alt>R', "Remove a tag", self.remove_tag),
            ('ZenCodingSplitAction', None, 'Split or _join tag', '<Ctrl><Alt>J', "Toggle between single and double tag", self.split_join_tag),
            ('ZenCodingCommentAction', None, 'Toggle _comment', '<Ctrl><Alt>C', "Toggle an XML or HTML comment", self.toggle_comment)
        ]
        # Per-window state (action group + merged UI id) is stashed on the
        # window itself so deactivate() can retrieve and undo it later.
        windowdata = dict()
        window.set_data("ZenCodingPluginDataKey", windowdata)
        windowdata["action_group"] = gtk.ActionGroup("PlumaZenCodingPluginActions")
        windowdata["action_group"].add_actions(actions, window)
        manager = window.get_ui_manager()
        manager.insert_action_group(windowdata["action_group"], -1)
        # Merge the menu XML; the returned id is needed to remove it again.
        windowdata["ui_id"] = manager.add_ui_from_string(zencoding_ui_str)
        # NOTE(review): the same dict is stored under a second key here, but
        # deactivate() only reads "ZenCodingPluginDataKey" — confirm whether
        # "ZenCodingPluginInfo" is still needed.
        window.set_data("ZenCodingPluginInfo", windowdata)
        # NOTE(review): self.editor is (re)assigned on every window
        # activation, so one ZenEditor is shared across windows — confirm
        # this is intentional.
        self.editor = ZenEditor()
        error = self.editor.get_user_settings_error()
        if error:
            # Surface user-settings parse errors in a modal dialog; the
            # plugin still activates, just without user settings.
            md = gtk.MessageDialog(window, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR,
                gtk.BUTTONS_CLOSE, "There is an error in user settings:")
            message = "{0} on line {1} at character {2}\n\nUser settings will not be available."
            md.set_title("Zen Coding error")
            md.format_secondary_text(message.format(error['msg'], error['lineno'], error['offset']))
            md.run()
            md.destroy()

    def deactivate(self, window):
        """Remove this plugin's UI and actions from *window*."""
        windowdata = window.get_data("ZenCodingPluginDataKey")
        manager = window.get_ui_manager()
        manager.remove_ui(windowdata["ui_id"])
        manager.remove_action_group(windowdata["action_group"])

    def update_ui(self, window):
        """Enable the actions only when an editable view is focused."""
        view = window.get_active_view()
        windowdata = window.get_data("ZenCodingPluginDataKey")
        windowdata["action_group"].set_sensitive(bool(view and view.get_editable()))

    # --- Menu callbacks: each delegates straight to the ZenEditor ---

    def expand_abbreviation(self, action, window):
        self.editor.expand_abbreviation(window)

    def expand_with_abbreviation(self, action, window):
        self.editor.expand_with_abbreviation(window)

    def wrap_with_abbreviation(self, action, window):
        self.editor.wrap_with_abbreviation(window)

    def match_pair_inward(self, action, window):
        self.editor.match_pair_inward(window)

    def match_pair_outward(self, action, window):
        self.editor.match_pair_outward(window)

    def merge_lines(self, action, window):
        self.editor.merge_lines(window)

    def prev_edit_point(self, action, window):
        self.editor.prev_edit_point(window)

    def next_edit_point(self, action, window):
        self.editor.next_edit_point(window)

    def remove_tag(self, action, window):
        self.editor.remove_tag(window)

    def split_join_tag(self, action, window):
        self.editor.split_join_tag(window)

    def toggle_comment(self, action, window):
        self.editor.toggle_comment(window)
|
"""
This script is an example of how to use the random gaussian noise generator (type 2)
module. |br|
In this example only one signal is generated.
Both the minimum and the maximum frequency component in the signal is regulated.
After the generation, spectrum fo the signal is a | nalyzed with an Welch analysis
and ploted.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
1.0 | 20-JAN-2016 : * Version | 1.0 released. |br|
*License*:
BSD 2-Clause
"""
import rxcs
import scipy.signal as scsig
import matplotlib.pyplot as plt
def _gaussNoise2_ex1():
    """Generate one band-limited Gaussian noise signal and plot its Welch PSD."""
    # The generator under test
    noiseGen = rxcs.sig.gaussNoise2()

    # Generator configuration
    noiseGen.fR = 100e6     # Representation sampling frequency [100 MHz]
    noiseGen.tS = 1         # Signal time [1 sec]
    noiseGen.fMin = 100e3   # Minimum frequency component [100 kHz]
    noiseGen.fMax = 200e3   # Maximum frequency component [200 kHz]

    # Generate and pick the (only) signal
    noiseGen.run()
    vSig = noiseGen.mSig[0, :]

    # -----------------------------------------------------------------
    # Welch PSD estimate of the generated signal
    (vFreq, vPsd) = scsig.welch(vSig, fs=noiseGen.fR,
                                nperseg=100*1024, noverlap=100*512)

    # Plot the spectrum
    hFig = plt.figure(1)
    hAx = hFig.add_subplot(111)
    hAx.grid(True)
    hAx.set_title('Spectrum of the signal (psd)')
    hAx.set_xlabel('Frequency [kHz]')
    hAx.plot(vFreq/1e3, vPsd, '-')
    hAx.set_xlim(0, 1e3)
    plt.show(block=True)


# =====================================================================
# Trigger when run as a script
# =====================================================================
if __name__ == '__main__':
    _gaussNoise2_ex1()
|
ings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pontoon'
copyright = u'2015, Matjaž Horvat, Mozilla Foundation'
author = u'Matjaž Horvat, Mozilla Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use | .
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the docum | entation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pontoondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pontoon.tex', u'Pontoon Documentation',
u'Matjaž Horvat, Mozilla Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pontoon', u'Pontoon Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pontoon', u'Pontoon Documentation',
author, 'Pontoon', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footno |
# Package public surface: re-export the Postgres helper and the shared
# enums so callers can import them directly from the package root.
from .db_utils import PostgresController
from .enums import Action, Change

# Explicit public API for ``from <package> import *``.
__all__ = ['PostgresController', 'Action', 'Change']
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute.security_groups import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
def not_existing_id():
    """Return a group id that should not match any existing security group."""
    # Neutron identifies groups by UUID, nova-network by small integers.
    if not CONF.service_available.neutron:
        return data_utils.rand_int_id(start=999)
    return data_utils.rand_uuid()
class SecurityGroupRulesNegativeTestJSON(base.BaseSecurityGroupsTest):
    """Negative tests for security group rule creation and deletion."""

    @classmethod
    def resource_setup(cls):
        super(SecurityGroupRulesNegativeTestJSON, cls).resource_setup()
        # Shorthand used by every test in this class.
        cls.client = cls.security_groups_client

    def _assert_rule_create_fails(self, expected_exc, group_id,
                                  ip_protocol='tcp', from_port=22, to_port=22):
        # Shared driver: creating a rule with these arguments must raise.
        self.assertRaises(expected_exc,
                          self.client.create_security_group_rule,
                          group_id, ip_protocol, from_port, to_port)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_non_existent_id(self):
        # A rule cannot be attached to a parent group that does not exist.
        self._assert_rule_create_fails(exceptions.NotFound, not_existing_id())

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_id(self):
        # A parent group id that is not an integer must be rejected.
        self._assert_rule_create_fails(exceptions.BadRequest,
                                       data_utils.rand_name('non_int_id'))

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_duplicate(self):
        # Adding the exact same rule to a group twice must fail.
        resp, sg = self.create_security_group()
        group_id = sg['id']
        resp, rule = self.client.create_security_group_rule(
            group_id, 'tcp', 22, 22)
        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
        self.assertEqual(200, resp.status)
        self._assert_rule_create_fails(exceptions.BadRequest, group_id)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_ip_protocol(self):
        # An unknown ip_protocol string must be rejected.
        resp, sg = self.create_security_group()
        self._assert_rule_create_fails(
            exceptions.BadRequest, sg['id'],
            ip_protocol=data_utils.rand_name('999'))

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_from_port(self):
        # A from_port above the valid port range (> 65535) must be rejected.
        resp, sg = self.create_security_group()
        self._assert_rule_create_fails(
            exceptions.BadRequest, sg['id'],
            from_port=data_utils.rand_int_id(start=65536))

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_to_port(self):
        # A to_port above the valid port range (> 65535) must be rejected.
        resp, sg = self.create_security_group()
        self._assert_rule_create_fails(
            exceptions.BadRequest, sg['id'],
            to_port=data_utils.rand_int_id(start=65536))

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_create_security_group_rule_with_invalid_port_range(self):
        # from_port greater than to_port is an invalid range.
        resp, sg = self.create_security_group()
        self._assert_rule_create_fails(
            exceptions.BadRequest, sg['id'], from_port=22, to_port=21)

    @test.attr(type=['negative', 'smoke'])
    @test.services('network')
    def test_delete_security_group_rule_with_non_existent_id(self):
        # Deleting a rule id that was never created must raise NotFound.
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_security_group_rule,
                          not_existing_id())
class SecurityGroupRulesNegativeTestXML(SecurityGroupRulesNegativeTestJSON):
    # Re-run the whole JSON negative suite through the XML API interface.
    _interface = 'xml'
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Copyright (c) 2011 Tyl | er Kenendy <tk@tkte.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, i | ncluding without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class Topping(object):
    """Abstract base for toppings.

    Subclasses declare PROVIDES/DEPENDS and implement :meth:`act`.
    """

    # What this topping contributes (set by subclasses; None in the base).
    PROVIDES = None
    # What must already be available before this topping runs.
    DEPENDS = None

    @staticmethod
    def act(aggregate, classloader, verbose=False):
        """Perform the topping's work; subclasses must override."""
        raise NotImplementedError()
|
# Example: list the available load-balancer policy types on AWS ELB
# using Apache Libcloud's load-balancer driver.
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver

# NOTE(review): placeholder credentials -- replace with real keys before running.
ACCESS_ID = 'your access id'
SECRET_KEY = 'your secret key'

# Resolve the ELB driver class and instantiate it with the credentials.
cls = get_driver(Provider.ELB)
driver = cls(key=ACCESS_ID, secret=SECRET_KEY)

# ex_list_balancer_policy_types() is an ELB-specific extension method.
print(driver.ex_list_balancer_policy_types())
|
"""Support for Synology DSM cameras."""
from typing import Dict
from synology_dsm.api.surveillance_station import SynoSurveillanceStation
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import SynologyDSMEntity
from .const import (
DOMAIN,
ENTITY_CLASS,
ENTITY_ENABLE,
ENTITY_ICON,
ENTITY_NAME,
ENTITY_UNIT,
SYNO_API,
)
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up Synology Surveillance Station cameras from a config entry."""
    api = hass.data[DOMAIN][entry.unique_id][SYNO_API]

    # Bail out early when this NAS does not expose the camera API.
    if SynoSurveillanceStation.CAMERA_API_KEY not in api.dsm.apis:
        return

    station = api.surveillance_station
    # The Surveillance Station client is synchronous; refresh it off-loop.
    await hass.async_add_executor_job(station.update)

    async_add_entities(
        [SynoDSMCamera(api, cam) for cam in station.get_all_cameras()]
    )
class SynoDSMCamera(SynologyDSMEntity, Camera):
    """Representation of a Synology camera."""

    def __init__(self, api, camera):
        """Initialize a Synology camera.

        :param api: API wrapper holding the DSM connection state.
        :param camera: Surveillance Station camera object to expose.
        """
        super().__init__(
            api,
            f"{SynoSurveillanceStation.CAMERA_API_KEY}:{camera.id}",
            {
                ENTITY_NAME: camera.name,
                ENTITY_CLASS: None,
                ENTITY_ICON: None,
                ENTITY_ENABLE: True,
                ENTITY_UNIT: None,
            },
        )
        # Camera object read by the properties and methods below.
        self._camera = camera

    @property
    def device_info(self) -> Dict[str, any]:
        """Return the device information."""
        return {
            # NAS serial + camera id uniquely identify this camera.
            "identifiers": {(DOMAIN, self._api.information.serial, self._camera.id)},
            "name": self.name,
            "model": self._camera.model,
            # Attach the camera under the NAS device in the registry.
            "via_device": (DOMAIN, self._api.information.serial),
        }

    @property
    def supported_features(self) -> int:
        """Return supported features of this camera."""
        return SUPPORT_STREAM

    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return self._camera.is_recording

    @property
    def motion_detection_enabled(self):
        """Return the camera motion detection status."""
        return self._camera.is_motion_detection_enabled

    def camera_image(self) -> bytes:
        """Return bytes of camera image."""
        # Synchronous snapshot fetch via the Surveillance Station API.
        return self._api.surveillance_station.get_camera_image(self._camera.id)

    async def stream_source(self) -> str:
        """Return the source of the stream."""
        return self._camera.live_view.rtsp

    def enable_motion_detection(self):
        """Enable motion detection in the camera."""
        self._api.surveillance_station.enable_motion_detection(self._camera.id)

    def disable_motion_detection(self):
        """Disable motion detection in camera."""
        self._api.surveillance_station.disable_motion_detection(self._camera.id)
|
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network"))
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
"""Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools
    def _provision_tenant_private_network(self, context, tenant_id):
        """Create a tenant private network/subnets.

        Creates one network plus one subnet per available default subnet
        pool and returns the list of created subnets. Expected failures
        are cleaned up and translated to AutoAllocationFailure; anything
        unexpected is wrapped in UnknownProvisioningError so the caller
        can decide how to unwind.
        """
        network = None
        try:
            network_args = {
                'name': 'auto_allocated_network',
                # Kept admin-down until the whole topology is committed
                # (flipped up in _save).
                'admin_state_up': False,
                'tenant_id': tenant_id,
                'shared': False
            }
            network = p_utils.create_network(
                self.core_plugin, context, {'network': network_args})
            subnets = []
            for pool in self._get_supported_subnetpools(context):
                subnet_args = {
                    'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
                    'network_id': network['id'],
                    'tenant_id': tenant_id,
                    'ip_version': pool['ip_version'],
                    'subnetpool_id': pool['id'],
                }
                subnets.append(p_utils.create_subnet(
                    self.core_plugin, context, {'subnet': subnet_args}))
            return subnets
        except (c_exc.SubnetAllocationError, ValueError,
                n_exc.BadRequest, n_exc.NotFound) as e:
            # Known, user-actionable failures: log, undo what was made,
            # and surface a uniform AutoAllocationFailure.
            LOG.error("Unable to auto allocate topology for tenant "
                      "%(tenant_id)s due to missing or unmet "
                      "requirements. Reason: %(reason)s",
                      {'tenant_id': tenant_id, 'reason': e})
            if network:
                self._cleanup(context, network['id'])
            raise exceptions.AutoAllocationFailure(
                reason=_("Unable to provide tenant private network"))
        except Exception as e:
            # Unknown failure: pass the partial state up instead of
            # guessing how to clean it.
            network_id = network['id'] if network else None
            raise exceptions.UnknownProvisioningError(e, network_id=network_id)
    def _provision_external_connectivity(
            self, context, default_external_network, subnets, tenant_id):
        """Uplink tenant subnet(s) to external network.

        Creates a router gatewayed on the default external network and
        attaches every given subnet; returns the router. BadRequest is
        cleaned up and translated to AutoAllocationFailure; any other
        error becomes UnknownProvisioningError.
        """
        router_args = {
            'name': 'auto_allocated_router',
            l3_apidef.EXTERNAL_GW_INFO: {
                'network_id': default_external_network},
            'tenant_id': tenant_id,
            'admin_state_up': True
        }
        router = None
        attached_subnets = []
        try:
            router = self.l3_plugin.create_router(
                context, {'router': router_args})
            for subnet in subnets:
                self.l3_plugin.add_router_interface(
                    context, router['id'], {'subnet_id': subnet['id']})
                # Track only interfaces actually attached so cleanup does
                # not try to detach ones that never made it.
                attached_subnets.append(subnet)
            return router
        except n_exc.BadRequest as e:
            LOG.error("Unable to auto allocate topology for tenant "
                      "%(tenant_id)s because of router errors. "
                      "Reason: %(reason)s",
                      {'tenant_id': tenant_id, 'reason': e})
            router_id = router['id'] if router else None
            # NOTE(review): assumes `subnets` is non-empty — confirm callers.
            self._cleanup(context,
                          network_id=subnets[0]['network_id'],
                          router_id=router_id, subnets=attached_subnets)
            raise exceptions.AutoAllocationFailure(
                reason=_("Unable to provide external connectivity"))
        except Exception as e:
            router_id = router['id'] if router else None
            raise exceptions.UnknownProvisioningError(
                e, network_id=subnets[0]['network_id'],
                router_id=router_id, subnets=subnets)
    def _save(self, context, tenant_id, network_id, router_id, subnets):
        """Save auto-allocated topology, or revert in case of DB errors.

        Returns the network id to expose to the caller — ours on
        success, or the concurrent winner's on a duplicate-entry race.
        """
        try:
            auto_allocate_obj.AutoAllocatedTopology(
                context, project_id=tenant_id, network_id=network_id,
                router_id=router_id).create()
            # The network was created admin-down; enable it only once the
            # topology record is durably stored.
            self.core_plugin.update_network(
                context, network_id,
                {'network': {'admin_state_up': True}})
        except obj_exc.NeutronDbObjectDuplicateEntry:
            # A concurrent request won the race: tear down our duplicate
            # resources and return the winner's network instead.
            LOG.debug("Multiple auto-allocated networks detected for "
                      "tenant %s. Attempting clean up for network %s "
                      "and router %s.",
                      tenant_id, network_id, router_id)
            self._cleanup(
                context, network_id=network_id,
                router_id=router_id, subnets=subnets)
            network_id = self._get_auto_allocated_network(context, tenant_id)
        except Exception as e:
            # Unknown failure: hand the partial state to the caller.
            raise exceptions.UnknownProvisioningError(
                e, network_id=network_id,
                router_id=router_id, subnets=subnets)
        return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
|
from __future__ import absolute_import
import copy
from six.moves.urllib.parse import urlencode
from sentry.models import GroupHash
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.factories import DEFAULT_EVENT_DATA
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.eventstream.snuba import SnubaEventStream
class GroupHashesTest(APITestCase, SnubaTestCase):
    """Integration tests for the issue group hashes endpoint."""

    def test_only_return_latest_event(self):
        """Two events with one fingerprint yield one hash whose
        latestEvent is the most recently stored event."""
        self.login_as(user=self.user)
        min_ago = iso_format(before_now(minutes=1))
        two_min_ago = iso_format(before_now(minutes=2))
        new_event_id = "b" * 32
        # Store the older event first; both share fingerprint 'group-1'.
        old_event = self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "message",
                "timestamp": two_min_ago,
                "stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"]),
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        )
        new_event = self.store_event(
            data={
                "event_id": new_event_id,
                "message": "message",
                "timestamp": min_ago,
                "stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"]),
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        )
        # Same fingerprint => both events land in the same group.
        assert new_event.group_id == old_event.group_id
        url = u"/api/0/issues/{}/hashes/".format(new_event.group_id)
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["latestEvent"]["eventID"] == new_event_id

    def test_return_multiple_hashes(self):
        """After merging two groups the endpoint lists both hashes."""
        self.login_as(user=self.user)
        min_ago = iso_format(before_now(minutes=1))
        two_min_ago = iso_format(before_now(minutes=2))
        event1 = self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "message",
                "timestamp": two_min_ago,
                "stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"]),
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        )
        event2 = self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "message2",
                "timestamp": min_ago,
                "fingerprint": ["group-2"],
            },
            project_id=self.project.id,
        )
        # Merge the events
        eventstream = SnubaEventStream()
        state = eventstream.start_merge(self.project.id, [event2.group_id], event1.group_id)
        eventstream.end_merge(state)
        url = u"/api/0/issues/{}/hashes/".format(event1.group_id)
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert len(response.data) == 2
        primary_hashes = [hash["id"] for hash in response.data]
        assert primary_hashes == [event2.get_primary_hash(), event1.get_primary_hash()]

    def test_unmerge(self):
        """DELETE with explicit hash ids must be accepted (202)."""
        self.login_as(user=self.user)
        group = self.create_group()
        hashes = [
            GroupHash.objects.create(project=group.project, group=group, hash=hash)
            for hash in ["a" * 32, "b" * 32]
        ]
        url = "?".join(
            [
                u"/api/0/issues/{}/hashes/".format(group.id),
                urlencode({"id": [h.hash for h in hashes]}, True),
            ]
        )
        response = self.client.delete(url, format="json")
        assert response.status_code == 202, response.content
|
# -*- coding: utf-8 -*-
# Octopasty is an Asterisk AMI proxy
# Copyright (C) 2011 Jean Schurger <jean@schurger.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from hashlib import sha1, md5
from time import time
from random import randint
from utils import Packet, bigtime, tmp_debug
from asterisk import Success, Error, Goodbye
# NOTE(review): actions the proxy handles internally rather than forwarding
# upstream; only 'logoff' today. Its consumer is not visible in this chunk —
# confirm usage before changing.
KEEP_INTERNAL = ['logoff']
def handle_action(self, packet):
    """Dispatch an auth-related AMI action packet to its handler.

    Error/Success come back from an upstream AMI; Challenge/Login/Logoff
    come from a connecting client.
    """
    action = packet.packet
    name = action.name.lower()
    if name == 'error':
        # needs to handle errors, may be a timeout before next try
        login_failed_on_ami(self, packet.emiter)
    if name == 'success':
        logged_on_ami(self, packet.emiter)
    if name == 'challenge':
        # Parameter keys are case-insensitive: locate the 'authtype' key.
        # (List comprehension instead of filter() so indexing also works
        # on Python 3.)
        keys = [key for key in action.parameters.keys()
                if key.lower() == 'authtype']
        if keys:
            challenge(self, packet.emiter, packet.locked,
                      action.parameters.get(keys[0]).lower())
    if name == 'login':
        login = dict()
        for k in ['Username', 'Secret', 'Events', 'Key']:
            v = action.parameters.get(k) or \
                action.parameters.get(k.lower()) or \
                action.parameters.get(k.upper())
            login[k.lower()] = v
        events = login.get('events')
        # BUG FIX: the old `(...) and False or True` expression always
        # evaluated to True (False is falsy, so `or True` always won);
        # 'Events: off' must disable event forwarding.
        wants_events = not (events and events.lower() == 'off')
        auth_user(self, packet.emiter, packet.locked, login.get('username'),
                  login.get('secret') or login.get('key'),
                  wants_events)
    if name == 'logoff':
        logoff_user(self, packet)
def auth_user(self, emiter, locked, username, secret, wants_events):
    """Authenticate a client against the configured users.

    `secret` is either a plain password (compared against the stored
    sha1 hex digest) or, after an md5 challenge, the md5 of the
    challenge key concatenated with the stored password value.
    On success the client is re-keyed and bound to its server(s); on
    failure an Error is sent and the client is disconnected.
    NOTE(review): sha1()/md5() are fed str objects, so this assumes
    Python 2 — confirm before porting.
    """
    login_sucessfull = False
    client = self.clients.get(emiter)
    if username in self.config.get('users'):
        # Stored 'password' is a sha1 hex digest, not plain text.
        hashed = self.config.get('users').get(username).get('password')
        if client.authtype is None:
            # Plain login: hash the supplied secret and compare.
            if sha1(secret).hexdigest() == hashed:
                login_sucessfull = True
        elif client.authtype[0] == 'md5':
            # Challenge login: authtype was set by challenge() and holds
            # ('md5', challenge_key).
            key = client.authtype[1]
            _md5 = md5(key)
            _md5.update(self.config.get('users').get(username).get('password'))
            if secret == _md5.hexdigest():
                login_sucessfull = True
    if login_sucessfull:
        # Re-key the client under a unique '<username>_<bigtime>' id.
        old_id = client.id
        client.id = '%s_%d' % (username, bigtime())
        self.clients.pop(old_id)
        self.clients.update({client.id: client})
        client.logged = True
        # 'servers' is a comma-separated list in the user's config.
        _servers = self.config.get('users').get(username).get('servers')
        _servers = [s.strip() for s in _servers.split(',')]
        if len(_servers) == 1:
            client.binded_server = _servers[0]
        else:
            client.multiple_servers = _servers
        client.wants_events = wants_events
        response = Success(parameters=dict(
            Message='Authentication accepted'))
        p = dict(emiter='__internal__',
                 locked=locked,
                 timestamp=time(),
                 packet=response,
                 dest=client.id)
        tmp_debug("AUTH", "'%s' logged successfully" % username)
        # Success is routed through the proxy's outbound queue.
        self.out_queue.put(Packet(p))
    else:
        response = Error(parameters=dict(Message='Authentication failed'))
        p = dict(emiter='__internal__',
                 locked=locked,
                 timestamp=time(),
                 packet=response,
                 dest=client.id)
        # Failure is sent straight to the client, then it is dropped.
        client.send(Packet(p))
        tmp_debug("AUTH", "'%s' failed to login" % username)
        client.disconnect()
def logoff_user(self, packet):
    """Acknowledge a Logoff action and drop the client connection."""
    client = self.clients.get(packet.emiter)
    farewell = Goodbye(parameters=dict(Message="Don't panic."))
    client.send(Packet(dict(emiter='__internal__',
                            locked=packet.locked,
                            timestamp=time(),
                            packet=farewell,
                            dest=client.id)))
    tmp_debug("AUTH", "'%s' logout" % packet.emiter[:packet.emiter.find('_')])
    client.disconnect()
def login_failed_on_ami(self, _ami):
    """Log a failed login on the named upstream AMI connection."""
    tmp_debug("AUTH", "Login failed on '%s'" % _ami)
def logged_on_ami(self, _ami):
    """Mark the named upstream AMI connection as authenticated."""
    tmp_debug("AUTH", "Logged on '%s'" % _ami)
    self.amis.get(_ami).logged = True
def challenge(self, emiter, locked, authtype):
    """Answer a Challenge action from a client.

    For 'md5' a random numeric challenge key is generated, sent back,
    and remembered on the client so auth_user can verify the md5 reply.
    Any other auth type gets an Error reply.
    """
    client = self.clients.get(emiter)
    if authtype == 'md5':
        key = str(randint(100000000, 999999999))
        response = Success(parameters=dict(
            Challenge='%s' % key))
        tmp_debug("AUTH", "'%s' asked for '%s' challenge, sent '%s'" % \
                  (emiter, authtype, key))
        # BUG FIX: this assignment used to run unconditionally after the
        # send, raising NameError ('key' undefined) for unsupported auth
        # types. Only a successful md5 challenge stores a pending key.
        client.authtype = (authtype, key)
    else:
        response = Error(parameters=dict(
            Message='Authentication type not supported'))
    p = dict(emiter='__internal__',
             locked=locked,
             timestamp=time(),
             packet=response,
             dest=client.id)
    client.send(Packet(p))
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Ryan Roden-Corrent (rcorre) <ryan@rcorre.net>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test the keyhint widget."""
import pytest
from qutebrowser.misc import objects
from qutebrowser.misc.keyhintwidget import KeyHintView
def expected_text(*args):
    """Helper to format text we expect the KeyHintView to generate.

    Args:
        args: One tuple for each row in the expected output.
        Tuples are of the form: (prefix, color, suffix, command).
    """
    row_template = ("<tr>"
                    "<td>{}</td>"
                    "<td style='color: {}'>{}</td>"
                    "<td style='padding-left: 2ex'>{}</td>"
                    "</tr>")
    rows = ''.join(row_template.format(*group) for group in args)
    return '<table>' + rows + '</table>'
@pytest.fixture
def keyhint(qtbot, config_stub, key_config_stub):
    """Fixture to initialize a KeyHintView."""
    # The suffix color is read by the view when rendering hints; tests
    # assert on 'yellow' unless they override it.
    config_stub.val.colors.keyhint.suffix.fg = 'yellow'
    keyhint = KeyHintView(0, None)
    qtbot.add_widget(keyhint)
    # A freshly created view must start with no hint text.
    assert keyhint.text() == ''
    return keyhint
def test_show_and_hide(qtbot, keyhint):
    """Updating with an empty keystring hides a shown keyhint widget."""
    # Wait for both the geometry-update signal and actual exposure so the
    # widget is really on screen before we hide it.
    with qtbot.waitSignal(keyhint.update_geometry):
        with qtbot.waitExposed(keyhint):
            keyhint.show()
    keyhint.update_keyhint('normal', '')
    assert not keyhint.isVisible()
def test_position_change(keyhint, config_stub):
    """With the statusbar on top, only the bottom corners are rounded."""
    config_stub.val.statusbar.position = 'top'
    css = keyhint.styleSheet()
    assert 'border-bottom-right-radius' in css
    assert 'border-top-right-radius' not in css
def test_suggestions(keyhint, config_stub):
    """Hints for a prefix merge user and default bindings, sorted."""
    user_bindings = {'normal': {
        'aa': 'message-info cmd-aa',
        'ab': 'message-info cmd-ab',
        'aba': 'message-info cmd-aba',
        'abb': 'message-info cmd-abb',
        'xd': 'message-info cmd-xd',
        'xe': 'message-info cmd-xe',
    }}
    fallback_bindings = {'normal': {
        'ac': 'message-info cmd-ac',
    }}
    config_stub.val.bindings.default = fallback_bindings
    config_stub.val.bindings.commands = user_bindings
    keyhint.update_keyhint('normal', 'a')
    assert keyhint.text() == expected_text(
        ('a', 'yellow', 'a', 'message-info cmd-aa'),
        ('a', 'yellow', 'b', 'message-info cmd-ab'),
        ('a', 'yellow', 'ba', 'message-info cmd-aba'),
        ('a', 'yellow', 'bb', 'message-info cmd-abb'),
        ('a', 'yellow', 'c', 'message-info cmd-ac'))
def test_suggestions_special(keyhint, config_stub):
    """Special-key prefixes like <Ctrl-C> are hinted correctly."""
    user_bindings = {'normal': {
        '<Ctrl-C>a': 'message-info cmd-Cca',
        '<Ctrl-C><Ctrl-C>': 'message-info cmd-CcCc',
        '<Ctrl-C><Ctrl-X>': 'message-info cmd-CcCx',
        'cbb': 'message-info cmd-cbb',
        'xd': 'message-info cmd-xd',
        'xe': 'message-info cmd-xe',
    }}
    fallback_bindings = {'normal': {
        '<Ctrl-C>c': 'message-info cmd-Ccc',
    }}
    config_stub.val.bindings.default = fallback_bindings
    config_stub.val.bindings.commands = user_bindings
    keyhint.update_keyhint('normal', '<Ctrl+c>')
    assert keyhint.text() == expected_text(
        ('<Ctrl+c>', 'yellow', 'a', 'message-info cmd-Cca'),
        ('<Ctrl+c>', 'yellow', 'c', 'message-info cmd-Ccc'),
        ('<Ctrl+c>', 'yellow', '<Ctrl+c>',
         'message-info cmd-CcCc'),
        ('<Ctrl+c>', 'yellow', '<Ctrl+x>',
         'message-info cmd-CcCx'))
def test_suggestions_with_count(keyhint, config_stub, monkeypatch, stubs):
    """A count prefix filters out commands that take no count."""
    monkeypatch.setattr(objects, 'commands', {
        'foo': stubs.FakeCommand(name='foo', takes_count=lambda: False),
        'bar': stubs.FakeCommand(name='bar', takes_count=lambda: True),
    })
    keymap = {'normal': {'aa': 'foo', 'ab': 'bar'}}
    config_stub.val.bindings.default = keymap
    config_stub.val.bindings.commands = keymap
    keyhint.update_keyhint('normal', '2a')
    # Only 'bar' accepts a count, so only 'ab' may be hinted.
    assert keyhint.text() == expected_text(('a', 'yellow', 'b', 'bar'))
def test_special_bindings(keyhint, config_stub):
    """A prefix of '<' doesn't suggest special keys."""
    mappings = {'normal': {
        '<a': 'message-info cmd-<a',
        '<b': 'message-info cmd-<b',
        '<ctrl-a>': 'message-info cmd-ctrla',
    }}
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = mappings
    keyhint.update_keyhint('normal', '<')
    assert keyhint.text() == expected_text(
        ('<', 'yellow', 'a', 'message-info cmd-<a'),
        ('<', 'yellow', 'b', 'message-info cmd-<b'))
def test_color_switch(keyhint, config_stub):
    """The keyhint suffix color can be changed at runtime."""
    config_stub.val.colors.keyhint.suffix.fg = '#ABCDEF'
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = {'normal': {'aa': 'message-info cmd-aa'}}
    keyhint.update_keyhint('normal', 'a')
    assert keyhint.text() == expected_text(
        ('a', '#ABCDEF', 'a', 'message-info cmd-aa'))
def test_no_matches(keyhint, config_stub):
    """Without matching keystrings the widget stays empty and hidden."""
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = {'normal': {
        'aa': 'message-info cmd-aa',
        'ab': 'message-info cmd-ab',
    }}
    keyhint.update_keyhint('normal', 'z')
    assert not keyhint.text()
    assert not keyhint.isVisible()
@pytest.mark.parametrize('blacklist, expected', [
    (['ab*'], expected_text(('a', 'yellow', 'a', 'message-info cmd-aa'))),
    (['*'], ''),
])
def test_blacklist(keyhint, config_stub, blacklist, expected):
    """Blacklisted keychains aren't hinted."""
    config_stub.val.keyhint.blacklist = blacklist
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = {'normal': {
        'aa': 'message-info cmd-aa',
        'ab': 'message-info cmd-ab',
        'aba': 'message-info cmd-aba',
        'abb': 'message-info cmd-abb',
        'xd': 'message-info cmd-xd',
        'xe': 'message-info cmd-xe',
    }}
    keyhint.update_keyhint('normal', 'a')
    assert keyhint.text() == expected
def test_delay(qtbot, stubs, monkeypatch, config_stub, key_config_stub):
    """The popup timer is single-shot and honors keyhint.delay."""
    fake_timer = stubs.FakeTimer()
    monkeypatch.setattr(
        'qutebrowser.misc.keyhintwidget.usertypes.Timer',
        lambda *_: fake_timer)
    delay_ms = 200
    config_stub.val.keyhint.delay = delay_ms
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = {'normal': {'aa': 'message-info cmd-aa'}}
    keyhint = KeyHintView(0, None)
    keyhint.update_keyhint('normal', 'a')
    assert fake_timer.isSingleShot()
    assert fake_timer.interval() == delay_ms
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NetGen is a tool for financial network analysis
Copyright (C) 2013 Tarik Roukny (troukny@ulb.ac.be)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
# ==========================================
# Libraries and Packages
import networkx as nx
# ==========================================
class NetworkHandler:
    """Build a networkx graph from an edge list and save it as GEXF.

    Parameters mirror the NetGen UI: `is_directed`/`is_weighted` are the
    literal string 'on' when enabled; `out_node`, `in_node` and
    `edge_weight` are column indices into each row of `list_edges`.
    """

    # -------------------------------------------------------------
    #
    #   init (directory, name)
    #
    # -------------------------------------------------------------
    def __init__(self, directory, name,
                 list_edges, out_node, in_node,
                 is_directed, is_weighted, edge_weight):
        # Checkbox-style flags arrive as the string 'on' when enabled.
        if is_directed == 'on':
            self.G = nx.DiGraph()
        else:
            self.G = nx.Graph()
        self.directory = directory
        self.name = name
        self.list_edges = list_edges
        self.out_node_index = out_node
        self.in_node_index = in_node
        self.is_weighted = is_weighted
        self.edge_weight = edge_weight
        self.generate_network()
        self.save_network()

    # -------------------------------------------------------------
    #
    #   generate_network (data)
    #
    # -------------------------------------------------------------
    def generate_network(self):
        """Populate the graph from the edge list."""
        if self.is_weighted == 'on':
            for edge in self.list_edges:
                raw_weight = edge[self.edge_weight]
                # Non-numeric weight cells fall back to 0.0; the old bare
                # `except:` also swallowed KeyboardInterrupt and friends.
                try:
                    weight = float(raw_weight)
                except (TypeError, ValueError):
                    weight = 0.0
                # Keyword form works on networkx 1.x and 2.x; the old
                # positional attr-dict argument was removed in 2.0.
                self.G.add_edge(edge[self.out_node_index],
                                edge[self.in_node_index],
                                weight=weight)
        else:
            for edge in self.list_edges:
                self.G.add_edge(edge[self.out_node_index],
                                edge[self.in_node_index])

    # -------------------------------------------------------------
    #
    #   save_network ()
    #
    # -------------------------------------------------------------
    def save_network(self):
        """Write the graph to '<directory><name>.gexf'."""
        nx.write_gexf(self.G, self.directory + self.name + '.gexf')
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import uuid
from subprocess import call
from tests.common.test_vector import TestDimension
from tests.common.impala_test_suite import ImpalaTestSuite
# Number of tables to create per thread
NUM_TBLS_PER_THREAD = 10

# Each client will get a different test id.
# range() instead of the Python-2-only xrange(); both are iterables of
# the same ints, so unpacking into TestDimension is unchanged.
TEST_IDS = range(0, 10)
# Simple stress test for DDL operations. Attempts to create, cache,
# uncache, then drop many different tables in parallel.
class TestDdlStress(ImpalaTestSuite):
  """Simple stress test for DDL operations.

  Attempts to create, cache, uncache, then drop many different tables in
  parallel.
  """

  @classmethod
  def get_workload(cls):
    """Return the workload this suite belongs to (was misnamed 'self')."""
    return 'targeted-stress'

  @classmethod
  def add_test_dimensions(cls):
    super(TestDdlStress, cls).add_test_dimensions()
    # Fan the test out across all client test ids.
    cls.TestMatrix.add_dimension(TestDimension('test_id', *TEST_IDS))
    # Restrict to the default batch size on uncompressed text tables.
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('exec_option')['batch_size'] == 0)
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('table_format').file_format == 'text' and
        v.get_value('table_format').compression_codec == 'none')

  @pytest.mark.stress
  def test_create_cache_many_tables(self, vector):
    """Create, populate, cache/uncache, and drop many tables."""
    self.client.set_configuration(vector.get_value('exec_option'))
    self.client.execute("create database if not exists ddl_stress_testdb")
    self.client.execute("use ddl_stress_testdb")
    # Unique suffix so concurrent clients never collide on table names.
    tbl_uniquifier = str(uuid.uuid4()).replace('-', '')
    for i in range(NUM_TBLS_PER_THREAD):
      tbl_name = "tmp_%s_%s" % (tbl_uniquifier, i)
      # Create a partitioned and unpartitioned table
      self.client.execute("create table %s (i int)" % tbl_name)
      self.client.execute("create table %s_part (i int) partitioned by (j int)" %
                          tbl_name)
      # Add some data to each
      self.client.execute("insert overwrite table %s select int_col from "
                          "functional.alltypestiny" % tbl_name)
      self.client.execute("insert overwrite table %s_part partition(j) "
                          "values (1, 1), (2, 2), (3, 3), (4, 4), (4, 4)" % tbl_name)
      # Cache the data the unpartitioned table
      self.client.execute("alter table %s set cached in 'testPool'" % tbl_name)
      # Cache, uncache, then re-cache the data in the partitioned table.
      self.client.execute("alter table %s_part set cached in 'testPool'" % tbl_name)
      self.client.execute("alter table %s_part set uncached" % tbl_name)
      self.client.execute("alter table %s_part set cached in 'testPool'" % tbl_name)
      # Drop the tables, this should remove the cache requests.
      self.client.execute("drop table %s" % tbl_name)
      self.client.execute("drop table %s_part" % tbl_name)
|
import rethin | kdb as r
import db
def _forget_project(name, conn):
    """Delete every row in the projects table whose name matches."""
    matching = db.get_table().filter(r.row['name'] == name)
    matching.delete().run(conn)
def forget_project(name):
    """Open a connection and delete the named project's rows."""
    return _forget_project(name, db.get_conn())
|
{"Sheet 1": [["这", "是", "中", "文"], ["这", "是", "中", "文"]]}
book = p.Book()
book.bookdict = adict
sys.stdout.write(repr(book))
def test_issue_63_empty_array_crash_texttable_renderer():
    """Rendering a sheet built from an empty array must not crash."""
    empty_sheet = p.Sheet([])
    print(empty_sheet)
def test_xls_issue_11():
    """Round-trip through xls bytes with upper-case file types."""
    original = p.Sheet([[1, 2]])
    reloaded = p.get_sheet(file_content=original.xls, file_type="XLS")
    eq_(original.array, reloaded.array)
    test_file = "xls_issue_11.JSON"
    reloaded.save_as(test_file)
    os.unlink(test_file)
def test_issue_68():
    """save_to_memory works for both a Sheet and a Book."""
    single_sheet = p.Sheet([[1]])
    eq_(single_sheet.save_to_memory("csv").read(), "1\r\n")
    single_book = p.Book({"sheet": [[1]]})
    eq_(single_book.save_to_memory("csv").read(), "1\r\n")
def test_issue_74():
    """Decimal cells render correctly in the text table."""
    from decimal import Decimal
    decimal_sheet = p.Sheet([[Decimal("1.1")]])
    expected = "pyexcel sheet:\n+-----+\n| 1.1 |\n+-----+"
    eq_(decimal_sheet.texttable, expected)
def test_issue_76():
    """A custom delimiter is honored when reading a csv stream."""
    from pyexcel._compact import StringIO
    tab_stream = StringIO()
    tab_stream.write("1\t2\t3\t4\n")
    tab_stream.write("1\t2\t3\t4\n")
    tab_stream.seek(0)
    parsed = p.get_sheet(
        file_stream=tab_stream, file_type="csv", delimiter="\t"
    )
    eq_(parsed.array, [[1, 2, 3, 4], [1, 2, 3, 4]])
def test_issue_83_csv_file_handle():
    """A csv iget_array opens its handle lazily and frees it on demand."""
    proc = psutil.Process()
    test_file = os.path.join("tests", "fixtures", "bug_01.csv")
    baseline = proc.open_files()
    # start with a csv file
    stream = p.iget_array(file_name=test_file)
    # interestingly, no open file handle yet
    assert len(proc.open_files()) - len(baseline) == 0
    # now the file handle get opened when we run through
    # the generator
    list(stream)
    # caught an open file handle, the "fish" finally
    assert len(proc.open_files()) - len(baseline) == 1
    # free the fish
    p.free_resources()
    # this confirms that no more open file handle
    eq_(baseline, proc.open_files())
def test_issue_83_file_handle_no_generator():
    """Non-generator get_array leaves no open file handle behind."""
    proc = psutil.Process()
    fixtures = [
        os.path.join("tests", "fixtures", "bug_01.csv"),
        os.path.join("tests", "fixtures", "test-single.csvz"),
        os.path.join("tests", "fixtures", "date_field.xls"),
    ]
    for fixture in fixtures:
        before = proc.open_files()
        p.get_array(file_name=fixture)
        # no open file handle should be left
        assert len(proc.open_files()) - len(before) == 0
def test_issue_83_csvz_file_handle():
    """A csvz archive is opened eagerly; free_resources closes it."""
    proc = psutil.Process()
    test_file = os.path.join("tests", "fixtures", "test-single.csvz")
    baseline = proc.open_files()
    stream = p.iget_array(file_name=test_file)
    # interestingly, file is already open :)
    assert len(proc.open_files()) - len(baseline) == 1
    # the handle remains open while we run through the generator
    list(stream)
    assert len(proc.open_files()) - len(baseline) == 1
    # free the fish
    p.free_resources()
    # this confirms that no more open file handle
    eq_(baseline, proc.open_files())
def test_issue_83_xls_file_handle():
    """xlrd-backed reads never leave a file handle open."""
    proc = psutil.Process()
    test_file = os.path.join("tests", "fixtures", "date_field.xls")
    baseline = proc.open_files()
    stream = p.iget_array(file_name=test_file)
    # interestingly, no open file using xlrd
    assert len(proc.open_files()) - len(baseline) == 0
    # still no open file after consuming the generator
    list(stream)
    assert len(proc.open_files()) - len(baseline) == 0
    p.free_resources()
    eq_(baseline, proc.open_files())
def test_issue_92_non_uniform_records():
    """Records with disjoint keys are laid out under custom headers."""
    sheet = p.get_sheet(
        records=[{"a": 1}, {"b": 2}, {"c": 3}],
        custom_headers=["a", "b", "c"])
    expected = dedent(
        """
        +---+---+---+
        | a | b | c |
        +---+---+---+
        | 1 |   |   |
        +---+---+---+
        |   | 2 |   |
        +---+---+---+
        |   |   | 3 |
        +---+---+---+"""
    ).strip("\n")
    eq_(str(sheet.content), expected)
def test_issue_92_incomplete_records():
    """Missing keys in later records render as blank cells."""
    sheet = p.get_sheet(
        records=[{"a": 1, "b": 2, "c": 3}, {"b": 2}, {"c": 3}])
    expected = dedent(
        """
        +---+---+---+
        | a | b | c |
        +---+---+---+
        | 1 | 2 | 3 |
        +---+---+---+
        |   | 2 |   |
        +---+---+---+
        |   |   | 3 |
        +---+---+---+"""
    ).strip("\n")
    eq_(str(sheet.content), expected)
def test_issue_92_verify_save_as():
    """save_as to csv emits empty fields for missing record keys."""
    incomplete = [{"a": 1, "b": 2, "c": 3}, {"b": 2}, {"c": 3}]
    csv_io = p.save_as(records=incomplete, dest_file_type="csv")
    eq_(csv_io.getvalue(), "a,b,c\r\n1,2,3\r\n,2,\r\n,,3\r\n")
def test_issue_95_preserve_order_in_iget_orders():
    """iget_records preserves the source column order."""
    rows = [["a", "b", "c"], ["1", "2", "3"], ["4", "5", "6"]]
    records = p.iget_records(array=rows)
    flattened = [
        [key, value]
        for record in records
        for key, value in record.items()
    ]
    eq_(flattened, [
        ["a", "1"],
        ["b", "2"],
        ["c", "3"],
        ["a", "4"],
        ["b", "5"],
        ["c", "6"],
    ])
def test_issue_95_preserve_custom_order_in_iget_orders():
    """iget_records follows a custom header order."""
    rows = [["a", "b", "c"], ["1", "2", "3"], ["4", "5", "6"]]
    records = p.iget_records(array=rows, custom_headers=["c", "a", "b"])
    flattened = [
        [key, value]
        for record in records
        for key, value in record.items()
    ]
    eq_(flattened, [
        ["c", "3"],
        ["a", "1"],
        ["b", "2"],
        ["c", "6"],
        ["a", "4"],
        ["b", "5"],
    ])
def test_issue_95_preserve_order_in_get_orders():
    """get_records (eager) preserves the source column order too."""
    rows = [["a", "b", "c"], ["1", "2", "3"], ["4", "5", "6"]]
    records = p.get_records(array=rows)
    flattened = [
        [key, value]
        for record in records
        for key, value in record.items()
    ]
    eq_(flattened, [
        ["a", "1"],
        ["b", "2"],
        ["c", "3"],
        ["a", "4"],
        ["b", "5"],
        ["c", "6"],
    ])
def test_issue_100():
    """Naming columns by the only row leaves empty column data."""
    header_only = p.Sheet([["a", "b"]])
    header_only.name_columns_by_row(0)
    eq_(header_only.to_dict(), {"a": [], "b": []})
def test_issue_125():
    """sort_sheets(reverse=True) reverses the sheet order in place."""
    book = p.Book()
    for name, cell in (("A", 1), ("B", 2)):
        book += p.Sheet([[cell]], name)
    eq_(book.sheet_names(), ["A", "B"])
    book.sort_sheets(reverse=True)
    eq_(book.sheet_names(), ["B", "A"])
def test_issue_125_saving_the_order():
    """A sorted book keeps its sheet order across save and reload."""
    test_file = "issue_125.xls"
    book = p.Book()
    for name, cell in (("A", 1), ("B", 2)):
        book += p.Sheet([[cell]], name)
    eq_(book.sheet_names(), ["A", "B"])
    book.sort_sheets(reverse=True)
    book.save_as(test_file)
    reloaded = p.get_book(file_name=test_file)
    eq_(reloaded.sheet_names(), ["B", "A"])
    os.unlink(test_file)
def test_issue_125_using_key():
    """sort_sheets accepts a custom key function for ordering."""
    test_file = "issue_125.xls"
    book = p.Book()
    for name, cell in (("A", 1), ("B", 2), ("C", 3)):
        book += p.Sheet([[cell]], name)
    rank = {"A": 1, "B": 3, "C": 2}
    book.sort_sheets(key=lambda name: rank[name])
    book.save_as(test_file)
    reloaded = p.get_book(file_name=test_file)
    eq_(reloaded.sheet_names(), ["A", "C", "B"])
    os.unlink(test_file)
def test_issue_126():
data = [[1]]
test_file = "issue_126.xls"
test_name = "doyoufindme"
p.save_as(array=data, dest_file_name=test_file, dest_sheet_name=test_name)
she |
# -*- coding: utf-8 -*-
import click
from trips import oslo
@click.command()
@click.argument('from_place')
@click.argument('to_place')
def main(from_place, to_place):
    # Fetch travel proposals between the two places and print them.
    oslo.print_proposals(oslo.proposals(from_place, to_place))


if __name__ == "__main__":
    main()
| |
import spidev
import RPi.GPIO as GPIO
import time
class DoorSensor:
    """Door (reed-switch) sensor on a Raspberry Pi GPIO pin.

    Emits 'opened'/'closed' events; optionally logs them through a DB
    logger object exposing .cursor, .conn and .location attributes.
    """

    def __init__(self, pin=None, verbose=False, dblogger=None):
        self.results = ()       # last event, as a 1-tuple ('opened',)/('closed',)
        self.device = None
        self.pin = pin
        self.dblogger = dblogger
        self.verbose = verbose
        # assign default pin if none provided (fixed: identity test with None)
        if self.pin is None:
            self.pin = 12
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        self.device = GPIO
        self.name = "Door"

    def getName(self):
        """Return the sensor's display name."""
        return self.name

    def door_event(self, open):
        """Record an open/close event; print and (on close) log it."""
        evt = 'opened' if open else 'closed'
        self.results = (evt,)
        if open:
            if self.verbose: print('door ' + evt)
        else:
            if self.verbose: print('door ' + evt)
            # brief pause before logging the close event
            time.sleep(0.5)
            # log in DB if logger present
            if self.dblogger is not None:
                self.logLastReading(self.dblogger)

    def waitForEvents(self):
        """Poll the input pin forever, firing door_event on each transition."""
        d = self.device
        pin = self.pin
        switch = True
        while True:
            if d.input(pin):  # if door is opened
                if (switch):
                    self.door_event(True)  # send door open event
                    switch = False  # make sure it doesn't fire again
            if not d.input(pin):  # if door is closed
                if not (switch):
                    self.door_event(False)  # send door closed event
                    switch = True  # make sure it doesn't fire again

    def logLastReading(self, dblogger):
        """Insert the most recent event into the Door table via dblogger."""
        cursor = dblogger.cursor
        conn = dblogger.conn
        loc = dblogger.location
        tstamp = int(round(time.time() * 1000))  # epoch milliseconds
        cmd = "INSERT INTO Door (timestamp, location, door_status) VALUES (%s, %s, %s);"
        cursor.execute(cmd, (tstamp, loc, self.results[0]))
        conn.commit()

    def getLastReading(self):
        """Return the last recorded event tuple (may be empty)."""
        return self.results

    def cleanUp(self):
        """Release all GPIO resources."""
        GPIO.cleanup()
if __name__ == '__main__':
    # initialize different sensors
    vbose = True
    # NOTE(review): a relative import only works when this module is run as
    # part of a package (python -m ...), not as a standalone script — confirm.
    from ..DBLogger import DBLogger
    dbl = DBLogger()
    ds = DoorSensor(dblogger=dbl, verbose=vbose)
    # Listen to events
    try:
        ds.waitForEvents()
    except KeyboardInterrupt:
        if vbose: print("finishing.")
    finally:
        # always release the GPIO pins, even after Ctrl-C
        ds.cleanUp()
|
######################################################################
#
# Copyright 2012 Zenoss, Inc. All Rights Reserved.
#
######################################################################
from zope.interface import Interface

try:
    from Products.Zuul.interfaces.actions import IActionContentInfo
except ImportError:
    from Products.Zuul import IInfo as IActionContentInfo

from Products.Zuul.interfaces import IFacade
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
class ISlackActionContentInfo(IActionContentInfo):
    # Zuul schema describing the editable content fields of the Slack action.

    # Webhook URL that notifications are posted to.
    slackUrl = schema.TextLine(
        title=_t(u'Slack URL'),
        order=90,
    )

    # Optional HTTP proxy settings used when posting to Slack.
    proxyUrl = schema.TextLine(
        title=_t(u'Proxy URL'),
        order=100,
    )

    proxyUsername = schema.TextLine(
        title=_t(u'Proxy username'),
        order=110,
    )

    proxyPassword = schema.Password(
        title=_t(u'Proxy password'),
        order=120,
    )
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
from .. import pprint
class MyRow(table.Row):
    """Row subclass whose str() is the void-record representation."""
    def __str__(self):
        return str(self.as_void())
class MyColumn(table.Column):
    # Plain Column subclass used to verify subclass propagation.
    pass
class MyMaskedColumn(table.MaskedColumn):
    # Plain MaskedColumn subclass used to verify subclass propagation.
    pass
class MyTableColumns(table.TableColumns):
    # Plain TableColumns subclass used to verify subclass propagation.
    pass
class MyTableFormatter(pprint.TableFormatter):
    """Plain TableFormatter subclass used to verify subclass propagation."""
    pass
class MyTable(table.Table):
    # Wire every customizable component class to the My* subclasses above.
    Row = MyRow
    Column = MyColumn
    MaskedColumn = MyMaskedColumn
    TableColumns = MyTableColumns
    TableFormatter = MyTableFormatter
def test_simple_subclass():
    """Component subclasses must propagate through Table construction/copy."""
    t = MyTable([[1, 2], [3, 4]])
    row = t[0]
    assert isinstance(row, MyRow)
    assert isinstance(t['col0'], MyColumn)
    assert isinstance(t.columns, MyTableColumns)
    assert isinstance(t.formatter, MyTableFormatter)
    # copying through the subclass keeps the custom Row
    t2 = MyTable(t)
    row = t2[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'
    # copying through the base class must NOT keep the custom Row
    t3 = table.Table(t)
    row = t3[0]
    assert not isinstance(row, MyRow)
    assert str(row) != '(1, 3)'
    # masked tables use the custom MaskedColumn instead
    t = MyTable([[1, 2], [3, 4]], masked=True)
    row = t[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'
    assert isinstance(t['col0'], MyMaskedColumn)
    assert isinstance(t.formatter, MyTableFormatter)
class ParamsRow(table.Row):
    """
    Row class that allows access to an arbitrary dict of parameters
    stored as a dict object in the ``params`` column.
    """
    def __getitem__(self, item):
        # Ordinary columns win; anything else is looked up in 'params'.
        if item in self.colnames:
            return super(ParamsRow, self).__getitem__(item)
        return super(ParamsRow, self).__getitem__('params')[item]

    def keys(self):
        ordinary = [name for name in self.colnames if name != 'params']
        extras = [key.lower() for key in sorted(self['params'])]
        return ordinary + extras

    def values(self):
        return [self[name] for name in self.keys()]
class ParamsTable(table.Table):
    # Table whose rows expose the 'params' dict entries as pseudo-columns.
    Row = ParamsRow
def test_params_table():
    """ParamsRow merges dict entries in 'params' into item/key access."""
    t = ParamsTable(names=['a', 'b', 'params'], dtype=['i', 'f', 'O'])
    t.add_row((1, 2.0, {'x': 1.5, 'y': 2.5}))
    t.add_row((2, 3.0, {'z': 'hello', 'id': 123123}))
    assert t['params'][0] == {'x': 1.5, 'y': 2.5}
    assert t[0]['params'] == {'x': 1.5, 'y': 2.5}
    assert t[0]['y'] == 2.5
    assert t[1]['id'] == 123123
    # keys: ordinary columns first, then sorted lower-cased param keys
    assert list(t[1].keys()) == ['a', 'b', 'id', 'z']
    assert list(t[1].values()) == [2, 3.0, 123123, 'hello']
|
DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32),
a)
  def testFloatSizesLessValues(self):
    """A scalar with a larger shape keeps a single float_val in the proto."""
    t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
    self.assertProtoEquals("""
      dtype: DT_FLOAT
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      float_val: 10.0
      """, t)
    # No conversion to Ndarray for this one: not enough values.
  def testFloatNpArrayFloat64(self):
    """A float64 ndarray serializes as DT_DOUBLE and round-trips."""
    t = tensor_util.make_tensor_proto(
        np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
    self.assertProtoEquals("""
      dtype: DT_DOUBLE
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.float64, a.dtype)
    self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
                        tensor_util.MakeNdarray(t))
  def testFloatTypesWithImplicitRepeat(self):
    """A single float value is implicitly repeated to fill the shape."""
    for dtype, nptype in [
        (tf.float32, np.float32), (tf.float64, np.float64)]:
      t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
      a = tensor_util.MakeNdarray(t)
      self.assertAllClose(np.array([[10.0, 10.0, 10.0, 10.0],
                                    [10.0, 10.0, 10.0, 10.0],
                                    [10.0, 10.0, 10.0, 10.0]], dtype=nptype), a)
  def testInt(self):
    """A plain Python int becomes a scalar DT_INT32 proto."""
    t = tensor_util.make_tensor_proto(10)
    self.assertProtoEquals("""
      dtype: DT_INT32
      tensor_shape {}
      int_val: 10
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int32, a.dtype)
    self.assertAllClose(np.array(10, dtype=np.int32), a)
  def testIntNDefaultType(self):
    """An int list with an explicit shape serializes to tensor_content."""
    t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
    self.assertProtoEquals("""
      dtype: DT_INT32
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int32, a.dtype)
    self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
  def testIntTypes(self):
    """Each int dtype round-trips from list and from ndarray input."""
    for dtype, nptype in [
        (tf.int32, np.int32),
        (tf.uint8, np.uint8),
        (tf.uint16, np.uint16),
        (tf.int16, np.int16),
        (tf.int8, np.int8)]:
      # Test with array.
      t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
      self.assertEquals(dtype, t.dtype)
      self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
      a = tensor_util.MakeNdarray(t)
      self.assertEquals(nptype, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
      # Test with ndarray.
      t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
      self.assertEquals(dtype, t.dtype)
      self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
      a = tensor_util.MakeNdarray(t)
      self.assertEquals(nptype, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
  def testIntTypesWithImplicitRepeat(self):
    """A single int is implicitly repeated to fill the requested shape."""
    for dtype, nptype in [
        (tf.int64, np.int64),
        (tf.int32, np.int32),
        (tf.uint8, np.uint8),
        (tf.uint16, np.uint16),
        (tf.int16, np.int16),
        (tf.int8, np.int8)]:
      t = tensor_util.make_tensor_proto([10], shape=[3, 4], dtype=dtype)
      a = tensor_util.MakeNdarray(t)
      self.assertAllEqual(np.array([[10, 10, 10, 10],
                                    [10, 10, 10, 10],
                                    [10, 10, 10, 10]], dtype=nptype), a)
  def testLong(self):
    """An int with dtype=tf.int64 becomes a scalar DT_INT64 proto."""
    t = tensor_util.make_tensor_proto(10, dtype=tf.int64)
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape {}
      int64_val: 10
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int64, a.dtype)
    self.assertAllClose(np.array(10, dtype=np.int64), a)
  def testLongN(self):
    """An int64 list with an explicit shape serializes to tensor_content."""
    t = tensor_util.make_tensor_proto([10, 20, 30], shape=[1, 3],
                                      dtype=tf.int64)
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int64, a.dtype)
    self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
  def testLongNpArray(self):
    """A default-int ndarray serializes as DT_INT64 tensor_content."""
    t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape { dim { size: 3 } }
      tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int64, a.dtype)
    self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
  def testString(self):
    """A Python string becomes a scalar DT_STRING proto holding bytes."""
    t = tensor_util.make_tensor_proto("foo")
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape {}
      string_val: "foo"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.object, a.dtype)
    self.assertEquals([b"foo"], a)
  def testStringWithImplicitRepeat(self):
    """A single string is implicitly repeated to fill the requested shape."""
    t = tensor_util.make_tensor_proto("f", shape=[3, 4])
    a = tensor_util.MakeNdarray(t)
    self.assertAllEqual(np.array([[b"f"] * 4] * 3, dtype=np.object), a)
  def testStringN(self):
    """A bytes list with an explicit shape serializes to string_val entries."""
    t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      string_val: "foo"
      string_val: "bar"
      string_val: "baz"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.object, a.dtype)
    self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
  def testStringNpArray(self):
    """A 2-D bytes ndarray serializes to row-major string_val entries."""
    t = tensor_util.make_tensor_proto(np.array([[b"a", b"ab"],
                                                [b"abc", b"abcd"]]))
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      string_val: "a"
      string_val: "ab"
      string_val: "abc"
      string_val: "abcd"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.object, a.dtype)
    self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testComplex(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=tf.complex64)
self.assertP | rotoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.Ma | keNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
  def testComplexWithImplicitRepeat(self):
    """A single complex value is implicitly repeated to fill the shape."""
    t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4],
                                      dtype=tf.complex64)
    a = tensor_util.MakeNdarray(t)
    self.assertAllClose(np.array([[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
                                  [(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
                                  [(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
                                 dtype=np.complex64), a)
def testComplexN(self):
t = tensor_util.make_tensor_proto([(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3],
dtype=tf.comple |
# Prompt for the user's name and greet them (Python 2: raw_input / print).
first_name = raw_input("Type your name here ")
last_name = raw_input("Type your last name here ")
print "Hello " + first_name +' '+last_name + " nice to meet you, welcome to the height conversion program"
def info():
    # Ask for the user's height and hand back the raw input string.
    # (presumably centimetres, given the 0.39370 cm->inch factor used later)
    return raw_input("type your height here: ")
def height_in_inches():
    # Convert the entered height to inches (0.39370 inches per centimetre).
    return float(info()) * 0.39370
def your_height_vs_average_height():
    # Difference (in inches) between the entered height and the 69-inch average.
    return float(height_in_inches()) - 69
def main():
print "you are {} inches tall,{} is your height compared to the average height (6 | 9 inches)".format(str(height_in_inches()),str(your_height_vs_average_height()))
#if the number returned is positive, that means you are that much taller than the average height, if the number returned is negative, then th | at means you are that much shorter than average height
main()
|
#!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import sys
from .maya_environment import *
from .maya_app import *
from .maya_builder import *
def get_maya_builder_cmd(path, ticket):
    """Build the shell command that runs this module as a maya builder."""
    # dynamically import Config. This can only be done by Tactic (no client)
    from pyasm.common import Config
    python_exe = Config.get_value("services", "python")
    return '%s "%s" %s %s' % (python_exe, __file__, path, ticket)
def maya_builder_exec(path, ticket):
    """Run the maya builder for ``path``/``ticket`` as a separate process."""
    # 'os' is not imported at the top of this module; the wildcard imports
    # may or may not provide it, so import it locally to be safe.
    import os
    cmd = get_maya_builder_cmd(path, ticket)
    print(cmd)
    os.system(cmd)
if __name__ == '__main__':
    executable = sys.argv[0]
    path = sys.argv[1]
    ticket = sys.argv[2]
    # need to add these paths because they not currently in the
    # windows environment
    #sys.path.append("E:/sthpw/tactic/sthpw/src")
    from pyasm.security import Batch
    Batch()
    # read the generated builder script
    file = open(path, 'r')
    contents = file.read()
    file.close()
    # set up maya
    from pyasm.application.common import BaseAppInfo
    info = BaseAppInfo("maya")
    from pyasm.common import Environment
    tmpdir = "%s/temp/%s" % (Environment.get_tmp_dir(), ticket)
    info.set_tmpdir(tmpdir)
    info.set_user(Environment.get_user_name() )
    info.set_ticket(ticket)
    info.set_up_maya(init=True)
    env = info.get_app_env()
    env.set_tmpdir(tmpdir)
    # create the file builder
    builder = info.get_builder()
    builder.execute(contents)
    # save the file
    filepath = "%s/maya_render.ma" % env.get_tmpdir()
    info.get_app().save(filepath)
    # capture the resulting maya session as xml
    from maya_introspect import MayaIntrospect
    introspect = MayaIntrospect()
    introspect.execute()
    session_xml = introspect.get_session_xml()
    # should reproduce glue!!!
    file = open("%s/session.xml" % env.get_tmpdir(), "w" )
    file.write(session_xml)
    file.close()
|
from struct import pack, unpack
from time import time
from communication.ComAPI.packet import Packet
class PacketLogin(Packet):
    """Class for constructing binary data based
    on a common API between client / server."""

    def __init__(self):
        super().__init__()
        # packet type identifier for login messages
        self.packetID = 3

    def encode(self, username, avatar, position):
        """
        Encode a message with API format
        DRPG + PacketID + username length + username
        + avatar length + avatar + x + y + z
        """
        bContainer = super().encode()
        ## TODO: Be aware of byte order from client for portable version
        # length-prefixed username and avatar (single unsigned byte each)
        bContainer += pack(">B", len(username))
        bContainer += username.encode()
        bContainer += pack(">B", len(avatar))
        bContainer += avatar.encode()
        ## Add position: (x, y, z) as big-endian floats
        bContainer += pack(">f", position[0])
        bContainer += pack(">f", position[1])
        bContainer += pack(">f", position[2])
        return bContainer
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db.models import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views import generic
from plata.contact.models import Contact
from plata.discount.models import Discount
from plata.shop.views import Shop
from plata.shop.models import Order
from simple.models import Product
# Single Shop instance wiring contact, order and discount models together.
shop = Shop(Contact, Order, Discount)

product_list = generic.ListView.as_view(
    queryset=Product.objects.filter(is_active=True),
    template_name='product/product_list.html',
)
class OrderItemForm(forms.Form):
    """Quantity selector used on the product detail page (1-100)."""
    quantity = forms.IntegerField(label=_('quantity'), initial=1,
        min_value=1, max_value=100)
def product_detail(request, object_id):
    """Product page with an add-to-cart form; POST updates the cart."""
    product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)
    if request.method == 'POST':
        form = OrderItemForm(request.POST)
        if form.is_valid():
            # lazily create the order on first cart interaction
            order = shop.order_from_request(request, create=True)
            try:
                order.modify_item(product, form.cleaned_data.get('quantity'))
                messages.success(request, _('The cart has been updated.'))
            except ValidationError, e:
                # sealed orders may no longer be modified; surface the messages
                if e.code == 'order_sealed':
                    [messages.error(request, msg) for msg in e.messages]
                else:
                    raise
            return redirect('plata_shop_cart')
    else:
        form = OrderItemForm()
    return render_to_response('product/product_detail.html', {
        'object': product,
        'form': form,
    }, context_instance=RequestContext(request))
|
format(Humidity) + " percent \" "
Temperature = w.get_temperature(' | celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
TemperatureText = "espeak -g 10 \" Current temperature is " + format(Temperature) + " degrees \" "
TemperatureAvg = w.get_temperature('celsius')['temp'] # {'temp | _max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
TemperatureAvgText = "espeak -g 10 \" Current temperature is " + format(TemperatureAvg) + " degrees \" "
Clouds = w.get_clouds()
Rainfall = w.get_rain()
Pressure = w.get_pressure()
#subprocess.call(shlex.split(WindText))
subprocess.call(shlex.split(SWindText))
subprocess.call(shlex.split(HumidityText))
subprocess.call(shlex.split(TemperatureAvgText))
#Output for debugging purpose
#print (" ")
#print (" ")
#print ("****************************************************************************************************")
#print ("Current wind Speed and Direction right now in Melbourne is = %s " %Wind)
#print ("Current Temperature in Melbourne is = %s" %Temperature)
#print ("Current Humidity in Melbourne is = %s Percent" %Humidity)
#print ("Cloud ceiling across Melbourne is %s thousand feet" %Clouds)
#print ("Current Rainfall across Melbourne is %s " %Rainfall)
#print ("Barometric Pressure across Melbourne is %s " %Pressure)
#print ("****************************************************************************************************")
#print (" ")
#print (" ")
#Face Matching Code Starts Here
rekognition = boto3.client("rekognition", region)
response = rekognition.compare_faces(
SourceImage={
"S3Object": {
"Bucket": bucket,
"Name": key,
}
},
TargetImage={
"S3Object": {
"Bucket": bucket_target,
"Name": key_target,
}
},
SimilarityThreshold=threshold,
)
#Reading JSON and converting into workable format
#print(response)
temp1 = json.dumps(response)
temp2 = json.loads(temp1)
#print(temp2['FaceMatches'])
print "Source Face Confidence in %s " %format(temp2['SourceImageFace']['Confidence'])
for match in temp2['FaceMatches']:
print "*******************************************"
print " "
print "Similarity between compared faces is %s " %format(temp2['FaceMatches'][0]['Similarity'])
print " "
print "*******************************************"
#Writing timestamp to log file
now = datetime.datetime.now()
outputFile = open('/opt/data/face_capture_time_log.txt', 'a')
outputWriter = csv.writer(outputFile)
tempArray = [now]
outputWriter.writerow(tempArray)
outputFile.close()
#Reading older timestamp from log file
proc = subprocess.Popen(["tail -n 1 /opt/data/face_capture_time_log.txt | cut -d : -f 2"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
temp = out.strip()
oldtimestampminute = int(temp)
#Subtracting seconds to find the difference
diff = oldtimestampminute - now.minute
if abs(diff) > 1: #abs takes care of negative values and provides a positive number as the result
print "*******************************************"
print " "
print " !!! Speech To Text happens here!!!! "
print " "
print "*******************************************"
subprocess.call('espeak \" Hi Trevor Welcome back \" ', shell=True)
WeatherProcessing()
else:
print "****************************************************************************"
print " "
print ("Ain't bothering you because we just spotted you less than a a min ago")
print " "
print "****************************************************************************"
for nomatch in temp2['UnmatchedFaces']:
print "Faces either don't match or are a poor match"
return
#Main Code Section Starts Here
# Haar cascades for face/eye detection (nose cascade left disabled).
face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')
#nose_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.0.0/data/haarcascades/Nariz.xml')
# Pi camera configured for 640x480 BGR capture at 32 fps.
camera = PiCamera()
camera.resolution = (640,480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640,480))
s3 = boto3.client('s3')
# give the camera sensor time to warm up before the first capture
time.sleep(2)
#Clearing the buffer before loading the first image
rawCapture.truncate(0)
while True:
#time.sleep(1)
camera.capture(rawCapture, format="bgr")
img = rawCapture.array
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags = cv2.CASCADE_SCALE_IMAGE)
# iterate over all identified faces and try to find eyes
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
#The code on the next three lines works and has been tested out
#Disabling it because it's not required for purposes of identification of faces
#eyes = eye_cascade.detectMultiScale(roi_gray, minSize=(30, 30))
#for (ex,ey,ew,eh) in eyes:
#cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,0),2)
#Detection of code for noses has not been validated or tested
#noses = nose_cascade.detectMultiScale(roi_gray, minSize=(100, 30))
#for (ex,ey,ew,eh) in noses:
# cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
#printing messages to the screen
print "At time "+time.strftime("%d/%m/%y-%H:%M:%S")+", found {0} faces in the picture!!!".format(len(faces))
#writing the image to the screen
font = cv2.FONT_HERSHEY_SIMPLEX
#cv2.putText(img, str(datetime.datetime.now().strftime("%d/%m/%y-%H/%M/%S")), (100,500), font, 4,(255,255,255),2)
cv2.putText(img, "DateTime - "+str(datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")), (5,25), font, 0.5,(255,255,255))
cv2.imshow('Mapping Faces within the Image', img)
#writing the image to a file
if len(faces) > 0:
#Older versions of cv2.imwrite
#cv2.imwrite("temp"+str(time.strftime("%d/%m/%y-%H%M%S"))+".jpg",img)
#cv2.imwrite("temp"+str(datetime.datetime.now())+".jpg",img)
#cv2.imwrite("temp"+str(datetime.datetime.now().strftime("%d/%m/%y-%H/%M/%S"))+".jpg",img)
#cv2.imwrite("FaceCaptureWarrenPi-"+str(datetime.datetime.now())+".jpg",img)
#current version of cv2.imwrite
#imagename = "FaceCaptureWarrenPi-" + format(str(datetime.datetime.now())) + ".jpg" #This also works
imagename = "FaceCaptureWarrenPi-" + format(str(time.strftime("%d%m%y-%H%M%S"))) + ".jpg"
writepath = "/home/pi/Downloads/TW_Experiments/Python_Projects/RaspiPythonProjects/OpenCV/CaptureVideoStream/imagecapture/" + imagename
cv2.imwrite(writepath, img)
print "Captured image to file !!!"
#Uploading files to AWS S3
with open(writepath, 'rb') as data:
s3.upload_fileobj(data, "tw37-opencv", imagename)
#Comparing images using AWS Rekognition
bucket_target_var = "tw37-opencv"
#key_target_var = "new_image_name.jpg"
key_source_var = "orignal_trevor_1706.jpg"
bucket_source_var = "tw37-original"
#source_face, matches = compare_faces(bucket_source_var, key_source_var, bucket_target_var, imagename)
#print "Source Face ({Confidence}%)".format(**source_face)
#one match for each target face
#for match in matches:
# |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 03 09:36:48 2018
@author: tih
"""
import os
import sys
from DataAccess import DownloadData
def main(Dir, Startdate='', Enddate='', latlim=[-60, 70], lonlim=[-180, 180], Waitbar = 1):
"""
This function downloads monthly ETmonitor data
Keyword arguments:
| Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -60 and 70)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
"""
print '\nDownload monthly ETmonitor Soil Evaporation data for the period %s till %s' %(Startdate, Enddate)
Type = "es"
# Download da | ta
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Type, Waitbar)
if __name__ == '__main__':
    # Forward command-line arguments (skipping the script name) to main();
    # the original passed the whole argv list as the Dir argument.
    main(*sys.argv[1:])
# -*- coding: utf-8 -*-
from Node import Node
# Best path sum seen anywhere in the tree so far.
res = float("-inf")


# Only the top-level root node may use both the left and the right subtree
# (a path bending through the node); every other node passes up one side only.
def maxPathSum(root):
    """Return the best single-sided path sum ending at ``root``;
    updates the global ``res`` with the best overall path sum."""
    if not root:
        return 0
    max_l = maxPathSum(root.left)
    max_r = maxPathSum(root.right)
    # best downward path starting at root (negative children may be dropped)
    max_single = max(max(max_l, max_r) + root.data, root.data)
    # best path allowed to bend through root
    max_top = max(max_single, max_l + max_r + root.data)
    global res
    res = max(res, max_top)
    return max_single
# Driver program
# Build a small test tree; the answer accumulates in the module-level 'res'.
root = Node(10)
root.left = Node(2)
root.right = Node(10);
root.left.left = Node(20);
root.left.right = Node(1);
root.right.right = Node(-25);
root.right.right.left = Node(3);
root.right.right.right = Node(4);
maxPathSum(root)
# print the best path sum found anywhere in the tree (Python 2)
print res
# Aspect Ratio 3D
import SMESH_mechanic_tetra
import SMESH
smesh = SMESH_mechanic_tetra.smesh
mesh = SMESH_mechanic_tetra.mesh
salome = SMESH_mechanic_tetra.salome
# Criterion : ASPECT RATIO 3D > 4.5
ar_margin = 4.5
aFilter = smesh.GetFilter(SMESH.VOLUME, SMESH.FT_AspectRatio3D, SMESH.FT_MoreThan, ar_margin)
anIds = mesh.GetIdsFromFilter(aFilter)
# print the result
print "Criterion: Aspect Ratio 3D > ", ar_margin, " Nb = ", len(anIds)
j = 1
for i in range(len(anIds)):
if j > 20: j = 1; print ""
print anIds[i],
j = j + 1
pass
print ""
# create a group
aGroup = mesh.CreateEmptyGroup(SMESH.VOLUME, "Aspec | t Ratio 3D > " + `ar_margin`)
aGroup.Add(anIds)
salome.sg.updateObjBr | owser(1)
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (https://cubicerp.com).
{
    "name": "Bolivia - Accounting",
    "version": "2.0",
    "description": """
Bolivian accounting chart and tax localization.

Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes
    """,
    "author": "Cubic ERP",
    "website": "https://cubicERP.com",
    "category": "Localization",
    "depends": ["account"],
    "data": [
        "l10n_bo_chart.xml",
        "account_tax.xml",
        "account_chart_template.yml",
    ],
    "installable": True,
}
|
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class perl_Carp_Clan(test.test):
"""
Autotest module for testing basic functionality
of perl_Carp_Clan
@author Kumuda G <kumuda.govind@in.ibm.com> ##
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
| logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./perl-Carp-Clan.sh'], cwd="%s/perl_Carp_Clan" %(test_path))
ret_v | al.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
"If 'None' is specified, CPAC will not exclude any subjects.",
values = "None")
self.page.add(label= "Sites to Include (Optional) ",
control = control.TEXT_BOX,
name = "siteList",
type = dtype.STR,
comment = "Include only a sub-set of the sites present in the folders defined above.\n\n"
"List sites in this box (e.g., NYU, UCLA) or provide the path to a text\n"
"file with one site on each line.\n\n"
"If 'None' is specified, CPAC will include all sites.",
values ="None",
style= wx.EXPAND | wx.ALL,
size = (532,-1))
self.page.add(label="Scan Parameters File (Optional) ",
control=control.COMBO_BOX,
name = "scanParametersCSV",
type = dtype.COMBO,
comment = "Required for Slice Timing Correction.\n\n"
"Path to a .csv file containing information about scan acquisition parameters.\n\n"
"For instructions on how to create this file, see the User Guide.\n\n"
"If 'None' is specified, CPAC will skip Slice Timing Correction.",
values = "None")
self.page.add(label = "Output Directory ",
control = control.DIR_COMBO_BOX,
name = "outputSubjectListLocation",
type = dtype.STR,
comment = "Directory where CPAC should place subject list files.",
values = "")
self.page.add(label = "Subject List Name ",
control = control.TEXT_BOX,
name = "subjectListName",
type = dtype.STR,
comment = "A label to be appended to the generated " \
"subject list files.",
values = "",
style= wx.EXPAND | wx.ALL,
size = (300,-1))
self.page.set_sizer()
mainSizer.Add(self.window, 1, wx.EXPAND)
btnPanel = wx.Panel(self.panel, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.multiscan = wx.CheckBox(btnPanel, -1, label = "Multiscan Data")
if 'linux' in sys.platform:
hbox.Add(self.multiscan,0, flag=wx.TOP, border=5)
else:
hbox.Add(self.multiscan, 0, flag=wx.RIGHT | wx.BOTTOM, border=5)
img = wx.Image(p.resource_filename('CPAC', 'GUI/resources/images/help.png'), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
help = wx.BitmapButton(btnPanel, id=-1, bitmap=img,
pos=(10, 20), size = (img.GetWidth()+5, img.GetHeight()+5))
help.Bind(wx.EVT_BUTTON, self.onHelp)
if 'linux' in sys.platform:
hbox.Add(help, 0, flag = wx.TOP, border =5)
else:
hbox.Add(help, 0, flag=wx.RIGHT | wx.BOTTOM, border=5)
buffer2 = wx.StaticText(btnPanel, label = "\t")
hbox.Add(buffer2)
run_ext = wx.Button(btnPanel, ID_RUN_EXT, "Generate Subject Lists", (280,10), wx.DefaultSize, 0 )
self.Bind(wx.EVT_BUTTON, lambda event: self.save(event,'run'), id=ID_RUN_EXT)
hbox.Add( run_ext, 1, flag=wx.LEFT|wx.ALIGN_LEFT, border=10)
buffer = wx.StaticText(btnPanel, label = "\t\t\t\t")
hbox.Add(buffer)
cancel = wx.Button(btnPanel, wx.ID_CANCEL, "Cancel",(220,10), wx.DefaultSize, 0 )
self.Bind(wx.EVT_BUTTON, self.cancel, id=wx.ID_CANCEL)
hbox.Add( cancel, 0, flag=wx.LEFT|wx.BOTTOM, border=5)
load = wx.Button(btnPanel, wx.ID_ADD, "Load Settings", (280,10), wx.DefaultSize, 0 )
self.Bind(wx.EVT_BUTTON, self.load, id=wx.ID_ADD)
hbox.Add(load, 0.6, flag=wx.LEFT|wx.BOTTOM, border=5)
save = wx.Button(btnPanel, wx.ID_SAVE, "Save Settings", (280,10), wx.DefaultSize, 0 )
self.Bind(wx.EVT_BUTTON, lambda event: self.save(event,'save'), id=wx.ID_SAVE)
hbox.Add(save, 0.6, flag=wx.LEFT|wx.BOTTOM, border=5)
btnPanel.SetSizer(hbox)
mainSizer.Add(btnPanel, 0.5, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=20)
self.panel.SetSizer(mainSizer)
self.Show()
    def cancel(self, event):
        # Close the window without generating any subject lists.
        self.Close()
def onHelp(self, event):
comment = "Check the box only if the scans have different slice timing infomation."
wx.TipWindow(self, comment, 500)
    def run(self, config):
        """Run subject-list extraction for the given data-config YAML file.

        Returns 1 on success, -1 on import or extraction failure.
        """
        try:
            try:
                # NOTE(review): yaml.load without an explicit Loader is unsafe
                # on untrusted input; consider yaml.safe_load.
                config_map = yaml.load(open(config, 'r'))
                out_location = os.path.join(\
                    os.path.realpath(config_map.get('outputSubjectListLocation')),\
                    'CPAC_subject_list_%s.yml' % config_map.get('subjectListName')[0])
            except Exception, e:
                print "Error loading data config file", e
                raise
            print "executing extract data"
            multiscan = self.multiscan.IsChecked()
            import CPAC
            # Multiscan data goes through a separate extraction path.
            if multiscan:
                CPAC.utils.extract_data_multiscan.run(config)
            else:
                CPAC.utils.extract_data.run(config)
            # Prompt until the user supplies a non-empty, unused sublist name.
            while True:
                dlg2 = wx.TextEntryDialog(self, 'Please enter a name for the Subject List',
                                          'Sublist Name', 'CPAC_subject_list_%s.yml' % config_map.get('subjectListName')[0])
                if dlg2.ShowModal() == wx.ID_OK:
                    if len(dlg2.GetValue()) > 0:
                        parent = self.Parent
                        map = parent.get_sublist_map()
                        if map.get(dlg2.GetValue()) == None:
                            map[dlg2.GetValue()] = out_location
                            parent.listbox2.Append(dlg2.GetValue())
                            dlg2.Destroy()
                            break
                        else:
                            # Name collision: tell the user and ask again.
                            dlg3 = wx.MessageDialog(self, 'Subject List with this name already exist', 'Error!',
                                                    wx.OK | wx.ICON_ERROR)
                            dlg3.ShowModal()
                            dlg3.Destroy()
            return 1
        except ImportError, e:
            wx.MessageBox("Error importing CPAC. Unable to run extract data tool.", "Error")
            print "Error importing CPAC"
            print e
            return -1
        except Exception, e:
            dlg2 = wx.MessageDialog(self, "Error Creating CPAC Subject List.\n%s"%e,
                                    'Error!',
                                    wx.OK | wx.ICON_ERROR)
            dlg2.ShowModal()
            dlg2.Destroy()
            return -1
def save(self, event, flag):
config_list =[]
def display(win, msg):
wx.MessageBox(msg, "Error")
win.SetBackgroundColour("pink")
win.SetFocus()
win.Refresh()
raise ValueError
try:
for ctrl in self.page.get_ctrl_list():
#print "validating ctrl-->", ctrl.get_name()
win = ctrl.get_ctrl()
#print "ctrl.get_selection()", ctrl.get_selection()
#print "type(ctrl.get_selection())", type(ctrl.get_selection())
value = str(ctrl.get_selection())
value = value.strip()
name = ctrl.get_name()
dtype= ctrl.get_datatype()
if name == 'subjectListName':
subject_list_name = value
if len(value) == 0:
display(win,"%s field must contain some text!"%ctrl.get_name())
|
from unittest import TestCase
from preggy import expect
from remotecv.image_processor import ImageProcessor
from tests import read_fixture
class ImageProcessorTest(TestCase):
    """Integration tests for ImageProcessor.detect over various inputs."""

    @staticmethod
    def _detect(detector, payload):
        """Run detection on *payload* with a fresh processor instance."""
        return ImageProcessor().detect(detector, payload)

    def test_when_detector_unavailable(self):
        with expect.error_to_happen(AttributeError):
            self._detect("feat", read_fixture("broken.jpg"))

    def test_when_image_is_huge(self):
        expect(self._detect("all", read_fixture("huge_image.jpg"))).Not.to_be_empty()

    def test_with_multiple_detectors(self):
        result = self._detect("face+profile+glass", read_fixture("one_face.jpg"))
        expect(result).Not.to_be_empty()

    def test_when_not_animated_gif(self):
        expect(self._detect("face", read_fixture("one_face.gif"))).Not.to_be_empty()

    def test_when_animated_gif(self):
        # Animated gifs are not processed, so no detections are expected.
        expect(self._detect("all", read_fixture("animated.gif"))).to_be_empty()

    def test_feature_detection(self):
        expect(self._detect("feature", read_fixture("one_face.jpg"))).Not.to_be_empty()

    def test_should_be_empty_when_invalid_image(self):
        expect(self._detect("all", b"asdas")).to_be_empty()

    def test_should_ignore_gif(self):
        # NOTE(review): identical to the invalid-image test above; the name
        # suggests it was meant to use a gif fixture — confirm.
        expect(self._detect("all", b"asdas")).to_be_empty()
|
ue
class AltCtxFactory(ssl.ClientContextFactory):
    """Client SSL context factory that currently skips peer verification."""

    def getContext(self):
        """Return the base context with VERIFY_NONE set (MITM-exposed)."""
        ctx = ssl.ClientContextFactory.getContext(self)
        #TODO: replace VERIFY_NONE with VERIFY_PEER when we have
        #a real server with a valid CA signed cert. If that doesn't
        #work it'll be possible to use self-signed certs, if they're distributed,
        #by placing the cert.pem file and location in the config and uncommenting
        #the ctx.load_verify_locations line.
        #As it stands this is using non-authenticated certs, meaning MITM exposed.
        ctx.set_verify(SSL.VERIFY_NONE, verifyCallback)
        #ctx.load_verify_locations("/path/to/cert.pem")
        return ctx
class CoinSwapJSONRPCClient(object):
    """A class encapsulating Alice's json rpc client.
    """
    #Keys map to states as per description of CoinswapAlice
    method_names = {0: "handshake",
                    1: "negotiate",
                    3: "tx0id_hx_tx2sig",
                    5: "sigtx3",
                    9: "secret",
                    12: "sigtx4"}

    def __init__(self, host, port, json_callback=None, backout_callback=None,
                 usessl=False):
        self.host = host
        self.port = int(port)
        #Callback fired on receiving response to send()
        self.json_callback = json_callback
        #Callback fired on receiving any response failure
        self.backout_callback = backout_callback
        scheme = 'https' if usessl else 'http'
        endpoint = scheme + '://' + host + ":" + str(port) + "/"
        if usessl:
            self.proxy = Proxy(endpoint, ssl_ctx_factory=AltCtxFactory)
        else:
            self.proxy = Proxy(endpoint)

    def error(self, errmsg):
        """error callback implies we must back out at this point.
        Note that this includes stateless queries, as any malformed
        or non-response must be interpreted as malicious.
        """
        self.backout_callback(str(errmsg))

    def send_poll(self, method, callback, noncesig, sessionid, *args):
        """Stateless queries during the run use this call, and provide
        their own callback for the response.
        """
        deferred = self.proxy.callRemote("coinswap", sessionid, noncesig,
                                         method, *args)
        deferred.addCallback(callback).addErrback(self.error)

    def send_poll_unsigned(self, method, callback, *args):
        """Stateless queries outside of a coinswap run use
        this query method; no nonce, sessionid or signature needed.
        """
        deferred = self.proxy.callRemote(method, *args)
        deferred.addCallback(callback).addErrback(self.error)

    def send(self, method, *args):
        """Stateful queries share the same callback: the state machine
        update function.
        """
        deferred = self.proxy.callRemote(method, *args)
        deferred.addCallback(self.json_callback).addErrback(self.error)
class CoinSwapCarolJSONServer(jsonrpc.JSONRPC):
    def __init__(self, wallet, testing_mode=False, carol_class=CoinSwapCarol,
                 fail_carol_state=None):
        """Set up the server: wallet, per-session tracking and fee policy."""
        self.testing_mode = testing_mode
        self.wallet = wallet
        # Class used to instantiate per-session Carols (overridable in tests).
        self.carol_class = carol_class
        # Optional state at which a test Carol should deliberately fail.
        self.fail_carol_state = fail_carol_state
        # Maps sessionid -> running CoinSwapCarol instance.
        self.carols = {}
        self.fee_policy = FeePolicy(cs_single().config)
        self.update_status()
        jsonrpc.JSONRPC.__init__(self)
def render(self, request):
"""In order to respond appropriately to ill formed requests (no content,
or ill-formed content), we return a null response early in this class,
overriding render() from the base class, which unfortunately does not
correctly handle e.g. browser GET requests.
"""
request.content.seek(0, 0)
content = request.content.read()
try:
json.loads(content)
except:
return "Nothing here."
return jsonrpc.JSONRPC.render(self, request)
def refresh_carols(self):
"""Remove CoinSwapCarol instances that are flagged complete from
the running dict."""
to_remove = []
for k, v in self.carols.iteritems():
if v.completed:
to_remove.append(k)
for x in to_remove:
self.carols.pop(x, None)
cslog.info("Removed session: " + str(x) + " from tracking (finished).")
    def update_status(self):
        """Build and return the public status dict advertised to clients.

        Static limits come from config; the busy flag and maximum amount are
        overlaid dynamically from session count and wallet balance.
        """
        #initialise status variables from config; some are updated dynamically
        c = cs_single().config
        source_chain = c.get("SERVER", "source_chain")
        destination_chain = c.get("SERVER", "destination_chain")
        minimum_amount = c.getint("SERVER", "minimum_amount")
        maximum_amount = c.getint("SERVER", "maximum_amount")
        serverlockrange = c.get("SERVER", "server_locktime_range")
        serverlockmin, serverlockmax = [int(x) for x in serverlockrange.split(",")]
        clientlockrange = c.get("SERVER", "client_locktime_range")
        clientlockmin, clientlockmax = [int(x) for x in clientlockrange.split(",")]
        tx01_confirm_range = c.get("SERVER", "tx01_confirm_range")
        tx01_confirm_min, tx01_confirm_max = [int(
            x) for x in tx01_confirm_range.split(",")]
        # NOTE(review): lock0 is read but never used below — confirm before
        # removing.
        lock0 = c.getint("TIMEOUT", "lock_client")
        status = {}
        # Drop finished sessions before checking the concurrency limit.
        self.refresh_carols()
        if len(self.carols.keys()) >= c.getint("SERVER",
                                               "maximum_concurrent_coinswaps"):
            status["busy"] = True
        else:
            status["busy"] = False
        #real-time balance query; we source only from mixdepth 0
        available_funds = self.wallet.get_balance_by_mixdepth(verbose=False)[0]
        #The conservativeness here (switch off if total avail < max
        #is required for privacy (otherwise we leak our wallet balance in
        #this costless query). Note that the wallet can be funded while
        #the server is running.
        if available_funds < maximum_amount:
            status["busy"] = True
            status["maximum_amount"] = -1
        else:
            status["maximum_amount"] = maximum_amount
        status["minimum_amount"] = minimum_amount
        status["source_chain"] = source_chain
        status["destination_chain"] = destination_chain
        status["cscs_version"] = cs_single().CSCS_VERSION
        status["fee_policy"] = self.fee_policy.get_policy()
        status["locktimes"] = {"lock_server": {"min": serverlockmin,
                                               "max": serverlockmax},
                               "lock_client": {"min": clientlockmin,
                                               "max": clientlockmax}}
        status["tx01_confirm_wait"] = {"min": tx01_confirm_min,
                                       "max": tx01_confirm_max}
        status["testnet"] = True if get_network() else False
        return status
    def jsonrpc_status(self):
        """This can be polled at any time.
        The call to get_balance_by_mixdepth does not involve sync,
        so is not resource intensive.
        """
        return self.update_status()
    def set_carol(self, carol, sessionid):
        """Once a CoinSwapCarol object has been initiated, its session id
        has been set, so it can be added to the dict.

        Returns True on registration.
        """
        #should be computationally infeasible; note *we* set this.
        assert sessionid not in self.carols
        self.carols[sessionid] = carol
        return True
def consume_nonce(self, nonce, sessionid):
if sessionid not in self.carols:
return False
return self.carols[sessionid].consume_nonce(nonce)
def validate_sig_nonce(self, carol, paramlist):
noncesig = paramlist[0]
if not "nonce" in noncesig or not "sig" in noncesig:
return (False, "Ill formed nonce/sig")
nonce = noncesig["nonce"]
sig = noncesig["sig"]
if not carol.consume_nonce(nonce):
return (False, "Nonce invalid, probably a repeat")
#paramlist[1] is method name, the remaining are the args
msg_to_verify = prepare_ecdsa_msg(nonce, paramlist[1], *paramlist[2:])
if not carol.validate_alice_sig(sig, msg_to_verify):
|
#!/usr/bin/python3
"""Download every link on a page whose href matches a search pattern."""
import requests
import bs4
import sys

url = input('Enter URL -> ')
pattern = input('Enter search pattern-> ')
html = requests.get(url)
dir_download = "./download/"
if html.text.find("400 Bad Request") != -1:
    print("Bad Request")
    sys.exit()
# Explicit parser avoids bs4's "no parser was explicitly specified" warning.
soup = bs4.BeautifulSoup(html.text, "html.parser")
tags = soup('a')
for tag in tags:
    url_path = tag.get('href')
    text = str(url_path)
    if text.find(pattern) == -1:
        continue
    domain = url.split("http://")[1].split("/")[0]
    urldownload = "http://" + domain + text
    print("Retrieve: {0},{1}".format(tag.contents[0], urldownload))
    file = text.split("/")[-1]
    path_and_file = dir_download + file
    try:
        r = requests.get(urldownload)
        with open(path_and_file, "wb") as f:
            f.write(r.content)
    # Bug fix: the original caught the *builtin* ConnectionError and an
    # undefined name HTTPError (NameError when raised); requests raises its
    # own exception hierarchy, rooted at RequestException.
    except requests.exceptions.RequestException:
        print("Can't download file: {0}".format(file))
    # Removed the trailing f.close(): the with-block already closes the file,
    # and f is unbound when requests.get itself failed.
|
# ~*~ coding: utf-8 | ~*~
from django.conf.urls import *
import virtenviro.registration.views
import django.contrib.auth.views
urlpatterns = [
url(r'^signup/$', virtenviro.registration.views.signup),
url(r'^login/$', django.contrib.auth.views.login, {"template_name": "virtenviro/accounts/login.html"}),
url(r'^logout/$', django.contrib.auth.views.logout_then_login, name='logou | t'),
]
|
ss WebKitSearch(browsertab.AbstractSearch):
"""QtWebKit implementations related to searching on the page."""
    def __init__(self, parent=None):
        super().__init__(parent)
        # No find flags active until a search sets them.
        self._flags = QWebPage.FindFlags(0)
def _call_cb(self, callback, found, text, flags, caller):
"""Call the given callback if it's non-None.
Delays the call via a QTimer so the website is re-rendered in between.
Args:
callback: What to call
found: If the text was found
text: The text searched for
flags: The flags searched with
caller: Name of the caller.
"""
found_text = 'found' if found else "didn't find"
# Removing FindWrapsAroundDocument to get the same logging as with
# QtWebEngine
debug_flags = debug.qflags_key(
QWebPage, flags & ~QWebPage.FindWrapsAroundDocument,
klass=QWebPage.FindFlag)
if debug_flags != '0x0000':
flag_text = 'with flags {}'.format(debug_flags)
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
QTimer.singleShot(0, functools.partial(callback, found))
    def clear(self):
        """Remove the current search mark and all highlights."""
        self.search_displayed = False
        # We first clear the marked text, then the highlights
        self._widget.findText('')
        self._widget.findText('', QWebPage.HighlightAllOccurrences)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
# Clear old search results, this is done automatically on QtWebEngine.
self.clear()
self.text = text
self.search_displayed = True
self._flags = QWebPage.FindWrapsAroundDocument
if self._is_case_sensitive(ignore_case):
self._flags |= QWebPage.FindCaseSensitively
if reverse:
self._flags |= QWebPage.FindBackward
# We actually search *twice* - once to highlight everything, then again
# to get a mark so we can navigate.
found = self._widget.findText(text, self._flags)
self._widget.findText(text,
self._flags | QWebPage.HighlightAllOccurrences)
self._call_cb(result_cb, found, text, self._flags, 'search')
    def next_result(self, *, result_cb=None):
        """Repeat the current search to jump to the next match."""
        self.search_displayed = True
        found = self._widget.findText(self.text, self._flags)
        self._call_cb(result_cb, found, self.text, self._flags, 'next_result')
    def prev_result(self, *, result_cb=None):
        """Jump to the previous match by inverting the search direction."""
        self.search_displayed = True
        # The int() here makes sure we get a copy of the flags.
        flags = QWebPage.FindFlags(int(self._flags))
        if flags & QWebPage.FindBackward:
            flags &= ~QWebPage.FindBackward
        else:
            flags |= QWebPage.FindBackward
        found = self._widget.findText(self.text, flags)
        self._call_cb(result_cb, found, self.text, flags, 'prev_result')
class WebKitCaret(browsertab.AbstractCaret):
"""QtWebKit implementations related to moving the cursor/selection."""
    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_entered(self, mode):
        """Enable caret browsing when caret mode is entered."""
        if mode != usertypes.KeyMode.caret:
            return
        # Keep any existing selection active when entering caret mode.
        self.selection_enabled = self._widget.hasSelection()
        self.selection_toggled.emit(self.selection_enabled)
        settings = self._widget.settings()
        settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True)
        if self._widget.isVisible():
            # Sometimes the caret isn't immediately visible, but unfocusing
            # and refocusing it fixes that.
            self._widget.clearFocus()
            self._widget.setFocus(Qt.OtherFocusReason)
            # Move the caret to the first element in the viewport if there
            # isn't any text which is already selected.
            #
            # Note: We can't use hasSelection() here, as that's always
            # true in caret mode.
            if not self.selection_enabled:
                self._widget.page().currentFrame().evaluateJavaScript(
                    utils.read_file('javascript/position_caret.js'))
    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_left(self, _mode):
        """Disable caret browsing and drop any selection on mode exit."""
        settings = self._widget.settings()
        if settings.testAttribute(QWebSettings.CaretBrowsingEnabled):
            if self.selection_enabled and self._widget.hasSelection():
                # Remove selection if it exists
                self._widget.triggerPageAction(QWebPage.MoveToNextChar)
            settings.setAttribute(QWebSettings.CaretBrowsingEnabled, False)
            self.selection_enabled = False
def move_to_next_line(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToNextLine
else:
act = QWebPage.SelectNextLine
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_prev_line(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToPreviousLine
else:
act = QWebPage.SelectPreviousLine
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_next_char(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToNextChar
else:
act = QWebPage.SelectNextChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_prev_char(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToPreviousChar
else:
act = QWebPage.SelectPreviousChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_end_of_word(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextWord]
if utils.is_windows: # pragma: no cover
act.append(QWebPage.MoveToPreviousChar)
else:
act = [QWebPage.SelectNextWord]
if utils.is_windows: # pragma: no cover
act.append(QWebPage.SelectPreviousChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_next_word(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextWord]
if not utils.is_windows: # pragma: no branch
act.append(QWebPage.MoveToNextChar)
else:
act = [QWebPage.SelectNextWord]
if not utils.is_windows: # pragma: no branch
act.append(QWebPage.SelectNextChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_prev_word(self, count=1):
if not self.selection_enabled:
act = QWebPage.MoveToPreviousWord
else:
act = QWebPage.SelectPreviousWord
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_start_of_line(self):
if not self.selection_enabled:
act = QWebPage.MoveToStartOfLine
else:
act = QWebPage.SelectStartOfLine
self._widget.triggerPageAction(act)
def move_to_end_of_line(self):
if not self.selection_enabled:
act = QWebPage.MoveToEndOfLine
else:
act = QWebPage.SelectEndOfLine
self._widget.triggerPageAction(act)
def move_to_start_of_next_block(self, count=1):
if not self.selection_enabled:
act = [QWebPage.MoveToNextLine,
QWebPage.MoveToStartOfBlock]
else:
act = [QWebPage.SelectNextLine,
QWebPage.SelectStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
|
# A robot is located at the top-left corner of a m x n grid (marked 'Start' i | n the diagram below).
# The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
# How many possible unique paths are there?
# Above is a 3 x 7 grid. How many possible unique paths are there?
# Note: m and n will be at most 100.
class Solution:
    # @param {integer} m
    # @param {integer} n
    # @return {integer}
    def uniquePaths(self, m, n):
        """Count monotone (right/down only) paths across an m x n grid.

        Mathematically this is C(m+n-2, m-1); computed here with a
        rolling-row dynamic program using O(n) space.
        """
        if m == 0 or n == 0:
            return 0
        row = [1] * n
        for _ in range(1, m):
            for j in range(1, n):
                row[j] += row[j - 1]
        return row[-1]
|
#!/usr/bin/python
# inbuild - Exception - python exception - parent clas
# InvalidAgeException - Child class
class InvalidAgeException(Exception):
    """Raised when an age does not meet the minimum requirement."""

    def __init__(self, age):
        # Fix: call the base initializer so str(exc) and exc.args carry the
        # offending age instead of being empty.
        super(InvalidAgeException, self).__init__(age)
        self.age = age
def validate_age(age):
    """Return a welcome message for ages over 18; otherwise raise
    InvalidAgeException carrying the rejected age."""
    if age <= 18:
        raise InvalidAgeException(age)
    return "welcome to the movie!!!"
if __name__ == '__main__':
    # Python 2 input() evaluates the entry, so a typed number arrives as int.
    age = input("please enter your age:")
    try:
        validate_age(age)
    except InvalidAgeException as e:
        print "Buddy!! Go home and sleep you are still {}".format(e.age)
    else:
        # NOTE(review): on success the age is validated twice and the first
        # result discarded — presumably intentional for this demo; confirm.
        print validate_age(age)
"""
Example ussage of the read_multi_vars function
This was tested against a S7-319 CPU
"""
import ctypes
import struct
import snap7
from snap7.common import check_error
from snap7.snap7types import S7DataItem, S7AreaDB, S7WLByte
# Connect to the PLC (rack 0, slot 2).
client = snap7.client.Client()
client.connect('10.100.5.2', 0, 2)

# (DBNumber, Start, Amount) per item; REAL = 4 bytes, INT = 2 bytes.
# Folding the three near-identical item setups into one loop removes the
# copy/paste duplication of the original.
ITEMS = ((200, 16, 4), (200, 12, 4), (200, 2, 2))

data_items = (S7DataItem * len(ITEMS))()
for di, (db_number, start, amount) in zip(data_items, ITEMS):
    di.Area = ctypes.c_int32(S7AreaDB)
    di.WordLen = ctypes.c_int32(S7WLByte)
    di.Result = ctypes.c_int32(0)
    di.DBNumber = ctypes.c_int32(db_number)
    di.Start = ctypes.c_int32(start)
    di.Amount = ctypes.c_int32(amount)

# create buffers to receive the data
# use the Amount attribute on each item to size the buffer
for di in data_items:
    buffer = ctypes.create_string_buffer(di.Amount)
    # cast the pointer to the buffer to the required type
    di.pData = ctypes.cast(ctypes.pointer(buffer),
                           ctypes.POINTER(ctypes.c_uint8))

result, data_items = client.read_multi_vars(data_items)

for di in data_items:
    check_error(di.Result)

# struct formats: two big-endian REALs and one big-endian INT
fmts = ['>f', '>f', '>h']

# unpack and print the result of each read
for fmt, di in zip(fmts, data_items):
    # Bug fix: the original joined chr() values into a str, which breaks
    # struct.unpack on Python 3 (bytes required). string_at copies Amount
    # raw bytes straight from the ctypes pointer and works on 2 and 3.
    raw = ctypes.string_at(di.pData, di.Amount)
    print(struct.unpack(fmt, raw)[0])

client.disconnect()
client.destroy()
from django.contrib import admin
from .models import Gallery
class GalleryAdmin(admin.ModelAdmin):
    """Admin list view for Gallery items with ordering/visibility columns."""
    list_display = ('title', 'gallery_image', 'alt_text', 'display_order', 'visibility')
    search_fields = ['title', 'alt_text']

admin.site.register(Gallery, GalleryAdmin)
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writin | g, software
d | istributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.lib.PSQL import PSQL
from mpp.lib.gpdbverify import GpdbVerify
from mpp.lib.config import GPDBConfig
from mpp.models import MPPTestCase
class DbStateClass(MPPTestCase):
    """Cluster validation helpers: segment sync state, catalog and mirror
    integrity checks."""

    def __init__(self, methodName, config=None):
        # Fall back to a freshly-loaded GPDB configuration when none is given.
        self.config = config if config is not None else GPDBConfig()
        self.gpverify = GpdbVerify(config=self.config)
        super(DbStateClass, self).__init__(methodName)

    def check_system(self):
        '''
        @summary: Check whether the system is up and sync. Exit out if not
        '''
        count_all = PSQL.run_sql_command(
            "select count(*) from gp_segment_configuration where content<> -1 ;",
            flags='-q -t', dbname='postgres')
        count_up_and_sync = PSQL.run_sql_command(
            "select count(*) from gp_segment_configuration where content<> -1 and mode = 's' and status = 'u';",
            flags='-q -t', dbname='postgres')
        # Every segment must be both up ('u') and synchronised ('s').
        if count_all.strip() != count_up_and_sync.strip():
            raise Exception('The cluster is not in up/sync ............')
        tinctest.logger.info("\n Starting New Test: System is up and in sync .........")

    def check_catalog(self, dbname=None, alldb=True, online=False, testname=None, outputFile=None, host=None, port=None):
        '''1. Run gpcheckcat'''
        (errorCode, hasError, gpcheckcat_output, repairScriptDir) = \
            self.gpverify.gpcheckcat(dbname=dbname, alldb=alldb, online=online,
                                     testname=testname, outputFile=outputFile,
                                     host=host, port=port)
        if errorCode != 0:
            raise Exception('GpCheckcat failed with errcode %s ' % (errorCode))

    def check_mirrorintegrity(self, master=False):
        '''Runs checkmirrorintegrity(default), check_mastermirrorintegrity(when master=True) '''
        (checkmirror, fix_outfile) = self.gpverify.gpcheckmirrorseg(master=master)
        if not checkmirror:
            self.fail('Checkmirrorseg failed. Fix file location : %s' % fix_outfile)
        tinctest.logger.info('Successfully completed integrity check')

    def run_validation(self):
        '''
        1. gpcheckcat
        2. checkmirrorintegrity
        3. check_mastermirrorintegrity
        '''
        self.check_catalog()
        self.check_mirrorintegrity()
        # Master/standby mirror check only applies when a standby exists.
        if self.config.has_master_mirror():
            self.check_mirrorintegrity(master=True)
|
# -*- | coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Make the free-text description fields optional in forms (blank=True)
    # with an empty-string default on both Category and Expense.

    dependencies = [
        ('app', '0004_add_default_ordering_of_categories_by_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='description',
            field=models.TextField(default='', blank=True),
        ),
        migrations.AlterField(
            model_name='expense',
            name='description',
            field=models.TextField(default='', blank=True),
        ),
    ]
|
#!/usr/bin/env python
"""
setup.py file for augeas
"""
import os
prefix = os.e | nviron.get("prefix", "/usr")
from distutils.core import setup
setup (name = 'python-augeas',
version = '0.3.0',
author = "Harald Hoyer",
author_email = "augeas-devel@redhat.com",
description = """Pytho | n bindings for Augeas""",
py_modules = [ "augeas" ],
url = "http://augeas.net/",
)
|
import os
import sys
import lit.formats
import lit.llvm
# Configu | ration file for the 'lit' test runner.
lit.llvm.initialize(lit_config, config)
config.name = "RISC-V tests"
config.test_format = lit.formats.ShTest(True)
config.suffixes = [".run"]
config.environment["BUILD_RISCV_DIR"] = os.getenv("BUILD_RISCV_DIR")
config.environment["TES | T_CMD"] = (
"%s -cpu rv64,x-v=true,x-k=true,vlen=256,elen=64,vext_spec=v1.0"
" -L %s/sysroot " %
(os.getenv("QEMU_RV64_BIN"), os.getenv("RISCV_TOOLCHAIN_ROOT")))
config.environment["TEST_MODULE_CMD"] = (
"%s %s/iree/tools/iree-run-module --driver=dylib" %
(config.environment["TEST_CMD"], os.getenv("BUILD_RISCV_DIR")))
config.test_exec_root = os.getenv("BUILD_RISCV_DIR") + \
"/tests"
|
ometry.fromMultiPolygonXY([
[[QgsPointXY(1, 1),
QgsPointXY(2, 2),
QgsPointXY(1, 2),
QgsPointXY(1, 1)]],
[[QgsPointXY(2, 2),
QgsPointXY(3, 3),
QgsPointXY(3, 1),
QgsPointXY(2, 2)]]
])
self.assertEqual(mp.constGet().__repr__(), '<QgsMultiPolygon: MultiPolygon (((1 1, 2 2, 1 2, 1 1)),((2 2, 3 3, 3 1, 2 2)))>')
def testQgsPolygonRepr(self):
p = QgsGeometry.fromPolygonXY(
[[QgsPointXY(0, 0),
QgsPointXY(2, 0),
QgsPointXY(2, 2),
QgsPointXY(0, 2),
QgsPointXY(0, 0)]])
self.assertEqual(p.constGet().__repr__(), '<QgsPolygon: Polygon ((0 0, 2 0, 2 2, 0 2, 0 0))>')
def testQgsRectangleRepr(self):
r = QgsRectangle(1, 2, 3, 4)
self.assertEqual(r.__repr__(), '<QgsRectangle: 1 2, 3 4>')
def testQgsReferencedRectan | gleRepr(self):
r = QgsReferencedRectangle(QgsRectangle(1, 2, 3, 4), QgsCoordinateReferenceSystem('EPSG:4326'))
self.assertEqual(r.__repr__(), '<QgsReferencedRectangle: 1 2, 3 4 (EPSG:4326)>')
def testQgsReferencedGeometryRepr(self):
g = QgsReferencedGeometry(QgsGeometry. | fromPointXY(QgsPointXY(1, 2)), QgsCoordinateReferenceSystem('EPSG:4326'))
self.assertEqual(g.__repr__(), '<QgsReferencedGeometry: Point (1 2) (EPSG:4326)>')
def testQgsCoordinateReferenceSystem(self):
crs = QgsCoordinateReferenceSystem()
self.assertEqual(crs.__repr__(), '<QgsCoordinateReferenceSystem: invalid>')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
self.assertEqual(crs.__repr__(), '<QgsCoordinateReferenceSystem: EPSG:4326>')
crs.setCoordinateEpoch(2021.3)
self.assertEqual(crs.__repr__(), '<QgsCoordinateReferenceSystem: EPSG:4326 @ 2021.3>')
crs = QgsCoordinateReferenceSystem('EPSG:3111')
self.assertEqual(crs.__repr__(), '<QgsCoordinateReferenceSystem: EPSG:3111>')
def testQgsCoordinateTransform(self):
xform = QgsCoordinateTransform()
self.assertEqual(xform.__repr__(), '<QgsCoordinateTransform: NULL to NULL>')
xform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:4326'), QgsCoordinateReferenceSystem(), QgsProject.instance())
self.assertEqual(xform.__repr__(), '<QgsCoordinateTransform: EPSG:4326 to NULL>')
xform = QgsCoordinateTransform(QgsCoordinateReferenceSystem(), QgsCoordinateReferenceSystem('EPSG:4326'), QgsProject.instance())
self.assertEqual(xform.__repr__(), '<QgsCoordinateTransform: NULL to EPSG:4326>')
xform = QgsCoordinateTransform(QgsCoordinateReferenceSystem('EPSG:3111'), QgsCoordinateReferenceSystem('EPSG:4326'), QgsProject.instance())
self.assertEqual(xform.__repr__(), '<QgsCoordinateTransform: EPSG:3111 to EPSG:4326>')
def testQgsVector(self):
v = QgsVector(1, 2)
self.assertEqual(v.__repr__(), '<QgsVector: Vector (1, 2)>')
v = QgsVector3D(1, 2, 3)
self.assertEqual(v.__repr__(), '<QgsVector3D: Vector3D (1, 2, 3)>')
def testQgsExpressionRepr(self):
e = QgsExpression('my expression')
self.assertEqual(e.__repr__(), "<QgsExpression: 'my expression'>")
def testQgsFieldRepr(self):
f = QgsField('field_name', QVariant.Double, 'double')
self.assertEqual(f.__repr__(), "<QgsField: field_name (double)>")
def testQgsErrorRepr(self):
e = QgsError('you done wrong son', 'dad')
self.assertEqual(e.__repr__(), "<QgsError: dad you done wrong son>")
def testQgsMimeDataUri(self):
d = QgsMimeDataUtils.Uri()
d.uri = 'my_uri'
d.providerKey = 'my_provider'
self.assertEqual(d.__repr__(), "<QgsMimeDataUtils::Uri (my_provider): my_uri>")
def testQgsMapLayerRepr(self):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'QGIS搖滾', 'memory')
self.assertEqual(vl.__repr__(), "<QgsVectorLayer: 'QGIS搖滾' (memory)>")
rl = QgsRasterLayer('', 'QGIS搖滾', 'gdal')
self.assertEqual(rl.__repr__(), "<QgsRasterLayer: 'QGIS搖滾' (gdal)>")
ml = QgsMeshLayer('', 'QGIS搖滾', 'mdal')
self.assertEqual(ml.__repr__(), "<QgsMeshLayer: 'QGIS搖滾' (Invalid)>")
al = QgsAnnotationLayer('QGIS搖滾', QgsAnnotationLayer.LayerOptions(QgsProject.instance().transformContext()))
self.assertEqual(al.__repr__(), "<QgsAnnotationLayer: 'QGIS搖滾'>")
pcl = QgsPointCloudLayer('', 'QGIS搖滾', 'pc')
self.assertEqual(pcl.__repr__(), "<QgsPointCloudLayer: 'QGIS搖滾' (Invalid)>")
vtl = QgsVectorTileLayer('', 'QGIS搖滾')
self.assertEqual(vtl.__repr__(), "<QgsVectorTileLayer: 'QGIS搖滾'>")
def testQgsProjectRepr(self):
p = QgsProject()
self.assertEqual(p.__repr__(), "<QgsProject: ''>")
p.setFileName('/home/test/my_project.qgs')
self.assertEqual(p.__repr__(), "<QgsProject: '/home/test/my_project.qgs'>")
self.assertEqual(QgsProject.instance().__repr__(), "<QgsProject: '' (singleton instance)>")
QgsProject.instance().setFileName('/home/test/my_project.qgs')
self.assertEqual(QgsProject.instance().__repr__(), "<QgsProject: '/home/test/my_project.qgs' (singleton instance)>")
def testQgsBookmark(self):
b = QgsBookmark()
self.assertEqual(b.__repr__(), "<QgsBookmark: '' (0 0, 0 0 - )>")
b.setName('test bookmark')
self.assertEqual(b.__repr__(), "<QgsBookmark: 'test bookmark' (0 0, 0 0 - )>")
b.setExtent(QgsReferencedRectangle(QgsRectangle(1, 2, 3, 4), QgsCoordinateReferenceSystem('EPSG:3111')))
self.assertEqual(b.__repr__(), "<QgsBookmark: 'test bookmark' (1 2, 3 4 - EPSG:3111)>")
def testQgsLayoutPoint(self):
b = QgsLayoutPoint(1, 2, QgsUnitTypes.LayoutInches)
self.assertEqual(b.__repr__(), "<QgsLayoutPoint: 1, 2 in >")
def testQgsLayoutMeasurement(self):
b = QgsLayoutMeasurement(3, QgsUnitTypes.LayoutPoints)
self.assertEqual(b.__repr__(), "<QgsLayoutMeasurement: 3 pt >")
def testQgsLayoutSize(self):
b = QgsLayoutSize(10, 20, QgsUnitTypes.LayoutInches)
self.assertEqual(b.__repr__(), "<QgsLayoutSize: 10 x 20 in >")
def testQgsConditionalStyle(self):
b = QgsConditionalStyle('@value > 20')
self.assertEqual(b.__repr__(), "<QgsConditionalStyle: @value > 20>")
b.setName('test name')
self.assertEqual(b.__repr__(), "<QgsConditionalStyle: 'test name' (@value > 20)>")
def testQgsTableCell(self):
b = QgsTableCell('test')
self.assertEqual(b.__repr__(), "<QgsTableCell: test>")
b.setContent(5)
self.assertEqual(b.__repr__(), "<QgsTableCell: 5>")
def testQgsProperty(self):
p = QgsProperty.fromValue(5)
self.assertEqual(p.__repr__(), '<QgsProperty: static (5)>')
p = QgsProperty.fromField('my_field')
self.assertEqual(p.__repr__(), '<QgsProperty: field (my_field)>')
p = QgsProperty.fromExpression('5*5 || \'a\'')
self.assertEqual(p.__repr__(), '<QgsProperty: expression (5*5 || \'a\')>')
p = QgsProperty.fromValue(5, False)
self.assertEqual(p.__repr__(), '<QgsProperty: INACTIVE static (5)>')
p = QgsProperty.fromField('my_field', False)
self.assertEqual(p.__repr__(), '<QgsProperty: INACTIVE field (my_field)>')
p = QgsProperty.fromExpression('5*5 || \'a\'', False)
self.assertEqual(p.__repr__(), '<QgsProperty: INACTIVE expression (5*5 || \'a\')>')
p = QgsProperty()
self.assertEqual(p.__repr__(), '<QgsProperty: invalid>')
def testQgsVertexId(self):
v = QgsVertexId()
self.assertEqual(v.__repr__(), '<QgsVertexId: -1,-1,-1>')
v = QgsVertexId(1, 2, 3)
self.assertEqual(v.__repr__(), '<QgsVertexId: 1,2,3>')
v = QgsVertexId(1, 2, 3, _type=QgsVertexId.CurveVertex)
self.assertEqual(v.__repr__(), '<QgsVertexId: 1,2,3 CurveVertex>')
def testProviderMetadata(self):
self.assertEqual(QgsProvi |
#!/usr/bin/env python3
import locale
import os
import sys

try:
    # Suppress warnings from GI by pinning the expected API versions
    # before the repository modules are imported.
    import gi
    gi.require_version('Gtk', '3.0')
    gi.require_version('Poppler', '0.18')
    gi.require_version('PangoCairo', '1.0')
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # no longer swallowed. gi may be missing entirely, or a pinned
    # version may be unavailable; both cases are handled below.
    pass
try:
    from gi.repository import GLib
    from gi.repository import Gtk
    g_gtk_available = True
except Exception:
    # GTK could not be loaded; checks depending on it are skipped later.
    g_gtk_available = False
"""
Some modules/libraries req | uired by Paperwork cannot be installed with pip or
easy_install. So we will just help the user detecting what is missing and what
must be installed
"""
# Locale code -> spell-check (aspell) and OCR (tesseract) language codes.
# The None key exists because locale.getdefaultlocale() can return None,
# which get_language() then looks up directly.
LANGUAGES = {
    None: {
        'aspell': 'en',
        'tesseract': 'eng',
    },
    'fr': {
        'aspell': 'fr',
        'tesseract': 'fra',
    },
    'de': {
        'aspell': 'de',
        'tesseract': 'deu',
    },
    'en': {
        'aspell': 'en',
        'tesseract': 'eng',
    },
}
# Placeholders returned when the locale cannot be mapped at all.
DEFAULT_LANG = {
    'aspell': '<your language>',
    'tesseract': '<your language>',
}
# Python modules that cannot be installed with pip/easy_install:
# (display name, import path, {distribution: package name}).
MODULES = [
    (
        'Gtk', 'gi.repository.Gtk',
        {
            'debian': 'gir1.2-gtk-3.0',
            'fedora': 'gtk3',
            'gentoo': 'x11-libs/gtk+',
            'linuxmint': 'gir1.2-gtk-3.0',
            'ubuntu': 'gir1.2-gtk-3.0',
            'suse': 'python-gtk',
        },
    ),
]
# Non-module files to look for on disk:
# (display name, candidate file paths, {distribution: package name}).
DATA_FILES = [
    (
        "Gnome symbolic icons"
        " (/usr/share/icons/gnome/(...)/go-previous-symbolic.svg",
        [
            "/usr/share/icons/gnome/scalable/actions/go-previous-symbolic.svg",
            "/usr/local/share/icons/gnome/scalable/"
            "actions/go-previous-symbolic.svg",
        ],
        {
            'debian': 'gnome-icon-theme-symbolic',
            'ubuntu': 'gnome-icon-theme-symbolic',
            'fedora': 'gnome-icon-theme-symbolic',
        }
    ),
]
def get_language():
    """Return the aspell/tesseract language codes for the current locale.

    Falls back to the None entry of LANGUAGES when the locale is unset,
    and to DEFAULT_LANG placeholders for unknown locales.
    """
    code = locale.getdefaultlocale()[0]
    code = code[:2] if code else code
    if code in LANGUAGES:
        return LANGUAGES[code]
    print(
        "[WARNING] Unable to figure out the exact language package to install"
    )
    return DEFAULT_LANG
def find_missing_modules():
    """
    look for dependency that setuptools cannot check or that are too painful to
    install with setuptools
    """
    def _importable(module_name):
        # True when the module can be imported at all.
        try:
            __import__(module_name)
            return True
        except ImportError:
            return False

    return [entry for entry in MODULES if not _importable(entry[1])]
def find_missing_ocr(lang):
    """
    OCR tools are a little bit more tricky
    """
    missing = []
    try:
        from pyocr import pyocr
        ocr_tools = pyocr.get_available_tools()
    except ImportError:
        print (
            "[WARNING] Couldn't import Pyocr. Will assume OCR tool is not"
            " installed yet"
        )
        ocr_tools = []

    # No tool at all: report Tesseract itself as missing.
    if not ocr_tools:
        langs = []
        missing.append(
            (
                'Tesseract', '(none)',
                {
                    'debian': 'tesseract-ocr',
                    'fedora': 'tesseract',
                    'gentoo': 'app-text/tesseract',
                    'linuxmint': 'tesseract-ocr',
                    'ubuntu': 'tesseract-ocr',
                },
            )
        )
    else:
        langs = ocr_tools[0].get_available_languages()

    # Tool present but the requested language pack is not.
    if not langs or lang['tesseract'] not in langs:
        missing.append(
            (
                'Tesseract language data', '(none)',
                {
                    'debian': ('tesseract-ocr-%s' % lang['tesseract']),
                    'fedora': ('tesseract-langpack-%s' % lang['tesseract']),
                    'linuxmint': ('tesseract-ocr-%s' % lang['tesseract']),
                    'ubuntu': ('tesseract-ocr-%s' % lang['tesseract']),
                },
            )
        )
    return missing
def find_missing_dict(lang):
    """Return the missing spell-checker dictionary package, if any.

    :param lang: mapping with an 'aspell' key naming the language code
    :returns: list of (name, version, {distribution: package}) tuples
    """
    if os.name == "nt":
        # No aspell packages to check for on Windows.
        return []
    import enchant
    missing = []
    try:
        enchant.request_dict(lang['aspell'])
    except Exception:
        # Narrowed from a bare `except:`: any failure to obtain the
        # dictionary still counts as "not installed", but
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        missing.append(
            (
                'Dictionary', '(none)',
                {
                    'debian': ('aspell-%s' % lang['aspell']),
                    'fedora': ('aspell-%s' % lang['aspell']),
                    'gentoo': ('aspell-%s' % lang['aspell']),
                    'linuxmint': ('aspell-%s' % lang['aspell']),
                    'ubuntu': ('aspell-%s' % lang['aspell']),
                }
            )
        )
    return missing
def _check_cairo():
    """Probe whether cairo drawing via GTK actually works.

    Shows a small DrawingArea and runs the GTK main loop; if the "draw"
    handler fires, python-gi-cairo is functional. A 2s timeout aborts the
    loop (and exits the process if even quitting fails).
    """
    from gi.repository import Gtk
    class CheckCairo(object):
        def __init__(self):
            # Flipped to True by on_draw() when a draw event is delivered.
            self.test_successful = False
        def on_draw(self, widget, cairo_ctx):
            # Receiving the draw callback proves cairo integration works.
            self.test_successful = True
            Gtk.main_quit()
            return False
        def quit(self):
            # Timeout fallback: stop the main loop; if that itself fails,
            # assume the cairo bindings are broken and bail out hard.
            try:
                Gtk.main_quit()
            except Exception as exc:
                print("FAILED TO STOP GTK !")
                print("ASSUMING python-gi-cairo is not installed")
                print("Exception was: {}".format(exc))
                sys.exit(1)
    check = CheckCairo()
    try:
        from gi.repository import GLib
        window = Gtk.Window()
        da = Gtk.DrawingArea()
        da.set_size_request(200, 200)
        da.connect("draw", check.on_draw)
        window.add(da)
        da.queue_draw()
        window.show_all()
        # Abort after 2 seconds in case no draw event ever arrives.
        GLib.timeout_add(2000, check.quit)
        Gtk.main()
        window.set_visible(False)
        # Drain any remaining queued GTK events before returning.
        while Gtk.events_pending():
            Gtk.main_iteration()
    except Exception:
        # Any GTK failure simply leaves test_successful False.
        pass
    return check.test_successful
def check_cairo():
    """Return the python-gi-cairo requirement when cairo drawing fails."""
    # Only attempt the draw test when GTK itself could be imported.
    works = g_gtk_available and _check_cairo()
    if works:
        return []
    return [
        (
            'python-gi-cairo', '(none)',
            {
                'debian': 'python3-gi-cairo',
                'linuxmint': 'python3-gi-cairo',
                'ubuntu': 'python3-gi-cairo',
            },
        )
    ]
def check_sane():
    """Return the libsane requirement when SANE cannot be initialised.

    :returns: list of (name, version, {distribution: package}) tuples
    """
    import pyinsane2
    missing = []
    try:
        pyinsane2.init()
        pyinsane2.exit()
    except Exception:
        # Narrowed from a bare `except:`: any init failure still means
        # libsane is unusable, but KeyboardInterrupt/SystemExit are no
        # longer swallowed.
        missing.append(
            (
                'libsane', '(none)',
                {
                    'debian': 'libsane',
                    'fedora': 'sane-backends',
                    'linuxmint': 'libsane',
                    'ubuntu': 'libsane',
                },
            )
        )
    return missing
def find_missing_data_files():
    """Return the DATA_FILES entries for which no candidate path exists."""
    missings = []
    for (user_name, file_paths, packages) in DATA_FILES:
        # The file counts as present if any of its candidate paths exists.
        if not any(os.path.exists(path) for path in file_paths):
            missings.append((user_name, "(none)", packages))
    return missings
def find_missing_dependencies():
    """Aggregate every missing dependency found by the individual checks.

    Each entry is (common_name, python_name, {"distrib": "package"}).
    """
    lang = get_language()
    checks = [
        find_missing_modules(),
        find_missing_ocr(lang),
        find_missing_dict(lang),
        find_missing_data_files(),
        check_cairo(),
        check_sane(),
    ]
    missing = []
    for chunk in checks:
        missing += chunk
    return missing
|
# Attempt to detect phase jumps by taking the derivatives in time and
# frequency direction. 0.7 is an emperical value.
abs_weighted_DP = np.abs(weight * DP)
_x = abs_weighted_DP.max() # NOQA
test_field = ne.evaluate("weight * DP / _x")
criterion_1 = np.sum([np.abs(np.diff(test_field, axis=0)) > 0.7])
criterion_2 = np.sum([np.abs(np.diff(test_field, axis=1)) > 0.7])
criterion = np.sum([criterion_1, criterion_2])
# Compute the phase misfit
dnu = nu[1] - nu[0]
i = ne.evaluate("sum(weight ** 2 * DP ** 2)")
# inserted by Nienke Blom, 22-11-2016
weighted_DP = ne.evaluate("weight * DP")
phasediff_integral = float(ne.evaluate("sum(weighted_DP * dnu * dt_new)"))
mean_delay = np.mean(weighted_DP)
wDP = weighted_DP.flatten()
wDP_thresh = wDP[abs(wDP) > 0.1 * max(wDP, key=lambda x: abs(x))]
median_delay = np.median(wDP_thresh)
max_delay = max(wDP, key=lambda x: abs(x))
phase_misfit = np.sqrt(i * dt_new * dnu)
# Sanity check. Should not occur.
if np.isnan(phase_misfit):
msg = "The phase misfit is NaN."
raise LASIFAdjointSourceCalculationError(msg)
# The misfit can still be computed, even if not adjoint source is
# available.
if criterion > max_criterion:
warning = ("Possible phase jump detected. Misfit included. No "
"adjoint source computed. Criterion: %.1f - Max allowed "
"criterion: %.1f" % (criterion, max_criterion))
warnings.warn(warning)
messages.append(warning)
ret_dict = {
"adjoint_source": None,
"misfit_value": phase_misfit,
"details": {"messages": messages,
#"weighted_DP": weighted_DP,
#"weight": weight,
#"DP": DP,
"mean_delay": mean_delay, # added NAB 30-8-2017
"phasediff_integral": phasediff_integral, # added NAB 22-11-2016, edited 30-8-2017
"median_delay": median_delay, # added NAB 22-11-2016, edited 30-8-2017
"max_delay": max_delay} # added NAB 31-8-2017
}
return ret_dict
# Make kernel for the inverse tf transform
idp = ne.evaluate(
"weight ** 2 * DP * tf_synth / (m + abs(tf_synth) ** 2)")
# Invert tf transform and make adjoint source
ad_src, it, I = time_frequency.itfa(tau, idp, width)
# Interpolate both signals to the new time axis
ad_src = lanczos_interpolation(
# Pad with a couple of zeros in case some where lost in all
# these resampling operations. The first sample should not
# change the time.
data=np.concatenate([ad_src.imag, np.zeros(100)]),
old_start=tau[0],
old_dt=tau[1] - tau[0],
new_start=original_time[0],
new_dt=original_time[1] - original_time[0],
new_npts=len(original_time), a=8, window="blackmann")
# Divide by the misfit and change sign.
ad_src /= (phase_misfit + eps)
ad_src = -1.0 * np.diff(ad_src) / (t[1] - t[0])
# Taper at both ends. Exploit ObsPy to not have to deal with all the
# nasty things.
ad_src = \
obspy.Trace(ad_src).taper(max_percentage=0.05, type="hann").data
# Reverse time and add a leading zero so the adjoint source has the
# same length as the input time series.
ad_src = ad_src[::-1]
ad_src = np.concatenate([[0.0], ad_src])
# Plot if requested. ------------------------------------------------------
if plot:
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
from lasif.colors import get_colormap
if isinstance(plot, mpl.figure.Figure):
fig = plot
else:
fig = plt.gcf()
# Manually set-up the axes for full control.
l, b, w, h = 0.1, 0.05, 0.80, 0.22
rect = l, b + 3 * h, w, h
waveforms_axis = fig.add_axes(rect)
rect = l, b + h, w, 2 * h
tf_axis = fig.add_axes(rect)
rect = l, b, w, h
adj_src_axis = fig.add_axes(rect)
rect = l + w + 0.02, b, 1.0 - (l + w + 0.02) - 0.05, 4 * h
cm_axis = fig.add_axes(rect)
# Plot the weighted phase difference.
weighted_phase_difference = (DP * weight).transpose()
mappable = tf_axis.pcolormesh(
tau, nu, weighted_phase_difference, vmin=-1.0, vmax=1.0,
cmap=get_colormap("tomo_full_scale_linear_lightness_r"),
shading="gouraud", zorder=-10)
tf_axis.grid(True)
tf_axis.grid(True, which='minor', axis='both', linestyle='-',
color='k')
cm = fig.colorbar(mappable, cax=cm_axis)
cm.set_label("Phase difference in radian", fontsize="large")
# Various texts on the time frequency domain plot.
text = "Misfit: %.4f" % phase_misfit
tf_axis.text(x=0.99, y=0.02, s=text, transform=tf_axis.transAxes,
fontsize="large", color="#C25734", fontweight=900,
verticalalignment="bottom",
horizontalalignment="right")
txt = "Weighted Phase Difference - red is a phase advance of the " \
"synthetics"
tf_axis.text(x=0.99, y=0.95, s=txt,
fontsize="large", color="0.1",
transform=tf_axis.transAxes,
verticalalignment="top",
horizontalalignment="right")
if messages:
message = "\n".join(messages)
tf_axis.text(x=0.99, y=0.98, s=message,
transform=tf_axis.transAxes,
bbox=dict(facecolor='red', alpha=0.8),
verticalalignment="top",
horizontalalignment="right")
# Adjoint source.
adj_src_axis.plot(original_time, ad_src[::-1], color="0.1", lw=2,
label="Adjoint source (non-time-reversed)")
adj_src_axis.legend()
# Waveforms.
waveforms_axis.plot(original_time, original_data, color="0.1", lw=2,
label="Observed")
waveforms_axis.plot(original_time, original_synthetic,
color="#C11E11", lw=2, label="Synthetic")
waveforms_axis.legend()
# Set limits for all axes.
tf_axis.set_ylim(0, 2.0 / min_period)
tf_axis.set_xlim(0, tau[-1])
adj_src_axis.set_xlim(0, tau[-1])
waveforms_axis.set_xlim(0, tau[-1])
waveforms_axis.set_ylabel("Velocity [m/s]", fontsize="large")
tf_axis.set_ylabel("Period [s]", fontsize="large")
adj_src_axis.set_xlabel("Seconds since event", fontsize="large")
# Hack to keep ticklines but remove the ticks - there is probably a
# better way to do this.
waveforms_axis.set_xticklabels([
"" for _i in waveforms_axis.get_xticks()])
tf_axis.set_xticklabels(["" for _i in tf_axis.get_xticks()])
_l = tf_axis.get_ylim()
_r = _l[1] - _l[0]
_t = tf_axis.get_yticks()
_t = _t[(_l[0] + 0.1 * _r < _t) & (_t < _l[1] - 0.1 * _r)]
tf_axis.set_yticks(_t)
tf_axis.set_yticklabels(["%.1fs" % (1.0 / _i) for _i in _t])
waveforms_axis.get_yaxis().set_label_coords(-0.08, 0.5)
tf_axis.get_yaxis().set_label_coords(-0.08, 0.5)
fig.suptitle("Time Frequency Phase Misfit and Adjoint Source",
fontsize="xx-large")
ret_dict = {
"adjoint_source": ad_src,
"misfit_value": phase_misfit,
"details": {"messages": messages,
#"weighted_DP": weighted_DP,
#"weight": weight,
#"DP": DP,
"mean_de | lay": mean_delay, # added NAB 30-8-2017
"phasedi | ff_integral": phasediff_integral, # added NAB 22-11-2016, edited 30-8-2017
"median_delay": median_delay, # added NAB 22-11-2016, edited 30-8-2017
"max_delay": max_delay} # added NAB 31-8-2017
}
# print |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Menu manager.
A dictionary of menu instance() functions (see MenuFactory)
"""
class MenuManager:
    """Registry mapping menu names to menu factory callables.

    A dictionary of menu instance() functions (see MenuFactory).
    """

    # Shared registry of menuName -> factory callable.
    menus = { }

    @staticmethod
    def register(menuName, menuFactory):
        """Register *menuFactory* under *menuName*."""
        MenuManager.menus[menuName] = menuFactory

    @staticmethod
    def get(menuName):
        """Instantiate and return the menu registered as *menuName*.

        :raises KeyError: if no factory was registered for *menuName*
        """
        # Modernised: @staticmethod decorator replaces the legacy
        # `name = staticmethod(name)` rebinding idiom.
        return MenuManager.menus[menuName]()
|
# Copyright (c) - 2013 Mitchell Peabody.
# See COPYRIGHT.txt and LICENSE.txt in the root of this project.
from functools import wraps
import inspect
import logging
from google.appengine.api import xmpp, users
from google.appengine.ext.webapp import xmpp_handlers
from model import User, Variable, Value
from nl import parser
logger = logging.getLogger()
def extract_email(sender):
    """XMPP sender is <address>/<client>. I think. This may be wrong, but that's
    what it appears like in GoogleTalk.

    :param sender: The sender of the message.
    :returns: email of the sender (everything before the first "/")
    """
    return sender.partition("/")[0]
def describe(description, params=""):
    """Decorator factory attaching a ``__command_help__`` string to a command.

    The help string is "/<name> <params>\\n\\t<description>", where <name> is
    the function name with its "_command" suffix removed.
    """
    def _describe(func):
        command = func.__name__.replace("_command", "")
        help_text = "/{0} {1}\n\t{2}".format(command, params, description)
        setattr(func, "__command_help__", help_text)
        return func
    return _describe
class XmppHandler(xmpp_handlers.CommandHandler):
    """XMPP handler storing "<variable> <value>" observations per user."""

    @describe("Disremembers the user.")
    def forget_command(self, message=None):
        """Delete the sending user together with all their variables/values.

        :param message: incoming XMPP message; its sender identifies the user.
        """
        email = extract_email(message.sender)
        try:
            sender = users.User(email)
        except users.UserNotFoundError:
            message.reply("You don't seem to have an account that I can find.")
            # Bug fix: without this return, `sender` below was unbound.
            return
        appuser = User.all().filter("info = ", sender).get()
        if appuser:
            message.reply("Okay, I'm forgetting you, {sender}.".format(sender=sender.email()))
            # Bug fix: the original iterated over an undefined name `user`;
            # the variables belong to `appuser`.
            for variable in appuser.variables:
                for value in variable.values:
                    value.delete()
                variable.delete()
            appuser.delete()
        else:
            message.reply("I don't know you.")

    @describe("Give help for the system.")
    def help_command(self, message=None):
        """Returns a list of all the commands defined on this class.
        """
        members = [x[1] for x in inspect.getmembers(self, inspect.ismethod)
                   if x[0].endswith("_command") and hasattr(x[1], "__command_help__")]
        reply = "\n".join(map(lambda x: getattr(x, "__command_help__"),
                              members))
        message.reply(reply)

    def text_message(self, message):
        """Parse a free-form "<variable> <value>" message and store it."""
        email = extract_email(message.sender)
        try:
            sender = users.User(email)
        except users.UserNotFoundError:
            message.reply("You don't seem to have an account that I can find.")
            # Bug fix: without this return, `sender` below was unbound.
            return
        appuser = User.all().filter("info = ", sender).get()
        if not appuser:
            appuser = User(info=sender)
            appuser.put()
        try:
            datum = parser.parse(message.body)
        except parser.ParseException as e:
            message.reply("I couldn't understand you. (Message was: {msg})".format(msg=e.message))
            # Bug fix: without this return, `datum` below was unbound.
            return
        variable = Variable.all().filter("name = ", datum["variable"]).get()
        if not variable:
            variable = Variable(name=datum["variable"], user=appuser)
            variable.put()
        value = Value(value=datum["value"], variable=variable)
        value.put()
        message.reply("I've logged variable {variable} as being {value}".format(variable=datum["variable"],
                                                                                value=datum["value"]))
|
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <vpodzime@redhat.com>
#
"""
Module facilitating the work with NTP servers and NTP daemon's configuration
"""
import re
import os
import tempfile
import shutil
import ntplib
import socket
from pyanaconda import isys
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.constants import THREAD_SYNC_TIME_BASENAME
NTP_CONFIG_FILE = "/etc/chrony.conf"
#example line:
#server 0.fedora.pool.ntp.org iburst
SRV_LINE_REGEXP = re.compile(r"^\s*server\s*([-a-zA-Z.0-9]+)\s*[a-zA-Z]+\s*$")
class NTPconfigError(Exception):
    """Exception raised for NTP configuration related problems."""
    pass
def ntp_server_working(server):
    """
    Tries to do an NTP request to the $server (timeout may take some time).

    :param server: hostname or IP address of an NTP server
    :type server: string
    :return: True if the given server is reachable and working, False otherwise
    :rtype: bool
    """
    try:
        ntplib.NTPClient().request(server)
    except (ntplib.NTPException, socket.gaierror, socket.error):
        # Covers NTP protocol errors, address-resolution errors and socket
        # errors (including "Network is unreachable").
        return False
    return True
def get_servers_from_config(conf_file_path=NTP_CONFIG_FILE,
                            srv_regexp=SRV_LINE_REGEXP):
    """
    Goes through the chronyd's configuration file looking for lines starting
    with 'server'.

    :return: servers found in the chronyd's configuration
    :rtype: list
    """
    try:
        with open(conf_file_path, "r") as conf_file:
            matches = (srv_regexp.match(line) for line in conf_file)
            return [match.group(1) for match in matches if match]
    except IOError as ioerr:
        msg = "Cannot open config file %s for reading (%s)" % (conf_file_path,
                                                               ioerr.strerror)
        raise NTPconfigError(msg)
def save_servers_to_config(servers, conf_file_path=NTP_CONFIG_FILE,
                           srv_regexp=SRV_LINE_REGEXP, out_file_path=None):
    """
    Replaces the servers defined in the chronyd's configuration file with
    the given ones. If the out_file is not None, then it is used for the
    resulting config.

    :type servers: iterable
    :param out_file_path: path to the file used for the resulting config
    :raise NTPconfigError: if a config file cannot be opened or replaced
    """
    try:
        old_conf_file = open(conf_file_path, "r")
    except IOError as ioerr:
        msg = "Cannot open config file %s for reading (%s)" % (conf_file_path,
                                                               ioerr.strerror)
        raise NTPconfigError(msg)
    temp_path = None
    try:
        if out_file_path:
            new_conf_file = open(out_file_path, "w")
        else:
            (fildes, temp_path) = tempfile.mkstemp()
            new_conf_file = os.fdopen(fildes, "w")
    except IOError as ioerr:
        # Bug fix: close the already-opened source file before bailing out
        # (it previously leaked on this error path).
        old_conf_file.close()
        if out_file_path:
            msg = "Cannot open new config file %s "\
                  "for writing (%s)" % (out_file_path, ioerr.strerror)
        else:
            msg = "Cannot open temporary file %s "\
                  "for writing (%s)" % (temp_path, ioerr.strerror)
        raise NTPconfigError(msg)
    heading = "# These servers were defined in the installation:\n"
    # Bug fix: close both files even if a write fails part-way through.
    try:
        #write info about the origin of the following lines
        new_conf_file.write(heading)
        #write new servers
        for server in servers:
            new_conf_file.write("server " + server + " iburst\n")
        #copy non-server lines from the old config and skip our heading
        for line in old_conf_file:
            if not srv_regexp.match(line) and line != heading:
                new_conf_file.write(line)
    finally:
        old_conf_file.close()
        new_conf_file.close()
    if not out_file_path:
        try:
            stat = os.stat(conf_file_path)
            # Use copy rather then move to get the correct selinux context
            shutil.copy(temp_path, conf_file_path)
            os.chmod(conf_file_path, stat.st_mode)
            os.unlink(temp_path)
        except OSError as oserr:
            msg = "Cannot replace the old config with "\
                  "the new one (%s)" % (oserr.strerror)
            raise NTPconfigError(msg)
def one_time_sync(server, callback=None):
    """
    Synchronize the system time with a given NTP server. Note that this
    function is blocking and will not return until the time gets synced or
    querying server fails (may take some time before timeouting).

    :param server: NTP server
    :param callback: callback function to run after sync or failure
    :type callback: a function taking one boolean argument (success)
    :return: True if the sync was successful, False otherwise
    """
    success = False
    try:
        results = ntplib.NTPClient().request(server)
        isys.set_system_time(int(results.tx_time))
        success = True
    except (ntplib.NTPException, socket.gaierror):
        success = False
    if callback is not None:
        callback(success)
    return success
def one_time_sync_async(server, callback=None):
    """
    Asynchronously synchronize the system time with a given NTP server. This
    function is non-blocking it starts a new thread for synchronization and
    returns. Use callback argument to specify the function called when the
    new thread finishes if needed.

    :param server: NTP server
    :param callback: callback function to run after sync or failure
    :type callback: a function taking one boolean argument (success)
    """
    thread_name = "%s_%s" % (THREAD_SYNC_TIME_BASENAME, server)
    if threadMgr.get(thread_name):
        # A sync with this very server is already running.
        return
    sync_thread = AnacondaThread(name=thread_name, target=one_time_sync,
                                 args=(server, callback))
    threadMgr.add(sync_thread)
|
(50, 85),
(60, 81),
(70, 74),
(80, 64),
(90, 50),
(100, 40),
],
},
"blue": {
"hue_range": [(179, 257)],
"lower_bounds": [
(20, 100),
(30, 86),
(40, 80),
(50, 74),
(60, 60),
(70, 52),
(80, 44),
(90, 39),
(100, 35),
],
},
"purple": {
"hue_range": [(258, 282)],
"lower_bounds": [
(20, 100),
(30, 87),
(40, 79),
(50, 70),
(60, 65),
(70, 59),
(80, 52),
(90, 45),
(100, 42),
],
},
"pink": {
"hue_range": [(283, 334)],
"lower_bounds": [
(20, 100),
(30, 90),
(40, 86),
(60, 84),
(80, 80),
(90, 75),
(100, 73),
],
},
}
class RandomColor:
"""Implement random color generation in a human-friendly way.
This helper class encapsulates the internal implementation and logic of the
:meth:`color() <faker.providers.color.Provider.color>` method.
"""
def __init__(self, generator: Optional["Generator"] = None, seed: Optional[Hashable] = None) -> None:
self.colormap = COLOR_MAP
# Option to specify a seed was not removed so this class
# can still be tested independently w/o generators
if generator:
self.random = generator.random
else:
self.seed = seed if seed else random.randint(0, sys.maxsize)
self.random = random.Random(self.seed)
for color_name, color_attrs in self.colormap.items():
lower_bounds: Sequence[Tuple[int, int]] = color_attrs["lower_bounds"]
s_min, b_max = lower_bounds[0]
s_max, b_min = lower_bounds[-1]
self.colormap[color_name]["saturation_range"] = [(s_min, s_max)]
self.colormap[color_name]["brightness_range"] = [(b_min, b_max)]
def generate(
self,
hue: Optional[HueType] = None,
luminosity: Optional[str] = None,
color_format: str = "hex",
) -> str:
"""Generate a color.
Whenever :meth:`color() <faker.providers.color.Provider.color>` is
called, the arguments used are simply passed into this method, and this
method handles the rest.
"""
# First we pick a hue (H)
h = self.pick_hue(hue)
# Then use H to determine saturation (S)
s = self.pick_saturation(h, hue, luminosity)
# Then use S and H to determine brightness (B).
b = self.pick_brightness(h, s, luminosity)
# Then we return the HSB color in the desired format
return self.set_format((h, s, b), color_format)
def pick_hue(self, hue: Optional[HueType]) -> int:
"""Return a numerical hue value."""
hue_ = self.random_within(self.get_hue_range(hue))
# Instead of storing red as two separate ranges, |
# we group them, using negative numbers
if hue_ < 0:
hue_ += 360
return hue_
def pick_saturation(self, hue: int, hue_name: Optional[HueType], luminosity: Optional[str]) -> int:
"""Return a numerical saturation value."""
if luminosity is None:
luminosity = ""
if luminosity == "random":
return self.random_within((0, 100))
if isinstanc | e(hue_name, str) and hue_name == "monochrome":
return 0
s_min, s_max = self.get_saturation_range(hue)
if luminosity == "bright":
s_min = 55
elif luminosity == "dark":
s_min = s_max - 10
elif luminosity == "light":
s_max = 55
return self.random_within((s_min, s_max))
def pick_brightness(self, h: int, s: int, luminosity: Optional[str]) -> int:
"""Return a numerical brightness value."""
if luminosity is None:
luminosity = ""
b_min = self.get_minimum_brightness(h, s)
b_max = 100
if luminosity == "dark":
b_max = b_min + 20
elif luminosity == "light":
b_min = (b_max + b_min) // 2
elif luminosity == "random":
b_min = 0
b_max = 100
return self.random_within((b_min, b_max))
def set_format(self, hsv: Tuple[int, int, int], color_format: str) -> str:
"""Handle conversion of HSV values into desired format."""
if color_format == "hsv":
color = f"hsv({hsv[0]}, {hsv[1]}, {hsv[2]})"
elif color_format == "hsl":
hsl = self.hsv_to_hsl(hsv)
color = f"hsl({hsl[0]}, {hsl[1]}, {hsl[2]})"
elif color_format == "rgb":
rgb = self.hsv_to_rgb(hsv)
color = f"rgb({rgb[0]}, {rgb[1]}, {rgb[2]})"
else:
rgb = self.hsv_to_rgb(hsv)
color = f"#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}"
return color
def get_minimum_brightness(self, h: int, s: int) -> int:
"""Return the minimum allowed brightness for ``h`` and ``s``."""
lower_bounds: Sequence[Tuple[int, int]] = self.get_color_info(h)["lower_bounds"]
for i in range(len(lower_bounds) - 1):
s1, v1 = lower_bounds[i]
s2, v2 = lower_bounds[i + 1]
if s1 <= s <= s2:
m: float = (v2 - v1) / (s2 - s1)
b: float = v1 - m * s1
return int(m * s + b)
return 0
def get_hue_range(self, color_input: Optional[HueType]) -> Tuple[int, int]:
"""Return the hue range for a given ``color_input``."""
if isinstance(color_input, (int, float)) and 0 <= color_input <= 360:
color_input = int(color_input)
return (color_input, color_input)
elif isinstance(color_input, str) and color_input in self.colormap:
return self.colormap[color_input]["hue_range"][0]
elif color_input is None:
return (0, 360)
if isinstance(color_input, list):
color_input = tuple(color_input)
if (
isinstance(color_input, tuple)
and len(color_input) == 2
and all(isinstance(c, (float, int)) for c in color_input)
):
v1 = int(color_input[0])
v2 = int(color_input[1])
if v2 < v1:
v1, v2 = v2, v1
v1 = max(v1, 0)
v2 = min(v2, 360)
return (v1, v2)
raise TypeError("Hue must be a valid string, numeric type, or a tuple/list of 2 numeric types.")
def get_saturation_range(self, hue: int) -> Tuple[int, int]:
"""Return the saturation range for a given numerical ``hue`` value."""
return self.get_color_info(hue)["saturation_range"][0]
def get_color_info(self, hue: int) -> Dict[str, Sequence[Tuple[int, int]]]:
"""Return the color info for a given numerical ``hue`` value."""
# Maps red colors to make picking hue easier
if 334 <= hue <= 360:
hue -= 360
for color_name, color in self.colormap.items():
hue_range: Tuple[int, int] = color["hue_range"][0]
if hue_range[0] <= hue <= hue_range[1]:
return self.colormap[color_name]
else:
raise ValueError("Value of hue `%s` is invalid." % hue)
def random_within(self, r: Sequence[int]) -> int:
"""Return a random integer within the range ``r``."""
return self.random.randint(int(r[0]), int(r[1]))
@classmethod
def hsv_to_rgb(cls, hsv: Tuple[int, int, int]) -> Tuple[int, int, int]:
"""Convert HSV to RGB.
This method expects ``hsv`` to be a 3-tuple of H, S, and V values, and
it will return a 3-tuple of the equivalent R, G, and B values.
"""
h, s, v = hsv
h = max(h, 1)
h = min(h, 359)
r, g, b = colorsys.hsv_to_rgb(h / 360, s / 100, v / 100)
return (int(r * 255), int(g * 255), int(b * 255))
@classmethod
def hsv_to_hsl(cls, hsv: Tuple[int, int, int]) -> Tu |
from PySide.QtCore import *
from PySide.QtGui import *
import managers
class lineNumberBarClass(QWidget):
    """Widget painting line numbers alongside a text edit widget."""

    def __init__(self, edit, parent=None):
        QWidget.__init__(self, parent)
        self.edit = edit
        self.highest_line = 0  # number of lines counted on the last paint
        self.setMinimumWidth(30)
        # Watch the editor and its viewport so any event triggers a repaint.
        self.edit.installEventFilter(self)
        self.edit.viewport().installEventFilter(self)
        self.bg = None  # current-line highlight color, computed in update()

    def update(self, *args):
        '''
        Updates the number bar to display the current set of numbers.
        Also, adjusts the width of the number bar if necessary.
        '''
        # The + 7 is used to compensate for the current line being bold.
        if managers.context == 'hou':
            fontSize = self.edit.fs
        else:
            fontSize = self.edit.font().pointSize()
        width = ((self.fontMetrics().width(str(self.highest_line)) + 7)) * (fontSize / 13.0)
        if self.width() != width and width > 10:
            self.setFixedWidth(width)
        # Derive the current-line highlight from the palette background:
        # darken light backgrounds, lighten very dark ones.
        bg = self.palette().brush(QPalette.Normal, QPalette.Window).color().toHsv()
        v = bg.value()
        if v > 20:
            v = int(bg.value() * 0.8)
        else:
            v = int(bg.value() * 1.1)
        self.bg = QColor.fromHsv(bg.hue(), bg.saturation(), v)
        QWidget.update(self, *args)

    def paintEvent(self, event):
        """Paint one number per visible text block, highlighting the
        block containing the cursor."""
        contents_y = self.edit.verticalScrollBar().value()
        page_bottom = contents_y + self.edit.viewport().height()
        font_metrics = self.fontMetrics()
        current_block = self.edit.document().findBlock(self.edit.textCursor().position())
        painter = QPainter(self)
        line_count = 0
        # Iterate over all text blocks in the document.
        block = self.edit.document().begin()
        if managers.context == 'hou':
            fontSize = self.edit.fs
            font = QFont('monospace', fontSize * 0.7)
            offset = (font_metrics.ascent() + font_metrics.descent()) / 2
        else:
            fontSize = self.edit.font().pointSize()
            font = painter.font()
            font.setPixelSize(fontSize)
            offset = font_metrics.ascent() + font_metrics.descent()
        color = painter.pen().color()
        painter.setFont(font)
        align = Qt.AlignRight
        while block.isValid():
            line_count += 1
            # The top left position of the block in the document
            position = self.edit.document().documentLayout().blockBoundingRect(block).topLeft()
            # Check if the position of the block is out side of the visible
            # area.
            # NOTE(review): this only stops when a block lands exactly on the
            # page bottom; `>=` may have been intended -- confirm before changing.
            if position.y() == page_bottom:
                break
            rec = QRect(0,
                        round(position.y()) - contents_y,
                        self.width() - 5,
                        fontSize + offset)
            # draw line rect
            if block == current_block:
                painter.setPen(Qt.NoPen)
                painter.setBrush(QBrush(self.bg))
                painter.drawRect(QRect(0,
                                       round(position.y()) - contents_y,
                                       self.width(),
                                       fontSize + (offset / 2)))
                # restore color
                painter.setPen(QPen(color))
            # draw text
            painter.drawText(rec, align, str(line_count))
            # control points
            block = block.next()
        self.highest_line = line_count
        painter.end()
        QWidget.paintEvent(self, event)

    def eventFilter(self, object, event):
        # Update the line numbers for all events on the text edit and the
        # viewport. This is easier than connecting all necessary signals.
        if object in (self.edit, self.edit.viewport()):
            self.update()
            return False
        # Bug fix: the base-class call was missing `self`
        # (QWidget.eventFilter(object, event)), which raised a TypeError
        # for events from any other watched object.
        return QWidget.eventFilter(self, object, event)
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase) | :
name = 'Axe Cop'
language = 'en'
url = 'http://www.axecop.com/'
start_date = '2010-01-02'
rights = 'Ethan Nicolle'
class Crawler(CrawlerBase):
    # How far back the feed allows crawling, in days.
    history_capable_days = 60
    # Publication schedule (Tuesdays) and the site's time zone.
    schedule = 'Tu'
    time_zone = 'US/Pacific'
    # Custom User-Agent -- presumably the site rejects the default one.
    headers = {'User-Agent': 'Mozilla/4.0'}
    def crawl(self, pub_date):
        """Return the strip image published on pub_date, or None."""
        feed = self.parse_feed('http://axecop.com/feed/')
        for entry in feed.for_date(pub_date):
            title = entry.title
            # Pick the uploaded strip image out of the entry summary.
            url = entry.summary.src('img[src*="/wp-content/uploads/"]')
            # Strip the WordPress thumbnail suffix to get the full-size image.
            url = url.replace('-150x150', '')
            # First matching entry wins.
            return CrawlerImage(url, title)
|
#
# LayerImage.py -- Abstraction of an generic layered image.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
import time
import Bunch
import BaseImage
class LayerImage(object):
"""Mixin class for BaseImage subclasses. Adds layers and alpha/rgb
compositing.
"""
def __init__(self):
self._layer = []
self.cnt = 0
self.compose_types = ('alpha', 'rgb')
self.compose = 'alpha'
def _insertLayer(self, idx, image, alpha=None, name=None):
if alpha == None:
alpha = 1.0
if name == None:
name = "layer%d" % (self.cnt)
self.cnt += 1
bnch = Bunch.Bunch(image=image, alpha=alpha, name=name)
self._layer.insert(idx, bnch)
def insertLayer(self, idx, image, alpha=None, name=None,
compose=True):
self._insertLayer(idx, image, alpha=alpha, name=name)
if compose:
self.compose_layers()
    def getLayer(self, idx):
        # Return the layer record (Bunch of image/alpha/name) at index idx.
        return self._layer[idx]
    def numLayers(self):
        # Number of layers currently stacked.
        return len(self._layer)
def getShape(self, entity='image'):
maxdim = -1
for layer in self._layer:
if entity == 'image':
shape = layer[entity].get_shape()
elif entity == 'alpha':
item = layer.alpha
# If alpha is an image, get the array
if isinstance(item, BaseImage.BaseImage):
item = layer.alpha.get_data()
shape = numpy.shape(item)
else:
raise BaseImage.ImageError("entity '%s' not in (image, alpha)" % (
entity))
if len(shape) > maxdim:
maxdim = len(shape)
maxshape = shape
return maxshape
## def alpha_combine(self, src, alpha, dst):
## return (src * alpha) + (dst * (1.0 - alpha))
def mono2color(self, data):
return numpy.dstack((data, data, data))
def alpha_multiply(self, alpha, data, shape=None):
"""(alpha) can be a scalar or an array.
"""
# alpha can be a scalar or an array
if shape == None:
shape = data.shape
if len(data.shape) == 2:
res = alpha * data
# If desired shape is monochrome then return a mono image
# otherwise broadcast to a grey color image.
if len(shape) == 2:
return res
# note: in timing tests, dstack was not as efficient here...
#data = numpy.dstack((res, res, res))
data = numpy.empty(shape)
data[:, :, 0] = res[:, :]
data[:, :, 1] = res[:, :]
data[:, :, 2] = res[:, :]
return data
else:
# note: in timing tests, dstack was not as efficient here...
#res = numpy.dstack((data[:, :, 0] * alpha,
# data[:, :, 1] * alpha,
# data[:, :, 2] * alpha))
res = numpy.empty(shape)
res[:, :, 0] = data[:, :, 0] * alpha
res[:, :, 1] = data[:, :, 1] * alpha
res[:, :, 2] = data[:, :, 2] * alpha
return res
def alpha_compose(self):
start_time = time.time()
shape = self.getShape()
ht, wd = shape[:2]
# alpha can be a scalar or an array, prepare for the appropriate kind
ashape = self.getShape(entity='alpha')
if len(ashape) == 0:
alpha_used = 0.0
else:
alpha_used = numpy.zeros((ht, wd))
# result holds the result of the composition
result = numpy.zeros(shape)
cnt = 0
for layer in self._layer:
alpha = layer.alpha
if isinstance(alpha, BaseImage.BaseImage):
alpha = alpha.get_data()
alpha = numpy.clip((1.0 - alpha_used) * alpha, 0.0, 1.0)
#mina = numpy.min(alpha)
#print "cnt=%d mina=%f" % (cnt, mina)
data = layer.image.get_data()
result += self.alpha_multiply(alpha, data, shape=shape)
alpha_used += layer.alpha
#numpy.clip(alpha_used, 0.0, 1.0)
cnt += 1
self.set_data(result)
end_time = time.time()
print "alpha compose=%.4f sec" % (end_time - start_time)
# def rgb_compose(self):
# slices = []
# start_time = time.time()
# for i in xrange(len(self._layer)):
# layer = self.getLayer(i)
# data = self.alpha_multiply(layer.alpha, layer.image.get_data())
# slices.append(data)
# split_time = time.time()
# result = numpy.dstack(slices)
# end_time = time.time()
# self.set_data(result)
# print "rgb_compose alpha multiply=%.4f sec dstack=%.4f sec sec total=%.4f sec" % (
# split_time - start_time, end_time - split_time,
# end_time - start_time)
def rgb_compose(self):
num = self.numLayers()
layer = self.getLayer(0)
wd, ht = layer.image.get_size()
result = numpy.empty((ht, wd, num))
start_time = time.time()
for i in xrange(len(self._layer)):
layer = self.getLayer(i)
alpha = layer.alpha
if isinstance(alpha, BaseImage.BaseImage):
alpha = alpha.get_data()
data = self.alpha_multiply(alpha, layer.image.get_data())
re | sult[:, :, i] = data[:, :]
end_time = time.time()
self.set_data(result)
print "rgb_compose total=%.4f sec" % (
end_time - start_time)
def rgb_decompose(self, image):
data = image.get_data()
shape = data | .shape
if len(shape) == 2:
self._insertLayer(0, image)
else:
names = ("Red", "Green", "Blue")
alphas = (0.292, 0.594, 0.114)
for i in xrange(shape[2]):
print "count = %d" % i
imgslice = data[:, :, i]
#img = BaseImage.BaseImage(data_np=imgslice, logger=self.logger)
# Create the same type of image as we are decomposing
img = image.__class__(data_np=imgslice, logger=self.logger)
if i < 3:
name = names[i]
alpha = alphas[i]
else:
name = "layer%d" % i
alpha = 0.0
self._insertLayer(i, img, name=name, alpha=alpha)
print "composing layers"
self.compose_layers()
print "rgb decompose done"
def setComposeType(self, ctype):
assert ctype in self.compose_types, \
BaseImage.ImageError("Bad compose type '%s': must be one of %s" % (
ctype, str(self.compose_types)))
self.compose = ctype
self.compose_layers()
def setAlpha(self, lidx, val):
layer = self._layer[lidx]
layer.alpha = val
self.compose_layers()
def setAlphas(self, vals):
for lidx in xrange(len(vals)):
layer = self._layer[lidx]
layer.alpha = vals[lidx]
self.compose_layers()
def compose_layers(self):
if self.compose == 'rgb':
self.rgb_compose()
else:
self.alpha_compose()
#END
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from .sitemap import BlogSitemap
from .views import RobotPageView, HumanPageView, GooglePageView
admin.autodiscover()
sitemaps = {
'blog': BlogSitemap,
}
urlpatterns = patterns('',
url(
regex=r"^robots\.txt$",
view=RobotPageView.as_view(),
name="site_robots",
),
url(
regex=r"^humans\.txt$",
view=HumanPageView.as_view(),
name="site_humans",
),
url(
regex=r"^google25e8 | e23e2bfc7d2c\.html$",
view=GooglePageView.as_view(),
name="google_webmasters",
),
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
url(r'^admin/', include(admin.site.urls)),
url(r'^bro | wserid/', include('django_browserid.urls')),
url(r"^contact/$", include('contact.urls', namespace='contact', app_name='contact')),
url(r'^tinymce/', include('tinymce.urls')),
url(r"^", include('blog.urls', namespace='blog', app_name='blog')),
)
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf import settings
from django.http import HttpResponseRedirect
import l10n_utils
def secure_required(view_func):
    """Decorator makes sure URL is accessed over https.

    When the request arrives over plain http and DEBUG is off, redirect
    to the same URL with the https scheme; otherwise call the view.
    """
    import functools

    # Bug fix: without functools.wraps the decorated view loses its
    # __name__/__doc__/__module__, which breaks introspection and debugging.
    @functools.wraps(view_func)
    def _wrapped_view_func(request, *args, **kwargs):
        if not request.is_secure():
            # Only force https outside local development.
            if not getattr(settings, 'DEBUG', True):
                request_url = request.build_absolute_uri(request.get_full_path())
                secure_url = request_url.replace('http://', 'https://')
                return HttpResponseRedirect(secure_url)
        return view_func(request, *args, **kwargs)
    return _wrapped_view_func
def server_error_view(request, template_name='500.html'):
    """Render the 500 page via l10n_utils so context processors run."""
    response = l10n_utils.render(request, template_name)
    return response
|
from Screen import Screen
from Components.Label import Label
class PVRState(Screen):
    """On-screen display screen showing a PVR state string."""
    def __init__(self, session):
        Screen.__init__(self, session)
        # Label whose text is updated by the owner of this screen.
        self["state"] = Label(text="")
class TimeshiftState(PVRState):
    """Same OSD as PVRState; separate class so skins can style it apart."""
    pass
|
from django.conf import settings
# Authentication backends used by fairepart; override with the
# FAIREPART_BACKENDS Django setting.
BACKENDS = getattr(settings, 'FAIREPART_BACKENDS', (
    'fairepart.backends.facebook.FacebookBackend',
    'fairepart.backends.google.GoogleOAuth2Backend',
))
# Page size for relation list views (default 5).
RELATION_LIST_PAGINATE_BY = getattr(settings, 'FAIREPART_RELATION_LIST_PAGINATE_BY', 5)
# Application name passed to the Google backend (empty by default).
GOOGLE_APP_NAME = getattr(settings, 'FAIREPART_GOOGLE_APP_NAME', '')
|
atent_heat_constant
factor = (self.rho_air * self.Lv * self.De)
delta_e = (self.e_air - self.e_surf)
self.Qe = factor * delta_e * (const / self.p0)
# update_latent_heat_flux()
#-------------------------------------------------------------------
    def update_conduction_heat_flux(self):
        """Update the conduction heat flux, Qc (no-op; set at start)."""
        if (self.DEBUG):
            print 'Calling update_conduction_heat_flux()...'
        #-----------------------------------------------------------------
        # Notes: The conduction heat flux from snow to soil for computing
        #        snowmelt energy, Qm, is close to zero.
        #        However, the conduction heat flux from surface and sub-
        #        surface for computing Qet is given by Fourier's Law,
        #        namely Qc = Ks(Tx - Ts)/x.
        #        All the Q's have units of W/m^2 = J/(m^2 s).
        #-----------------------------------------------------------------
        pass # (initialized at start)
#-------------------------------------------------------------------
    def update_advection_heat_flux(self):
        """Update the advection heat flux, Qa (no-op; set at start)."""
        if (self.DEBUG):
            print 'Calling update_advection_heat_flux()...'
        #------------------------------------------------------
        # Notes: All the Q's have units of W/m^2 = J/(m^2 s).
        #------------------------------------------------------
        pass # (initialized at start)
#-------------------------------------------------------------------
    def update_julian_day(self):
        """Advance the decimal Julian day by dt and refresh TSN_offset
        (clock-hour offset from True Solar Noon, in hours)."""
        if (self.DEBUG):
            print 'Calling update_julian_day()...'
        #----------------------------------
        # Update the *decimal* Julian day
        #----------------------------------
        self.julian_day += (self.dt / self.secs_per_day) # (days)
        #------------------------------------------
        # Compute the offset from True Solar Noon
        # clock_hour is in 24-hour military time
        # but it can have a decimal part.
        #------------------------------------------
        # NOTE(review): np.int16 truncation overflows for julian_day beyond
        # 32767 -- confirm the simulation horizon stays below that.
        dec_part = self.julian_day - np.int16(self.julian_day)
        clock_hour = dec_part * self.hours_per_day
        ## print ' Computing solar_noon...'
        solar_noon = solar.True_Solar_Noon( self.julian_day,
                                            self.lon_deg,
                                            self.GMT_offset )
        ## print ' Computing TSN_offset...'
        self.TSN_offset = (clock_hour - solar_noon) # (hours)
#-------------------------------------------------------------------
def update_net_shortwave_radiation(self):
#---------------------------------------------------------
# Notes: If time is before local sunrise or after local
# sunset then Qn_SW should be zero.
#---------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_shortwave_radiation()...'
#--------------------------------
# Compute Qn_SW for this time
#--------------------------------
Qn_SW = solar.Clear_Sky_Radiation( self.lat_deg,
self.julian_day,
self.W_p,
self.TSN_offset,
self.alpha,
self.beta,
self.albedo,
self.dust_atten )
if (np.rank( self.Qn_SW ) == 0):
self.Qn_SW.fill( Qn_SW ) #### (mutable scalar)
else:
self.Qn_SW(:) = Qn_SW # (W m-2)
# update_net_shortwave_radiation()
#-------------------------------------------------------------------
    def update_em_air(self):
        """Update air emissivity, em_air, via Brutsaert (1975) or
        Satterlund (1979) depending on self.SATTERLUND."""
        if (self.DEBUG):
            print 'Calling update_em_air()...'
        #---------------------------------------------------------
        # NB!  The Brutsaert and Satterlund formulas for air
        #      emissivity as a function of air temperature are in
        #      close agreement; see compare_em_air_methods().
        #      However, we must pay close attention to whether
        #      equations require units of kPa, Pa, or mbar.
        #
        #      100 kPa = 1 bar = 1000 mbars
        #      => 1 kPa = 10 mbars
        #---------------------------------------------------------
        # NB!  Temperatures are assumed to be given with units
        #      of degrees Celsius and are converted to Kelvin
        #      wherever necessary by adding C_to_K = 273.15.
        #
        #      RH = relative humidity (unitless)
        #---------------------------------------------------------
        # NB!  I'm not sure about how F is added at end because
        #      of how the equation is printed in Dingman (2002).
        #      But it reduces to other formulas as it should.
        #---------------------------------------------------------
        T_air_K = self.T_air + self.C_to_K
        if not(self.SATTERLUND):
            #-----------------------------------------------------
            # Brutsaert (1975) method for computing emissivity
            # of the air, em_air.  This formula uses e_air with
            # units of kPa.  (From Dingman (2002, p. 196).)
            # See notes for update_vapor_pressure().
            #-----------------------------------------------------
            e_air_kPa = self.e_air / np.float64(10) # (kPa)
            F = self.canopy_factor
            C = self.cloud_factor
            term1 = (1.0 - F) * 1.72 * (e_air_kPa / T_air_K) ** self.one_seventh
            term2 = (1.0 + (0.22 * C ** 2.0))
            self.em_air = (term1 * term2) + F
        else:
            #--------------------------------------------------------
            # Satterlund (1979) method for computing the emissivity
            # of the air, em_air, that is intended to "correct
            # apparent deficiencies in this formulation at air
            # temperatures below 0 degrees C" (see G. Liston)
            # Liston cites Aase and Idso(1978), Satterlund (1979)
            #--------------------------------------------------------
            e_air_mbar = self.e_air
            eterm = np.exp(-1 * (e_air_mbar)**(T_air_K / 2016) )
            self.em_air = 1.08 * (1.0 - eterm)
        #--------------------------------------------------------------
        # Can't do this yet.  em_air is always initialized scalar now
        # but may change to grid on assignment.  (9/23/14)
        #--------------------------------------------------------------
        # if (np.rank( self.em_air ) == 0):
        #     self.em_air.fill( em_air )   #### (mutable scalar)
        # else:
        #     self.em_air(:) = em_air
#-------------------------------------------------------------------
def update_net_longwave_radiation(self):
#----------------------------------------------------------------
# Notes: Net longwave radiation is computed using the
# Stefan-Boltzman law. All four data types
# should be allowed (scalar, time series, grid or
# grid stack).
#
# Qn_LW = (LW_in - LW_out)
# LW_in = em_air * sigma * (T_air + 273.15)^4
# LW_out = em_surf * sigma * (T_surf + 273.15)^4
#
# Temperatures in (deg_C) must be converted to
# (K). Recall that absolute zero occurs at
# 0 (deg_K) or -273.15 (deg_C).
#
#----------------------------------------------------------------
# First, e_air is computed as:
# e_air = RH * 0.611 * exp[(17.3 * T_air) / (T_air + 237.3) |
# -*- coding: utf-8 -*-
from . | import models
from .hooks import set_def | ault_map_settings
|
ck = None
else:
raise err
try:
self.hash_table_id = self.hash_table_pack['resources'][0]['id']
except (IndexError, TypeError):
self.hash_table_id = None
# shortcuts
self.datastore_search = ckan.action.datastore_search
self.datastore_create = ckan.action.datastore_create
self.datastore_delete = ckan.action.datastore_delete
self.datastore_upsert = ckan.action.datastore_upsert
self.datastore_search = ckan.action.datastore_search
self.resource_show = ckan.action.resource_show
self.resource_create = ckan.action.resource_create
self.package_create = ckan.action.package_create
self.package_update = ckan.action.package_update
self.package_privatize = ckan.action.bulk_update_private
self.revision_show = ckan.action.revision_show
self.organization_list = ckan.action.organization_list_for_user
self.organization_show = ckan.action.organization_show
self.license_list = ckan.action.license_list
self.group_list = ckan.action.group_list
self.user = ckan.action.get_site_user()
def create_table(self, resource_id, fields, **kwargs):
"""Creates a datastore table for an existing filestore resource.
Args:
resource_id (str): The filestore resource id.
fields (List[dict]): fields/columns and their extra metadata.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
force (bool): Create resource even if read-only.
aliases (List[str]): name(s) for read only alias(es) of the
resource.
primary_key (List[str]): field(s) that represent a unique key.
indexes (List[str]): index(es) on table.
Returns:
dict: The newly created data object.
Raises:
ValidationError: If unable to validate user on ckan site.
NotFound: If unable to find resource.
Examples:
>>> CKAN(quiet=True).create_table('rid', fields=[{'id': 'field', \
'type': 'text'}])
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
kwargs.setdefault('force', self.force)
kwargs['resource_id'] = resource_id
kwargs['fields'] = fields
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
if self.verbose:
print('Creating table `%s` in datastore...' % resource_id)
try:
return self.datastore_create(**kwargs)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise
def delete_table(self, resource_id, **kwargs):
"""Deletes a datastore table.
Args:
resource_id (str): The datastore resource id.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
force (bool): Delete resource even if read-only.
filters (dict): Filters to apply before deleting, e.g.,
{"name": "fred"}. If missing delete whole table and all
dependent views.
Returns:
dict: Original filters sent if table was found, `None` otherwise.
Raises:
ValidationError: If unable to validate user on ckan site.
Examples:
>>> CKAN(quiet=True).delete_table('rid')
Can't delete. Table `rid` was not found in datastore.
"""
kwargs.setdefault('force', self.force)
kwargs['resource_id'] = resource_id
init_msg = "Can't delete. Table `%s`" % resource_id
err_msg = '%s was not found in datastore.' % init_msg
read_msg = '%s is read only.' % init_msg
if self.verbose:
print('Deleting table `%s` from datastore...' % resource_id)
try:
result = self.datastore_delete(**kwargs)
except NotFound:
print(err_msg)
| result = Non | e
except ValidationError as err:
if 'read-only' in err.error_dict:
print(read_msg)
print("Set 'force' to True and try again.")
result = None
elif err.error_dict.get('resource_id') == ['Not found: Resource']:
print(err_msg)
result = None
else:
raise err
return result
    def insert_records(self, resource_id, records, **kwargs):
        """Inserts records into a datastore table.

        Args:
            resource_id (str): The datastore resource id.
            records (List[dict]): The records to insert.
            **kwargs: Keyword arguments that are passed to datastore_create.

        Kwargs:
            method (str): Insert method. One of ['update, 'insert', 'upsert']
                (default: 'insert').
            force (bool): Create resource even if read-only.
            start (int): Row number to start from (zero indexed).
            stop (int): Row number to stop at (zero indexed).
            chunksize (int): Number of rows to write at a time.

        Returns:
            int: Number of records inserted.

        Raises:
            NotFound: If unable to find the resource.
        """
        recoded = pr.json_recode(records)
        chunksize = kwargs.pop('chunksize', 0)
        start = kwargs.pop('start', 0)
        stop = kwargs.pop('stop', None)
        kwargs.setdefault('force', self.force)
        kwargs.setdefault('method', 'insert')
        kwargs['resource_id'] = resource_id
        # NOTE(review): count starts at 1 for the 1-based progress message,
        # so the returned value is (records inserted + 1) -- confirm against
        # the documented return value.
        count = 1
        for chunk in ft.chunk(recoded, chunksize, start=start, stop=stop):
            length = len(chunk)
            if self.verbose:
                print(
                    'Adding records %i - %i to resource %s...' % (
                        count, count + length - 1, resource_id))
            kwargs['records'] = chunk
            err_msg = 'Resource `%s` was not found in filestore.' % resource_id
            try:
                self.datastore_upsert(**kwargs)
            except requests.exceptions.ConnectionError as err:
                # NOTE(review): err.message is Python-2-only -- confirm this
                # path is unreachable on Python 3.
                if 'Broken pipe' in err.message[1]:
                    print('Chunksize too large. Try using a smaller chunksize.')
                    return 0
                else:
                    raise err
            except NotFound:
                # Keep exception message consistent with the others
                raise NotFound(err_msg)
            except ValidationError as err:
                if err.error_dict.get('resource_id') == ['Not found: Resource']:
                    raise NotFound(err_msg)
                else:
                    raise err
            count += length
        return count
def get_hash(self, resource_id):
"""Gets the hash of a datastore table.
Args:
resource_id (str): The datastore resource id.
Returns:
str: The datastore resource hash.
Raises:
NotFound: If `hash_table_id` isn't set or not in datastore.
NotAuthorized: If unable to authorize ckan user.
Examples:
>>> CKAN(hash_table='hash_jhb34rtj34t').get_hash('rid')
Traceback (most recent call last):
NotFound: {u'item': u'package', u'message': u'Package \
`hash_jhb34rtj34t` was not found!'}
"""
if not self.hash_table_pack:
message = 'Package `%s` was not found!' % self.hash_table
raise NotFound({'message': message, 'item': 'package'})
if not self.hash_table_id:
message = 'No resources found in package `%s`!' % self.hash_table
raise NotFound({'message': message, 'item': 'resource'})
kwargs = {
'resource_id': self.hash_table_id,
|
#!/usr/bin/env python
###
#
# Copyright (C) 2007 Mola Pahnadayan
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
# Holidays keyed by Iranian (Jalali) calendar year; each entry is a
# [month, day] pair in that calendar.  Presumably the official holidays
# for years 1386-1388 -- TODO confirm against an authoritative source.
holiday = { 1386:[[1,1],[1,2],[1,3],[1,4],[1,12],[1,13],[1,17],
                  [2,14],[2,28],[3,14],[3,15],[3,28],[5,6],[5,20],[6,7],
                  [7,11],[7,21],[8,15],[9,30],[10,8],[10,28],[10,29],
                  [11,22],[12,9],[12,17],[12,18],[12,29]],
            1387:[[1,1],[1,2],[1,3],[1,4],[1,12],[1,13],[3,14],[3,15],[3,18],[4,26],[5,9],[5,27],
                  [7,1],[7,10],[8,4],[9,19],[9,27],[10,17],[10,18],
                  [11,22],[11,28],[12,6],[12,8],[12,25],[12,29]],
            1388:[[1,1],[1,2],[1,3],[1,4],[1,12],[1,13],[3,7],[3,14],[3,15],[4,15],[4,29],[5,16],
                  [6,20],[6,29],[7,22],[9,7],[9,15],[10,5],[10,6],
                  [11,16],[11,22],[11,24],[11,26],[12,13],[12,29]] }
|
import pytest
import klondike
def test_popis_rubem_nahoru():
    """A face-down card renders as the hidden placeholder '[???]'."""
    karta = 13, 'Pi', False
    assert klondike.popis_karty(karta) == '[???]'
def test_popis_srdcova_kralovna():
    """A face-up queen of hearts renders with Q and the heart symbol
    (unicode or ASCII variant)."""
    karta = 12, 'Sr', True
    assert klondike.popis_karty(karta) in ['[Q ♥]', '[Q S]']
def test_otoc_kralovnu():
    """otoc_kartu sets the face-up flag to the requested value."""
    karta = 12, 'Sr', True
    assert klondike.otoc_kartu(karta, True) == (12, 'Sr', True)
    assert klondike.otoc_kartu(karta, False) == (12, 'Sr', False)
def test_otoc_eso():
    """otoc_kartu works the same for a face-down ace."""
    karta = 1, 'Pi', False
    assert klondike.otoc_kartu(karta, True) == (1, 'Pi', True)
    assert klondike.otoc_kartu(karta, False) == (1, 'Pi', False)
# This is a testing trick we have not covered yet:
# several tests generated from one function
@pytest.mark.parametrize('hodnota,znak', [
    (1, 'A'),
    (2, '2'),
    (3, '3'),
    (4, '4'),
    (5, '5'),
    (6, '6'),
    (7, '7'),
    (8, '8'),
    (9, '9'),
    (10, 'X'),
    (11, 'J'),
    (12, 'Q'),
    (13, 'K'),
])
def test_popis_hodnoty(hodnota, znak):
    """Each card value renders as its expected character."""
    karta = hodnota, 'Sr', True
    assert klondike.popis_karty(karta) in ['[' + znak + ' ♥]', '[' + znak + ' S]']
|
# -*- coding: utf-8 -*-
# @author: vuolter
from __future__ import absolute_import, unicode_literals
import os
import re
from future import standard_library
from pyload.utils import convert, purge, web
from pyload.utils.convert import to_str
from pyload.utils.layer.legacy import hashlib
from pyload.utils.time import seconds_to_midnight
standard_library.install_aliases()
# NOTE(review): '.-_' inside the character class is a range ('.' .. '_'),
# which also spans digits and uppercase letters -- confirm intended.
_RE_ALIAS = re.compile(r'[\d.-_]+')
def alias(text):
    """Build an alias by capitalizing the word chunks of the purged name."""
    cleaned = purge.name(text)
    pieces = (chunk.capitalize() for chunk in _RE_ALIAS.split(cleaned) if chunk)
    return ''.join(pieces)
# Recognized textual flags, lowercase.
_BOOLEANMAP = {
    '1': True, 'yes': True, 'true': True, 'on': True,
    '0': False, 'no': False, 'false': False, 'off': False}
def boolean(text):
    """Map a textual flag to True/False; None when unrecognized."""
    key = text.strip().lower()
    return _BOOLEANMAP.get(key)
def entries(text, allow_whitespaces=False):
    """Split *text* on ';', ',', '|' (and whitespace unless
    allow_whitespaces), dropping empty entries."""
    separators = ';,|' if allow_whitespaces else ';,|' + r'\s'
    pattern = r'[{0}]+'.format(separators)
    return [item for item in re.split(pattern, text) if item]
def hash(text):
    """Extract a (checksum, algorithm) pair from *text*.

    Returns (None, None) when no checksum-like token is found; the
    algorithm part may be '' when only a bare digest matched.
    """
    text = text.replace('-', '').lower()
    algop = '|'.join(hashlib.algorithms + ('adler32', 'crc(32)?'))
    # Bug fix: the regex quantifier braces must be doubled ({{8,}}) so
    # str.format treats them literally instead of as a replacement field,
    # which raised at format time.
    pattr = r'(?P<D1>{0}|)\s*[:=]?\s*(?P<H>[\w^_]{{8,}}?)\s*[:=]?\s*(?P<D2>{1}|)'
    pattr = pattr.format(algop, algop)
    m = re.search(pattr, text)
    if m is None:
        return None, None
    checksum = m.group('H')
    algorithm = m.group('D1') or m.group('D2')
    if algorithm == 'crc':
        algorithm = 'crc32'
    return checksum, algorithm
def name(text, strict=True):
    """Extract a file name from *text* (URL or path).

    Falls back to the stripped basename when URL parsing fails; with
    strict=False the result is additionally purged.
    """
    try:
        result = web.parse.name(text)
    except Exception:
        result = os.path.basename(text).strip()
    return result if strict else purge.name(result)
_ONEWORDS = (
    'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
    'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
    'sixteen', 'seventeen', 'eighteen', 'nineteen')
_TENWORDS = (
    'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty',
    'ninety')
_RE_NUMBER = re.compile(r'[\s-]+')
def number(text):
    """Parse a spelled-out number ('twenty one') into an int.

    Returns None when no recognizable (non-zero) word is found.
    NOTE(review): 'zero' maps to 0, which the truthiness filter drops,
    so number('zero') is None -- confirm intended.
    """
    try:
        text = web.misc.translate(text).lower()
    except Exception:
        text = text.lower()
    numwords = {word: value for value, word in enumerate(_ONEWORDS)}
    numwords.update(
        {word: value * 10 for value, word in enumerate(_TENWORDS, 2)})
    tokens = _RE_NUMBER.split(text)
    values = [v for v in (numwords.get(token) for token in tokens) if v]
    return sum(values) if values else None
_RE_PACKS = re.compile(r'[^a-z0-9]+(?:(cd|part).*?\d+)?', flags=re.I)
def packs(nameurls):
    """Group (name, url) pairs into packages keyed by normalized name.

    Names are purged, stripped of extension and of cd/part markers;
    unresolvable names fall into the 'Unknown' package.
    """
    DEFAULT_URLNAME = 'Unknown'
    grouped = {}
    for urlname, url in nameurls:
        base = name(urlname, strict=False)
        base = os.path.splitext(base)[0].strip()
        base = _RE_PACKS.sub('_', base).strip('_')
        grouped.setdefault(base or DEFAULT_URLNAME, []).append(url)
    return grouped
_RE_SIZE = re.compile(r'(?P<S>-?[\d.,]+)\s*(?P<U>[a-zA-Z]*)')
def bytesize(text, unit=None): # returns integer bytes
    """Parse a human-readable size ('1.5 GB') into integer bytes.

    Returns None when *text* does not start with a number; *unit*
    overrides the unit parsed from the text (default unit: byte).
    """
    DEFAULT_INPUTUNIT = 'byte'
    match = _RE_SIZE.match(to_str(text))
    if match is None:
        return None
    if unit is None:
        unit = match.group('U') or DEFAULT_INPUTUNIT
    value = float(match.group('S').replace(',', '.'))
    # Only the first letter of the unit is significant ('g' for GB, ...).
    return int(convert.size(value, unit[0].lower(), 'byte'))
_TIMEWORDS = ('this', 'a', 'an', 'next')
# NOTE(review): 'day' maps to 12 hours here, not 24 -- confirm intended.
_TIMEMAP = {
    'day': 60 ** 2 * 12, 'hr': 60 ** 2, 'hour': 60 ** 2, 'min': 60, 'sec': 1}
_RE_TIME = re.compile(r'(\d+|[a-zA-Z-]+)\s*(day|hr|hour|min|sec)|(\d+)')
def seconds(text):
    """Parse a textual duration ('3 min', 'an hour') into seconds."""
    def to_int(obj):
        # Best-effort int conversion; None signals "not a number".
        try:
            return int(obj)
        except ValueError:
            return None
    try:
        text = web.misc.translate(text).lower()
    except Exception:
        text = text.lower()
    # "this day" / "today" / "daily" mean: until next midnight.
    pattr = r'({0})\s+day|today|daily'.format('|'.join(_TIMEWORDS))
    m = re.search(pattr, text)
    if m is not None:
        return seconds_to_midnight()
    # For each (word, unit, bare-int) match: articles count as 1, digits
    # parse directly, spelled numbers go through number(), default 1.
    seconds = sum(
        (w in _TIMEWORDS or to_int(i or w) or number(w) or 1) *
        _TIMEMAP.get(u, 1) for w, u, i in _RE_TIME.findall(text))
    return seconds
def minutes(text):
    """Parse a textual duration into minutes.
    NOTE(review): '/' is true division on py3, floor on py2 (no
    __future__ division import) -- confirm which is intended."""
    return seconds(text) / 60
def hours(text):
    """Parse a textual duration into hours (same division caveat)."""
    return seconds(text) / 60 ** 2
|
ponseError, \
ResourceNotFoundError
from musicbrainz2.model import Release
# TODO:
#
# * dynamically discover timezone
# * add station name in yaml
# 8 bytes for timestamp
# 5 for title
# 6 for artist
# 10 average for song title
# 10 average for artist name
# --
# 39 total bytes per record
# x 34 stations
# --
#
# 1,326 bytes
#
# 24 * 60 minutes in a day = 1440
#
# 3 minutes per song
#
# ---
#
# 480 songs per day
# x 1,326
# ---------
# 636,480 bytes per day
# x 30
# ---------
# 19,094,400 bytes per month
URL = 'http://websvc.bdsrealtime.com/NBDS.Consumer.Tempo/' \
'nowplaying.aspx?uid=Service%s&stnid=%04d'
POST = '__EVENTTARGET=detectTime&__EVENTARGUMENT=&detectTime='
def main():
    """Loops through all stations and scrapes the most recent songs"""
    print(datetime.datetime.now())
    try:
        stations_yaml = open('stations.yaml')
    except IOError:
        print("Failed to load station list", file=sys.stderr)
        sys.exit(-1)
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary tags; prefer yaml.safe_load if stations.yaml is ever
    # not fully trusted.
    stations = yaml.load(stations_yaml)
    # Each station entry supplies its BDS id and local timezone.
    for key, value in stations.items():
        scrapeStation( key, URL % (key, value['id']), POST, value['tz'] )
    print(datetime.datetime.now())
def store_in_cloud_db( domain, plays ):
    """Stores a play list in a SimpleDB domain

    Existing plays will be queried to see if the album has already been
    defined.

    Keyword arguments:
    domain -- the SimpleDB domain to store it in
    plays -- a dict with keys representing timestamps, the value is a
             tuple (artist, title)
    """
    items = {}
    total = 0
    for epoch, attrs in plays.items():
        # Escape double quotes for the SimpleDB select expression.
        artist = attrs[0].replace('"', '""')
        title = attrs[1].replace('"', '""')
        song_query = 'select * from `%s` where Album is not null ' \
                     'and Artist = "%s" and Title = "%s"' \
                     % (domain.name, artist, title)
        song_rs = domain.select(song_query, max_items=1)
        album = None
        for song in song_rs:
            album = song['Album']
        # Bug fix: `album is not None and album is not ""` compared string
        # identity, which only worked by interning accident; truthiness
        # covers both None and the empty string.
        if album:
            # TODO: FIXME: Query all domains, not just the current station
            item_attrs = {'Artist': attrs[0],
                          'Title': attrs[1],
                          'Album': album}
        else:
            item_attrs = {'Artist': attrs[0], 'Title': attrs[1]}
        # Item names are zero-padded hex timestamps so they sort by time.
        items["%08x" % epoch] = item_attrs
        # Flush in batches of 20 (presumably under the SimpleDB batch
        # limit -- confirm).
        if len(items) == 20:
            domain.batch_put_attributes(items)
            items = {}
            total += 20
    else:
        # Loop finished: flush the remaining partial batch.
        if len(items) != 0:
            domain.batch_put_attributes(items)
            total += len(items)
    print("Songs inserted: ", total)
def get_last_song_time( domain ):
    """Gets the timestamp of the last song played in a domain

    Item names are hex-encoded epoch timestamps; malformed records are
    deleted and 0 is returned when nothing valid is found.
    """
    query = 'select * from `%s` where itemName() > "00000000" ' \
            'order by itemName() desc limit 1' % (domain.name)
    for item in domain.select(query, max_items=1):
        print(domain.name, item.name, item)
        try:
            return int(item.name, 16)
        except ValueError:
            # Malformed item name: purge the record and keep looking.
            print("Deleting", item.name)
            domain.delete_item( domain.get_item(item.name) )
    return 0
def get_timestamps( source, timezone ):
    """Map the play times in *source* HTML to epoch seconds.

    Returns an OrderedDict {epoch_seconds: original time string}, oldest
    first.  Times later than "now" at the station are assumed to be from
    the previous day and shifted back 24 h.
    """
    timestamps = collections.OrderedDict()
    song_times = re.findall('<option value=\"(.*?)\">(.*?)<\/option>',
                            source)
    if ( len(song_times) == 0 ):
        return timestamps
    # Get the station's current time
    then = datetime.datetime(1970, 1, 1)
    then = pytz.UTC.localize(then)
    then = then.astimezone(pytz.timezone(timezone))
    station_time = datetime.datetime.now(pytz.timezone(timezone))
    station_epoch = (station_time - then).total_seconds()
    for song_time in reversed(song_times):
        # Convert song time to 'current time'
        is_pm = song_time[0][-2:] == 'PM'
        hour, minute = song_time[0][:-2].split(':')
        hour = int(hour)
        minute = int(minute)
        # If we are 1:00PM and greater
        if is_pm and hour != 12:
            hour += 12
        # If we are 12:00AM - 12:59AM
        if not is_pm and hour == 12:
            hour = 0
        song_dt = station_time.replace(hour=hour, minute=minute,
                                       second=0, microsecond=0)
        song_epoch = int((song_dt - then).total_seconds())
        # A time "in the future" must have been yesterday.
        if song_epoch > station_epoch:
            song_epoch -= 24*60*60
        timestamps[song_epoch] = song_time[0]
    return timestamps
def getSongInfo( plays ):
    """Look up album names for (artist, title) plays via MusicBrainz.

    Returns an OrderedDict {timestamp: (artist, title, album)}; album is
    "" when no release could be found.  Retries with growing backoff on
    web-service errors.
    """
    detailed_plays = collections.OrderedDict()
    for k,v in plays.items():
        q = Query()
        # Respect the MusicBrainz rate limit (~1 request per second).
        time.sleep(1.1)
        found = False
        i = 1
        while not found and i < 10:
            try:
                f = TrackFilter(title=v[1], artistName=v[0])
                results = q.getTracks(f)
                found = True
            except (AuthenticationError,
                    ConnectionError,
                    RequestError,
                    ResponseError,
                    ResourceNotFoundError,
                    WebServiceError) as error:
                detailed_plays[k] = (v[0], v[1], "")
                print('Error:', error, 'waiting', i*10, 'seconds')
                results = None
                time.sleep(i*10)
                i += 1
        if (results != None) and (len(results) != 0):
            found_release = None
            release_type = None
            release = None
            for result in results:
                track = result.track
                title = track.title
                artist = track.artist.name
                # Prefer: album, single, live, anything else
                for release in track.releases:
                    if Release.TYPE_ALBUM in release.getTypes():
                        found_release = release
                        release_type = Release.TYPE_ALBUM
                        break
                    elif Release.TYPE_SINGLE in release.getTypes():
                        if release_type != Release.TYPE_ALBUM:
                            found_release = release
                            release_type = Release.TYPE_SINGLE
                    elif Release.TYPE_LIVE in release.getTypes():
                        if release_type != Release.TYPE_ALBUM and \
                           release_type != Release.TYPE_SINGLE:
                            found_release = release
                            release_type = Release.TYPE_LIVE
                    else:
                        if release_type != Release.TYPE_ALBUM and \
                           release_type != Release.TYPE_SINGLE and \
                           release_type != Release.TYPE_LIVE:
                            found_release = release
                if release_type == Release.TYPE_ALBUM:
                    break
            if found_release == None:
                album = ""
            else:
                # NOTE(review): this reads the *last iterated* release, not
                # found_release -- looks like it should be
                # found_release.title; confirm before changing.
                album = release.title
            detailed_plays[k] = (artist, title, album)
        else:
            detailed_plays[k] = (v[0], v[1], "")
    return detailed_plays
def getPlays( times, url, post_base ):
plays = collections.OrderedDict()
# i = 0
for epoch, url_time_str in times.iteritems():
post = post_base + url_time_str.replace(':', '%3A')
try:
websvc_source = urllib.urlopen(url, post)
except IOError as error:
print("Error reading URL:", error)
sys.exit(1)
source = websvc_source.read()
websvc_source.close()
artist = re.findall('<span id=\"detectArtist\">(.*?)<\/span>', source)
song = re.find |
: The following test-cases are missing:
#
# - JSON numbers, strings and arrays
# - More invalid characters or malformed structures
# - Valid, but not obvious syntax, like zillion of spaces or
# strings with unicode chars (different suite maybe?)
bad_json = []
# A JSON value MUST be an object, array, number, string, true, false,
# or null
#
# NOTE: QMP seems to ignore a number of chars, like: | and ?
bad_json.append(":")
bad_json.append(",")
# Malformed json-objects
#
# NOTE: sending only "}" seems to break QMP
# NOTE: Duplicate keys are accepted (should it?)
bad_json.append("{ \"execute\" }")
bad_json.append("{ \"execute\": \"query-version\", }")
bad_json.append("{ 1: \"query-version\" }")
bad_json.append("{ true: \"query-version\" }")
bad_json.append("{ []: \"query-version\" }")
bad_json.append("{ {}: \"query-version\" }")
for cmd in bad_json:
resp = monitor.cmd_raw(cmd)
check_error_resp(resp, "JSONParsing")
def test_id_key(monitor):
    """
    Check that QMP's "id" key is correctly handled.
    """
    token = "virt-test"
    # Error responses must echo back the "id" the client supplied
    reply = monitor.cmd_qmp("eject", {"foobar": True}, q_id=token)
    check_error_resp(reply)
    check_str_key(reply, "id", token)
    # Success responses must echo it back as well
    reply = monitor.cmd_qmp("query-status", q_id=token)
    check_success_resp(reply)
    check_str_key(reply, "id", token)
    # Any json-value is a legal "id", not just strings
    arbitrary_ids = (True, 1234, "string again!",
                     [1, [], {}, True, "foo"], {"key": {}})
    for token in arbitrary_ids:
        reply = monitor.cmd_qmp("query-status", q_id=token)
        check_success_resp(reply)
        if reply["id"] != token:
            raise error.TestFail("expected id '%s' but got '%s'" %
                                 (str(token), str(reply["id"])))
def test_invalid_arg_key(monitor):
    """
    Currently, the only supported keys in the input object are: "execute",
    "arguments" and "id". Although expansion is supported, invalid key
    names must be detected.
    """
    # An unexpected top-level key must be rejected with the member named
    reply = monitor.cmd_obj({"execute": "eject", "foobar": True})
    check_error_resp(reply, "QMPExtraInputObjectMember", {"member": "foobar"})
def test_bad_arguments_key_type(monitor):
    """
    The "arguments" key must be an json-object.

    We use the eject command to perform the tests, but that's a random
    choice, any command that accepts arguments will do, as the command
    doesn't get called.
    """
    bad_values = (True, [], 1, "foo")
    for value in bad_values:
        reply = monitor.cmd_obj({"execute": "eject", "arguments": value})
        check_error_resp(reply, "QMPBadInputObjectMember",
                         {"member": "arguments", "expected": "object"})
def test_bad_execute_key_type(monitor):
    """
    The "execute" key must be a json-string.
    """
    # Anything that is not a string must be rejected
    for value in (False, 1, {}, []):
        reply = monitor.cmd_obj({"execute": value})
        check_error_resp(reply, "QMPBadInputObjectMember",
                         {"member": "execute", "expected": "string"})
def test_no_execute_key(monitor):
    """
    The "execute" key must exist, we also test for some stupid parsing
    errors.
    """
    malformed = ({}, {"execut": "qmp_capabilities"},
                 {"executee": "qmp_capabilities"}, {"foo": "bar"})
    for cmd in malformed:
        reply = monitor.cmd_obj(cmd)
        check_error_resp(reply)  # XXX: check class and data dict?
def test_bad_input_obj_type(monitor):
    """
    The input object must be... an json-object.
    """
    for bad_obj in ("foo", [], True, 1):
        reply = monitor.cmd_obj(bad_obj)
        check_error_resp(reply, "QMPBadInputObject", {"expected": "object"})
def test_good_input_obj(monitor):
    """
    Basic success tests for issuing QMP commands.
    """
    # cmd_obj() is used rather than cmd_qmp() so that the key order in
    # the input object can be arbitrary
    for cmd in ({"execute": "query-version"},
                {"arguments": {}, "execute": "query-version"}):
        check_success_resp(monitor.cmd_obj(cmd))
    token = "1234foo"
    reply = monitor.cmd_obj({"id": token, "execute": "query-version",
                             "arguments": {}})
    check_success_resp(reply)
    check_str_key(reply, "id", token)
    # TODO: would be good to test simple argument usage, but we don't have
    # a read-only command that accepts arguments.
def input_object_suite(monitor):
    """
    Check the input object format, as described in the QMP specfication
    section '2.3 Issuing Commands'.

    { "execute": json-string, "arguments": json-object, "id": json-value }
    """
    # Run the sub-tests in the same order as before
    for subtest in (test_good_input_obj, test_bad_input_obj_type,
                    test_no_execute_key, test_bad_execute_key_type,
                    test_bad_arguments_key_type, test_id_key,
                    test_invalid_arg_key):
        subtest(monitor)
def argument_checker_suite(monitor):
    """
    Check that QMP's argument checker is detecting all possible errors.

    We use a number of different commands to perform the checks, but the
    command used doesn't matter much as QMP performs argument checking
    _before_ calling the command.
    """
    # qmp in RHEL6 is different from 0.13.*:
    # 1. 'stop' command just return {} evenif stop have arguments.
    # 2. there is no 'screendump' command.
    # 3. argument isn't checked in 'device' command.
    # so skip these tests in RHEL6.

    # Optional argument: 'force' is omitted, but since it is optional the
    # handler must still run -- detect that via the handler's own error.
    reply = monitor.cmd_qmp("eject", {"device": "foobar"})
    check_error_resp(reply, "DeviceNotFound")
    # val argument must be a json-int
    for bad in ({}, [], True, "foo"):
        reply = monitor.cmd_qmp("memsave", {"val": bad, "filename": "foo",
                                            "size": 10})
        check_error_resp(reply, "InvalidParameterType",
                         {"name": "val", "expected": "int"})
    # value argument must be a json-number
    for bad in ({}, [], True, "foo"):
        reply = monitor.cmd_qmp("migrate_set_speed", {"value": bad})
        check_error_resp(reply, "InvalidParameterType",
                         {"name": "value", "expected": "number"})
    # qdev-type commands have their own argument checker, all QMP does
    # is to skip its checking and pass arguments through. Check this
    # works by providing invalid options to device_add and expecting
    # an error message from qdev
    reply = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"})
    check_error_resp(reply, "PropertyNotFound",
                     {"device": "e1000", "property": "foo"})
def unknown_commands_suite(monitor):
    """
    Check that QMP handles unknown commands correctly.
    """
    # "help" is an HMP-only command, so it must fail over QMP just like
    # the bogus names do
    for cmd_name in ("bar", "query-", "query-foo", "help"):
        reply = monitor.cmd_qmp(cmd_name)
        check_error_resp(reply, "CommandNotFound", {"name": cmd_name})
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
# Look for the first qmp monitor available, otherwise, fail the test
qmp_monitor = vm.get_monitors_by_type("qmp")
if qmp_monitor:
qmp_monitor = qmp_monitor[0]
else:
raise error.TestError('Could |
from django.db import models
class AbstractFormSetting(models.Model):
    """Abstract base model for attaching advanced settings to a Form.

    Subclass this to store site-specific settings alongside a
    wagtailstreamforms Form instance.
    """
    # One settings record per Form, reachable as form.advanced_settings;
    # removed together with the Form (CASCADE).
    form = models.OneToOneField(
        "wagtailstreamforms.Form",
        on_delete=models.CASCADE,
        related_name="advanced_settings",
    )
    class Meta:
        abstract = True
    def __str__(self):
        """Display the related form's title."""
        return self.form.title
|
h_index:
portdb = IndexedPortdb(portdb)
vardb = IndexedVardb(vardb)
if not usepkgonly and portdb._have_root_eclass_dir:
self._dbs.append(portdb)
if (usepkg or usepkgonly) and bindb.cp_all():
self._dbs.append(bindb)
self._dbs.append(vardb)
self._portdb = portdb
self._vardb = vardb
def _spinner_update(self):
if self.spinner:
self.spinner.update()
def _cp_all(self):
iterators = []
for db in self._dbs:
i = db.cp_all()
try:
i = iter(i)
except TypeError:
pass
iterators.append(i)
for group in MultiIterGroupBy(iterators):
yield group[0]
def _aux_get(self, *args, **kwargs):
for db in self._dbs:
try:
return db.aux_get(*args, **kwargs)
except KeyError:
pass
raise KeyError(args[0])
def _aux_get_error(self, cpv):
portage.writemsg("emerge: search: "
"aux_get('%s') failed, skipping\n" % cpv,
noiselevel=-1)
def _findname(self, *args, **kwargs):
for db in self._dbs:
if db is not self._portdb:
# We don't want findname to return anything
# unless it's an ebuild in a portage tree.
# Otherwise, it's already built and we don't
# care about it.
continue
func = getattr(db, "findname", None)
if func:
value = func(*args, **kwargs)
if value:
return value
return None
def _getFetchMap(self, *args, **kwargs):
for db in self._dbs:
func = getattr(db, "getFetchMap", None)
if func:
value = func(*args, **kwargs)
if value:
return value
return {}
def _visible(self, db, cpv, metadata):
installed = db is self._vardb
built = installed or db is not self._portdb
pkg_type = "ebuild"
if installed:
pkg_type = "installed"
elif built:
pkg_type = "binary"
return Package(type_name=pkg_type,
root_config=self.root_config,
cpv=cpv, built=built, installed=installed,
metadata=metadata).visible
def _first_cp(self, cp):
for db in self._dbs:
if hasattr(db, "cp_list"):
matches = db.cp_list(cp)
if matches:
return matches[-1]
else:
matches = db.match(cp)
for cpv in matches:
if cpv.cp == cp:
return cpv
return None
    def _xmatch(self, level, atom):
        """
        Match atom against all configured databases at the given level
        ("match-all", "match-visible" or "bestmatch-visible").

        This method does not expand old-style virtuals because it
        is restricted to returning matches for a single ${CATEGORY}/${PN}
        and old-style virtual matches are unreliable for that when querying
        multiple package databases. If necessary, old-style virtual
        expansion can be performed on atoms prior to calling this method.

        Returns a sorted list of cpvs for the "match-*" levels, or a single
        cpv (possibly None) for "bestmatch-visible". Raises
        NotImplementedError for any other level.
        """
        # Only results for atom's own ${CATEGORY}/${PN} are kept below.
        cp = portage.dep_getkey(atom)
        if level == "match-all":
            matches = set()
            for db in self._dbs:
                if hasattr(db, "xmatch"):
                    matches.update(db.xmatch(level, atom))
                else:
                    matches.update(db.match(atom))
            result = list(x for x in matches if portage.cpv_getkey(x) == cp)
            # NOTE(review): sorts via whichever db the loop bound last;
            # presumably _cpv_sort_ascending behaves identically across
            # dbs -- confirm.
            db._cpv_sort_ascending(result)
        elif level == "match-visible":
            matches = set()
            for db in self._dbs:
                if hasattr(db, "xmatch"):
                    matches.update(db.xmatch(level, atom))
                else:
                    # No xmatch support: filter db.match() results through
                    # the visibility check, using the db's cached metadata
                    # keys for the lookup.
                    db_keys = list(db._aux_cache_keys)
                    for cpv in db.match(atom):
                        try:
                            metadata = zip(db_keys,
                                db.aux_get(cpv, db_keys))
                        except KeyError:
                            self._aux_get_error(cpv)
                            continue
                        if not self._visible(db, cpv, metadata):
                            continue
                        matches.add(cpv)
            result = list(x for x in matches if portage.cpv_getkey(x) == cp)
            # NOTE(review): same last-bound-db sorting caveat as above.
            db._cpv_sort_ascending(result)
        elif level == "bestmatch-visible":
            result = None
            for db in self._dbs:
                if hasattr(db, "xmatch"):
                    cpv = db.xmatch("bestmatch-visible", atom)
                    if not cpv or portage.cpv_getkey(cpv) != cp:
                        continue
                    # Keep the highest version seen across all dbs.
                    if not result or cpv == portage.best([cpv, result]):
                        result = cpv
                else:
                    db_keys = list(db._aux_cache_keys)
                    matches = db.match(atom)
                    # Only sort when the db declares its match() output
                    # unordered (signalled by a match_unordered attribute).
                    try:
                        db.match_unordered
                    except AttributeError:
                        pass
                    else:
                        db._cpv_sort_ascending(matches)
                    # break out of this loop with highest visible
                    # match, checked in descending order
                    for cpv in reversed(matches):
                        if portage.cpv_getkey(cpv) != cp:
                            continue
                        try:
                            metadata = zip(db_keys,
                                db.aux_get(cpv, db_keys))
                        except KeyError:
                            self._aux_get_error(cpv)
                            continue
                        if not self._visible(db, cpv, metadata):
                            continue
                        if not result or cpv == portage.best([cpv, result]):
                            result = cpv
                        break
        else:
            raise NotImplementedError(level)
        return result
def execute(self,searchkey):
"""Performs the search for the supplied search key"""
self.searchkey = searchkey
    def _iter_search(self):
        """Generate (match_type, name) pairs for self.searchkey.

        match_type is "pkg", "desc" or "set". Mutates self.searchkey
        (stripping the '%' regex and '@' category prefixes), and sets
        self.matches, self.searchre, self.packagematches and self.sdict
        as side effects.
        """
        match_category = 0
        self.packagematches = []
        # Normalize searchdesc to 0/1 and pre-create the matches buckets.
        if self.searchdesc:
            self.searchdesc=1
            self.matches = {"pkg":[], "desc":[], "set":[]}
        else:
            self.searchdesc=0
            self.matches = {"pkg":[], "set":[]}
        writemsg_stdout("Searching...\n\n", noiselevel=-1)
        # Leading '%' selects regex search; leading '@' matches against the
        # full category/package name instead of just the package name.
        regexsearch = False
        if self.searchkey.startswith('%'):
            regexsearch = True
            self.searchkey = self.searchkey[1:]
        if self.searchkey.startswith('@'):
            match_category = 1
            self.searchkey = self.searchkey[1:]
        if regexsearch:
            self.searchre=re.compile(self.searchkey,re.I)
        else:
            # Literal search: escape the key, still case-insensitive.
            self.searchre=re.compile(re.escape(self.searchkey), re.I)
        # Pass 1: package names (and optionally DESCRIPTION metadata).
        for package in self._cp_all():
            self._spinner_update()
            if match_category:
                match_string = package[:]
            else:
                match_string = package.split("/")[-1]
            if self.searchre.search(match_string):
                yield ("pkg", package)
            elif self.searchdesc: # DESCRIPTION searching
                # Use _first_cp to avoid an expensive visibility check,
                # since the visibility check can be avoided entirely
                # when the DESCRIPTION does not match.
                full_package = self._first_cp(package)
                if not full_package:
                    continue
                try:
                    full_desc = self._aux_get(
                        full_package, ["DESCRIPTION"])[0]
                except KeyError:
                    self._aux_get_error(full_package)
                    continue
                if not self.searchre.search(full_desc):
                    continue
                yield ("desc", package)
        # Pass 2: package sets, matched by name or set DESCRIPTION.
        self.sdict = self.setconfig.getSets()
        for setname in self.sdict:
            self._spinner_update()
            if match_category:
                match_string = setname
            else:
                match_string = setname.split("/")[-1]
            if self.searchre.search(match_string):
                yield ("set", setname)
            elif self.searchdesc:
                if self.searchre.search(
                    self.sdict[setname].getMetadata("DESCRIPTION")):
                    yield ("set", setname)
def addCP(self, cp):
if not self._xmatch("match-all", cp):
return
self.matches["pkg"].append(cp)
self.mlen += 1
def output(self):
"""Outputs the results of the search."""
class msg(object):
@staticmethod
def append(msg):
writemsg_stdout(msg, noiselevel=-1)
msg.append("\b\b \n[ Results for search key : " + \
bold(self.searchkey) + " ]\n")
vardb = self._vardb
metadata_keys = set(Package.metadata_keys)
metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
metadata_keys = tuple(metadata_keys)
if self.searchkey is None:
# Handle results added via addCP
addCP_matches = []
for mytype, match in self.matches.items():
addCP_matches.append(mytype, match)
iterator = iter(addCP_matches)
else:
# Do a normal search
iterator = self._iter_search()
for mtype, match in iterator:
self.mlen += 1
masked = False
full_package = None
if mtype in ("pkg", "desc"):
full_package = self._xmatch(
"bestmatch-visible", match)
if not full_package:
masked = True
full_package = self._xmatch("match-all", match)
if full_package:
full_package = full_package[-1]
elif mtype == "set":
msg.append(green("*") + " " + bold(match) + "\n")
if self.verbose:
msg.append(" " + darkgreen("Description:") + \
" " + \
self.sdict[match].getMetadata("DESCRIPTION") \
+ "\n\n")
if full_package:
try:
metadata = dict(zip(metadata_keys,
self._aux_get(full_package, metadata_keys)))
except KeyError:
self._aux_get_error(full_package)
continue
desc = metadata["DESCRIPTION"]
homepage = metadata["HOMEPAGE"]
license = metadata["LICENSE"]
if masked:
msg.append(green("*") + " " + \
white(match |
vide_data
else:
new_dshape = [DataDesc(i.name, shape, i.dtype, i.layout) \
for i, shape in zip(self._data_shapes, new_data_shapes)]
if hasattr(data_batch, "provide_label") and data_batch.provide_label:
new_lshape = data_batch.provide_label
elif hasattr(data_batch, "label") and data_batch.label:
new_lshape = [DataDesc(i.name, j.shape, i.dtype, i.layout) \
for i, j in zip(self._label_shapes, data_batch.label)]
else:
new_lshape = None
self.reshape(new_dshape, new_lshape)
self._exec_group.forward(data_batch, is_train)
def backward(self, out_grads=None):
"""Backward computation.
See Also
----------
:meth:`BaseModule.backward`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.binded and self.params_initialized
self._exec_group.backward(out_grads=out_grads)
def prepare_states(self):
assert self.binded and self.params_initialized and self.optimizer_initialized
_prepare_params(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
updater=self._updater,
num_device=len(self._context),
kvstore=self._kvstore,
param_names=self._exec_group.param_names)
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.
See Also
----------
:meth:`BaseModule.update`.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
self._params_dirty = True
if self._update_on_kvstore:
_update_params_on_kvstore(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
self._kvstore, self._exec_group.param_names)
else:
_update_params(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
updater=self._updater,
num_device=len(self._context),
kvstore=self._kvstore,
param_names=self._exec_group.param_names)
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. O | therwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray`
might live on different devices.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicate that we
should merg | e the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
Output.
"""
assert self.binded and self.params_initialized
return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
def get_states(self, merge_multi_context=True):
"""Gets states from all devices.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
States
"""
assert self.binded and self.params_initialized
return self._exec_group.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of the states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
a single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._exec_group.set_states(states, value)
def update_metric(self, eval_metric, labels, pre_sliced=False, label_pads=None):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
See Also
----------
:meth:`BaseModule.update_metric`.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
label_pads : pad size if `pre_sliced` parameter is set to `False`,
list of pad sizes otherwise. Typically `data_batch.pad` (default: None).
"""
self._exec_group.update_metric(eval_metric, labels, pre_sliced, label_pads)
def _sync_params_from_devices(self):
"""Synchronizes parameters from devices to CPU. This function should be called after
calling `update` that updates the parameters on the devices, before one can read the
latest parameters from ``self._arg_params`` and ``self._aux_params``.
For row_sparse parameters on devices, ther are pulled from KVStore with all row ids.
"""
self._exec_group.get_params(self._arg_params, self._aux_params)
if self._kvstore and self._update_on_kvstore:
for param_name, param_val in sorted(self._arg_params.items()):
if param_val.stype == 'row_sparse':
row_ids = nd.arange( |
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstal | ler Development Team.
#
# Distributed under the terms of | the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
# This module identifies itself so sibling modules can check who imported
# whom during the relative-import test.
name = 'relimp.relimp1'
# Two distinct modules share the basename "relimp2":
#   upper -> relimp.relimp2        (sibling of this module)
#   lower -> relimp.relimp.relimp2 (inside the nested "relimp" subpackage)
from . import relimp2 as upper
from . relimp import relimp2 as lower
assert upper.name == 'relimp.relimp2'
assert lower.name == 'relimp.relimp.relimp2'
# The two relative imports must resolve to different modules and files;
# exit non-trivially if the bootloader collapsed them into one.
if upper.__name__ == lower.__name__:
    raise SystemExit("Imported the same module")
if upper.__file__ == lower.__file__:
    raise SystemExit("Imported the same file")
|
# Copyright 2014 Dietrich Epp.
# This file is part of SGLib. SGLib is licensed under the terms of the
# 2-clause BSD license. For more information, see LICENSE.txt.
# MSBuild property fragments merged into generated Visual Studio projects.
# Keys are "<Element>.<Property>" pairs as they appear in a .vcxproj file.
# Settings shared by every configuration (v120 = Visual Studio 2013 toolset).
BASE_CONFIG = {
    'Config.PlatformToolset': 'v120',
    'Config.CharacterSet': 'Unicode',
    'ClCompile.WarningLevel': 'Level3',
    'ClCompile.SDLCheck': True,
    'Link.GenerateDebugInformation': True,
}
# Debug builds: no optimization, incremental linking, _DEBUG defined.
DEBUG_CONFIG = {
    'Config.UseDebugLibraries': True,
    'VC.LinkIncremental': True,
    'ClCompile.Optimization': 'Disabled',
    'ClCompile.PreprocessorDefinitions': ['WIN32', '_DEBUG', '_WINDOWS'],
}
# Release builds: whole-program optimization plus COMDAT folding and
# reference elimination at link time; NDEBUG defined.
RELEASE_CONFIG = {
    'Config.WholeProgramOptimization': True,
    'Config.UseDebugLibraries': False,
    'VC.LinkIncremental': False,
    'ClCompile.Optimization': 'MaxSpeed',
    'ClCompile.FunctionLevelLinking': True,
    'ClCompile.IntrinsicFunctions': True,
    'ClCompile.PreprocessorDefinitions': ['WIN32', 'NDEBUG', '_WINDOWS'],
    'Link.GenerateDebugInformation': True,
    'Link.EnableCOMDATFolding': True,
    'Link.OptimizeReferences': True,
}
|
def extractNotoriousOnlineBlogspotCom(item):
    '''
    Parser for 'notorious-online.blogspot.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with neither a chapter nor a volume, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # (tag, release name, translation type) triples recognised for this feed.
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
| |
from .app import App
from .d | eco | rators import action
|
otationYOnMouseDown = 0;
var mouseX = 0;
var mouseXOnMouseDown = 0;
var mouseY = 0;
var mouseYOnMouseDown = 0;
var moveForward = false;
var moveBackward = false;
var moveLeft = false;
var moveRight = false;
var moveUp = false;
var moveDown = false;
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 1, 200 );
camera.position.z = 100;
controls = new THREE.OrbitControls( camera );
scene = new THREE.Scene();
scene.add( new THREE.AmbientLight(0x101010));
directionalLight = new THREE.DirectionalLight( 0xffffff );
directionalLight.position.x = 1;
directionalLight.position.y = 1;
directionalLight.position.z = 2;
directionalLight.position.normalize();
scene.add( directionalLight );
light1 = new THREE.PointLight( 0xffffff );
scene.add( light1 );
@Uniforms@
@ShaderMaterialDefinition@
phong_material = new THREE.MeshPhongMaterial( { ambient: 0x000000,
color: 0xffaa00,
specular: 0x555555,
| shininess: 30 });
object = new THREE.Mesh( new Shape(), @ShapeMaterial@);
object.overdraw = true;
object.rotation.x = -1.57/2;
scene.add( object );
renderer = new THREE.WebGLRenderer({antialias:true});
| renderer.setClearColor("@background-color@");
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
renderer.shadowMapEnabled = true;
renderer.shadowMapType = THREE.PCFShadowMap;
stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
container.appendChild( stats.domElement );
window.addEventListener( 'resize', onWindowResize, false );
}
function animate() {
requestAnimationFrame( animate );
controls.update();
render();
stats.update();
}
function render() {
@IncrementTime@
renderer.render( scene, camera );
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
}
</script>
</body>
"""
class HTMLHeader(object):
    """Renders the HTML head section from the HEADER template."""

    def __init__(self, background_color='#000000'):
        # Colour substituted into the template's @background-color@ slot.
        self._background_color = background_color

    def get_str(self):
        """Return the header template with its placeholders filled in."""
        filled = HEADER.replace('@background-color@', '%s' % self._background_color)
        filled = filled.replace('@VERSION@', OCC.VERSION)
        return filled
class HTMLBody(object):
    """Renders the HTML body section from the BODY template.

    Fills the template placeholders (@Uniforms@, @ShaderMaterialDefinition@,
    @ShapeMaterial@, @IncrementTime@, ...) either with a custom GLSL shader
    material or with the default phong material.
    """

    def __init__(self, background_color='#000000', vertex_shader=None,
                 fragment_shader=None, uniforms=None):
        self._background_color = background_color  # page/clear colour
        self._vertex_shader = vertex_shader        # GLSL source string or None
        self._fragment_shader = fragment_shader    # GLSL source string or None
        self._uniforms = uniforms                  # JS uniforms snippet or None

    def get_str(self):
        """Return the body template with all placeholders substituted."""
        # get the location where pythonocc is running from
        threejs_build_location = os.sep.join([OCC.__path__[0], 'Display', 'WebGl', 'js'])
        body_str = BODY.replace('@Three.jsPath@', '%s' % threejs_build_location)
        body_str = body_str.replace('@background-color@', '%s' % self._background_color)
        body_str = body_str.replace('@VERSION@', OCC.VERSION)
        # BUG FIX: the original tested _fragment_shader twice, so a custom
        # material could be emitted with a missing (None) vertex shader.
        # A shader material needs BOTH shaders.
        if (self._fragment_shader is not None) and (self._vertex_shader is not None):
            vertex_shader_string_definition = '<script type="x-shader/x-vertex" id="vertexShader">%s</script>' % self._vertex_shader
            fragment_shader_string_definition = '<script type="x-shader/x-fragment" id="fragmentShader">%s</script>' % self._fragment_shader
            shader_material_definition = """
            var vertexShader = document.getElementById( 'vertexShader' ).textContent;
            var fragmentShader = document.getElementById( 'fragmentShader' ).textContent;
            var shader_material = new THREE.ShaderMaterial( { uniforms: uniforms,
                                                              vertexShader: vertexShader,
                                                              fragmentShader: fragmentShader } );
            """
            if self._uniforms is None:
                body_str = body_str.replace('@Uniforms@', 'uniforms ={};\n')
                body_str = body_str.replace('@IncrementTime@', '')
            else:
                body_str = body_str.replace('@Uniforms@', self._uniforms)
                # Animate only when the caller declared a 'time' uniform.
                if 'time' in self._uniforms:
                    body_str = body_str.replace('@IncrementTime@', 'uniforms.time.value += 0.05;')
                else:
                    body_str = body_str.replace('@IncrementTime@', '')
            body_str = body_str.replace('@VertexShaderDefinition@', vertex_shader_string_definition)
            body_str = body_str.replace('@FragmentShaderDefinition@', fragment_shader_string_definition)
            body_str = body_str.replace('@ShaderMaterialDefinition@', shader_material_definition)
            body_str = body_str.replace('@ShapeMaterial@', 'shader_material')
        else:
            # No (complete) custom shader: blank out the shader slots and
            # fall back to the template's phong material.
            body_str = body_str.replace('@Uniforms@', '')
            body_str = body_str.replace('@VertexShaderDefinition@', '')
            body_str = body_str.replace('@FragmentShaderDefinition@', '')
            body_str = body_str.replace('@ShaderMaterialDefinition@', '')
            body_str = body_str.replace('@ShapeMaterial@', 'phong_material')
            body_str = body_str.replace('@IncrementTime@', '')
        return body_str
class ThreejsRenderer(object):
    """Tesselates a TopoDS shape and shows it in a browser via three.js."""

    def __init__(self, background_color="#123345", vertex_shader=None,
                 fragment_shader=None, uniforms=None, path=None):
        # Output directory: caller-supplied, or a fresh temporary one.
        self._path = path if path else tempfile.mkdtemp()
        self._js_filename = os.path.join(self._path, "shape.js")
        self._html_filename = os.path.join(self._path, "webgl_topods_shape.html")
        self._background_color = background_color
        self._vertex_shader = vertex_shader
        self._fragment_shader = fragment_shader
        self._uniforms = uniforms

    def set_vertex_shader(self, vertex_shader):
        ''' adds a vertex shader definition '''
        self._vertex_shader = vertex_shader

    def set_fragment_shader(self, fragment_shader):
        ''' adds a fragment shader '''
        self._fragment_shader = fragment_shader

    def create_files(self, shape):
        ''' generate .js and .html files '''
        self._shape = shape
        print("Tesselate shape ...")
        started = time()
        tess = Tesselator(self._shape)
        print("done in %f s." % (time() - started))
        print("Exporting tesselation to JSON ...")
        started = time()
        tess.ExportShapeToThreejs(self._js_filename)
        print("done in %f s." % (time() - started))
        print("Generating HTML stream ...")
        self.GenerateHTMLFile()
        print("done.")
        return self._js_filename, self._html_filename

    def DisplayShape(self, shape):
        self.create_files(shape)
        print("Opening html output in the default webbrowser ...")
        # webbrowser is portable; an os.system("open ...") call would be
        # macOS-specific.
        url = "file:///{0}".format(os.path.join(os.getcwd(), self._html_filename))
        webbrowser.open_new_tab(url)
|
uments:
Any named arguments will be passed on to the initializer of the ObserverBinder,
overriding any default values. These are all optional:
almanac_time: The observer's time in unix epoch time.
lat: The observer's latitude in degrees
lon: The observer's longitude in degrees
altitude: The observer's altitude in meters
horizon: The horizon angle in degrees
temperature: The observer's temperature (used to calculate refraction)
pressure: The observer's pressure (used to calculate refraction)
"""
# Using an encapsulated class allows easy access to the default values
class ObserverBinder(object):
# Use the default values provided by the outer class (Almanac):
def __init__(self, almanac_time=self.time_ts, lat=self.lat, lon=self.lon,
altitude=self.altitude, horizon=self.horizon, temperature=self.temperature,
pressure=self.pressure, formatter=self.formatter):
# Build an ephem Observer object
self.observer = ephem.Observer()
self.observer.date = timestamp_to_djd(almanac_time)
self.observer.lat = math.radians(lat)
self.observer.long = math.radians(lon)
self.observer.elev = altitude
self.observer.horizon = math.radians(horizon)
self.observer.temp = temperature
self.observer.pressure= pressure
self.formatter = formatter
def __getattr__(self, body):
"""Return a BodyWrapper that binds the observer to a heavenly body.
If there is no such body an exception of type AttributeError will
be raised.
body: A heavenly body. Examples, 'sun', 'moon', 'jupiter'
Returns:
An instance of a BodyWrapper. It will bind together the heavenly
body (an instance of something like ephem.Jupiter) and the observer
(an instance of ephem.Observer)
"""
# Find the module used by pyephem. For example, the module used for
# 'mars' is 'ephem.Mars'. If there is no such module, an exception
# of type AttributeError will get thrown.
ephem_module = getattr(ephem, body.capitalize())
# Now, together with the observer object, return an
# appropriate BodyWrapper
return BodyWrapper(ephem_module, self.observer, self.formatter)
# This will override the default values with any explicit parameters in kwargs:
return ObserverBinder(**kwargs)
    def __getattr__(self, attr):
        """Dispatch attribute access to a calendar event or heavenly body.

        attr: either a pyephem calendar-event function name (e.g.
        'next_solstice'), returning a ValueHelper with the event time, or a
        body name (e.g. 'sun'), returning a BodyWrapper via ObserverBinder.
        Requires extended (pyephem) capabilities.
        """
        if not self.hasExtras:
            # If the Almanac does not have extended capabilities, we can't
            # do any of the following. Raise an exception.
            # NOTE: Python 2 raise syntax; this module targets Python 2.
            raise AttributeError, "Unknown attribute %s" % attr

        # We do have extended capability. Check to see if the attribute is a calendar event:
        elif attr in ['previous_equinox', 'next_equinox',
                      'previous_solstice', 'next_solstice',
                      'previous_autumnal_equinox', 'next_autumnal_equinox',
                      'previous_vernal_equinox', 'next_vernal_equinox',
                      'previous_winter_solstice', 'next_winter_solstice',
                      'previous_summer_solstice', 'next_summer_solstice',
                      'previous_new_moon', 'next_new_moon',
                      'previous_first_quarter_moon', 'next_first_quarter_moon',
                      'previous_full_moon', 'next_full_moon',
                      'previous_last_quarter_moon', 'next_last_quarter_moon']:
            # This is how you call a function on an instance when all you have
            # is the function's name as a string
            djd = getattr(ephem, attr)(self.time_djd)
            return weewx.units.ValueHelper((djd, "dublin_jd", "group_time"),
                                           context="ephem_year", formatter=self.formatter)
        else:
            # It's not a calendar event. The attribute must be a heavenly body
            # (such as 'sun', or 'jupiter'). Create an instance of
            # ObserverBinder by calling the __call__ function in Almanac, but
            # with no parameters
            binder = self()
            # Now try getting the body as an attribute. If successful, an
            # instance of BodyWrapper will be returned. If not, an exception of
            # type AttributeError will be raised.
            return getattr(binder, attr)
# Maps the short attribute names used by callers ('rise', 'set', 'transit')
# to the corresponding ephem.Observer method names.
fn_map = {'rise'    : 'next_rising',
          'set'     : 'next_setting',
          'transit' : 'next_transit'}
class BodyWrapper(object):
"""This class wraps a celes | tial body. It returns results in degrees (instead of radians)
and percent (instead of fractions). For times, it returns the results as a ValueHelper.
It also deals with the unfortunate design decision in pyephem to change
the state of the celestial body when using it as an argument in certain functions."""
def __init__(self, body_factory, observer, formatter):
"""Initialize a wrapper
body_factory: A function that returns an instance of the body
to be wrapped. Example would be ephem.Sun
observer: An instance of ephem.Observer, containing the observer's lat, lon, time, etc.
formatter: An instance of weewx.units.Formatter(), containing the formatting
to be used for times.
"""
self.body_factory = body_factory
self.observer = observer
self.formatter = formatter
self.body = body_factory(observer)
self.use_center = False
# Calculate and store the start-of-day in Dublin Julian Days:
(y,m,d) = time.localtime(djd_to_timestamp(observer.date))[0:3]
self.sod_djd = timestamp_to_djd(time.mktime((y,m,d,0,0,0,0,0,-1)))
    def __call__(self, use_center=False):
        # Record whether subsequent rise/set/transit calculations should use
        # the body's center (the flag is passed along to the observer's
        # functions); return self so the call can appear in an attribute chain.
        self.use_center = use_center
        return self
def __getattr__(self, attr):
if attr in ['az', 'alt', 'a_ra', 'a_dec', 'g_ra', 'ra', 'g_dec', 'dec',
'elong', 'radius', 'hlong', 'hlat', 'sublat', 'sublong']:
# Return the results in degrees rather than radians
return math.degrees(getattr(self.body, attr))
elif attr=='moon_phase':
# Return the result in percent
return 100.0 * self.body.moon_phase
elif attr in ['next_rising', 'next_setting', 'next_transit', 'next_antitransit',
'previous_rising', 'previous_setting', 'previous_transit', 'previous_antitransit']:
# These functions have the unfortunate side effect of changing the state of the body
# being examined. So, create a temporary body and then throw it away
temp_body = self.body_factory()
time_djd = getattr(self.observer, attr)(temp_body, use_center=self.use_center)
return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter)
elif attr in fn_map:
# These attribute names have to be mapped to a different function name. Like the
# attributes above, they also have the side effect of changing the state of the body.
# Finally, they return the time of the event anywhere in the day (not just the next
# event), so they take a second argument in the function call.
temp_body = self.body_factory(self.observer)
# Look up the function to be called for this attribute (eg, call 'next_rising' for 'rise')
fn = fn_map[attr]
# Call the function, with a second argument giving the start-of-day
time_djd = getattr(self.observer, fn)(temp_body, self.sod_djd)
|
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.AccessEntity']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'finance.account': {
'Meta': {'ordering': "['name']", 'object_name': 'Account', '_ormbases': ['core.Object']},
'balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'balance_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'balance_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_k | ey': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"})
},
'finance.category': {
'Meta': {'object_name': 'Category | ', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'finance.currency': {
'Meta': {'object_name': 'Currency', '_ormbases': ['core.Object']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'factor': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '10', 'decimal_places': '4'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'finance.liability': {
'Meta': {'ordering': "['-due_date']", 'object_name': 'Liability', '_ormbases': ['core.Object']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_source'", 'to': "orm['identities.Contact']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_target'", 'to': "orm['identities.Contact']"}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
},
'finance.tax': {
'Meta': {'object_name': 'Tax', '_ormbases': ['core.Object']},
'compound': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'})
},
'finance.transaction': {
'Meta': {'ordering': "['-datetime']", 'object_name': 'Transaction', '_ormbases': ['core.Object']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'liability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Liability']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_source'", 'to': "orm['identities.Contact']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_target'", 'to': "orm['identities.Contact']"}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0' |
#!/usr/bin/python
# contains configuration; parameters can be accessed as attributes
class Config:
    """Holds parsed command-line configuration; parameters are exposed as attributes."""

    def __init__(self, parser):
        # parser: an argparse.ArgumentParser. Parse immediately and keep the
        # resulting namespace as a plain dict.
        self.args = vars(parser.parse_args())

    def __getattr__(self, name):
        # Expose parsed options as attributes. Raise AttributeError (not
        # KeyError) for unknown names so hasattr(), copy and pickle behave
        # as the attribute protocol requires.
        try:
            return self.args[name]
        except KeyError:
            raise AttributeError(name)
|
from flask import Blueprint, render_template, json, g, current_app, redirect, url_for, session
from datetime import datetime as dt
from scrim2.extensions import oid, db, lm
from scrim2.models import User
from sqlalchemy.orm.exc import NoResultFound
from flask.ext.login import login_user, logout_user, current_user
import requests, re
home_bp = Blueprint('home_bp', __name__)
@home_bp.route('/')
def index():
    """Landing page: authenticated users are sent straight to the live view."""
    return (redirect(url_for('live_bp.live'))
            if current_user.is_authenticated()
            else render_template('/home/index.html'))
@home_bp.route('/login')
@oid.loginhandler
def login():
    """Log in via Steam OpenID
    """
    if g.user is None:
        # Not signed in yet -- hand the browser off to Steam's OpenID endpoint.
        return oid.try_login('http://steamcommunity.com/openid')
    return redirect(oid.get_next_url())
@oid.after_login
def after_login(resp):
    """Look up (or create) the local User for a completed Steam OpenID login.

    resp: the OpenID response; its identity_url embeds the 64-bit Steam ID.
    """
    steam_id_regex = re.compile('steamcommunity.com/openid/id/(.*?)$')
    steam_id = steam_id_regex.search(resp.identity_url).group(1)
    try:
        g.user = User.query.filter_by(steam_id=steam_id).one()
        # FIX: removed an unused get_user_info() call here -- it issued a
        # needless Steam Web API request on every returning-user login.
        login_user(g.user)
        return redirect(oid.get_next_url())
    except NoResultFound:
        # First visit: create an account from the player's Steam profile.
        g.user = User()
        steam_data = get_user_info(steam_id)
        g.user.steam_id = steam_id
        g.user.nickname = steam_data['personaname']
        g.user.avatar_url = steam_data['avatar']
        g.user.avatar_url_full = steam_data['avatarfull']
        g.user.join_date = dt.utcnow()
        db.session.add(g.user)
        db.session.commit()
        login_user(g.user)
        return redirect(url_for('home_bp.index'))
@home_bp.route('/logout')
def logout():
    """Log the current user out and return to the landing page."""
    logout_user()
    return redirect(url_for('home_bp.index'))
def get_user_info(steam_id):
    """
    Return player summaries of the user that has the steam_id.
    Example:
        {
            u'steamid': u'steamid',
            u'personaname': u'personaname',
            ...
        }
    Returns an empty dict when the API reports no players.
    See: https://developer.valvesoftware.com/wiki/Steam_Web_API#GetPlayerSummaries_.28v0002.29
    """
    api = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
    params = {
        'key': current_app.config['STEAM_API_KEY'],
        'steamids': steam_id,
        # FIX: the API expects the literal string 'json'; the previous code
        # passed the flask ``json`` module object, which serialized as its repr.
        'format': 'json'
    }
    user_info = requests.get(url=api, params=params)
    players = user_info.json().get('response', {}).get('players', [])
    # FIX: ``players[0] or {}`` raised IndexError on an empty list before the
    # ``or`` could apply; guard explicitly instead.
    return players[0] if players else {}
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays user role name, ID, s | ubnetwork ID, number of assigned
users, and assigned permissions for the given search criteria. Results are
limited to the first 10 records.
Tags: userrole.getUserRoles
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
def main(client):
# Initialize appropriate service.
user_role_service = client.GetUserRoleService(
'https://advertisersapitest.doubleclick.net', 'v1.19')
# Set user role search criteria.
user_role_search_criteria = {
'pageSize': '10'
}
# Get user roles that match the search criteria.
results = user_role_service.GetUserRoles(user_role_search_criteria)[0]
# Display user role names, IDs, subnetwork IDs, number of assigned users, and
# assigned permissions.
if results['userRoles']:
for user_role in results['userRoles']:
print ('User role with name \'%s\', ID \'%s\', subnetwork ID \'%s\', and '
'assigned to \'%s\' users was found.'
% (user_role['name'], user_role['id'], user_role['subnetworkId'],
user_role['totalAssignedUsers']))
if user_role['permissions']:
print ' The above user role has the following permissions:'
for permission in user_role['permissions']:
print (' Permission with name \'%s\' and ID \'%s\'.'
% (permission['name'], permission['id']))
else:
print ' The above user role has no permissions assigned.'
else:
print 'No user roles found for your criteria.'
if __name__ == '__main__':
  # Initialize the client object from the given path and run the example.
  client = DfaClient(path=os.path.join('..', '..', '..', '..'))
  main(client)
|
import pandas as pd
from matplotlib import cm
import matplotlib.pyplot as plt
from pydlv import dl_model_3, data_reader, data_analyser, dl_generator, dl_plotter, trajectory_plotter
'''
This script demonstrates how to plot, using coefficients from the csv file
generated previously, a 3d surface of decision landscapes fitted to blocks
of trajectories.
'''
def plot_surfaces(dlg, fit_params, subj_id, blocks, colors, labels):
    """Plot the fitted decision-landscape surface of each block for one subject
    and save the figure as a PDF.

    FIX: the add_legend call contained stray ' | ' characters that broke the
    syntax; the call is restored.
    """
    dlp = dl_plotter.DLPlotter(elev=10, azim=69)
    for i, block_no in enumerate(blocks):
        # Model coefficients start after two leading columns of the csv row.
        x, y, dl = dlg.get_model_dl(fit_params.loc[subj_id, block_no][2:2+dlg.model.n_params])
        dlp.plot_surface(x, y, dl, color=colors[i], alpha=0.8)
    dlp.add_legend(colors, labels)
    plt.savefig('figures/blocks_%i_dlv.pdf' % (subj_id))
def plot_trajectories(data, subj_id, blocks, colors, labels):
    """Plot mean trajectories per block for one subject, print per-block
    summary statistics, and save the figure as a PDF.

    FIX: the for-statement contained stray ' | ' characters that broke the
    syntax; it is restored.
    """
    tp = trajectory_plotter.TrajectoryPlotter()
    for i, block_no in enumerate(blocks):
        block_trajectories = data[(data.subj_id==subj_id) & (data.block_no==block_no)]
        tp.plot_mean_trajectories(block_trajectories, colors[i], labels[i])
        # One row per trial, then mean motion time / max deviation by whether
        # the high-value option was chosen.
        block_info = block_trajectories.groupby('trial_no').first().groupby('high_chosen') \
            .mean()[['motion_time', 'max_d']]
        print('\n %s\n' % (labels[i]))
        print(block_info)
    tp.add_legend_mean_traj(colors, labels)
    plt.savefig('figures/blocks_%i_traj.pdf' % (subj_id))
def compare_dlv(subj_id, blocks):
    """Compare fitted decision landscapes and mean trajectories across the
    given blocks for one subject, and print block-level statistics."""
    params = pd.read_csv('csv/fit_params_by_block_method_9.csv',
                         index_col=['subj_id', 'block_no'], header=0)
    labels = ['Block %i' % (block) for block in blocks]
    palette = cm.viridis
    colors = [palette(0.7), palette(0.35), palette(0.1)]
    # Surfaces from the fitted model-3 coefficients.
    generator = dl_generator.DLGenerator(dl_model_3.DLModel3())
    plot_surfaces(generator, params, subj_id, blocks, colors, labels)
    # Trajectories from the pre-processed raw data.
    reader = data_reader.DataReader()
    data = reader.get_processed_data(path='csv/processed_data_high_low.csv')
    plot_trajectories(data, subj_id, blocks, colors, labels)
    analyser = data_analyser.DataAnalyser()
    stats = analyser.get_block_stats(data)
    print('\n %s\n' % ('Block stats'))
    print(stats.loc[subj_id])
# Subject and blocks to analyse.
#subj_id = 233
subj_id = 1334
blocks = [1, 2, 3]
compare_dlv(subj_id, blocks)
import copy
import pandas as pd
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import Legend, Span
# from bokeh.models import HoverTool
from ..utils import in_ipynb
from .plotobj import BasePlot
from .plotutils import get_color
_INITED = False
class BokehPlot(BasePlot):
    """Bokeh implementation of BasePlot.

    The plotting methods (line/area/bar/scatter) add glyph renderers to one
    shared datetime-x-axis figure and collect legend entries; ``show``
    applies the final styling and displays the figure.
    """

    def __init__(self, size=None, theme=None):
        global _INITED
        if not _INITED:
            if in_ipynb():
                # Route bokeh output into the notebook.
                output_notebook(hide_banner=True)
            # FIX: the flag was never set, so output_notebook() ran on every
            # construction instead of once per process.
            _INITED = True
        size = size or (800, 500)
        self.width = size[0]
        self.height = size[1]
        self.figure = figure(toolbar_location="below",
                             toolbar_sticky=False,
                             x_axis_type='datetime',
                             plot_width=self.width,
                             plot_height=self.height)  # TODO remove
        # (column label, [renderers]) pairs collected by the plotting methods.
        self.legend = []

    def show(self, title='', xlabel='', ylabel='', xaxis=True, yaxis=True, xticks=True, yticks=True, legend=True, grid=True, **kwargs):
        """Apply label, axis, grid and legend options, then display the
        figure. Returns the underlying bokeh figure."""
        self.figure.outline_line_color = None
        # Horizontal reference line at y=0.
        hline = Span(location=0, dimension='width', line_color='black', line_width=1)
        self.figure.renderers.append(hline)
        # FIX: these previously assigned kwargs.get('xlabel') etc., which is
        # always None because the values arrive through the named parameters.
        if xlabel:
            self.figure.xaxis.axis_label = xlabel
        if ylabel:
            self.figure.yaxis.axis_label = ylabel
        if title:
            self.figure.title.text = title
        if legend:
            self.figure.legend.location = (self.width + 10, self.height + 10)
            legend_obj = Legend(items=self.legend, location=(10, 100))
            legend_obj.items = self.legend
            legend_obj.click_policy = "mute"
            self.figure.add_layout(legend_obj, 'right')
        else:
            self.figure.legend.location = None
        if not grid:
            self.figure.xgrid.grid_line_color = None
            self.figure.ygrid.grid_line_color = None
        # FIXME only hides the axis line; tick labels remain visible.
        if not yaxis:
            for ax in self.figure.yaxis:
                ax.axis_line_color = 'white'
        if not xaxis:
            for ax in self.figure.xaxis:
                ax.axis_line_color = 'white'
        show(self.figure)
        return self.figure

    def area(self, data, color=None, y_axis='left', stacked=False, **kwargs):
        """Draw each column of ``data`` as a filled patch down to the x-axis."""
        # Append two zero rows so the patch polygon closes on the axis.
        data2 = data.append(data.iloc[-1] * 0)
        data2 = data2.append(data2.iloc[0] * 0)
        # FIX: a duplicate sort_index() call was removed (sorting is idempotent).
        data2 = data2.sort_index()
        # Swap the first two rows so the polygon outline is traced in order.
        x, y = copy.deepcopy(data2.iloc[0]), copy.deepcopy(data2.iloc[1])
        data2.iloc[0], data2.iloc[1] = y, x
        for i, col in enumerate(data):
            c = get_color(i, col, color)
            fig = self.figure.patch(x=data2.index, y=data2[col].values, legend=col, fill_alpha=.2, color=c, **kwargs)
            self.legend.append((col, [fig]))
        # for stacked: https://bokeh.pydata.org/en/latest/docs/gallery/brewer.html
        # p.patches([x2] * areas.shape[1], [areas[c].values for c in areas], color=colors, alpha=0.8, line_color=None)

    @staticmethod
    def _stacked(df):
        # Cumulative tops plus shifted bottoms give the outlines needed for
        # stacked area patches. (Made a staticmethod: the original was
        # declared at method level without ``self``.)
        df_top = df.cumsum(axis=1)
        df_bottom = df_top.shift(axis=1).fillna({'y0': 0})[::-1]
        df_stack = pd.concat([df_bottom, df_top], ignore_index=True)
        return df_stack

    def bar(self, data, color=None, y_axis='left', stacked=False, **kwargs):
        """Draw each column of ``data`` as vertical bars keyed by the index."""
        # stacked bar: https://bokeh.pydata.org/en/latest/docs/gallery/bar_stacked.html
        # stacked bar: https://bokeh.pydata.org/en/latest/docs/gallery/bar_stacked_split.html
        c = []
        for i, col in enumerate(data):
            c.append(get_color(i, col, color))
            fig = self.figure.vbar(x=data.index, top=data[col].values, width=.9, color=c, **kwargs)
            self.legend.append((col, [fig]))

    def hist(self, data, color=None, y_axis='left', stacked=False, **kwargs):
        raise NotImplementedError()

    def hline(self, y, color=None, **kwargs):
        raise NotImplementedError()

    def hspan(self, yhigh, ylow=0, color=None, **kwargs):
        raise NotImplementedError()

    # def candlestick(self, data):
    #     # https://bokeh.pydata.org/en/latest/docs/gallery/candlestick.html

    def line(self, data, color=None, y_axis='left', **kwargs):
        """Draw each column of ``data`` as a line keyed by the index."""
        for i, col in enumerate(data):
            c = get_color(i, col, color)
            fig = self.figure.line(x=data.index, y=data[col].values, legend=col, color=c, **kwargs)
            self.legend.append((col, [fig]))

    def scatter(self, data, color=None, y_axis='left', **kwargs):
        """Scatter every column of ``data`` against the first column."""
        for i, col in enumerate(data):
            if i == 0:
                continue  # don't scatter against self
            x = data.columns[0]
            y = data.columns[i]
            c = get_color(i, col, color)
            fig = self.figure.scatter(x=data[x],
                                      y=data[y],
                                      legend='%s vs %s' % (x, y),
                                      fill_color=c,
                                      fill_alpha=0.6,
                                      line_color=None,
                                      **kwargs)
            self.legend.append(('%s vs %s' % (x, y), [fig]))

    def step(self, data, color=None, y_axis='left', **kwargs):
        raise NotImplementedError()

    def vline(self, x, color=None, **kwargs):
        raise NotImplementedError()

    def vspan(self, xhigh, xlow=0, color=None, **kwargs):
        raise NotImplementedError()
|
#!/usr/bin/env python
"""
Status of DIRAC components using runsvstat utility
"""
from __future__ import print_function
from DIRAC.Core.Base import Script

# No configuration service is needed for this query.
Script.disableCS()
# FIX: a stray '| ' before 'Arguments:' broke the list literal; restored.
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  ' %s [option|cfgfile] ... [system [service|agent]]' % Script.scriptName,
                                  'Arguments:',
                                  ' system: Name of the system for the component (default *: all)',
                                  ' service|agent: Name of the particular component (default *: all)']))
Script.parseCommandLine()
args = Script.getPositionalArgs()

# NOTE: imported after parseCommandLine (original ordering preserved).
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller

__RCSID__ = "$Id$"

# At most two positional arguments: system and component.
if len(args) > 2:
  Script.showHelp()
  exit(-1)

# Defaults: query every system / every component.
system = '*'
component = '*'
if len(args) > 0:
  system = args[0]
  if system != '*':
    if len(args) > 1:
      component = args[1]

gComponentInstaller.exitOnError = True

result = gComponentInstaller.getStartupComponentStatus([system, component])
if not result['OK']:
  print('ERROR:', result['Message'])
  exit(-1)

gComponentInstaller.printStartupStatus(result['Value'])
|
from __future__ import unicode_literals

from django.db import migrations
# FIX: stray '| ' characters in this import line and in the operations list
# broke the syntax; both are restored.
from django.contrib.postgres.operations import HStoreExtension


class Migration(migrations.Migration):
    """Enable the PostgreSQL hstore extension."""

    dependencies = [
        ('product', '0020_attribute_data_to_class'),
    ]

    operations = [
        HStoreExtension(),
    ]
|
from django import forms
class CommentForm(forms.Form):
    """Form for posting a comment attached to an arbitrary object.

    FIX: stray ' | ' characters split ``HiddenInput`` in two field
    definitions and broke the syntax; both are restored.
    """
    # The target object is identified by content type and primary key,
    # carried in hidden inputs.
    content_type = forms.CharField(widget=forms.HiddenInput)
    object_id = forms.CharField(widget=forms.HiddenInput)
    # Optional parent comment, for threaded replies.
    parent_id = forms.IntegerField(widget=forms.HiddenInput, required=False)
    content = forms.CharField(label='',widget=forms.Textarea)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.