text
stringlengths 29
850k
|
|---|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps
from time import time, mktime
import datetime
from django.http import HttpResponse
from django.utils import simplejson as json
from django.template.loader import render_to_string
from astakos.im.models import AstakosUser, Component
from snf_django.lib.api import faults
from snf_django.lib.api.utils import isoformat
from astakos.im.forms import FeedbackForm
from astakos.im.user_utils import send_feedback as send_feedback_func
import logging
logger = logging.getLogger(__name__)
def absolute(request, url):
    """Return *url* as an absolute URI built from *request*.

    PEP 8 (E731) discourages binding a lambda to a name; a def keeps the
    same callable interface while giving tracebacks a proper name.
    """
    return request.build_absolute_uri(url)
def _dthandler(obj):
    """json.dumps fallback: serialize datetimes via isoformat().

    Raises TypeError for any other type so json.dumps reports it normally.
    """
    if not isinstance(obj, datetime.datetime):
        raise TypeError
    return isoformat(obj)
def json_response(content, status_code=None):
    """Build an HttpResponse carrying *content* serialized as JSON.

    Datetimes inside *content* are serialized through _dthandler.
    """
    body = json.dumps(content, default=_dthandler)
    response = HttpResponse()
    if status_code is not None:
        response.status_code = status_code
    response.content = body
    response['Content-Type'] = 'application/json; charset=UTF-8'
    response['Content-Length'] = len(response.content)
    return response
def xml_response(content, template, status_code=None):
    """Build an HttpResponse by rendering *template* with *content* as XML."""
    response = HttpResponse()
    response.content = render_to_string(template, content)
    if status_code is not None:
        response.status_code = status_code
    response['Content-Type'] = 'application/xml; charset=UTF-8'
    response['Content-Length'] = len(response.content)
    return response
def check_is_dict(obj):
    """Raise faults.BadRequest unless *obj* is a dict."""
    if isinstance(obj, dict):
        return
    raise faults.BadRequest("Request should be a JSON dict")
def is_integer(x):
    """Return True when *x* is a plain or long integer (Python 2)."""
    return isinstance(x, int) or isinstance(x, long)
def are_integer(lst):
    """Return True when every element of *lst* is an integer."""
    return all(is_integer(item) for item in lst)
def validate_user(user):
    """Ensure *user* may access the API.

    Checks, in order: account active, token not expired, approval terms
    signed. Raises faults.Unauthorized with a specific message on the
    first failing check; returns None when all pass.
    """
    if not user.is_active:
        message = 'User inactive'
    elif user.token_expired():
        message = 'Authentication expired'
    elif not user.signed_terms:
        message = 'Pending approval terms'
    else:
        return
    raise faults.Unauthorized(message)
def user_from_token(func):
    """Decorator for authenticating a user by the request's X-Auth-Token.

    Looks up the AstakosUser owning the token, validates the account via
    validate_user(), attaches it as ``request.user`` and then invokes the
    wrapped view. Raises faults.Unauthorized on any failure.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        try:
            token = request.x_auth_token
        except AttributeError:
            # The request carries no x_auth_token attribute at all
            # (presumably set by auth middleware -- confirm upstream).
            raise faults.Unauthorized("No authentication token")
        if not token:
            # Attribute present but empty/None.
            raise faults.Unauthorized("Invalid X-Auth-Token")
        try:
            user = AstakosUser.objects.get(auth_token=token)
        except AstakosUser.DoesNotExist:
            raise faults.Unauthorized('Invalid X-Auth-Token')
        # Rejects inactive users, expired tokens and pending approval terms.
        validate_user(user)
        request.user = user
        return func(request, *args, **kwargs)
    return wrapper
def component_from_token(func):
    """Decorator for authenticating a component by its token.

    Checks that a component with the corresponding token exists and, when
    the component's token carries an expiration date, that the date has
    not passed. Attaches the component as ``request.component_instance``.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        try:
            token = request.x_auth_token
        except AttributeError:
            raise faults.Unauthorized("No authentication token")
        if not token:
            raise faults.Unauthorized("Invalid X-Auth-Token")
        try:
            component = Component.objects.get(auth_token=token)
        except Component.DoesNotExist:
            raise faults.Unauthorized("Invalid X-Auth-Token")
        # Reject tokens whose expiration date is already in the past.
        expiration_date = component.auth_token_expires
        if expiration_date and time() > mktime(expiration_date.timetuple()):
            raise faults.Unauthorized("Authentication expired")
        request.component_instance = component
        return func(request, *args, **kwargs)
    return wrapper
def get_uuid_displayname_catalogs(request, user_call=True):
    """Return JSON catalogs mapping uuids to displaynames and vice versa.

    Normal Response Codes: 200
    Error Response Codes: BadRequest (400)
    """
    try:
        input_data = json.loads(request.body)
    except (ValueError, TypeError):
        # Narrowed from a bare except: json.loads signals malformed text
        # with ValueError and non-string input with TypeError; any other
        # exception is a genuine bug and should surface.
        raise faults.BadRequest('Request body should be json formatted.')
    if not isinstance(input_data, dict):
        raise faults.BadRequest(
            'Request body should be a json formatted dictionary')
    uuids = input_data.get('uuids', [])
    if uuids is None and user_call:
        uuids = []
    displaynames = input_data.get('displaynames', [])
    if displaynames is None and user_call:
        displaynames = []
    user_obj = AstakosUser.objects
    d = {'uuid_catalog': user_obj.uuid_catalog(uuids),
         'displayname_catalog': user_obj.displayname_catalog(displaynames)}
    # Reuse json_response instead of duplicating the response assembly.
    return json_response(d)
def send_feedback(request, email_template_name='im/feedback_mail.txt'):
    """Validate the feedback form and forward the message to the operators.

    Returns HTTP 200 on success and HTTP 502 when delivery fails; raises
    faults.BadRequest for invalid form data.
    """
    form = FeedbackForm(request.POST)
    if not form.is_valid():
        logger.error("Invalid feedback request: %r", form.errors)
        raise faults.BadRequest('Invalid data')
    msg = form.cleaned_data['feedback_msg']
    data = form.cleaned_data['feedback_data']
    try:
        send_feedback_func(msg, data, request.user, email_template_name)
    except Exception:
        # Best-effort delivery: still answer 502 as before, but log the
        # failure instead of silently swallowing it (was a bare except).
        logger.exception("Failed to send feedback")
        return HttpResponse(status=502)
    return HttpResponse(status=200)
def rename_meta_key(d, old, new):
    """Move the value stored under key *old* to key *new* in dict *d*.

    A no-op when *old* is absent; overwrites any existing value at *new*.
    """
    if old in d:
        d[new] = d.pop(old)
def get_int_parameter(p):
    """Coerce *p* to a non-negative int, or return None when invalid.

    None input, values int() cannot parse or convert, and negative
    numbers all yield None.
    """
    if p is None:
        return None
    try:
        p = int(p)
    except (ValueError, TypeError):
        # TypeError added: int() raises it for non-string/number inputs
        # (e.g. a list), which previously escaped as an unhandled error.
        return None
    if p < 0:
        return None
    return p
def get_content_length(request):
    """Return the request's Content-Length header as a non-negative int.

    Raises faults.LengthRequired when the header is missing or invalid.
    """
    length = get_int_parameter(request.META.get('CONTENT_LENGTH'))
    if length is None:
        raise faults.LengthRequired('Missing or invalid Content-Length header')
    return length
def invert_dict(d):
    """Return a new dict mapping each value of *d* to its key.

    Uses items() instead of iteritems(): behavior on Python 2 is
    identical, and the helper also works unchanged on Python 3.
    """
    return dict((v, k) for k, v in d.items())
|
BIGGER THAN IT LOOKS, HOME, BASEMENT, AND FULL FENCED YARD.REHABED 2016-2017. EXPANDED CAPE COD WITH NEW DORMER WITH 2ND FULL BATH, ALSO NEW ROOF, WINDOWS,SIDING WITH NEW DOWNSPOUTS AND GUTTERS AND HARDWOOD FLOORS. OTHER ''NEWS'' FIRST FLOOR BATH WITH JETTED TUB, NEW KITCHEN WITH NEW STAINLESS APPLIANCES, CABINETS, GRANITE COUNTERTOP. NEW DANLEY TWO CAR GARAGE WITH NEW CONCRETE DRIVE. NEW STAIRS AND STAIRCASE . DINING ROOM OPEN TO KITCHEN . TOO MANY NEWS STOP IN OR CALL FOR APPOINTMENT. WELCOME CHILD FRIENDLY, CLOSE TO SCHOOLS AND TRAIN. SEE TAXES. HOMES IN THIS AREA 2014 FOR 250's, 2017 FOR HIGH 270's. REMEMBER FULL REHABBED BATHS AND KITCHEN.
|
#
# Copyright (C) 2012 Niek Linnenbank
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import argparse
import fcntl, termios, struct
from bouwer.plugin import Plugin
from bouwer.action import ActionEvent
class ProgressBar(Plugin):
    """
    Output a textual progress bar on the terminal.
    """

    def initialize(self):
        """
        Register the -p/--progress command line option.
        """
        self.conf.cli.parser.add_argument('-p', '--progress',
                                          dest='output_plugin',
                                          action='store_const',
                                          const=self,
                                          default=argparse.SUPPRESS,
                                          help='Output a progress bar to indicate action status')

    def action_event(self, action, event):
        """
        Called when an :class:`.ActionEvent` is triggered.

        Redraws the bar whenever an action finishes.
        """
        if event.type == ActionEvent.FINISH:
            workers = self.build.actions.workers
            todo = len(workers.pending) + len(workers.running)
            total = len(self.build.actions.actions)
            # Guard against ZeroDivisionError when there are no actions.
            perc = float(total - todo) / float(total) if total else 1.0
            self.update_progress(perc, action.target)

    def get_console_width(self):
        """
        Return the width of the console in characters.
        """
        # TODO: not portable to windows
        try:
            # AttributeError on Python < 3.3, OSError when stdout is not a tty.
            return os.get_terminal_size().columns
        except (AttributeError, OSError):
            try:
                # COLUMNS is a string; the original returned it unconverted,
                # breaking the integer arithmetic in update_progress().
                return int(os.environ['COLUMNS'])
            except (KeyError, ValueError):
                hw = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
                return hw[1]

    def update_progress(self, progress, label=""):
        """
        Display or update a console progress bar.

        :param progress: completion fraction in [0.0, 1.0]
        :param label: text appended after the bar
        """
        label_length = len(label) + 16
        bar_length = self.get_console_width() - label_length
        block = int(round(bar_length * progress))
        text = "\r[{0}] {1:.2%} {2}".format("#" * block + "-" * (bar_length - block),
                                            progress,
                                            label)
        sys.stdout.write(text)
        sys.stdout.flush()
        if progress == 1.0:
            # Terminate the carriage-returned line once the build completes.
            print()
|
I Have a Great Resume – So Why is My Phone Not Ringing?
Does HR do a Good Job at Recruitment?
What Can Travel Staff’s “First Thirstdays” Do For You?
Where Have All the Job Seekers Gone?
Do You Have a Staff Infection?
We’re Building a PMS Team!
Is Your Puzzle Missing a Piece?
Does This Logo Make Me Look Fat?
PMS in the Workforce... the story continues...
|
from django.conf import settings
from django.core import exceptions
from django.core.cache import cache
from pyquery import PyQuery as pq
from urllib2 import HTTPError, URLError, Request, urlopen, quote
from urllib import urlencode
# Lazily-populated cache of FogBugz connection settings; see _configure().
conf = {}


class GadgetError(Exception):
    """Error raised for FogBugz API failures, wrapping the server message."""

    def __init__(self, message):
        self.msg = 'FogBugz Gadget says... %s' % message

    def __str__(self):
        return repr(self.msg)
def _configure():
    """
    Load the required FogBugz settings into the module-level conf dict.

    Raises django.core.exceptions.ImproperlyConfigured when any of the
    FOG_* settings is missing.
    """
    required = (('api_root', 'FOG_API_ROOT'),
                ('email', 'FOG_EMAIL'),
                ('password', 'FOG_PASSWORD'),
                ('project', 'FOG_PROJECT'),
                ('primary_contact', 'FOG_PRIMARY_CONTACT'))
    try:
        for key, setting in required:
            conf[key] = getattr(settings, setting)
    except AttributeError:
        raise exceptions.ImproperlyConfigured
def _send(query):
# for some reason we have to grab the XML doc manually before passing to pyquery;
# the token isn't grabbed otherwise
try:
request = Request(conf['api_root'], urlencode(query.items()))
xml = pq(urlopen(request).read())
return xml
except HTTPError, e:
raise GadgetError('Error code: %s (check app settings)' % e.code)
except URLError, e:
raise GadgetError('Failed to reach server: %s (check app settings)' % e.reason)
def _logon():
    """Log on to FogBugz and return the session token.

    Raises GadgetError when the server reports an error or omits the token.
    """
    reply = _send({'cmd': 'logon',
                   'email': conf['email'],
                   'password': conf['password']})
    if reply('error'):
        raise GadgetError(reply)
    token = reply('token').html()
    if token is None:
        raise GadgetError('No token provided, login unsuccessful')
    return token
def _logoff(token):
    """Terminate the FogBugz session identified by *token*.

    Fixed: the token was sent under the key 'token=' (stray trailing '='),
    so the server never received it and sessions were left open. Every
    other command in this module uses the key 'token'.
    """
    _send({'token': token,
           'cmd': 'logoff'})
def get_priorities():
    """
    Return (choices, initial) priority values for use in a choice field.

    Values are pulled from FogBugz and memoized in the Django cache.
    Raises GadgetError when the server reports an error.
    """
    # Single cache lookup instead of the previous get()-then-get() pair.
    cached = cache.get('priorities')
    if cached is not None:
        return cached
    if not conf:
        _configure()
    token = _logon()
    reply = _send({'token': token,
                   'cmd': 'listPriorities'})
    if reply('error'):
        # Close the session even on failure; previously it leaked.
        _logoff(token)
        raise GadgetError(reply)
    choices, initial = [], None
    for elem in reply('priority'):
        val = pq(elem).find('ixPriority').html()
        name = val + ' - ' + pq(elem).find('sPriority').html()
        choices.append((val, name))
        if pq(elem).find('fDefault').html() == 'true':
            initial = val
    _logoff(token)
    cache.set('priorities', (choices, initial))
    return choices, initial
def submit_ticket(data):
    """
    Return a case number upon successful submission of a ticket.

    Cleaned form data is expected. Raises GadgetError when the server
    reports an error.
    """
    if not conf:
        _configure()
    token = _logon()
    reply = _send({'cmd': 'new',
                   'token': token,
                   'sProject': conf['project'],
                   'sPrimary': conf['primary_contact'],
                   'sTitle': data['title'],
                   'ixPriority': data['priority'],
                   'sEvent': data['message']})
    if reply('error'):
        # Close the session even on failure; previously it leaked.
        _logoff(token)
        raise GadgetError(reply)
    # Read the case number only after confirming the reply is not an error.
    case = reply('case').attr('ixBug')
    _logoff(token)
    return case
|
Seven workbooks move in careful steps from basic skills to the most sophisticated skills. Choose the books you or your students need. For classroom, lab, or individual use.
Signals, Relationships, Essay Structure, Critical Reading, Sentence Structure, Vocabulary, Students can work on their own, because each lesson has an exercise with answers in the back, so students can see if they need more study. Teachers can keep track of progress because they can give a checkpoint test at two or three points in the book, and a final test when the student has finished the book. Alternate versions of all tests and checkpoints are available.
By Amy Awtrey and Carol Markos 52 Pages, 8.5 x 11 Having trouble pronouncing the English la..
The Reading Program Book A: Word Patterns CD CD This CD gives the pronunciation of words wi..
THE READING PROGRAM BOOKS A - G 8.5 x 11 30-75 pages each Complete set $84.00 IDEAL FOR..
The Reading Program Book A CD Oral models and opportunities for verbal practice. These&nb..
The Reading Program, Book B: Vocabulary By Amy Awtrey and Carol Markos 42 Pages, 8.5 x 11 sof..
The Reading Program Book C: Sentence Structure By Amy Awtrey and Carol Mark..
The Reading Program Book D: Signals By Amy Awtrey and Carol Markos 60 Pages, 8.5 x..
The Reading Program Book E: Relationships By Amy Awtrey and Carol Markos 62 Pages, 8.5 x 11 ..
The Reading Program Book F: Essay Structure By Amy Awtrey and Carol Markos 76 Page..
The Reading Program Book G: Critical Reading By Amy Awtrey and Carol Markos 76 Pag..
Tests and Checkpoints for the Reading Program By Amy Awtrey and Carol Markos Teacher materi..
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the 'alert' app.

    Creates the Alert and AlertPreference tables plus a unique constraint
    on (user, alert_type, backend). The `models` attribute below is the
    frozen ORM snapshot generated by South; do not edit it by hand.
    """

    def forwards(self, orm):
        # Apply the migration: create both tables, emit South's
        # post-create signals, then add the unique constraint.
        # Adding model 'Alert'
        db.create_table('alert_alert', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('backend', self.gf('django.db.models.fields.CharField')(default='EmailBackend', max_length=20)),
            ('alert_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('title', self.gf('django.db.models.fields.CharField')(default=u'Premium Domain Finder alert', max_length=250)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('when', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('last_attempt', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('is_sent', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('alert', ['Alert'])

        # Adding model 'AlertPreference'
        db.create_table('alert_alertpreference', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('alert_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('backend', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('preference', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('alert', ['AlertPreference'])

        # Adding unique constraint on 'AlertPreference', fields ['user', 'alert_type', 'backend']
        db.create_unique('alert_alertpreference', ['user_id', 'alert_type', 'backend'])

    def backwards(self, orm):
        # Revert the migration; operations mirror forwards() in reverse order
        # (constraint dropped first, then the tables).
        # Removing unique constraint on 'AlertPreference', fields ['user', 'alert_type', 'backend']
        db.delete_unique('alert_alertpreference', ['user_id', 'alert_type', 'backend'])

        # Deleting model 'Alert'
        db.delete_table('alert_alert')

        # Deleting model 'AlertPreference'
        db.delete_table('alert_alertpreference')

    # Frozen ORM state that South uses to reconstruct models at migration time.
    models = {
        'alert.alert': {
            'Meta': {'object_name': 'Alert'},
            'alert_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'backend': ('django.db.models.fields.CharField', [], {'default': "'EmailBackend'", 'max_length': '20'}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'default': "u'Premium Domain Finder alert'", 'max_length': '250'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'when': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
        },
        'alert.alertpreference': {
            'Meta': {'unique_together': "(('user', 'alert_type', 'backend'),)", 'object_name': 'AlertPreference'},
            'alert_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'backend': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'preference': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['alert']
|
THE BEST! Kristen was so helpful. I had talked with a few other attorneys before I came to Kristen and she was so much more compassionate, and her prices were more than fair. She got me the outcome I was hoping for... I would recommend that if anyone need a family law attorney to talk to Kristen.... Kevin, Monroe, WA.
The best divorce Attorney in Seattle - Before I found Ms. Kristen, I had consulted with 5 other attorneys. I initially consulted with her on the phone and 3 minutes into the conversation I knew she was the "one." Through Ms. Kristen, I have filed for dissolution, temporary orders and a domestic violence protection order. It's a very complex divorce case. She had the most knowledge out of all of the attorneys I had spoken with. She knew about jurisdiction and emergency jurisdiction laws, not just for the state of Washington but out of state as well. She has much experience in relocation (with children). She is fierce, strong and fights for me like I am her family. She has given me many options and scenarios so I never feel overwhelmed and know what is coming/expected. Whenever things don't go as planned she comes up with something to remedy it. She makes me feel comfortable and is always available (email, phone, & even text). She listens, is caring and works very hard. My husband holds an esteemed position in the community and she is never intimated about going up against him. I really cannot begin to express my gratitude towards her. She has many years of experience and I would not trust anyone else with my family law issues from this point on. My case is still ongoing and I plan on writing another review when everything is finalized. I highly recommend her! She is worth EVERY penny!!Type your paragraph here. Zuleyha, Seattle, WA.
Great to work with and extremely helpful - Kristen was a real ally in my child custody issues. She was always responsive and tried to guide me in the best way possible. My case was in Pierce county so she was not always 100% familiar with the county specific laws but always researched the statutes and made sure we were on the right path. Raj - Pierce County, WA.
Proactive and Supportive - Ms. Bienstock's combination of empathy and take-charge approach results in action and progress that also feels supportive. Her extensive understanding of the complications involved with family law issues involving substance dependence and domestic violence was essential in guiding our work together.
Ms. Bienstock is fair. She is upfront about approaches she does not think will result in productive outcomes. Her billing is straightforward and clearly outlined. She is considerate of the financial impact that the legal process has on clients.
In short, Ms. Bienstock is smart and proactive and above all else, she treats her clients as human beings. Not cases.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-07-11 12:03
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import journal.models
class Migration(migrations.Migration):
    """Initial schema migration for the journal app.

    Auto-generated by Django 1.9.13 on 2017-07-11. Creates ArticleOrdering,
    BannedIPs, FixedPubCheckItems, Issue, Journal, Notifications,
    PrePublicationChecklistItem, PresetPublicationCheckItem and
    SectionOrdering.

    NOTE(review): the ImageField storage arguments below freeze the absolute
    path '/home/ajrbyers/code/janeway/src/media' from the generating
    developer's machine. Storage/upload_to do not affect the database
    schema, but confirm this frozen path is harmless before reusing the
    migration on other hosts.
    """

    # First migration of this app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ArticleOrdering',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(default=1)),
            ],
        ),
        migrations.CreateModel(
            name='BannedIPs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.GenericIPAddressField()),
                ('date_banned', models.DateField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='FixedPubCheckItems',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('metadata', models.BooleanField(default=False)),
                ('verify_doi', models.BooleanField(default=False)),
                ('select_issue', models.BooleanField(default=False)),
                ('set_pub_date', models.BooleanField(default=False)),
                ('notify_the_author', models.BooleanField(default=False)),
                ('select_render_galley', models.BooleanField(default=False)),
                ('select_article_image', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('volume', models.IntegerField(default=1)),
                ('issue', models.IntegerField(default=1)),
                ('issue_title', models.CharField(blank=True, max_length=300)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('order', models.IntegerField(default=1)),
                ('issue_type', models.CharField(choices=[('Issue', 'Issue'), ('Collection', 'Collection')], default='Issue', max_length=200)),
                ('issue_description', models.TextField()),
                ('cover_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
                ('large_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.issue_large_image_path)),
            ],
            options={
                'ordering': ('order', '-date'),
            },
        ),
        migrations.CreateModel(
            name='Journal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=4)),
                ('domain', models.CharField(default='localhost', max_length=255, unique=True)),
                ('default_cover_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
                ('default_large_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
                ('header_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
                ('favicon', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
                ('description', models.TextField(blank=True, null=True, verbose_name='Journal Description')),
                ('is_remote', models.BooleanField(default=False)),
                ('remote_submit_url', models.URLField(blank=True, null=True)),
                ('remote_view_url', models.URLField(blank=True, null=True)),
                ('nav_home', models.BooleanField(default=True)),
                ('nav_articles', models.BooleanField(default=True)),
                ('nav_issues', models.BooleanField(default=True)),
                ('nav_contact', models.BooleanField(default=True)),
                ('nav_start', models.BooleanField(default=True)),
                ('nav_review', models.BooleanField(default=True)),
                ('nav_sub', models.BooleanField(default=True)),
                ('has_xslt', models.BooleanField(default=False)),
                ('hide_from_press', models.BooleanField(default=False)),
                ('sequence', models.PositiveIntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Notifications',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=100)),
                ('type', models.CharField(choices=[('submission', 'Submission'), ('acceptance', 'Acceptance')], max_length=10)),
                ('active', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='PrePublicationChecklistItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('completed', models.BooleanField(default=False)),
                ('completed_on', models.DateTimeField(blank=True, null=True)),
                ('title', models.TextField()),
                ('text', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='PresetPublicationCheckItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
                ('text', models.TextField()),
                ('enabled', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='SectionOrdering',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(default=1)),
                ('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journal.Issue')),
            ],
        ),
    ]
|
MacBook Air vs. MacBook Pro: Which Apple 13-incher should you buy?
Two 13-inch MacBooks. One's thin. One has more ports. Which one's best for you? We pit them head to head, and the answer's more difficult than you think.
Just a few short months ago (in March, to be exact), we wrote about an odd glut in Apple's otherwise streamlined lineup of products: instead of one or even two 13-inch laptops to choose from, Apple was offering three. The white MacBook, the MacBook Air, and the MacBook Pro each had a slightly different value equation in terms of design, features, and price.
Can the MacBook Air replace the White MacBook?
That glut's been lessened a little now that the white MacBook has been discontinued. The MacBook Air and the MacBook Pro are the only two MacBook product lines left, and they represent two different solutions to same problem. The unibody aluminum Pro used to be considered thin, but it's--relatively--thick at 1 inch, and weighs over 4 pounds. The Pro offers better specs, while the Air, which has just been recently updated, leans toward light weight and quick booting as its advantages.
There's more to consider, though: the 2011 13-inch MacBook Pro and new 13-inch MacBook Air have become more similar than they've ever been before, for several reasons: performance, battery life, and a Thunderbolt port.
Two 13-inch MacBooks: which do you choose?
Size: How quickly things change: in March, the 2011 MacBook Pro was the newest laptop in Apple's stable. Now, it's the oldest. It's obviously far heavier than the 13-inch Air, by over a pound and a half. Edge: Air.
Performance: Last time, we recommended the entry-level 13-inch MacBook Pro as the best all-around Apple 13-incher in terms of price, design, and performance. The MacBook Air has caught up fast. Its price and base RAM/storage specs are still the same, but the new Air has a dramatically faster second-gen Core i5 CPU that makes for a far better computer. Benchmark results are so close they're practically indistinguishable: despite the Air having a lower-speed CPU, it performs nearly identically to the Pro. The 11-inch Air's no slouch either, and it's nearly as good, its performance lag likely being due to having less RAM and a .1GHz slower processor. Edge: Tie.
Battery life: Again, nearly the same. The MacBook Air lost the battle in the spring, but the new, more power-efficient second-gen Intel Core i5 CPU has evened the comparison. Both 13-inchers netted around 6 hours and 40 minutes. Edge: Tie.
Extras: The 13-inch Pro wins on features, but it's a narrower win than you'd expect. A larger hard drive and a DVD-burning optical drive are the larger Pro's advantages, along with an HD Webcam. Port-wise, the Pro adds a FireWire port and an Ethernet jack, and that's it. However, the Air has a higher-resolution 1,440x900-pixel display, while the Pro, oddly enough, has a 1,280x800-pixel screen. Edge: Pro.
Best value for the money: Pro. Unless you truly value size and weight above all else, the 13-inch Pro offers more features, ports, and hard-drive space, along with an optical drive. The advantage is narrow, though; narrower than it's ever been.
For the coffee-shopper and frequent traveler: Air. Obviously, size is key. The Air's tiny, and now it also has improved battery life. Some people, though, might prefer the even smaller 11-inch Air, although it doesn't last as long on a charge.
Student who wants to save money: Pro. Last time, we said to get the white MacBook, the king of value. You're out of luck on deals this time. The $999 11-inch Air is the cheapest, but sacrifices valuable storage space. The $1,199 Pro's technically a good value, but more expensive. Educational discounts currently have the entry-level Pro at $1,099 and the entry-level Air at $1,249. Does a difference of $150 affect your purchasing decision? Alternatively, perhaps consider the $499 iPad, or a Windows laptop.
Money is no object, best computer: Air. We would have said the 13-inch Pro a year ago, but you might as well consider splurging on a 256GB 13-inch Air with an upgraded 1.8GHz Core i7 processor, and get a Thunderbolt-connected storage array along with a Thunderbolt Display to dock with. It'll cost a fortune, but you said you were rich, right?
Executive with expense account: Air. See above. The MacBook Air is finally as fast as the 13-inch Pro, so there's less of a sacrifice between size and speed.
Conclusion: Very tough call. Now it's a split. We give it to the 13-inch Pro on a technicality, for now, for the average person looking to replace an everyday laptop. The 13-inch Pro is still a bit more of a value, even though the Air is arguably a better pure portable Mac. The Air did earn an Editors' Choice Award on CNET, whereas the 13-inch Pro didn't, but that's also a matter of accomplishment for its size class: the 13-inch Air is the best thin MacBook, while the 15-inch Pro is technically the better larger-form MacBook. Even though Apple's Web site suggests that new users start with the MacBook Air, technically, if you're considering a 13-incher, you should start with the $100-cheaper 13-inch Pro.
Are an optical drive and extra hard-drive space worth the added weight? With networked hard drives, external peripherals, and cloud-based storage, is the 13-inch Air more your style? Sound off below, let us know what you'd prefer.
Discuss: MacBook Air vs. MacBook Pro: Which Apple 13-incher should you buy?
|
from collections import namedtuple
from enum import Enum
from math import floor, log10
from sys import stdout
from time import clock
from vsut.assertion import AssertResult
class Unit():
    """A unit is a group of tests, that are run at once.

    Every method of this class, that starts with 'test' will be run automatically,
    when the run()-method is called.
    Before and after every test the setup and teardown methods will be called respectively.
    For every test it's execution time, status, and if necessary an error message are recorded.

    Attributes:
        tests ({int: str}): Maps a unique id to the name of a test method.
        times ({int: str}): Maps a test's id to its execution time, formatted as a string.
        results ({int: AssertResult}): Maps a test's id to its result. If a test was successful its entry is None.
        failed (bool): True once any test in this unit has failed.
        ignoreUnit (bool): Marker that allows callers to skip this unit.
    """

    def __init__(self):
        # Collect every callable attribute whose name starts with 'test'
        # (dir() returns them alphabetically) and give each a numeric id.
        self.tests = {
            testId: funcName
            for testId, funcName in enumerate([method for method in dir(self)
                                               if callable(getattr(self, method))
                                               and method.startswith("test")])
        }
        self.times = {}
        self.results = {}
        self.failed = False
        self.ignoreUnit = False

    def run(self):
        """Runs all tests in this unit.

        Times the execution of all tests and records them.
        """
        # perf_counter is a monotonic wall clock suitable for timing;
        # time.clock was removed in Python 3.8 and measured CPU time
        # (not elapsed time) on some platforms.
        from time import perf_counter
        for testId, name in self.tests.items():
            # Start timing the test (setup/teardown are included in the time).
            start = perf_counter()
            try:
                # Get the method that needs to be executed.
                func = getattr(self, name, None)
                # Run the setup method.
                self.setup()
                # Run the test method.
                func()
                # Run the teardown method.
                self.teardown()
            except AssertResult as e:
                # A failed assertion is reported by raising an AssertResult.
                result = e
                self.failed = True
            else:
                result = None
            self.results[testId] = result
            # Add the execution time of the test to the times map.
            elapsed = perf_counter() - start
            self.times[testId] = "{0:.6f}".format(elapsed)

    def setup(self):
        """Setup is executed before every test.
        """
        pass

    def teardown(self):
        """Teardown is executed after every test.
        """
        pass
|
A high-ranking bureaucrat at the Finance Ministry resigned on Wednesday amid accusations that he made sexually harassing remarks to a female reporter. The allegations dealt a further blow to the scandal-ridden government.
A high-ranking bureaucrat / at the Finance Ministry / resigned on Wednesday / amid accusations / that he made sexually harassing remarks / to a female reporter. / The allegations dealt a further blow / to the scandal-ridden government.
|
'''
The one parameter exponential family distributions used by GLM.
'''
#TODO: quasi, quasibinomial, quasipoisson
#see http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import numpy as np
from scipy import special
from scipy.stats import ss
import links as L
import varfuncs as V
class Family(object):
    """
    The parent class for one-parameter exponential families.

    Parameters
    ----------
    link : a link function instance
        Link is the linear transformation function.
        See the individual families for available links.
    variance : a variance function
        Measures the variance as a function of the mean probabilities.
        See the individual families for the default variance function.
    """
    #TODO: change these class attributes, use valid somewhere...
    valid = [-np.inf, np.inf]  # admissible range of the mean (not enforced yet)
    tol = 1.0e-05
    links = []  # subclasses override this with their admissible link classes

    def _setlink(self, link):
        """
        Helper method to set the link for a family.

        Raises a TypeError if `link` is not a Link instance, and a
        ValueError if it is a Link that is not admissible for this family.
        Note that the ValueError message might not be that informative
        because it prints the family's list of link classes.

        See glm.GLM for a list of appropriate links for each family but note
        that not all of these are currently available.
        """
        #TODO: change the links class attribute in the families to hold
        # meaningful information instead of a list of link classes.
        if not isinstance(link, L.Link):
            raise TypeError("The input should be a valid Link object.")
        if hasattr(self, "links"):
            # the link is valid iff it is an instance of one of the classes
            # listed in this family's `links` attribute (any() also handles
            # an empty list gracefully, unlike max())
            validlink = any([isinstance(link, cls) for cls in self.links])
            if not validlink:
                errmsg = "Invalid link for family, should be in %s. (got %s)"
                raise ValueError(errmsg % (repr(self.links), link))
        # assign only after validation, so a rejected link cannot leave the
        # family holding a stale or invalid _link
        self._link = link

    def _getlink(self):
        """
        Helper method to get the link for a family.
        """
        return self._link

    # link property for each family; points to the validated link instance
    link = property(_getlink, _setlink, doc="Link function for family")

    def __init__(self, link, variance):
        # `link` is a Link class; instantiating it triggers validation via
        # the `link` property setter above
        self.link = link()
        self.variance = variance

    def starting_mu(self, y):
        """
        Starting value for mu in the IRLS algorithm.

        Parameters
        ----------
        y : array
            The untransformed response variable.

        Returns
        -------
        mu_0 : array
            The first guess on the transformed response variable.

        Notes
        -----
        mu_0 = (endog + mean(endog))/2.

        Only the Binomial family takes a different initial value.
        """
        return (y + y.mean())/2.

    def weights(self, mu):
        """
        Weights for IRLS steps.

        Parameters
        ----------
        mu : array-like
            The transformed mean response variable in the exponential family

        Returns
        -------
        w : array
            The weights for the IRLS steps

        Notes
        -----
        `w` = 1 / (link'(`mu`)**2 * variance(`mu`))
        """
        return 1. / (self.link.deriv(mu)**2 * self.variance(mu))

    def deviance(self, Y, mu, scale=1.):
        """
        Deviance of (Y,mu) pair.

        Deviance is usually defined as twice the loglikelihood ratio.

        Parameters
        ----------
        Y : array-like
            The endogenous response variable
        mu : array-like
            The inverse of the link function at the linear predicted values.
        scale : float, optional
            An optional scale argument

        Returns
        -------
        DEV : array
            The value of deviance function defined below.

        Notes
        -----
        DEV = (sum_i(2*loglike(Y_i,Y_i) - 2*loglike(Y_i,mu_i)) / scale

        The deviance functions are analytically defined for each family.
        """
        raise NotImplementedError

    def resid_dev(self, Y, mu, scale=1.):
        """
        The deviance residuals.

        Parameters
        ----------
        Y : array
            The endogenous response variable
        mu : array
            The inverse of the link function at the linear predicted values.
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        Deviance residuals.

        Notes
        -----
        The deviance residuals are defined for each family.
        """
        raise NotImplementedError

    def fitted(self, eta):
        """
        Fitted values based on linear predictors eta.

        Parameters
        ----------
        eta : array
            Values of the linear predictor of the model.
            dot(X,beta) in a classical linear model.

        Returns
        -------
        mu : array
            The mean response variables given by the inverse of the link
            function.
        """
        return self.link.inverse(eta)

    def predict(self, mu):
        """
        Linear predictors based on given mu values.

        Parameters
        ----------
        mu : array
            The mean response variables

        Returns
        -------
        eta : array
            Linear predictors based on the mean response variables. The value
            of the link function at the given mu.
        """
        return self.link(mu)

    def loglike(self, Y, mu, scale=1.):
        """
        The loglikelihood function.

        Parameters
        ----------
        Y : array
            Usually the endogenous response variable.
        mu : array
            Usually but not always the fitted mean response variable.
        scale : float, optional
            The scale of the loglikelihood; the default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood evaluated at (Y,mu).

        Notes
        -----
        This is defined for each family. Y and mu are not restricted to
        `Y` and `mu` respectively. For instance, the deviance function calls
        both loglike(Y,Y) and loglike(Y,mu) to get the likelihood ratio.
        """
        raise NotImplementedError

    def resid_anscombe(self, Y, mu):
        """
        The Anscombe residuals.

        See also
        --------
        statsmodels.families.family.Family docstring and the `resid_anscombe`
        for the individual families for more information.
        """
        raise NotImplementedError
class Poisson(Family):
    """
    Poisson exponential family.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Poisson family is the log link. Available
        links are log, identity, and sqrt. See statsmodels.family.links for
        more information.

    Attributes
    ----------
    Poisson.link : a link instance
        The link function of the Poisson instance.
    Poisson.variance : varfuncs instance
        `variance` is an instance of
        statsmodels.genmod.families.family.varfuncs.mu

    See also
    --------
    statsmodels.genmod.families.family.Family
    """
    links = [L.log, L.identity, L.sqrt]
    variance = V.mu
    valid = [0, np.inf]  # Poisson means are nonnegative

    def __init__(self, link=L.log):
        self.variance = Poisson.variance
        self.link = link()

    def resid_dev(self, Y, mu, scale=1.):
        """Poisson deviance residuals.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        resid_dev = sign(Y-mu)*sqrt(2*Y*log(Y/mu)-2*(Y-mu))/scale
        """
        return np.sign(Y-mu) * np.sqrt(2*Y*np.log(Y/mu)-2*(Y-mu))/scale

    def deviance(self, Y, mu, scale=1.):
        '''
        Poisson deviance function.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument

        Returns
        -------
        deviance : float
            The deviance function at (Y,mu) as defined below.

        Notes
        -----
        If a constant term is included it is defined as

        :math:`deviance = 2*\\sum_{i}(Y*\\log(Y/\\mu))`
        '''
        if np.any(Y == 0):
            # Zero counts contribute 0 to the sum (lim y->0 of y*log(y) = 0),
            # so only fill positions where Y/mu is nonzero.
            retarr = np.zeros(Y.shape)
            Ymu = Y/mu
            mask = Ymu != 0
            # Boolean-mask assignment keeps each value aligned with its
            # position. np.putmask would recycle the compressed value array
            # by the target's *flat index* (values[n % len(values)]), which
            # scrambles values whenever the masked positions are not a
            # prefix of the array.
            retarr[mask] = Y[mask] * np.log(Ymu[mask]) / scale
            return 2*np.sum(retarr)
        else:
            return 2*np.sum(Y*np.log(Y/mu))/scale

    def loglike(self, Y, mu, scale=1.):
        """
        Loglikelihood function for Poisson exponential family distribution.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at (Y,mu,scale)
            as defined below.

        Notes
        -----
        llf = scale * sum(-mu + Y*log(mu) - gammaln(Y+1))
        where gammaln is the log gamma function
        """
        return scale * np.sum(-mu + Y*np.log(mu)-special.gammaln(Y+1))

    def resid_anscombe(self, Y, mu):
        """
        Anscombe residuals for the Poisson exponential family distribution.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Poisson family defined below

        Notes
        -----
        resid_anscombe = :math:`(3/2.)*(Y^{2/3.} - \\mu^{2/3.})/\\mu^{1/6.}`
        """
        return (3/2.)*(Y**(2/3.)-mu**(2/3.))/mu**(1/6.)
class Gaussian(Family):
    """
    Gaussian exponential family distribution.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gaussian family is the identity link.
        Available links are log, identity, and inverse.
        See statsmodels.family.links for more information.

    Attributes
    ----------
    Gaussian.link : a link instance
        The link function of the Gaussian instance
    Gaussian.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.constant

    See also
    --------
    statsmodels.genmod.families.family.Family
    """
    links = [L.log, L.identity, L.inverse_power]
    variance = V.constant

    def __init__(self, link=L.identity):
        self.variance = Gaussian.variance
        self.link = link()

    def resid_dev(self, Y, mu, scale=1.):
        """
        Gaussian deviance residuals.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        `resid_dev` = (`Y` - `mu`)/sqrt(variance(`mu`))/scale
        """
        return (Y - mu) / np.sqrt(self.variance(mu))/scale

    def deviance(self, Y, mu, scale=1.):
        """
        Gaussian deviance function.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument

        Returns
        -------
        deviance : float
            The deviance function at (Y,mu) as defined below.

        Notes
        -----
        `deviance` = sum((Y-mu)**2)/scale
        """
        return np.sum((Y-mu)**2)/scale

    def loglike(self, Y, mu, scale=1.):
        """
        Loglikelihood function for Gaussian exponential family distribution.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            Scales the loglikelihood function. The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at (Y,mu,scale)
            as defined below.

        Notes
        -----
        If the link is the identity link function then the
        loglikelihood function is the same as the classical OLS model.

        llf = -(nobs/2)*(log(SSR) + (1 + log(2*pi/nobs)))

        where SSR = sum((Y-link^(-1)(mu))**2)

        If the link is not the identity link then the loglikelihood
        function is defined as

        llf = sum((`Y`*`mu`-`mu`**2/2)/`scale` - `Y`**2/(2*`scale`) - \
              (1/2.)*log(2*pi*`scale`))
        """
        if isinstance(self.link, L.Power) and self.link.power == 1:
            # Identity link: this is just the loglikelihood for classical OLS.
            nobs2 = Y.shape[0]/2.
            # scipy.stats.ss was deprecated and then removed from SciPy;
            # compute the sum of squared residuals directly instead.
            SSR = np.sum((Y - self.fitted(mu))**2)
            llf = -np.log(SSR) * nobs2
            llf -= (1+np.log(np.pi/nobs2))*nobs2
            return llf
        else:
            # Return the loglikelihood for Gaussian GLM.
            return np.sum((Y*mu-mu**2/2)/scale-Y**2/(2*scale)-\
                .5*np.log(2*np.pi*scale))

    def resid_anscombe(self, Y, mu):
        """
        The Anscombe residuals for the Gaussian exponential family
        distribution.

        Parameters
        ----------
        Y : array
            Endogenous response variable
        mu : array
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Gaussian family defined below

        Notes
        -----
        `resid_anscombe` = `Y` - `mu`
        """
        return Y-mu
class Gamma(Family):
    """
    Gamma exponential family distribution.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gamma family is the inverse link.
        Available links are log, identity, and inverse.
        See statsmodels.family.links for more information.

    Attributes
    ----------
    Gamma.link : a link instance
        The link function of the Gamma instance
    Gamma.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.mu_squared

    See also
    --------
    statsmodels.genmod.families.family.Family
    """
    links = [L.log, L.identity, L.inverse_power]
    variance = V.mu_squared

    def __init__(self, link=L.inverse_power):
        self.variance = Gamma.variance
        self.link = link()

    #TODO: note the note
    def _clean(self, x):
        """
        Clip the data into (0, inf) so that logarithms stay finite.

        Notes
        -----
        The need for this function was discovered through usage and it is
        possible that other families might need a similar check for the
        validity of the domain.
        """
        return np.clip(x, 1.0e-10, np.inf)

    def deviance(self, Y, mu, scale=1.):
        """
        Gamma deviance function.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument (accepted but not applied here)

        Returns
        -------
        deviance : float
            Deviance function as defined below

        Notes
        -----
        `deviance` = 2*sum((Y - mu)/mu - log(Y/mu))
        """
        ratio = self._clean(Y/mu)
        terms = (Y - mu)/mu - np.log(ratio)
        return 2 * np.sum(terms)

    def resid_dev(self, Y, mu, scale=1.):
        """
        Gamma deviance residuals.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument (accepted but not applied here)

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        `resid_dev` = sign(Y - mu) * sqrt(-2*(-(Y-mu)/mu + log(Y/mu)))
        """
        ratio = self._clean(Y/mu)
        inner = -2*(-(Y-mu)/mu + np.log(ratio))
        return np.sign(Y-mu) * np.sqrt(inner)

    def loglike(self, Y, mu, scale=1.):
        """
        Loglikelihood function for Gamma exponential family distribution.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at (Y,mu,scale)
            as defined below.

        Notes
        -----
        llf = -1/scale * sum(Y/mu + log(mu) + (scale-1)*log(Y) + log(scale) +
              scale*gammaln(1/scale))

        where gammaln is the log gamma function.
        """
        summand = Y/mu+np.log(mu)+(scale-1)*np.log(Y)\
            +np.log(scale)+scale*special.gammaln(1./scale)
        # in Stata scale is set to equal 1 for reporting llf
        # in R it's the dispersion, though there is a loss of precision vs.
        # our results due to an assumed difference in implementation
        return - 1./scale * np.sum(summand)

    def resid_anscombe(self, Y, mu):
        """
        The Anscombe residuals for the Gamma exponential family distribution.

        Parameters
        ----------
        Y : array
            Endogenous response variable
        mu : array
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Gamma family defined below

        Notes
        -----
        resid_anscombe = 3*(Y**(1/3.)-mu**(1/3.))/mu**(1/3.)
        """
        return 3*(Y**(1/3.)-mu**(1/3.))/mu**(1/3.)
class Binomial(Family):
    """
    Binomial exponential family distribution.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Binomial family is the logit link.
        Available links are logit, probit, cauchy, log, and cloglog.
        See statsmodels.family.links for more information.

    Attributes
    ----------
    Binomial.link : a link instance
        The link function of the Binomial instance
    Binomial.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.binary

    See also
    --------
    statsmodels.genmod.families.family.Family

    Notes
    -----
    endog for Binomial can be specified in one of three ways.
    """
    links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog]
    variance = V.binary # this is not used below in an effort to include n

    def __init__(self, link=L.logit): #, n=1.):
        #TODO: it *should* work for a constant n>1 actually, if data_weights is
        # equal to n
        # self.n is overwritten by initialize() if needed, but is always
        # used to initialize variance, since Y is assumed/forced to be (0,1)
        self.n = 1
        self.variance = V.Binomial(n=self.n)
        self.link = link()

    def starting_mu(self, y):
        """
        The starting values for the IRLS algorithm for the Binomial family.

        A good choice for the binomial family is
        starting_mu = (y + .5)/2
        """
        return (y + .5)/2

    def initialize(self, Y):
        '''
        Initialize the response variable.

        Parameters
        ----------
        Y : array
            Endogenous response variable

        Returns
        -------
        If `Y` is binary, returns `Y`.
        If `Y` is a 2d array, then the input is assumed to be in the format
        (successes, failures) and successes/(successes + failures) is
        returned. And n is set to successes + failures.
        '''
        if (Y.ndim > 1 and Y.shape[1] > 1):
            y = Y[:,0]
            self.n = Y.sum(1) # overwrite self.n for deviance below
            # return the observed proportion of successes per trial group
            return y*1./self.n
        else:
            return Y

    def deviance(self, Y, mu, scale=1.):
        '''
        Deviance function for either Bernoulli or Binomial data.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable (already transformed to a probability
            if appropriate).
        mu : array
            Fitted mean response variable
        scale : float, optional
            An optional scale argument; note it is accepted but not applied
            in this implementation.

        Returns
        -------
        deviance : float
            The deviance function as defined below

        Notes
        -----
        If the endogenous variable is binary:

        `deviance` = -2*sum(I_one * log(mu) + (I_zero)*log(1-mu))

        where I_one is an indicator function that evaluates to 1 if Y_i == 1
        and I_zero is an indicator function that evaluates to 1 if Y_i == 0.

        If the model is binomial (Y holds proportions and self.n the group
        sizes, cf. initialize):

        `deviance` = 2*sum(n*(Y*log(Y/mu) + (1-Y)*log((1-Y)/(1-mu))))

        The 1e-200 terms guard against taking log(0).
        '''
        if np.shape(self.n) == () and self.n == 1:
            one = np.equal(Y,1)
            return -2 * np.sum(one * np.log(mu+1e-200) + (1-one) * np.log(1-mu+1e-200))
        else:
            return 2*np.sum(self.n*(Y*np.log(Y/mu+1e-200)+(1-Y)*np.log((1-Y)/(1-mu)+1e-200)))

    def resid_dev(self, Y, mu, scale=1.):
        """
        Binomial deviance residuals.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        If `Y` is binary:

        resid_dev = sign(Y-mu)*sqrt(-2*log(I_one*mu + I_zero*(1-mu)))/scale

        where I_one is an indicator function that evaluates as 1 if Y == 1
        and I_zero is an indicator function that evaluates as 1 if Y == 0.

        If `Y` is binomial:

        resid_dev = sign(Y-mu)*sqrt(2*n*(Y*log(Y/mu)+(1-Y)*log((1-Y)/(1-mu))))/scale

        where Y and n are as defined in Binomial.initialize.
        The 1e-200 terms guard against taking log(0).
        """
        # clip mu into the link's valid domain before taking logs
        mu = self.link._clean(mu)
        if np.shape(self.n) == () and self.n == 1:
            one = np.equal(Y,1)
            return np.sign(Y-mu)*np.sqrt(-2*np.log(one*mu+(1-one)*(1-mu)))\
                    /scale
        else:
            return np.sign(Y-mu) * np.sqrt(2*self.n*(Y*np.log(Y/mu+1e-200)+(1-Y)*\
                    np.log((1-Y)/(1-mu)+1e-200)))/scale

    def loglike(self, Y, mu, scale=1.):
        """
        Loglikelihood function for Binomial exponential family distribution.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at (Y,mu,scale)
            as defined below.

        Notes
        -----
        If `Y` is binary:

        `llf` = scale*sum(Y*log(mu/(1-mu))+log(1-mu))

        If `Y` is binomial:

        `llf` = scale*sum(gammaln(n+1) - gammaln(y+1) - gammaln(n-y+1) +
                y*log(mu/(1-mu)) + n*log(1-mu))

        where gammaln is the log gamma function and y = Y*n with Y and n
        as defined in Binomial initialize. This simply makes y the original
        number of successes.
        """
        if np.shape(self.n) == () and self.n == 1:
            return scale*np.sum(Y*np.log(mu/(1-mu)+1e-200)+np.log(1-mu))
        else:
            y=Y*self.n #convert back to successes
            return scale * np.sum(special.gammaln(self.n+1)-\
                special.gammaln(y+1)-special.gammaln(self.n-y+1)\
                +y*np.log(mu/(1-mu))+self.n*np.log(1-mu))

    def resid_anscombe(self, Y, mu):
        '''
        The Anscombe residuals.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.

        Notes
        -----
        sqrt(n)*(cox_snell(Y)-cox_snell(mu))/(mu**(1/6.)*(1-mu)**(1/6.))

        where cox_snell is defined as
        cox_snell(x) = betainc(2/3., 2/3., x)*beta(2/3., 2/3.)
        where betainc is the regularized incomplete beta function and beta
        the (complete) beta function.

        The name 'cox_snell' is idiosyncratic and is simply used for
        convenience following the approach suggested in Cox and Snell (1968).
        Further note that

        cox_snell(x) = x**(2/3.)/(2/3.)*hyp2f1(2/3.,1/3.,5/3.,x)

        where hyp2f1 is the hypergeometric 2f1 function. The Anscombe
        residuals are sometimes defined in the literature using the
        hyp2f1 formulation. Both betainc and hyp2f1 can be found in scipy.

        References
        ----------
        Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
        paper." Journal of the Royal Statistical Society B. 15, 229-30.

        Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
        Journal of the Royal Statistical Society B. 30, 248-75.
        '''
        cox_snell = lambda x: special.betainc(2/3., 2/3., x)\
                    *special.beta(2/3.,2/3.)
        return np.sqrt(self.n)*(cox_snell(Y)-cox_snell(mu))/\
                (mu**(1/6.)*(1-mu)**(1/6.))
class InverseGaussian(Family):
    """
    InverseGaussian exponential family.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the inverse Gaussian family is the
        inverse squared link.
        Available links are inverse_squared, inverse, log, and identity.
        See statsmodels.family.links for more information.

    Attributes
    ----------
    InverseGaussian.link : a link instance
        The link function of the inverse Gaussian instance
    InverseGaussian.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.mu_cubed

    See also
    --------
    statsmodels.genmod.families.family.Family

    Notes
    -----
    The inverse Gaussian distribution is sometimes referred to in the
    literature as the Wald distribution.
    """
    links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
    variance = V.mu_cubed

    def __init__(self, link=L.inverse_squared):
        self.variance = InverseGaussian.variance
        self.link = link()

    def resid_dev(self, Y, mu, scale=1.):
        """
        Deviance residuals for the inverse Gaussian family.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        `resid_dev` = sign(Y-mu)*sqrt((Y-mu)**2/(Y*mu**2))/scale
        """
        squared_term = (Y-mu)**2/(Y*mu**2)
        return np.sign(Y-mu) * np.sqrt(squared_term)/scale

    def deviance(self, Y, mu, scale=1.):
        """
        Inverse Gaussian deviance function.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument

        Returns
        -------
        deviance : float
            Deviance function as defined below

        Notes
        -----
        `deviance` = sum((Y-mu)**2/(Y*mu**2))/scale
        """
        return np.sum((Y-mu)**2/(Y*mu**2))/scale

    def loglike(self, Y, mu, scale=1.):
        """
        Loglikelihood function for the inverse Gaussian distribution.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at (Y,mu,scale)
            as defined below.

        Notes
        -----
        `llf` = -(1/2.)*sum((Y-mu)**2/(Y*mu**2*scale) + log(scale*Y**3)
                + log(2*pi))
        """
        return -.5 * np.sum((Y-mu)**2/(Y*mu**2*scale)\
            + np.log(scale*Y**3) + np.log(2*np.pi))

    def resid_anscombe(self, Y, mu):
        """
        The Anscombe residuals for the inverse Gaussian distribution.

        Parameters
        ----------
        Y : array
            Endogenous response variable
        mu : array
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the inverse Gaussian distribution as
            defined below

        Notes
        -----
        `resid_anscombe` = log(Y/mu)/sqrt(mu)
        """
        return np.log(Y/mu)/np.sqrt(mu)
class NegativeBinomial(Family):
    """
    Negative Binomial exponential family.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the negative binomial family is the log link.
        Available links are log, cloglog, identity, nbinom and power.
        See statsmodels.family.links for more information.
    alpha : float, optional
        The ancillary parameter for the negative binomial distribution.
        For now `alpha` is assumed to be nonstochastic. The default value
        is 1. Permissible values are usually assumed to be between .01 and 2.

    Attributes
    ----------
    NegativeBinomial.link : a link instance
        The link function of the negative binomial instance
    NegativeBinomial.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.nbinom

    See also
    --------
    statsmodels.genmod.families.family.Family

    Notes
    -----
    Support for Power link functions is not yet supported.
    """
    links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
    #TODO: add the ability to use the power links with an if test
    # similar to below
    variance = V.nbinom

    def __init__(self, link=L.log, alpha=1.):
        self.alpha = alpha
        self.variance = V.NegativeBinomial(alpha=self.alpha)
        # `link` is a class; the nbinom link itself depends on alpha, so it
        # must be constructed with the family's alpha. The previous
        # isinstance() test could never be true for a class object, which
        # silently dropped alpha for nbinom links.
        if isinstance(link, type) and issubclass(link, L.NegativeBinomial):
            self.link = link(alpha=self.alpha)
        else:
            self.link = link()

    def _clean(self, x):
        """
        Helper function to trim the data so that it is in (0,inf).

        Notes
        -----
        The need for this function was discovered through usage and it is
        possible that other families might need a check for validity of the
        domain.
        """
        return np.clip(x, 1.0e-10, np.inf)

    def deviance(self, Y, mu, scale=1.):
        """
        Returns the value of the deviance function.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument

        Returns
        -------
        deviance : float
            Deviance function as defined below

        Notes
        -----
        `deviance` = sum(piecewise)/scale

        where piecewise is defined as

        if :math:`Y_{i} == 0`:
        piecewise_i = :math:`2\\log\\left(1+\\alpha*\\mu\\right)/\\alpha`

        if :math:`Y_{i} > 0`:
        piecewise_i = :math:`2 Y \\log(Y/\\mu)-2/\\alpha(1+\\alpha Y)*\\log((1+\\alpha Y)/(1+\\alpha\\mu))`
        """
        iszero = np.equal(Y, 0)
        notzero = 1 - iszero
        Y_mu = self._clean(Y/mu)
        # piecewise sum: zero counts use the limit term, positive counts
        # the full likelihood-ratio term
        tmp = iszero*2*np.log(1+self.alpha*mu)/self.alpha
        tmp += notzero*(2*Y*np.log(Y_mu)-2/self.alpha*(1+self.alpha*Y)*\
            np.log((1+self.alpha*Y)/(1+self.alpha*mu)))
        return np.sum(tmp)/scale

    def resid_dev(self, Y, mu, scale=1.):
        '''
        Negative Binomial deviance residuals.

        Parameters
        ----------
        Y : array-like
            `Y` is the response variable
        mu : array-like
            `mu` is the fitted value of the model
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        resid_dev : array
            The array of deviance residuals

        Notes
        -----
        `resid_dev` = sign(Y-mu) * sqrt(piecewise)/scale

        where piecewise is defined as

        if :math:`Y_i = 0`:
        :math:`piecewise_i = 2*log(1+alpha*mu)/alpha`

        if :math:`Y_i > 0`:
        :math:`piecewise_i = 2*Y*log(Y/\\mu)-2/\\alpha*(1+\\alpha*Y)*log((1+\\alpha*Y)/(1+\\alpha*\\mu))`
        '''
        iszero = np.equal(Y, 0)
        notzero = 1 - iszero
        # NOTE(review): unlike deviance(), Y/mu is not passed through
        # _clean() here, so Y == 0 produces log(0) inside the (masked)
        # notzero term -- confirm whether that is intentional.
        tmp = iszero*2*np.log(1+self.alpha*mu)/self.alpha
        tmp += notzero*(2*Y*np.log(Y/mu)-2/self.alpha*(1+self.alpha*Y)*\
            np.log((1+self.alpha*Y)/(1+self.alpha*mu)))
        return np.sign(Y-mu)*np.sqrt(tmp)/scale

    def loglike(self, Y, fittedvalues=None):
        """
        The loglikelihood function for the negative binomial family.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        fittedvalues : array-like
            The linear fitted values of the model. This is dot(exog,params).

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (Y,fittedvalues) as defined below.

        Notes
        -----
        sum(Y*log(alpha*exp(fittedvalues)/(1+alpha*exp(fittedvalues))) -
        log(1+alpha*exp(fittedvalues))/alpha + constant)

        where constant is defined as

        constant = gammaln(Y + 1/alpha) - gammaln(Y + 1) - gammaln(1/alpha)
        """
        # don't need to specify mu
        if fittedvalues is None:
            raise AttributeError("The loglikelihood for the negative "
                                 "binomial requires that the fitted values "
                                 "be provided via the `fittedvalues` "
                                 "keyword argument.")
        constant = special.gammaln(Y + 1/self.alpha) - special.gammaln(Y+1)\
            -special.gammaln(1/self.alpha)
        return np.sum(Y*np.log(self.alpha*np.exp(fittedvalues)/\
            (1 + self.alpha*np.exp(fittedvalues))) - \
            np.log(1+self.alpha*np.exp(fittedvalues))/self.alpha\
            + constant)

    def resid_anscombe(self, Y, mu):
        """
        The Anscombe residuals for the negative binomial family.

        Parameters
        ----------
        Y : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.

        Notes
        -----
        `resid_anscombe` = (hyp2f1(-alpha*Y)-hyp2f1(-alpha*mu)+
        1.5*(Y**(2/3.)-mu**(2/3.)))/(mu+alpha*mu**2)**(1/6.)

        where hyp2f1 is the hypergeometric 2f1 function parameterized as

        hyp2f1(x) = hyp2f1(2/3.,1/3.,5/3.,x)
        """
        hyp2f1 = lambda x: special.hyp2f1(2/3., 1/3., 5/3., x)
        return (hyp2f1(-self.alpha*Y)-hyp2f1(-self.alpha*mu)+1.5*(Y**(2/3.)-\
            mu**(2/3.)))/(mu+self.alpha*mu**2)**(1/6.)
|
The Harlequin mask is one of three masks Geralt can choose from for the ball at the Vegelbud Estate. It can be purchased from Elihal or Pierre.
This page was last edited on 5 July 2018, at 23:36.
|
def break_words(stuff):
    """This function will break up words for us."""
    return stuff.split(' ')
def sort_words(words):
    """Sorts the words."""
    ordered = list(words)
    ordered.sort()
    return ordered
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
    """Takes in a full sentence and returns the sorted words."""
    return sort_words(break_words(sentence))
def print_first_and_last(sentence):
    """Prints the first and last words of the sentence."""
    # split on single spaces, exactly as break_words does
    words = sentence.split(' ')
    print_first_word(words)
    print_last_word(words)
def print_first_and_last_sorted(sentence):
    """Sorts the words then prints the first and last one."""
    # equivalent to sort_sentence(sentence)
    words = sort_words(break_words(sentence))
    print_first_word(words)
    print_last_word(words)
# Demonstration of escape sequences and multi-line strings (Python 2 prints).
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
# The poem deliberately embeds \t and \n escapes inside a triple-quoted string.
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explantion
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
# NOTE(review): 10 - 2 + 3 - 5 evaluates to 6, not 5; the mismatched label
# appears to be part of the exercise text -- confirm before "fixing".
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(started):
    """Return (jelly_beans, jars, crates) derived from *started*.

    Fix: uses floor division so the results stay integers under Python 3
    as well (the original `/` was Python-2 integer division here).
    """
    jelly_beans = started * 500
    jars = jelly_beans // 1000
    crates = jars // 100
    return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print("With a starting point of: %d" % start_point)
# Fix: the message said "jeans" while the equivalent line below says
# "beans" -- made consistent. Prints use the form valid on Py2 and Py3.
print("We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates))
# Fix: floor division keeps start_point an int under Python 3 too.
start_point = start_point // 10
print("We can also do that this way:")
print("We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point))
sentence = "All good\tthings come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
# print_sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
The ongoing de facto genocide in the Republic of Yemen in a war whose most intense phase began in 2015, has until very recently been all but ignored in the Western mainstream media. What has also been ignored is the fundamental casus belli for the US-backed Saudi war, ostensibly against the Shi’ite Houthi by the Sunni Wahhabite Saudis. As with virtually every war and destabilization since the British first discovered abundant oil in the Persian Gulf over a century ago, the Yemen war is about oil, more precisely about control of oil — lots of oil.
The 2003 invasion of Iraq was about oil. Several US officials admitted so at the time, including Paul Wolfowitz. “You’ve got to go where the oil is. I don’t think about it [political volatility] very much,” Cheney told a meeting of Texas oilmen in 1998 when he was still CEO of Halliburton, the world’s largest oil services company. As Vice President under Bush Jr, Cheney by all indications architected the US military campaigns of Defense Secretary Don Rumsfeld to “take out seven countries in five years,” as General Wesley Clark famously reported it several years later. All seven are strategic to control of the huge Middle East oil flows to China, to the EU and to the world economy.
|
#!/usr/bin/python3
from abc import ABCMeta, abstractmethod
from venus.stock_base import StockEventBase, StockBase
from venus import stock_base
import pandas
import datetime
import numpy as np
from jupiter.utils import TIME_FMT
"""
趋势跟踪法
所有的择时法,最终都输出一个stock list
所有的风控,都实时输出signal
所有的选股法,最终也输出一个stock list
1.当90日线位于250日线以下,30日线上穿60日线时,发出买入信号
2.判断n日线lower than m日线
3.判断n日线上穿m日线,或下穿m日线
4.获取一个时间段内的数据线
"""
class StockDataSet(object):
    """
    Wrap an externally supplied OHLC pandas.DataFrame and derive
    moving-average and crossover-signal columns from it.
    usage: feed data via set_stock_data, or assign StockDataSet.data directly.
    """
    def __init__(self):
        # Working DataFrame; filled by set_stock_data or by the caller.
        self.data = pandas.DataFrame()
    def set_stock_data(self, df:pandas.DataFrame):
        """
        Normalize *df* in place and append MA5/MA10 rolling-mean columns.
        :param df: columns [trade_date, open_price, close_price, high_price, low_price]
        :return: the same DataFrame, re-indexed by trade_date
        """
        # NOTE(review): only warns on a bad shape -- the column assignment
        # below will still raise for non-5-column input; confirm intent.
        if df.shape[1] != 5:
            print("data shape error, input date should has 5 columns, date type first, and others float.")
        df.columns = ['trade_date', 'open', 'close', 'high', 'low']
        df['trade_date'] = pandas.to_datetime(df['trade_date'],format=TIME_FMT)
        df.set_index('trade_date', inplace=True)
        # Rolling-mean window sizes; extend this list to add more MA columns.
        mean = [5, 10,]
        for i in mean:
            df[f"MA{i}"] = df['close'].rolling(i).mean()
        return df
    def set_time_period(self, start_date:datetime.date, end_date:datetime.date):
        # Restrict the working data to [start_date, end_date] (label slice,
        # inclusive at both ends).
        self.data = self.data.loc[start_date:end_date]
        return self.data
    def get_data(self):
        # Plain accessor for the current working DataFrame.
        return self.data
    def init_data(self, stock_code, start_date):
        # Placeholder: subclasses load data for stock_code from start_date.
        pass
    def detect_cross(self):
        """Flag MA5/MA10 crossovers and derive per-row B/S signals."""
        import numpy as np
        self.data['DIFF'] = self.data['MA5'] - self.data['MA10']
        # Previous day's spread; a sign change between DIFF and DIFF2
        # marks a crossover day.
        self.data['DIFF2'] = self.data['DIFF'].shift(1)
        self.data.dropna(inplace=True)
        self.data['flag'] = self.data['DIFF'] * self.data['DIFF2']
        # flag==1 on a crossover day (product of spreads <= 0), else 0.
        self.data['flag'] = self.data['flag'].apply(lambda x: 1 if x<=0 else 0 )
        # Sign of DIFF distinguishes an upward (golden) from a downward cross.
        self.data['flag'] *= np.sign(self.data['DIFF'])
        # Map +1/-1/0 to 'B'/'S'/NaN via the module-level bs_signal helper.
        self.data['signal'] = self.data['flag'].apply(bs_signal )
        # Day-over-day close ratio, compounded by profit() while holding.
        self.data['amp'] = self.data['close'] / self.data['close'].shift(1)
        # print(self.data)
    def profit(self):
        """Simulate holding between B and S signals and plot the equity curve."""
        self.data['value'] = 1.0
        # p: position flag (truthy while holding); v: compounded value.
        p = 0
        v = 1.0
        for index,row in self.data.iterrows():
            if p:
                v *= row['amp']
            self.data.loc[index,'value'] = v
            if row['signal'] == 'B':
                p = 1.0
            elif row['signal'] == 'S':
                p = 0.0
        print(self.data)
        import matplotlib.pyplot as plt
        result = pandas.DataFrame()
        #result['close'] = self.data['close']
        result['value'] = self.data['value']
        result.index = self.data.index
        result.plot()
        plt.show()
def bs_signal(x):
    """Map a signed flag to a trade signal: positive -> 'B' (buy),
    negative -> 'S' (sell), zero -> NaN (no signal)."""
    if x > 0:
        return 'B'
    if x < 0:
        return 'S'
    return np.nan
class StratagyBase(StockDataSet):
    """Base strategy: combines the StockDataSet pipeline with a price source.

    NOTE(review): the class name is presumably a typo of 'StrategyBase';
    renaming would break callers, so it is kept.
    """
    def __init__(self, header):
        # Bug fix: the original called super(StockDataSet, self).__init__(),
        # which skips StockDataSet.__init__ entirely, and then replaced
        # self.data with a StockDataSet *instance* (not a DataFrame),
        # breaking the column assignments in detect_cross.
        super(StratagyBase, self).__init__()
        self.header = header
        self.price_data = ClosePrice(header)
    def set_benchmark(self, stock_code)->bool:
        """Load the benchmark close-price series for *stock_code*."""
        return self.price_data.get_benchmark(stock_code)
    def get_stock_data(self, stock_code:str):
        """Fetch the close-price series for *stock_code*."""
        return self.price_data.get_benchmark(stock_code)
    def detect_cross(self):
        """Compute MA5/MA10 spread columns (work in progress)."""
        self.data['DIFF'] = self.data['MA5'] - self.data['MA10']
        # Bug fix: shift(-1) looked one day into the *future*; use the
        # previous day's spread as StockDataSet.detect_cross does.
        self.data['DIFF2'] = self.data['DIFF'].shift(1)
        self.data['flag'] = 0
        print(self.data)
def conv(x:list, y:list)-> float:
    """Dot product of *x* and *y* (*y* must be at least as long as *x*)."""
    total = 0
    for idx, value in enumerate(x):
        total += value * y[idx]
    return total
class ClosePrice(object):
    """
    A smart application to get price series for a stock from MySQL.
    :param header: connection header passed to mysqlBase
    """
    def __init__(self, header):
        # Bug fix: mysqlBase is never imported at module level, so the
        # original raised NameError here; import it locally, matching the
        # local-import style used elsewhere in this file.
        from polaris.mysql8 import mysqlBase
        self.mysql = mysqlBase(header)
    def get_data(self, stock_code:str, query_type='close'):
        """Return a DataFrame of prices indexed by trade_date.

        :param stock_code: table/code to query
        :param query_type: 'close' (close price only) or 'full' (OHLC)
        :raises ValueError: for an unknown query_type (the original fell
            through to an UnboundLocalError instead)
        """
        if query_type=='close':
            query_column = 'trade_date,close_price'
            def_column = ['trade_date', f"{stock_code}"]
        elif query_type == 'full':
            query_column = 'trade_date,open_price,close_price,highest_price,lowest_price'
            def_column = ['trade_date','open','close','high','low']
        else:
            raise ValueError(f"unknown query_type: {query_type!r}")
        result = self.mysql.select_values(stock_code, query_column)
        result.columns = def_column
        result['trade_date'] = pandas.to_datetime(result['trade_date'])
        result.set_index('trade_date', inplace=True)
        return result
    def get_benchmark(self, stock_code:str):
        """Shortcut: close-price series for a benchmark index."""
        return self.get_data(stock_code, query_type='close')
class RiskBase(object):
    """Base class for risk-control components."""
    def __init__(self):
        # No shared state yet; subclasses hold their own configuration.
        pass
    def set_threshold(self, threshold):
        """Set the risk threshold. Subclasses must override this."""
        raise NotImplementedError
# Event (market, signal, order, fill)
# Event Queue
# portfolio
# DataHandler(abstract base class)产生market event
# Strategy
class Strategy(object, metaclass=ABCMeta):
    """Abstract strategy interface.

    Fix: the original used the Python-2 `__metaclass__ = ABCMeta` class
    attribute, which Python 3 silently ignores -- the class was never
    actually abstract. The metaclass keyword makes it so.
    """
    @abstractmethod
    def interface(self):
        """Entry point every concrete strategy must implement."""
        raise NotImplementedError
# ExecutionHandler
# Back test
class MarketEventBase(object):
    """Base type for market events (no behavior defined yet)."""
    pass
class SingalBase(object):
    """Base type for signals.

    NOTE(review): the name is presumably a typo of 'SignalBase'; it is
    kept unchanged so existing callers keep working.
    """
    def __init__(self):
        pass
class CAMP(StockEventBase):
    """
    Asset-pricing metrics (beta, Sharpe ratio) against a market index.
    NOTE(review): the class name is presumably a typo of 'CAPM'; renaming
    would break callers, so it is kept.
    """
    def __init__(self, header):
        super(CAMP, self).__init__(header)
        # Annualized risk-free rate used in the Sharpe ratio.
        self._rate = 0.0
        # Benchmark index code (default: CSI 300).
        self._market_asset = 'SH000300'
    @property
    def risk_free_rate(self):
        # Annualized risk-free rate (plain float).
        return self._rate
    @risk_free_rate.setter
    def risk_free_rate(self, rate):
        self._rate = rate
    @property
    def market_asset(self):
        # Returns the benchmark's daily return *series*, not the raw code.
        return self.get_stock_var(self._market_asset)
    @market_asset.setter
    def market_asset(self, stock_code):
        self._market_asset = stock_code
    def get_stock_var(self, stock_code:str):
        """Return the daily simple-return series for *stock_code*."""
        import pandas
        from dev_global.env import TIME_FMT
        df = self.mysql.select_values(stock_code, 'trade_date,close_price')
        df.columns = ['date', 'close']
        df['date'] = pandas.to_datetime(df['date'], format=TIME_FMT)
        df.set_index('date', inplace=True)
        # Simple daily return: (close_t - close_{t-1}) / close_{t-1}.
        df[stock_code] = ( df['close'] - df['close'].shift(1) ) / df['close'].shift(1)
        result = df[stock_code]
        return result
    def asset_beta(self, df:pandas.DataFrame, market_asset:str):
        """Return {column: beta versus *market_asset*} for each column of *df*."""
        import numpy as np
        beta_matrix = {}
        for index, col in df.iteritems():
            # beta = cov(asset, market) / var(market)
            beta = df[[index, market_asset]].cov().iloc[0, 1] / df[market_asset].var()
            beta_matrix[index] = beta
        return beta_matrix
    def sharpe_ratio(self, df:pandas.DataFrame, market_asset:str):
        """Return {column: annualized Sharpe ratio} for each column of *df*."""
        import numpy as np
        sharpe_matrix = {}
        for index, col in df.iteritems():
            # Annualized with sqrt(250) trading days; risk-free rate is
            # converted to a daily rate by dividing by 250.
            sharpe_ratio = np.sqrt(250)*( df[index].mean() - self.risk_free_rate/250 ) / df[index].std()
            sharpe_matrix[index] = sharpe_ratio
        return sharpe_matrix
def event_sharpe_analysis():
    """Demo: compute betas and 2017 Sharpe ratios for a small stock list."""
    from dev_global.env import GLOBAL_HEADER
    from datetime import date
    analyzer = CAMP(GLOBAL_HEADER)
    analyzer.risk_free_rate = 0.03
    print(analyzer.risk_free_rate)
    # Benchmark return series first, followed by the individual stocks.
    return_series = [analyzer.market_asset]
    for code in ['SH600000', 'SZ002230', 'SH601818']:
        return_series.append(analyzer.get_stock_var(code))
    asset_group = pandas.concat(return_series, axis=1)
    # Betas over the last 500 trading days.
    beta = analyzer.asset_beta(asset_group[-500:], 'SH000300')
    print(beta)
    # Sharpe ratios over calendar year 2017.
    window = asset_group.loc[date(2017, 1, 1):date(2017, 12, 31), :]
    sharpe = analyzer.sharpe_ratio(window, 'SH000300')
    print(sharpe)
class filterBase(StockBase):
    """Stock-pool filters based on fundamentals or user-defined tag files."""
    def filter_roe(self, threshold=0.1, report_date='2020-03-31'):
        """
        Return the stocks whose ROE exceeds *threshold* on *report_date*.
        :param threshold: minimum ROE to keep
        :param report_date: reporting period (was previously hard-coded)
        :return: DataFrame with columns ['stock', 'roe']
        """
        import pandas
        df = self.mysql.condition_select(
            'finance_perspective', 'char_stock_code,float_roe',
            f"report_date='{report_date}'")
        df.columns = ['stock', 'roe']
        # Fix: the original also computed df['stock'].to_json() into an
        # unused variable; dropped.
        return df[df['roe'] > threshold]
    def user_defined_pool(self, tag: str):
        """
        Load a StockPool from config/<tag>-tag.json.
        Tag file format:
            { "stock": "XXXXXX" },
        :return: a StockPool (empty when the tag file does not exist)
        """
        import os
        import json
        from dev_global.env import SOFT_PATH
        stock_pool = StockPool()
        tag_file = SOFT_PATH + f"config/{tag}-tag.json"
        if os.path.exists(tag_file):
            with open(tag_file, 'r') as f:
                stock_json = json.loads(f.read())
            # Bug fix: `pool` is a property; the original called it like a
            # method (stock_pool.pool(stock_json) -> TypeError). Assign
            # through the setter instead.
            stock_pool.pool = stock_json
        return stock_pool
class StockPool(object):
    """Named collection of stock entries (dicts like {"stock": "SH600000"})."""
    def __init__(self, pool_name=None):
        # Fix: the original assigned to self.name while initializing
        # self._name, leaving _name permanently empty and the two out of
        # sync; keep a single backing attribute with a property facade.
        self._name = pool_name if isinstance(pool_name, str) else ''
        self._pool = []
    @property
    def name(self):
        """Pool name ('' when unnamed)."""
        return self._name
    @name.setter
    def name(self, value):
        if isinstance(value, str):
            self._name = value
    @property
    def pool(self):
        return self._pool
    @pool.setter
    def pool(self, value):
        # Accept a single dict or a list of dicts; silently skip other types.
        if isinstance(value, dict):
            self._pool.append(value)
        elif isinstance(value, list):
            for stock in value:
                if isinstance(stock, dict):
                    self._pool.append(stock)
    def set_empty(self):
        """Drop all entries from the pool."""
        self._pool = []
class orderBase(object):
    """Creates and finalizes trade-order records (plain dicts)."""
    def __init__(self):
        pass
    def trade_record(self, stock_code, trade_time, trade_type, unit_cost, quantity, order_time=None, flag=None):
        """Build an order-record dict.

        :param order_time: when omitted, falls back to trade_time.
        :param flag: executed marker; defaults to False when omitted.
        """
        bid = {
            "order": stock_code,
            # Bug fix: the original condition was inverted
            # (`order_time if not order_time else trade_time`), so a
            # supplied order_time was discarded and None was stored
            # whenever it was omitted.
            "order_time": order_time if order_time else trade_time,
            "trade_time": trade_time,
            "trade_type": trade_type,
            "unit_cost": unit_cost,
            "quantity": quantity,
            "fee": 0.0,
            "cost": 0.0,
            # Fix: the flag parameter was previously accepted but ignored.
            "flag": False if flag is None else flag
        }
        return bid
    def order_deal(self, order):
        """Mark an order dict as executed and return it (None for non-dicts)."""
        if isinstance(order, dict):
            order['flag'] = True
            return order
class assetBase(object):
    """A single holding: identity, cost basis and running value."""
    def __init__(self, code, start_time, name=None, cost=0.0, quantity=0):
        """Create a position opened at *start_time* for stock *code*."""
        self.code = code              # stock code
        self.name = name              # optional display name (may be None)
        self.unit_cost = cost         # per-unit cost (marked "to be delete" upstream)
        self.quantity = quantity      # number of units held
        self.cost = 0.0               # total cost, filled in by order()
        self.value = 0.0              # current asset value
        self.start_time = start_time  # time the position was opened
        self.trade_record = None      # last trade record, if any
    def order(self):
        """Compute, store and return the total cost of the position."""
        self.cost = self.quantity * self.unit_cost
        return self.cost
    def reset(self):
        """Zero the position (quantity becomes 0.0, matching the original)."""
        self.unit_cost = 0.0
        self.quantity = 0.0
        self.cost = 0.0
        self.value = 0.0
#market event engine, running and generate event signal, broadcasting to market.
#date engine generate date series.
#data engine generate data to strategy.
class NoName(object):
    """Placeholder for a future event-engine object (not yet implemented)."""
    def __init__(self):
        pass
#build object stock,get stock price series
#run strategy checking, if cross, send signal
#recieve signal, generating order
#recieve order, record.
#calculate returns.
#evaluation, beta, sharpe etc.
import datetime
class DateTimeEngine(object):
    """Produces calendar-date series and filters out non-trading days."""
    def __init__(self):
        # First date of the historical data set (SSE index start).
        self.START_DATE = datetime.date(1990, 12, 19)
    def date_range(self):
        """Return every calendar date from START_DATE through today, inclusive."""
        n = datetime.date.today() - self.START_DATE
        date_series = [self.START_DATE + datetime.timedelta(days=i) for i in range(n.days + 1)]
        return date_series
    def holiday_from_stock(self, date_series):
        """Remove the dates on which 'SH000001' traded, leaving non-trading days.

        NOTE(review): assumes the query result exposes trade dates as
        column 0 -- confirm against mysqlBase.select_values.
        """
        from polaris.mysql8 import mysqlBase
        from dev_global.env import GLOBAL_HEADER
        mysql = mysqlBase(GLOBAL_HEADER)
        result = mysql.select_values('SH000001', 'trade_date')
        trade_date = set(result[0])
        # Bug fix: list.remove() raised ValueError when a trade date was
        # absent from date_series; filter instead (also avoids O(n^2)).
        return [dt for dt in date_series if dt not in trade_date]
    def holiday_from_file(self):
        """Read holiday dates (one per line) from the static config file.

        Assumes ISO 'YYYY-MM-DD' lines -- TODO confirm the file format.
        :return: list of datetime.date (the original returned None)
        """
        holiday = []
        with open('/home/friederich/Documents/dev/neutrino/applications/config/holiday', 'r') as f:
            for line in f:
                dt = line.strip()
                if dt:
                    # Bug fix: the original appended datetime.date() with
                    # no arguments, which raises TypeError; parse the line.
                    holiday.append(datetime.datetime.strptime(dt, '%Y-%m-%d').date())
        print(holiday)
        return holiday
if __name__ == "__main__":
    # Smoke-test the date engine: build the full calendar range, strip the
    # trading days, then parse the static holiday file.
    # Fix: dropped the unused `from dev_global.env import GLOBAL_HEADER,
    # TIME_FMT` import (neither name was referenced in this block).
    dateEngine = DateTimeEngine()
    date_list = dateEngine.date_range()
    holiday = dateEngine.holiday_from_stock(date_list)
    dateEngine.holiday_from_file()
|
New Philadelphia, Ohio - The Uhrichsville man accused of causing life-threatening injuries to a toddler will be spending the next five to seven years in prison.
Sanders was arrested on December 11th in connection with the more than two-month hospitalization of a two-year-old in his care. Assistant Prosecutor Amanda Miller said the child was injured so badly that he had no detectable brain activity when he was life-flighted to Akron Children’s from Cleveland Clinic Union Hospital.
"It was very quickly determined that he was not showing any meaningful brain function at that time, and so when it was arranged for him to be flown to Akron Children’s, it was also discussed that Akron Children’s would be putting their transplant/harvest team protocols into place," she said.
Miller said it was only by grace and through the incredible skill of the pediatric surgeons at Akron Children’s that the child was able to survive.
Tuscarawas County Job and Family Services Director David Haverfield joined Miller in asking the judge to impose the maximum possible sentence of eight years in prison. He said that even though the child has been steadily healing from the physical injuries, the trauma inflicted on him will last a lifetime.
"He defied the odds time and time again and despite a really grim prognosis, he’s continued to persevere and improve every day but there’s no doubt he will suffer the effects of the harm the rest of his life," he says.
Defense Attorney Gerald Latanich asked the judge to impose a seven-year sentence due to Sanders having no previous felony convictions and being at low risk for recidivism. Judge Thomakos ultimately granted the request, but not before issuing a strong rebuke against Sanders for remaining silent when given an opportunity to answer for his actions.
Sanders will be eligible for judicial release five years into his sentence, but Judge Thomakos said it is unlikely she would be willing to grant that request.
|
# -*- coding: utf-8 -*-
from urlparse import urlparse, urlunparse
import hashlib
from .utils import smart_str
def format_url(params):
    """
    Serialize a dict into a sorted, UTF-8-encoded query string.
    :param params: mapping of parameter names to values
    :return: 'k1=v1&k2=v2...' with keys in sorted order
    """
    pairs = ('%s=%s' % (smart_str(key), smart_str(params[key]))
             for key in sorted(params))
    return '&'.join(pairs)
def encode_dict(params):
    """
    UTF-8 encode every value of the dict, dropping falsy-valued entries.
    :param params: mapping to encode
    :return: new dict with smart_str-encoded values
    """
    encoded = {}
    for key in params:
        value = params[key]
        if value:
            encoded[key] = smart_str(value)
    return encoded
def sign_url(params, key_secret, key_name=None, sign_type='md5', upper_case=False):
    """
    Compute a signature over url-encoded parameters.
    :param params: dict of parameters to sign
    :param key_secret: signing secret
    :param key_name: optional name under which the secret is appended
    :param sign_type: hash algorithm, 'md5' or 'sha1'
    :param upper_case: whether to upper-case the hex digest
    :return: hex digest string
    """
    base = format_url(params)
    if key_name:
        base = '%s&%s=%s' % (base, key_name, key_secret)
    else:
        base = '%s%s' % (base, key_secret)
    hashers = {'md5': hashlib.md5, 'sha1': hashlib.sha1}
    if sign_type not in hashers:
        raise NotImplementedError('Method %s is not supported' % sign_type)
    digest = hashers[sign_type](base).hexdigest()
    return digest.upper() if upper_case else digest
def append_params_to_url(url, params):
    """
    Append extra query parameters to a target url.
    :param url: target url
    :param params: dict of parameters to append (keys appended in sorted
        order; pre-encode values with encode_dict if needed)
    :return: the rebuilt url string
    """
    # Bug fix: urlparse returns a 6-tuple (scheme, netloc, path, params,
    # query, fragment); the original unpacked it into five names, raising
    # ValueError, and the body was a bare `pass` that did nothing.
    scheme, netloc, path, path_params, query, fragment = urlparse(url)
    extra = '&'.join('%s=%s' % (key, params[key]) for key in sorted(params))
    query = '%s&%s' % (query, extra) if query else extra
    return urlunparse((scheme, netloc, path, path_params, query, fragment))
|
Shardul is Senior Solutions Architect for VMware End-User Computing (EUC).
Workspace ONE Mobile Flows are the latest addition to the VMware Workspace ONE platform.
Mobile Flows help device users perform tasks across multiple business back-end systems from a single app like VMware Boxer. This eliminates the need for end users to visit multiple websites or apps while performing business tasks.
You can use Mobile Flows to surface context-based actions and insights from other backend systems to simplify end user workflows.
In VMware Boxer, Mobile Flows detect references to tasks or business data within an email and display them as cards. Then, users interact with the cards to perform essential actions – such as assigning tickets – without transferring to another app.
To support specific business requirements, customize pre-built connectors or develop custom connectors.
The following architecture diagram demonstrates how the Mobile Flows Server can use out-of-the-box and custom connectors to connect to on-premises and cloud-based apps.
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
import logging
from tastypie.authorization import ReadOnlyAuthorization
logger = logging.getLogger(__name__)
class UserObjectsOnlyAuthorization(ReadOnlyAuthorization):
    '''
    Custom authorization class to limit the user's access to his own objects
    '''
    def read_detail(self, object_list, bundle):
        '''Allow detail access only for objects owned by the requesting user.'''
        requesting_user = bundle.request.user
        # Models such as userprofile carry the user directly rather than
        # exposing a get_owner_object() helper.
        if hasattr(bundle.obj, 'user'):
            return bundle.obj.user == requesting_user
        try:
            return bundle.obj.get_owner_object().user == requesting_user
        except AttributeError:
            # Objects without owner information can be accessed freely.
            return True
|
Introducing the 2017 Nissan Sentra. With fewer than 15,000 miles on the odometer, this 4 door sedan prioritizes comfort, safety and convenience. Smooth gearshifts are achieved thanks to the efficient 4 cylinder engine, and for added security, dynamic Stability Control supplements the drivetrain. Top features include a split folding rear seat, 1-touch window functionality, a tachometer, tilt steering wheel, remote keyless entry, cruise control, an overhead console, and more. Safety equipment has been integrated throughout, including: head curtain airbags, front side impact airbags, traction control, brake assist, a panic alarm, and ABS brakes. This car was designed with safety in mind, allowing you to drive with even greater assurance. We pride ourselves on providing excellent customer service. Please don't hesitate to give us a call.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Interface with Google calendar service.
The module should really be called "calendar", not "calendars", but due to how
[poorly] imports are done by Google python API, that would generate an name
conflict.
'''
import datetime
from collections import namedtuple
from utils import log, dtfy
# This hard limit prevent the query to Google to loop forever, in case there
# are "repeat forever" recurring events in the calendar
CACHE_SIZE_HARD_LIMIT = 666
# Lightweight record for one calendar event span.
Event = namedtuple('Event', ['start', 'end', 'fuzzy_name'])
class Calendar(object):
    '''
    A Google calendar interface.

    Arguments:
        cid: The `CalendarId` to use
        service: authorized Google Calendar API service object
        min_end: minimum finishing datetime for events of interest
        max_start: maximum starting datetime for events of interest
        all_day_offset: hours to shift "all day long" event boundaries by
    '''
    def __init__(self, cid, service, min_end, max_start, all_day_offset=0):
        self.cid = cid
        self.service = service
        self.min_end = min_end
        self.max_start = max_start
        self.all_day_offset = all_day_offset
        self.__timezone = False  # `None` may be a valid timezone setting
    def __iter__(self):
        '''Iterate on all the events in the calendar as
        (start, end, fuzzy_name) tuples.'''
        # Bug fix: get_events returns Event namedtuples, but the original
        # indexed them like the raw API dicts (event['start']['dateTime']),
        # raising TypeError on the first iteration.
        for event in self.get_events(min_end=self.min_end):
            yield event.start, event.end, event.fuzzy_name
    def get_events(self, min_end=None, max_start=None):
        '''Retrieve a list of events for a given timespan.

        Arguments:
            min_end: the minimum finishing ISO datetime for requested events.
            max_start: the maximum starting ISO datetime for requested events.
        Returns:
            list of Event namedtuples (capped at CACHE_SIZE_HARD_LIMIT).
        '''
        min_end = dtfy(min_end or self.min_end, as_iso_string=True)
        max_start = dtfy(max_start or self.max_start, as_iso_string=True)
        msg = 'Querying calendar for range: {} to {}'
        log.debug(msg.format(min_end, max_start))
        page_token = None
        ret = []
        # Page through the API results until exhausted or the hard cap is hit.
        while True:
            log.debug('Issuing query with page_token = {}'.format(page_token))
            events = self.service.events().list(
                calendarId=self.cid,
                singleEvents=True,
                timeMin=min_end,
                timeMax=max_start,
                orderBy='startTime',
                pageToken=page_token)
            data = events.execute()
            fix = self.fix_all_day_long_events
            for event in data['items']:
                ret.append(Event(fix(event['start']),
                                 fix(event['end']),
                                 event['summary']))
            page_token = data.get('nextPageToken')
            if not page_token or len(ret) >= CACHE_SIZE_HARD_LIMIT:
                break
        return ret
    def fix_all_day_long_events(self, something):
        '''Normalize an event boundary to a datetime, shifting "all day
        long" events by `all_day_offset` hours.'''
        # All-day events have start and ending dates filed under the key
        # 'date' rather than 'dateTime'. Bug fix: the original indexed
        # something['dateTime'] directly, raising KeyError for all-day
        # events; use .get() so the 'date' branch is actually reachable.
        if something.get('dateTime') is not None:
            return dtfy(something['dateTime'])
        date = dtfy(something['date'])
        return date + datetime.timedelta(hours=self.all_day_offset)
    @property
    def timezone(self):
        # Lazily fetched and cached; `False` marks "not fetched yet" since
        # `None` could be a legitimate timezone value.
        if self.__timezone is False:
            tzone = self.service.settings().get(setting='timezone').execute()
            self.__timezone = tzone['value']
        return self.__timezone
def get_available_calendars(service):
    '''Return a dictionary {calendar_id: summary} of all available
    calendars, hidden ones included.'''
    # Fix: corrected the 'Rtrieving' typo in the log message.
    log.debug('Retrieving available calendars...')
    data = service.calendarList().list(showHidden=True).execute()
    return {cal['id']: cal['summary'] for cal in data['items']}
|
MOSCHINO cropped t-shirt in stretch cotton with short sleeves and spotted Teddy Bear print and contrast brand front lettering. Classic micro ribbed crewneck.
MOSCHINO round neck cotton stretch T-shirt with short sleeves with Milan print and contrasting brand lettering.
MOSCHINO bicolour dress in stretch cotton with bear print with front speckled logo, cap sleeves and full skirt.
MOSCHINO crew-neck t-shirt in stretch cotton with short sleeves and a frontal cell bear print and contrast brand lettering.
MOSCHINO round neck cotton t-shirt with short sleeves and bear toy print on front and contrasting brand lettering.
MOSCHINO round neck cotton stretch T-shirt with front bear print and sequined hearts appliqués. Short balloon sleeves and lettering detail of the contrasting brand.
MOSCHINO stretch cotton crew neck dress with bear print and hearts appliqués with front sequins, cap sleeves and a full skirt. Micro ribbed profiles and lettering detail of the contrasting brand.
MOSCHINO stretch cotton T-shirt with maxi brand lettering print and front contrast striped pattern, classic crew neck and short sleeves.
|
import re
import csv
from os import listdir
from os.path import isfile, join
from extra_neg import extra_negative_instances
def hasNumbers(inputString):
    """Return True when *inputString* contains at least one digit."""
    for ch in inputString:
        if ch.isdigit():
            return True
    return False
# US state-name tokens (multi-word states contribute their distinctive word),
# lower-cased once at module load instead of per run.
_STATE_NAMES = frozenset(s.lower() for s in [
    "Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado",
    "Connecticut", "Delaware", "Florida", "Georgia", "Hawaii", "Idaho",
    "Illinois", "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana",
    "Maine", "Maryland", "Massachusetts", "Michigan", "Minnesota",
    "Mississippi", "Missouri", "Montana", "Nebraska", "Nevada", "Hampshire",
    "Jersey", "Mexico", "York", "Carolina", "Dakota", "Ohio", "Oklahoma",
    "Oregon", "Pennsylvania", "Rhode", "Tennessee", "Texas", "Utah",
    "Vermont", "Virginia", "Washington", "Wisconsin", "Wyoming"])

_FIELDS = ['word', 'has_university', 'has_state_name', 'has_state_word',
           'length', 'has_dash', 'all_capital', 'has_num', 'label']


def _featurize(instance):
    """Map one tagged span like '<p1 Foo Bar >' to a CSV feature-row dict.

    Spans starting with '<p1'/'<p2' are positive (label 1); others 0.
    """
    parts = instance.split()
    label = 1 if parts[0].strip() in ("<p1", "<p2") else 0
    origin_list = parts[1:-1]                  # tokens, original casing
    tokens = [t.strip().lower() for t in origin_list]
    word = "".join(tokens)
    length = sum(len(t) for t in tokens)
    has_university = int(("university" in tokens) or ("college" in tokens)
                         or ("institute" in tokens))
    has_state_word = int("state" in tokens)
    has_dash = int(any("-" in t for t in tokens))
    has_num = int(any(hasNumbers(t) for t in tokens))
    has_state_name = int(any(t in _STATE_NAMES for t in tokens))
    # all_capital applies only to single-token names made purely of A-Z.
    if len(origin_list) == 1:
        all_capital = int(all('A' <= c <= 'Z' for c in origin_list[0]))
    else:
        all_capital = 0
    return {'word': word, 'has_university': has_university,
            'has_state_name': has_state_name, 'has_state_word': has_state_word,
            'length': length, 'has_dash': has_dash,
            'all_capital': all_capital, 'has_num': has_num, 'label': label}


def _write_dataset(file_names, base_path, out_csv):
    """Extract every tagged instance from *file_names* into *out_csv*."""
    with open(out_csv, 'w') as csvFile:
        csvWriter = csv.DictWriter(csvFile, fieldnames=_FIELDS)
        csvWriter.writeheader()
        for fname in file_names:
            with open(base_path + fname) as fh:
                for line in fh:
                    # One row per tagged span ('<p...>' or '<n...>').
                    for instance in re.findall("<[pn].*?>", line):
                        csvWriter.writerow(_featurize(instance))


def main():
    """Split the tagged text files 200/rest into train/test feature CSVs.

    Refactor: the original duplicated the entire extraction loop (~50
    lines) for the train and test splits; both now share _write_dataset
    and _featurize, with identical output.
    """
    mypath = "/Users/fuhao/Development/cs838webpage/textFile/"
    files = [f for f in listdir(mypath)
             if isfile(join(mypath, f)) and f[-4:] == ".txt"]
    _write_dataset(files[:200], mypath, "train_data.csv")
    _write_dataset(files[200:], mypath, "test_data.csv")
if __name__ == "__main__":
    # Script entry point: build train/test feature CSVs from the tagged files.
    main()
|
In a rapidly changing world we have to think and operate differently. By merging the skills and knowledge of seasoned engineers and developers, we find new ways to look at asset management every day. All On Key users benefit from these enhancements.
Business and technology leaders are bombarded with information about the elements of Industry 4.0. Yet we still lack the understanding and know-how to bring all these elements together in a sound, practical manner to create value in industry.
On Key is purposefully designed to streamline enterprise asset management activities and to provide valuable information for effective decision making.
Do you still have some questions before you contact us for a demo?
We have endeavoured to answer all the general questions you might still have about On Key. In the event that something is unclear, you can send us your question to which we will gladly respond.
We interview Stefan Swanepoel, Pragma’s Enterprise Asset Management Product Manager, to learn more about the importance of data.
Leading physical asset management company, Pragma, offers clients using their On Key Enterprise Asset Management System a safe and secure platform to host their data.
Are you still in the dark about what exactly causes continual stoppages in your production line?
One of the biggest challenges that manufacturers face is to strike the balance between lowering spare part stock costs while ensuring uptime of their production equipment.
Maintenance Work Management – where the rubber hits the road!
Is your EAMS ISO 55000 compliant?
Asset owners in industries like oil and gas, roads and utilities have unique asset management requirements and are often left stranded with asset management software which..
A modern Enterprise Asset Management (EAM) software solution plays a key role in the successful implementation of a comprehensive Asset Management System.
The On Key Work Manager Application is the perfect solution for the asset engineer’s demand to have real time data transactions at point of performance.
Asset care plan development (ACPD) is the process of developing or improving tactical asset care plans (ACPs) on assets, by following a structured methodology.
Maintenance work management is the core of maintenance management. It is where all the plans and strategies become reality; it is where Maintenance and Operations meet face to face.
An Effectively run store should have the correct item in the correct quantity and quality, at the correct cost in the correct place at the correct time for the optimal running of a business.
Asset management information enables the management team to keep an eye on applicable KPIs, thereby identifying areas of concern and making decisions to improve performance.
1. What is the On Key Enterprise Asset Management system?
The On Key Enterprise Asset Management (EAM) system is a best-of-breed, specialised tool developed specifically to maximise asset availability and performance while minimising risk as well as maintenance and operations costs. Since 1994, On Key has been designed and maintained by asset management professionals for asset management professionals, ensuring that the tool is intuitive and fit for purpose in maintaining your assets.
2. What kind of companies use the On Key system?
On Key is designed for the asset-intensive market where the focus is on ensuring maximum asset availability. On Key is in use at utilities (water and power), facilities – from retail shopping centres to hospitals and health care centres, mines – open cast and underground, large manufacturing operations and the smaller fast moving consumer goods (FMCG) operations.
3. What type of assets are managed via the On Key system?
On Key is used in the management of assets used by multiple process scenarios. The unique ability to capture asset data in a parent, child relationship with a cross reference to a physical location description and GIS multi-dimensional data, places On Key as a leading tool with the ability to be able to represent the data in a manner suitable for your organisation.
4. What are the main advantages of using the On Key system?
Standardisation: The On Key system has an innovative way of managing your portfolio of assets in a way that minimises the configuration changes to apply changes throughout the system to assets of similar characteristics.
Work management: Recording the breakdowns and reactionary work, implementing the preventative tasks, executing the project tasks or just ensuring that the health, safety, environment and statutory tasks are executed; these are effectively and efficiently done with On Key.
Resource planning and scheduling: Through On Key you will ensure that the correct spares, most competent technician or contractor and the appropriate tools for the work are effectively planned and scheduled.
Real-time feedback: The use of mobile technology with the On Key system enables you to determine which contractors meet service levels, what is the effective wrench time of your technicians, real-time updates to the Contact Centre as to when the tradesperson arrived on site and exactly when the work was completed and signed off by the client.
Inventory management: By linking the consumption of spare parts to the work order On Key gives you accurate material consumption of the maintenance activities in addition to being able to configure minimum reorder levels and economical reorder quantities.
Reliability centred maintenance task development: Use of work order feedback from breakdown and adhoc work activities is used in the identification of typical failure modes and root causes of the failure, enabling you to be able to develop appropriate tasks that will improve the reliability of your assets.
Flexible reporting solutions: From the use of a built-in self-help wizard to develop adhoc custom reports for the extraction of information from the system, On Key is also shipped with a suite of standard reports that can be used to manage Key Performance Indicators (KPIs) and through On Key Analytics users are able to analyse trends in the data, empowering them to utilise those trends for focused improvement activities.
5. What are the capacity limits of the On Key system?
On Key has been developed for scalability, the software has been designed with multi-currency, multilanguage on multiple sites in mind. Configuration standards are set on a global level and can be customised for use based on the characteristic of a single site. Multiple roles can be set-up with user rights that apply across multiple sites or limited to specific sites and specific transactions.
6. How is the cost of the On Key system determined?
The On Key system is designed to be modular and we believe that every employee in your business should be able to log work requests or interact with the asset management system in some capacity – so we don’t licence the number of users on the system. We licence the number of assets reflected in the system and the modules that you elect to use.
The core of the system is the Asset Register, which includes the multi-currency functionality and the asset standardisation functionality. The most popular module to be added to the asset register is the Maintenance Manager module, which manages the work assignments, planning, scheduling and failure analysis. Other modules that can be added are the mobile applications, FMEA based asset task development (ACPD) functionality, spare parts and engineering materials management (Material Manager) functionality and the production management (OEE) functionality.
7. What makes the On Key system different from other asset management systems or computerised maintenance management systems?
The On Key system is a web-based system that uses the Microsoft Silverlight functionality to give a powerful system interface. Our pricing structures are flexible according to your requirements and as your business grows and your asset management systems mature, additional modules that enable your continued growth can be seamlessly added. Our software hosting platform included with our standard pricing means that you don’t need to be concerned with the ICT infrastructure requirements for the system. The use of web-services between On Key and your legacy systems or ERP systems means that integration is still possible from our hosting platform. In short, we remove the hassle of system management so that you can focus on asset management.
8. How can I access the On Key system?
On Key is accessible directly from your Internet Explorer 11 browser, simply type the URL address, provided to you by our Contact Centre, into your address bar and On Key will load quickly in your browser screen.
9. Can the On Key system be accessed by my contractors and technicians via their mobile devices?
Yes, our On Key mobile solutions can be enabled for use by contractors and/or technicians. Designed so that only the work that needs to be executed by that specific resource appears on the device, your technicians/contractors will quickly and easily determine what work needs to be completed next. Automatic status change updates between the mobile device and the main On Key system ensures that your Contact Centre is able to determine the exact status of the work significantly shortening the feedback time to stakeholders.
10. What is the smallest On Key system that I can use?
On Key is scalable to the smallest clients and few have a need for less than 300 registered assets.
11. Can I control my inventory system through the On Key system?
Yes, the On Key system has a Materials Management module that enables you to set-up your stores and track the use and costs of spares per asset. The module includes standard functionality for stock takes, stock revaluations, variance tracking, etc. Features such as criticality tracking, minimum re-order quantity settings and maximum stock levels all assist in keeping the costs of holding stock to a minimum.
12. Can I track the performance of my assets through the On Key system?
Yes, the On Key system has a Production Management module that enables you to track the production and downtime associated with your assets. Designed to be able to collect data from operator log sheets or from your equipment OEM machine monitoring system, On Key can be configured to reflect the downtime reasons that are preventing your operations from achieving their full potential.
13. How does the On Key system manage my safety and statutory inspections?
Safety and statutory inspections can be loaded into the On Key system to be triggered according to calendar interval or based upon usage or both. Work Order and task prioritisation calculations by the On Key system can be configured to ensure that most critical tasks receive the highest attention and don’t get lost in the mundane work of less importance.
14. How does the On Key system assist me in the development of maintenance strategies for my business?
On Key is designed to support the Failure Mode and Effects Analysis (FMEA) approach to maintenance task optimisation. Criticality analysis criteria can be set up to look at various risk scenarios in your business and, depending upon the probability and consequences of failure, the criticality of your assets can be determined and ranked. Then the FMEA module enables you to look at the mode of failure, root cause and maintenance or inspection task per component that can be utilised to minimise the risk of failure.
15. What hardware do I need for On Key 5.13?
Pragma has an ASP service and will host your database and application for you so that you have no hardware requirements on the server side.
16. What software do I need for On Key 5.13?
Windows XP SP 3 or Windows Vista SP 1/2 or Windows 7 (32-bit or 64-bit[1]).
Internet Explorer 7[2] or later with the latest Silverlight 5 Plug-in.
1 64-bit support only for Internet Explorer 9.
2 Silverlight 5 also supports Firefox 3.6+ and Chrome 12+ but we have not done any testing on these browsers. Refer to Silverlight 5 System Requirements for the full compatibility specification.
17. What software do I need for On Key Express?
Windows 7 Home Premium/Professional (32-bit or 64-bit[1]).
|
import argparse
import urlparse
import os
import ConfigParser
import subprocess
from requests import Session

# Map EC2 availability zones to the short suffix used in CouchDB host names.
ZONE_TO_ID = {
    'eu-west-1a': 'a',
    'eu-west-1b': 'b',
    'eu-west-1c': 'c'
}

cur_dir = os.path.dirname(__file__)

parser = argparse.ArgumentParser(description='------ AWS Startup Script ------')
parser.add_argument('api_dest', type=str, help='Destination to database')
params = parser.parse_args()

api_ini_file_path = os.path.join(cur_dir, 'etc/openprocurement.api.ini')

# Ask the EC2 instance metadata service which availability zone we run in.
session = Session()
resp = session.get('http://169.254.169.254/latest/meta-data/placement/availability-zone')
# BUG FIX: `zone` was previously assigned only on a 200 response, so a failed
# metadata request raised a NameError below. Fall back to an empty string,
# which yields an empty zone suffix and leaves the destination unchanged.
zone = resp.text if resp.status_code == 200 else ''
zone_suffix = ZONE_TO_ID.get(zone, '')

# Prepend the zone suffix to the API destination when we know the zone.
if zone_suffix:
    domain = '{}.{}'.format(zone_suffix, params.api_dest)
else:
    domain = params.api_dest

# Rewrite the CouchDB URLs in the API ini file to point at the zone-local
# host, preserving any credentials and the port already configured.
if os.path.isfile(api_ini_file_path):
    config = ConfigParser.ConfigParser()
    config.read([api_ini_file_path])
    for k in ['couchdb.url', 'couchdb.admin_url']:
        value = config.get('app:api', k)
        url = urlparse.urlparse(value)
        if url.username:
            url = url._replace(netloc='{}:{}@{}:{}'.format(url.username, url.password,
                                                           domain, url.port))
        else:
            url = url._replace(netloc='{}:{}'.format(domain, url.port))
        config.set('app:api', k, url.geturl())
    if zone_suffix:
        config.set('app:api', 'id', zone_suffix)
    with open(api_ini_file_path, 'wb') as configfile:
        config.write(configfile)
|
The Illustrated Stephen King Trivia Books!
Glenn Chadbourne has received a lot of requests to remarque copies of The Illustrated Stephen King Trivia Book and The Illustrated Stephen King MOVIE Trivia Book, but he really doesn’t have the time to handle the logistics of receiving and returning books that are sent to him by collectors right now due to his busy schedule.
That said, Glenn really hates to let his fans down so he’s decided to remarque his PERSONAL HARDCOVER COPIES of these two books for a handful of our collectors! He’s making room in his schedule to dedicate to these drawings next month and these special copies are being pre-sold on a First Come, First Served basis!
A remarque is an original drawing in the book itself by the artist to make a unique collectible! If you don’t own a remarqued book yet, we think you’ll find it’s a great discussion piece and a wonderful addition to your collection. If you already own a remarqued book, you’re probably not reading this because you’re too busy placing your order for another one!
Read more on our website or place your order today because we will never have more to sell!
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP addon manifest for the Argentinian payment localisation.
    'name': 'l10n_ar_account_payment',
    'version': '1.0',
    'category': 'Accounting',
    # Short user-visible summary shown in the Apps list (kept in Spanish).
    'summary': 'Recibos y ordenes de pago para Argentina',
    'author': 'OPENPYME S.R.L',
    'website': 'http://www.openpyme.com.ar',
    # Modules that must be installed before this one.
    'depends': [
        'l10n_ar_point_of_sale',
    ],
    # Data files loaded on install/update, in order: views, wizard, menus,
    # seed data and access rights.
    'data': [
        'views/account_payment_view.xml',
        'views/account_payment_type_view.xml',
        'wizard/account_register_payments_view.xml',
        'views/menu.xml',
        'data/account_journal.xml',
        'security/ir.model.access.csv',
        'data/security.xml',
    ],
    'installable': True,
    'auto_install': False,  # never install automatically with its dependencies
    'application': True,    # listed as a full application in the Apps menu
    'description': """
Recibos y ordenes de pago para Argentina
========================================
Talonarios
""",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
The solution to fighting over the single stroller: craigslist ftw!
A kid and her cat.
Nothing like an old replica of a shack for a good run around.
It wasn’t a shack it was a replica of the tree house from Stand By Me. Get your Oregon trivia right, geez!
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Json2HTML
Description : Function to get dictionary(json) and create HTML how a list
Date : June, 2018
copyright : (C) 2018 by Luiz Motta
email : motta.luiz@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
def getHtmlTreeMetadata(value, html):
    """Append a nested HTML unordered list describing *value* to *html*.

    Dict entries are rendered as "key: value" items in key order; nested
    dicts become a plain "key" item followed by a recursively built
    sub-list. Non-dict input returns *html* unchanged.
    """
    if not isinstance(value, dict):
        return html
    html += "<ul>"
    for key in sorted(value):
        entry = value[key]
        if isinstance(entry, dict):
            html += "<li>%s</li> " % key
            html = getHtmlTreeMetadata(entry, html)
        else:
            html += "<li>%s: %s</li> " % (key, entry)
    html += "</ul>"
    return html
|
©Barry Sandland/TIMB – Rising costs all across Brixton have forced the bike shop to relocate.
“Every day, coming to work here is simply lovely. It is just the best job I have ever had. It is such a family here. I came from a corporate bike shop, so not having a boss is probably right up there in terms of what I like about this place. But I do not live in Brixton. I cannot afford that. The gentrification of Brixton is the problem we are facing now. It is just too expensive.
This entry was posted in advocacy, bike shop and tagged 2015, London, November. Bookmark the permalink.
|
import os
import os.path
import data
from table import Table
from codecs import open
from collections import OrderedDict
TIME_PATTERN = '%0.5f'
class Report(object):
    """Builds a reStructuredText benchmark report from experiment CSV data.

    The report combines several tables (absolute times, per-size details
    with Unicode bar charts, speedups) rendered through RST template files
    located next to this script.
    """
    def __init__(self, options):
        # options: parsed command-line options (see get_options()).
        self.options = options
        # NOTE: `open` here is codecs.open (imported at module level).
        with open(options.input, 'rt') as f:
            self.data = data.ExperimentData(f)
    def generate_rest(self):
        """Render the complete report by filling the main RST template."""
        params = {
            'CSV_FILE' : self.options.input,
            'ARCHITECTURE' : self.options.architecture,
            'RUNS' : self.options.runs,
            'CPU' : self.options.cpu,
            'COMPILER' : self.options.compiler,
            'DATE' : self.options.date,
            'PROCEDURES' : self.generate_procedures_descriptions(),
            'TIME_TABLE' : self.generate_time_table(),
            'TIME_GRAPHS' : self.generate_time_graphs_per_size(),
            'SPEEDUP_TABLE' : self.generate_speedup_table(),
        }
        pattern = self._load_file('main-pattern.rst')
        return pattern % params
    def generate_time_table(self):
        """Build a procedures-by-sizes table of measured times.

        The shortest time for each size is emphasised with RST bold.
        """
        table = Table()
        # prepare header
        header = ["procedure"]
        for size in self.data.sizes:
            header.append('%d B' % size)
        table.set_header(header)
        # get data
        for procedure in self.data.procedures:
            # NOTE(review): local `data` shadows the module-level `data` import.
            data = self.data.data_for_procedure(procedure)
            row = [procedure]
            for item in data:
                fmt = TIME_PATTERN % item.time
                if item.time == self.data.get_shortest_time(item.size):
                    row.append('**%s**' % fmt)
                else:
                    row.append(fmt)
            table.add_row(row)
        return table
    def generate_time_graphs_per_size(self):
        """Render one detail section (table with bar charts) per input size."""
        pattern = self._load_file('detail-pattern.rst')
        result = ''
        for size in self.data.sizes:
            params = {
                'SIZE' : size,
                'TABLE' : self.generate_time_table_for_size(size),
            }
            result += pattern % params
        return result
    def generate_time_table_for_size(self, size):
        """Build a per-size table: times plus relative Unicode bar charts."""
        table = Table()
        table.set_header(["procedure", "time [s]", "relative time (less is better)"])
        chars = 50  # width of the bar chart column, in characters
        data = self.data.data_for_size(size)
        max_time = max(item.time for item in data)
        for item in data:
            time = TIME_PATTERN % item.time
            bar = unicode_bar(item.time/max_time, chars)
            table.add_row([item.procedure, time, bar])
        return table
    def generate_speedup_table(self):
        """Build a speedup table relative to the first procedure in the data."""
        table = Table()
        # prepare header
        header = ["procedure"]
        for size in self.data.sizes:
            header.append('%d B' % size)
        table.set_header(header)
        # Reference times come from the first listed procedure.
        reference_time = {}
        for size in self.data.sizes:
            time = self.data.get(self.data.procedures[0], size)
            reference_time[size] = time
        # get data
        for proc in self.data.procedures:
            measurments = self.data.data_for_procedure(proc)
            row = [proc]
            for item in measurments:
                speedup = reference_time[item.size] / item.time
                row.append('%0.2f' % speedup)
            table.add_row(row)
        return table
    def generate_procedures_descriptions(self):
        """Build a name -> description table for the measured procedures."""
        definitions = self.__parse_cpp()
        table = Table()
        header = ["procedure", "description"]
        table.set_header(header)
        # NOTE: dict.iteritems() is Python 2 only.
        for proc, desc in definitions.iteritems():
            if proc in self.data.procedures:
                table.add_row([proc, desc])
        return table
    def __parse_cpp(self):
        """Extract (name, description) pairs from the C++ function registry.

        Scans ../function_registry.cpp between the '// definition start' and
        '// definition end' marker lines for add_trusted("...") / add("...")
        calls; the line following each call holds the description literal.
        Returns an OrderedDict preserving registration order.
        """
        root = os.path.dirname(__file__)
        src = os.path.join(root, "../function_registry.cpp")
        with open(src) as f:
            lines = [line.strip() for line in f]
        start = lines.index("// definition start")
        end = lines.index("// definition end")
        definitions = lines[start + 1:end]
        i = 0
        L = OrderedDict()
        while i < len(definitions):
            line = definitions[i]
            if line.startswith("add_trusted("):
                # Strip the call prefix plus the surrounding quote/paren chars.
                name = line[len("add_trusted("):][1:-2]
                description = definitions[i+1][1:-2]
                L[name] = description
                i += 2
            elif line.startswith("add("):
                name = line[len("add("):][1:-2]
                description = definitions[i+1][1:-2]
                L[name] = description
                i += 2
            else:
                i += 1
        return L
    def _load_file(self, path):
        """Read a template file (UTF-8) located next to this script."""
        root = os.path.dirname(__file__)
        src = os.path.join(root, path)
        with open(src, 'rt', encoding='utf-8') as f:
            return f.read()
def unicode_bar(value, width):
    """Render *value* (a fraction in [0, 1]) as a horizontal bar string.

    The bar is *width* characters wide at value == 1.0 and has a resolution
    of 1/8 character: full cells use U+2588 (FULL BLOCK) and the partial
    last cell uses one of the eighth-block characters.
    """
    fractions = (
        '',         # 0 - empty
        u'\u258f',  # 1/8
        u'\u258e',  # 2/8
        u'\u258d',  # 3/8
        u'\u258c',  # 4/8
        u'\u258b',  # 5/8
        u'\u258a',  # 6/8
        u'\u2589',  # 7/8
    )
    block = u'\u2588'
    assert 0.0 <= value <= 1.0
    # Work in eighths of a character cell.
    k8 = int(value * width * 8)
    # BUG FIX: use floor division; plain '/' yields a float under Python 3,
    # breaking both the string repetition and the tuple index below.
    k = k8 // 8   # number of full blocks
    f = k8 % 8    # remaining eighths
    return block * k + fractions[f]
def get_options():
    """Parse command-line options for the report generator."""
    import optparse
    import sys
    import time

    today = time.strftime('%Y-%m-%d')
    default_output = "report.rst"

    # (flag, dest, default, help) for each supported option; a None default
    # matches optparse's own default for unset options.
    option_specs = [
        ("--csv", "input", None, "input CSV filename"),
        ("--output", "output", default_output,
         "output RST filename [default: %s]" % default_output),
        # experiment details
        ("--runs", "runs", None, "how many times measurments were repeated"),
        ("--cpu", "cpu", None, "CPU details"),
        ("--compiler", "compiler", None, "compiler version"),
        ("--architecture", "architecture", None,
         "target architecture (SSE for -msse, AVX2 for -mavx2, etc.)"),
        # for archivists :)
        ("--date", "date", today, "date [default: %s]" % today),
    ]

    parser = optparse.OptionParser()
    for flag, dest, default, help_text in option_specs:
        parser.add_option(flag, dest=dest, default=default, help=help_text)

    parsed, _ = parser.parse_args()
    return parsed
def main():
    """Entry point: parse options, build the report, write it as UTF-8 RST."""
    options = get_options()
    report = Report(options)
    # `open` is codecs.open (module-level import), hence the encoding kwarg.
    with open(options.output, 'wt', encoding='utf-8') as out:
        out.write(report.generate_rest())
    print "%s generated" % options.output
if __name__ == '__main__':
main()
|
webconferences.ir is a safe website. This information is from Google, AVG Threat Labs, McAfee SiteAdvisor, Wot.
Alexa traffic rank shows the popularity of your site relative to other sites. Webconferences.ir is ranked 4,199,209th in the world (among the 30 million domains). A low-numbered rank means that your website gets a lot of visitors.
The top queries driving traffic to www.webconferences.ir from search engines.
Kleido Reuniones Tuppersex a Domicilio.
Website load time is an important factor, because Google is taking the site’s loading speed into consideration in determining its ranking. Even though this will not have a big impact, it is still something we (webmasters) should really look into. The reason is pretty simple – the majority of visitors are usually in a rush and no one is fond of waiting half a century before the website finally loads its content or fails to load. At the last check on 2017-07-13, website load time was 6.73. The highest load time is 15.21, the lowest load time is 4.77, the average load time is 9.68.
|
# We look for the ds9 region files, read them, and mask corresponding regions in the sigma images.
# NOTE(review): assumes ../config.py defines psfstarcat, configdir, psfkey,
# psfkeyflag, psfdir, imgdb, update, thisisatest and askquestions -- TODO confirm.
execfile("../config.py")
from kirbybase import KirbyBase, KBError
from variousfct import *
import cosmics  # used to read and write the fits files
import ds9reg
import glob
import numpy as np
import star

# Read the manually curated PSF star catalogue.
psfstars = star.readmancat(psfstarcat)
# We read the region files
for i, s in enumerate(psfstars):
    print '---------------PSF STAR------------------'
    print s.name
    print '-----------------------------------------'
    # File numbers are 1-based to match the starsig_%03i.fits naming below.
    s.filenumber = (i+1)
    possiblemaskfilepath = os.path.join(configdir, "%s_mask_%s.reg" % (psfkey, s.name))
    print 'mask file path is: ',possiblemaskfilepath
    if os.path.exists(possiblemaskfilepath):
        # Build a boolean pixel mask from the ds9 region file.
        s.reg = ds9reg.regions(64, 64) # hardcoded for now # Warning, can cause a lot of trouble when dealing with images other than ECAM
        s.reg.readds9(possiblemaskfilepath, verbose=False)
        s.reg.buildmask(verbose = False)
        print "You masked %i pixels of star %s." % (np.sum(s.reg.mask), s.name)
    else:
        print "No mask file for star %s." % (s.name)
if not update:
    proquest(askquestions)
# Select images to treat
db = KirbyBase()
if thisisatest :
    print "This is a test run."
    images = db.select(imgdb, ['gogogo', 'treatme', 'testlist',psfkeyflag], [True, True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
elif update:
    print "This is an update."
    images = db.select(imgdb, ['gogogo', 'treatme', 'updating',psfkeyflag], [True, True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
    askquestions = False
else :
    images = db.select(imgdb, ['gogogo', 'treatme',psfkeyflag], [True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
print "Number of images to treat :", len(images)
proquest(askquestions)
# For every selected image, flag the masked pixels in each star's sigma
# image with a huge value so they are effectively ignored downstream.
for i, image in enumerate(images):
    print "%i : %s" % (i+1, image['imgname'])
    imgpsfdir = os.path.join(psfdir, image['imgname'])
    os.chdir(os.path.join(imgpsfdir, "results"))
    for s in psfstars:
        if not hasattr(s, 'reg'): # If there is no mask for this star
            continue
        # We modify the sigma image
        sigfilename = "starsig_%03i.fits" % s.filenumber
        (sigarray, sigheader) = fromfits(sigfilename, verbose=False)
        sigarray[s.reg.mask] = 1.0e8
        tofits(sigfilename, sigarray, sigheader, verbose=False)
        print 'saved !'
print "Done."
|
As I explore in our “Key Global and Regional Trends Shaping Toys Licensing” global briefing, toys is one of the largest industries for licensed products globally. Half of the top 10 most heavily licensed toys markets are in Asia Pacific, while the US is the world’s largest. As the Frozen franchise took the industry by storm, 2014 was a particularly good year in toys licensing. As the power of social media in creating successful properties becomes more pronounced and with the much-anticipated Star Wars movie in the pipeline for 2015, the stakes could not have been higher, presenting great opportunities as well as risks in both developed and emerging markets.
While Frozen, Teenage Mutant Ninja Turtles and The LEGO Movie made 2014 a very good year for licensing, the much-anticipated Star Wars as well as Avengers, Jurassic World and Minions movies could make 2015 even more robust.
|
# -*- coding: cp1252 -*-
"""
###############################################################################
HEADER: LogPrep.py
AUTHOR: Esa Heikkinen
DATE: 24.10.2014
DOCUMENT: -
VERSION: "$Id$"
REFERENCES: -
PURPOSE:
CHANGES: "$Log$"
###############################################################################
"""
import argparse
import os.path
import sys
import time
import re
from datetime import datetime, timedelta
from LogPrepColOpers import *
import glob
g_version = "$Id$"
output_lines = []
output_col_lines = {}
divide_col_values = {}
columns_new_list = []
#******************************************************************************
#
# CLASS: LogFile
#
#******************************************************************************
class LogFile:
    """Represents one log file type: parses its lines, applies configured
    column conversions and collects the resulting CSV output lines.

    NOTE(review): relies on module-level globals `variables`,
    `output_col_lines` and `divide_col_values` defined elsewhere in this file.
    """
    global variables
    global date
    # Class-level defaults; __init__ rebinds per-instance copies of these.
    name = "Unknown"
    #output_lines = []
    columns_list = []
    column_new_list = []
    columns_oper = []
    columns_new_oper = {}
    line_csv = ""
    def __init__(self,name):
        # name: message type identifier of this log file.
        self.name=name
        self.output_lines = []
        self.columns_list = []
        self.column_new_list = []
        self.columns_oper = []
        self.columns_new_oper = {}
        self.line_csv = ""
    def check_conversions(self):
        """Evaluate every configured column conversion expression and store
        the results into the global `variables` dict."""
        # Iterate over the column conversions.
        counter = 0
        for col_oper_output in self.columns_new_oper.keys():
            counter += 1
            col_oper = self.columns_new_oper[col_oper_output]
            # Apply the column conversion to the current row.
            # NOTE(review): eval() on configuration-supplied expressions --
            # do not feed untrusted column operators to this script.
            code_str = compile(col_oper,"<string>","eval")
            try:
                variables[col_oper_output] = eval(code_str)
            except:
                print("ERR: Executing: \"%s\"\n" % col_oper)
                sys.exit()
            #print("%3d: %-15s = %s = %s" % (counter,col_oper_output,col_oper,variables[col_oper_output]))
    def set_columns_conversions(self,columns_list,columns_oper):
        """Parse '<out>=<expr>' column operators into evaluable expressions.

        Column names wrapped in '<' and '>' inside the expression are
        rewritten to variables["name"] lookups; new output columns are
        registered in `variables` and in column_new_list.
        """
        self.columns_list = columns_list
        self.columns_oper = columns_oper
        self.columns_new_oper = {}
        # Iterate over the column operators.
        for column_oper in self.columns_oper:
            print("column_oper: %s" % column_oper)
            columns_oper_list = column_oper.split("=")
            columns_oper_list_len = len(columns_oper_list)
            if columns_oper_list_len != 2:
                print("ERR: in column_oper: %s" % column_oper)
                continue
            # Split off the output variable (column) and its function / input variables.
            output_var = columns_oper_list[0]
            oper_func_vars = columns_oper_list[1]
            output_var = output_var.strip("<>")
            # Check whether this is a new column; if so, add it to the
            # variables dict and to the new-column list.
            if not output_var in self.columns_list:
                print("New column: %s" % output_var)
                variables[output_var]=""
                self.column_new_list.append(output_var)
            # Find the column (variable) names on the line, enclosed
            # between '<' and '>' characters.
            str_len = len(oper_func_vars)
            start_ptr = 0
            end_ptr = 0
            new_str = oper_func_vars
            while end_ptr < str_len:
                start_ptr = new_str.find('<',end_ptr)
                if start_ptr == -1:
                    #print("Not found: <")
                    break
                start_ptr += 1
                end_ptr = new_str.find('>',start_ptr)
                if end_ptr == -1:
                    #print("Not found: >")
                    break
                col_name = new_str[start_ptr:end_ptr]
                print("col_name : %s" % (col_name) )
                #print("str_len = %d, start_ptr=%d, end_ptr=%d" % (str_len,start_ptr,end_ptr))
                # Replace the column names with variable references.
                col_name_str = "<" + col_name + ">"
                col_name_var_str = "variables[\"" + col_name + "\"]"
                new_str = new_str.replace(col_name_str,col_name_var_str)
                str_len = len(new_str)
            self.columns_new_oper[output_var] = new_str
            print("new_str = %s" % new_str)
    def read_column_names(self,logfile_name,output_sep_char):
        """Return the column names parsed from the first line of *logfile_name*,
        or an empty list if the file is missing or the line is too short."""
        #print("LogFile: read_column_names: %s" % logfile_name)
        cols_list = []
        # Read the first line of the log file.
        if os.path.isfile(logfile_name):
            f = open(logfile_name, 'r')
            line = f.readline()
            # Strip the newline and other trailing whitespace.
            line = line.rstrip()
            f.close()
            if len(line) > 2:
                cols_list = line.split(output_sep_char)
        #print("read_column_names: cols_list: %s" % cols_list)
        return cols_list
    def read(self,logfile_name,regexps,output_sep_char,input_read_mode,output_files_divide_col):
        """Read *logfile_name* and feed every matching line through
        generate_new_line().

        With a non-empty *regexps*, values are extracted from each line by
        regex groups (one group per configured column); otherwise lines are
        treated as CSV and split on *output_sep_char* (header row skipped).
        """
        print("")
        vars_list_len = len(self.columns_list)
        print("LogFile: read logfile_name: %s" % logfile_name)
        # Read the log file.
        if os.path.isfile(logfile_name):
            f = open(logfile_name, 'r')
            lines = f.readlines()
            f.close()
            line_counter = 0
            line_sel_counter = 0
            error_counter = 0
            # Iterate over the lines of the log file.
            for line in lines:
                # Skip empty lines.
                if len(line) < 2:
                    continue
                # Strip the newline and other trailing whitespace.
                line = line.rstrip()
                line_counter += 1
                #print("LogFile: line: %5d: %s" % (line_counter,line))
                # If a regexp was given (values must be parsed from the line).
                if len(regexps) > 0:
                    # Parse the line and store the values into intermediate variables.
                    p = re.compile(regexps)
                    m = p.match(line)
                    #print("m: %s" % (m))
                    if m != None:
                        line_sel_counter += 1
                        #print("")
                        for cnt in range(vars_list_len):
                            var_name = self.columns_list[cnt]
                            var_value = m.group(cnt+1)
                            variables[var_name]=var_value
                            #print("%5d: Var name: %-20s value: %s" % (cnt,var_name,var_value))
                        self.generate_new_line(variables,output_sep_char,output_files_divide_col)
                # Otherwise the values are already on a CSV-style line.
                else:
                    # Skip the header row.
                    if line_counter == 1:
                        continue
                    columns_value_list = line.split(output_sep_char)
                    vars_value_list_len = len(columns_value_list)
                    if vars_value_list_len != vars_list_len:
                        # NOTE(review): four values are supplied for three %s
                        # placeholders -- this print raises TypeError if reached.
                        print("ERR: Number of columns: %s and %s are different in line: %s" %
                        (vars_value_list_len,vars_list_len,line,output_files_divide_col))
                        sys.exit()
                    line_sel_counter += 1
                    for cnt in range(vars_list_len):
                        var_name = self.columns_list[cnt]
                        var_value = columns_value_list[cnt]
                        variables[var_name]=var_value
                        #print("%5d: Var name: %-20s value: %s" % (cnt,var_name,var_value))
                    self.generate_new_line(variables,output_sep_char,output_files_divide_col)
            print("LogFile: Msg-type = %s" % self.name)
            print("LogFile: line_counter = %d" % line_counter)
            print("LogFile: line_sel_counter = %d" % line_sel_counter)
        else:
            print("LogFile: ERR: Not found logfile: %s" % logfile_name)
    def get(self):
        """Return the collected output lines (single-output-file mode)."""
        print("")
        return self.output_lines
    def get_columns(self):
        """Return all column names: original columns plus generated ones."""
        print("")
        #print("self.columns_list = %s" % self.columns_list)
        #print("self.column_new_list = %s" % self.column_new_list)
        return self.columns_list + self.column_new_list
    def generate_new_line(self,variables,output_sep_char,output_files_divide_col):
        """Build one CSV output line from `variables` and store it, either in
        self.output_lines or, in divide mode, into the global per-value
        buckets keyed by the divide column's value."""
        # Perform any configured column conversions.
        self.check_conversions()
        # Iterate over the columns of the row.
        column_list_all = self.columns_list + self.column_new_list
        self.line_csv = ""
        for col_name in column_list_all:
            col_val = variables[col_name]
            # Append the value to the end of the output (CSV) line.
            self.line_csv = self.line_csv + output_sep_char + col_val
        if output_files_divide_col == None:
            # Store the output file line.
            self.output_lines.append(self.line_csv)
        else:
            col_value = variables[output_files_divide_col]
            try:
                divide_col_values[col_value] += 1
            except:
                divide_col_values[col_value] = 1
            # Store the line under the divide column's value.
            try:
                output_col_lines[col_value].append(self.line_csv)
            except:
                output_col_lines[col_value] = [self.line_csv]
#******************************************************************************
#
# FUNCTION: make_dir_if_no_exist
#
#******************************************************************************
def make_dir_if_no_exist(file_path_name):
    """Create the parent directory of *file_path_name* if it does not exist.

    Accepts a full file path; only the directory part is created
    (recursively). A bare filename with no directory component is a no-op.
    """
    dir_name = os.path.dirname(file_path_name)
    # os.makedirs('') would raise, so skip paths without a directory part.
    if dir_name and not os.path.exists(dir_name):
        try:
            os.makedirs(dir_name)
        except OSError:
            # Another process may have created the directory between the
            # check and the call; only re-raise if it still does not exist.
            # (BUG FIX: the original compared exc.errno to errno.EEXIST but
            # never imported the errno module, so any OSError here raised a
            # NameError instead of being handled.)
            if not os.path.isdir(dir_name):
                raise
#******************************************************************************
#
# FUNCTION: write_output_file
#
#******************************************************************************
#def write_output_file(logfile_new_name,output_lines,column_name_prefix,output_sep_char,output_files_divide_col):
def write_output_file(output_path,logfile_new_name,column_name_prefix,output_sep_char,output_files_divide_col,combined_file_name,msg_type):
    """Write the collected output lines to one or several CSV files.

    When *output_files_divide_col* is None, everything goes into the single
    file *logfile_new_name*. Otherwise one file per distinct value of the
    divide column is written under *output_path* (the incoming
    *logfile_new_name* is ignored in that mode, as before).
    """
    global output_lines
    global output_col_lines
    global divide_col_values
    if output_files_divide_col == None:
        # Single output file mode.
        _write_csv_file(logfile_new_name, output_lines, column_name_prefix, output_sep_char)
    else:
        # One output file per distinct value of the divide column.
        file_cnt = 0
        for col_value in divide_col_values.keys():
            file_cnt += 1
            logfile_new_name = output_path + combined_file_name + "_" + col_value + "_" + msg_type + ".csv"
            print("writes: %5d: logfile = %s" % (file_cnt,logfile_new_name))
            _write_csv_file(logfile_new_name, output_col_lines[col_value], column_name_prefix, output_sep_char)

def _write_csv_file(path, lines, column_name_prefix, output_sep_char):
    """Write one CSV file: a prefixed header row, then numbered data lines.

    Header columns are taken from the global `columns_new_list` with
    *column_name_prefix* prepended to each name.
    """
    make_dir_if_no_exist(path)
    f = open(path, 'w')
    try:
        # Header: a counter column followed by the prefixed column names.
        f.writelines("%sCounter" % column_name_prefix)
        for col_name in columns_new_list:
            f.writelines(output_sep_char + column_name_prefix + col_name)
        f.writelines("\n")
        # Data rows, numbered from 1.
        line_cnt = 0
        for output_line in lines:
            line_cnt += 1
            f.writelines("%d %s\n" % (line_cnt, output_line))
    finally:
        # BUG FIX: the original never closed the file in the single-file
        # branch (resource leak); close in all cases so data is flushed
        # deterministically. Also removed shadowing of the builtin `str`.
        f.close()
#******************************************************************************
#
# FUNCTION: main
#
#******************************************************************************
# Entry point: parse command-line options, expand the input-file globs,
# parse every log file via LogFile and write the results out as CSV.
print("version: %s" % g_version)
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('-input_path','--input_path', dest='input_path', help='input_path')
parser.add_argument('-input_files','--input_files', dest='input_files', help='input_files')
parser.add_argument('-input_read_mode','--input_read_mode', dest='input_read_mode', help='input_read_mode')
parser.add_argument('-combined_file_name','--combined_file_name', dest='combined_file_name', help='combined_file_name')
parser.add_argument('-output_path','--output_path', dest='output_path', help='output_path')
parser.add_argument('-output_files_divide_col','--output_files_divide_col', dest='output_files_divide_col', help='output_files_divide_col')
parser.add_argument('-output_sep_char','--output_sep_char', dest='output_sep_char', help='output_sep_char')
parser.add_argument('-date','--date', dest='date', help='date')
parser.add_argument('-msg_type','--msg_type', dest='msg_type', help='msg_type')
parser.add_argument('-column_name_prefix','--column_name_prefix', dest='column_name_prefix', help='column_name_prefix')
parser.add_argument('-columns','--columns', dest='columns', help='columns')
parser.add_argument('-regexps','--regexps', dest='regexps', help='regexps')
parser.add_argument('-column_oper','--column_oper', action='append', dest='column_oper', default=[], help='column_oper')
args = parser.parse_args()
# Echo the effective configuration for the run log
print("input_path : %s" % args.input_path)
print("input_files : %s" % args.input_files)
print("input_read_mode : %s" % args.input_read_mode)
print("combined_file_name : %s" % args.combined_file_name)
print("output_path : %s" % args.output_path)
print("output_files_divide_col : %s" % args.output_files_divide_col)
print("output_sep_char : \"%s\"" % args.output_sep_char)
print("date : %s" % args.date)
print("msg_type : %s" % args.msg_type)
print("column_name_prefix : %s" % args.column_name_prefix)
print("columns : %s" % args.columns)
print("regexps : %s" % args.regexps)
print("column_oper : %s" % args.column_oper)
print(".....")
# Build the list of input files with full paths (glob patterns expanded)
logfile_name_list = []
input_files_list = args.input_files.split(",")
#print("input_files_list=%s" % input_files_list)
for input_file in input_files_list:
    #print("input_file=%s" % input_file)
    input_file_path_name_list = glob.glob(args.input_path + input_file)
    #print("input_file_path_name_list=%s" % input_file_path_name_list)
    for input_file_path_name in input_file_path_name_list:
        print("input_file_path_name = %s" % input_file_path_name)
        logfile_name_list.append(input_file_path_name)
print(".....")
#print("logfile_name_list = %s" % logfile_name_list)
print("\n")
date = args.date
# Process the input file(s)
for logfile_name in logfile_name_list:
    variables = {}
    msg_type = args.msg_type
    #print("msg_type = \"%s\"" % msg_type)
    print("logfile_name = \"%s\"" % logfile_name)
    # Output-file path and name
    head, tail = os.path.split(logfile_name)
    #print("head=%s, tail=%s" % (head,tail))
    file_name, file_ext =tail.split(".")
    logfile_new_name = args.output_path + file_name + "_" + msg_type + ".csv"
    print("logfile_new_name = \"%s\"" % logfile_new_name)
    #state_search_string = state_search_strings[msg_type]
    regexps = args.regexps
    #columns_list = state_search_string_variables[msg_type]
    log_file = LogFile(msg_type)
    # If column names were given on the command line, use them
    if len(args.columns) > 0:
        columns_list = args.columns.split(",")
    # Otherwise read the column names from the file itself
    else:
        # Take the column names from the first line of the file
        columns_list = log_file.read_column_names(logfile_name,args.output_sep_char)
        if len(columns_list) == 0:
            print("ERR: Not found column names from parameter or file")
            sys.exit()
    #print("regexps = \"%s\"" % regexps)
    #print("columns_list = \"%s\"" % columns_list)
    log_file.set_columns_conversions(columns_list,args.column_oper)
    #log_file.read(logfile_name,regexps,columns_list)
    log_file.read(logfile_name,regexps,args.output_sep_char,args.input_read_mode,args.output_files_divide_col)
    # Fetch the updated column list (columns may have been added or removed)
    columns_new_list = log_file.get_columns()
    #print("columns_new_list = %s" % columns_new_list)
    if args.input_read_mode == None:
        # Default mode: write one output file per input file
        output_lines = log_file.get()
        # Write out to file
        write_output_file(args.output_path,logfile_new_name,args.column_name_prefix,
                          args.output_sep_char,args.output_files_divide_col,args.combined_file_name,args.msg_type)
    elif args.input_read_mode == "COMBINE":
        print("COMBINE")
        # COMBINE mode: accumulate all files' lines; written after the loop
        output_lines += log_file.get()
    else:
        print("ERR: Unknown read mode: %s" % args.input_read_mode)
        sys.exit()
if args.input_read_mode == "COMBINE":
    logfile_new_name = args.output_path + args.combined_file_name + "_" + args.msg_type + ".csv"
    # Write the combined output file
    write_output_file(args.output_path,logfile_new_name,args.column_name_prefix,
                      args.output_sep_char,args.output_files_divide_col,args.combined_file_name,args.msg_type)
print(" Total execution time: %.3f seconds\n" % (time.time() - start_time))
|
Private sessions are a great place to start if you are completely new to Pilates, have any type of injuries, or any specific health issues. You will be instructed with your specific goals in mind. Sessions involve both mat and equipment work. 50 minutes in length.
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging.config
import os
import pytz
import redis
from . import protos
from . import enumerations
from . import models
__all__ = ['configuration', 'enumerations', 'models', 'protos']
def get_configuration(application_name):
    """Load and parse the JSON configuration for *application_name*.

    The file path is read from the environment variable
    ``<APPLICATION_NAME>_CONFIGURATION_FILE_PATH``.
    """
    env_key = application_name.upper() + '_CONFIGURATION_FILE_PATH'
    with open(os.environ[env_key], 'r') as handle:
        return json.load(handle)
# Load this application's configuration at import time (the path comes from
# the <PACKAGE>_CONFIGURATION_FILE_PATH environment variable read by
# get_configuration above) and configure logging from its 'logging' section.
# NOTE(review): a missing env var or file raises at import time.
configuration = get_configuration(application_name=__name__)
logging.config.dictConfig(config=configuration['logging'])
def add(habit_id, value):
    """Record a log entry of *value* for *habit_id*.

    Publishes a LOG_ADDED event onto the Redis "event:all" queue, then
    reads the newest event back and applies it: the value is accumulated
    into the attempt's summary hash, and if the hash has no expiry yet
    it is set to expire at the start of tomorrow (America/New_York).
    """
    redis_client = redis.StrictRedis(host=configuration['redis']['hostname'],
                                     port=configuration['redis']['port'])
    # 1. Create a new event object.
    event = models.Event(topic=enumerations.EventTopic.LOG_ADDED)
    event.arguments.attemptId = 1  # NOTE(review): attempt and creator ids are hard-coded to 1
    event.arguments.habitId = habit_id
    event.arguments.value = value
    event.arguments.createdBy = 1
    # 2. Serialize the new event object and add it to the queue.
    redis_client.rpush('event:all', event.to_string())
    # 3. Get the newest event from the queue and deserialize it.
    event = models.Event.from_string(redis_client.lindex('event:all', -1))
    # 4. Handle the event.
    key = 'attempt:{}:summary'.format(event.arguments.attemptId)
    # Incrementing a value does not reset its key's expiration
    # timeout, so capture the TTL before the increment below.
    time_to_live = redis_client.ttl(key)
    redis_client.hincrbyfloat(key,
                              event.arguments.habitId,
                              event.arguments.value)
    if time_to_live < 0:
        # Negative TTL means no expiry is set: expire the summary at the
        # start of tomorrow in the configured timezone.
        timezone = pytz.timezone('America/New_York')
        timestamp = _get_tomorrow_in_seconds(timezone=timezone)
        redis_client.expireat(key, int(timestamp))
def _get_tomorrow(timezone):
    """
    Get the start of tomorrow.
    The datetime is computed with respect to the specified timezone
    and returned converted into UTC.
    Parameters
    ----------
    timezone : pytz.tzinfo.DstTzInfo subclass
    Returns
    -------
    datetime.datetime
    """
    local_now = (datetime.datetime.utcnow()
                 .replace(tzinfo=pytz.utc)
                 .astimezone(tz=timezone))
    one_day_ahead = local_now + datetime.timedelta(days=1)
    # pytz tzinfo objects differ from the standard library's: attach the
    # zone with localize() rather than the tzinfo= constructor argument.
    midnight_naive = datetime.datetime(year=one_day_ahead.year,
                                       month=one_day_ahead.month,
                                       day=one_day_ahead.day)
    return timezone.localize(dt=midnight_naive).astimezone(tz=pytz.utc)
def _get_tomorrow_in_seconds(timezone):
    """
    Get the start of tomorrow in seconds (i.e. as a Unix timestamp).
    Parameters
    ----------
    timezone : pytz.tzinfo.DstTzInfo subclass
    Returns
    -------
    float
    """
    unix_epoch = datetime.datetime(year=1970, month=1, day=1, tzinfo=pytz.utc)
    return (_get_tomorrow(timezone=timezone) - unix_epoch).total_seconds()
def get_all_from_today():
    """Return the summary hash for attempt 1 from Redis."""
    client = redis.StrictRedis(host=configuration['redis']['hostname'],
                               port=configuration['redis']['port'])
    return client.hgetall('attempt:1:summary')
|
I was so excited to finally shoot Kirsty and Philip’s wedding. I felt that me and Kirsty had been emailing back and forth about the day for so long so it was great to finally get things underway! I knew it would be a day for familiar faces as I shot Kirsty’s friends weddings – Angela & Jonathan, Debbie & Paul and Susan & Niall. We started off the day at Kirsty’s parents house where she was getting ready.
It was such a calm atmosphere at the house. Perfect for keeping things happy! Kirsty was having a great time getting ready with bridesmaid, Katie.
Kirsty looked simply stunning as she got into her dress. She was glowing with happiness.
Emotions were running high as soon as Dad, George and Mum, Morag saw Kirsty for the first time in her dress. You can tell they are such a close family unit. I was so happy to capture it on camera.
I headed up to Forrester Park to catch the boys as they arrived.
All looking handsome in their kilts!
The sun was shining as Kirsty pulled up at the venue. So much so that they decided to have the ceremony outside!
They decided on a humanist ceremony conducted by Wilson Butler.
Forrester Park offers stunning views!
We got driven in the golf buggy up to some fields on the edge of the golf course. Philip had to lift Kirsty over to the field but we got there in the end with a few funny moments in between. Thanks guys!
Forrester Park offers a gorgeous room for dinner and it complemented Kirsty’s decor perfectly.
Between the speeches we had a birthday too. Philip’s Gran was treated to a birthday cake!
After dinner we went outside to capture that last bit of light before the sun disappeared... it was beautiful.
Kirsty and Philip headed onto the dance floor for first dance as husband and wife.
The guests all joined in shortly after to party the night away. Congratulations to Kirsty and Philip — it was a beautiful Forrester Park wedding.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sfsimodels documentation build configuration file, created by
# sphinx-quickstart on Wed May 23 10:38:42 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import date
# Make the package importable so its metadata can be read below.
file_loc = os.path.split(__file__)[0]
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(file_loc), '.')))
import sfsimodels
# -- Project information -----------------------------------------------------
# NOTE(review): project, author and version are re-assigned with literal
# values further down in this file; those later assignments win.
project = sfsimodels.__about__.__project__
author = sfsimodels.__about__.__author__
copyright = u'Copyright 2016 - {0} {1}'.format(date.today().year, author)
# The short X.Y version
version = sfsimodels.__about__.__version__
# The full version, including alpha/beta/rc tags
release = version
import mock
# Stub out heavy/optional dependencies so autodoc can import the package
# without having them installed.
MOCK_MODULES = ['numpy', 'openpyxl']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    # 'sphinx_autodoc_typehints'
]
napoleon_use_param = True  # to get type hinting working
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# NOTE(review): these override the values derived from
# sfsimodels.__about__ above; keep the two in sync or drop one pair.
project = 'sfsimodels'
author = 'Maxim Millen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): overrides the __about__ version above, but `release`
# (set earlier) is NOT updated to match this literal.
version = '0.6.3'
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `to do` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature' # Switch to an ECP theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html', # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sfsimodelsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'sfsimodels.tex', 'sfsimodels Documentation',
     'Maxim Millen', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'sfsimodels', 'sfsimodels Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'sfsimodels', 'sfsimodels Documentation',
     author, 'sfsimodels', 'A set of standard models for assessing structural and geotechnical problems.',
     'Science'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this bare-URL mapping format is deprecated (removed in
# Sphinx 6); the named form is {'python': ('https://docs.python.org/', None)}.
intersphinx_mapping = {'https://docs.python.org/': None}
|
Would NHS.uk make you more anxious?
Monday; too many emails, project deadline Friday, working late. Come home feeling tired, anxious and unsettled.
Tuesday; working late again. Tomorrow, dentist, extraction. Hate the dentist. Get home with a headache, don’t feel hungry.
Wednesday; leave work early, dentist. On way home check messages; not heard from friend in California. Get home in pain, fed up, anxious about friend.
Thursday; working late again, talk of Brexit and redundancies. Feel fed up, tired and anxious; butterflies in stomach, headache worse.
Friday; meet project deadline, not sure good enough. Get home exhausted, feeling sick, headache, still no appetite etc. Have first date with someone met online; feel very anxious, a bit lightheaded; heart is beating fast, hands sweaty; not sure about date; remember last break up.
Decide to search the Internet. Look at NHS.uk; section on General Anxiety Disorder.
Generalised Anxiety Disorder (GAD) is a long-term condition that causes you to feel anxious about a wide range of situations and issues, rather than one specific event.
You look through the list one by one.
How come a site like NHS.uk, which will be the first port of call for many, doesn’t mention this? How come it focuses only on how we feel when we are anxious, (i.e., what it regards as symptoms) and not on what we are doing? It is true that this myopic view is only a reflection of ideas in the wider culture, but that such a high-profile site has this bias has five profound effects.
It encourages us to focus on our feelings and to think of these as symptoms of a condition or illness.
It discourages us from asking questions about what we are thinking and how we are thinking about it.
As the causes are apparently either unknown or beyond our control, it increases our worries about how bad we might become or how long IT might last.
It increases our concern/fear that there might be something wrong with us.
It encourages us to feel we need help because there appears to be little we can do for ourselves.
If we see anxiety as an activity with a purpose, we can connect how we are thinking and the feelings of anxiety that arise as a result. Does NHS.uk not want you to know this? Or did they simply forget to mention it in their list of causes? If not, what was their reason? What effects does it have on individuals and on services and costs?
I am often lost in anxious thoughts tumbling like a washing machine on fast spin and can find no way out.
However eventually I do find a way of stopping the cycle and letting everything settle.
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
    """This class does not support CRUD Operations please use parent.
    :param es_resp_invalid_http: {"description": "Total non-http response", "format": "counter", "type": "number", "oid": "19", "optional": true, "size": "8"}
    :param curr_req: {"description": "Current requests", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"}
    :param total_rev_pkts_inspected_good_status_code: {"description": "Total reverse packets with good status code inspected", "format": "counter", "type": "number", "oid": "21", "optional": true, "size": "8"}
    :param es_resp_count: {"description": "Total proxy response", "format": "counter", "type": "number", "oid": "18", "optional": true, "size": "8"}
    :param total_fwd_bytes: {"description": "Forward bytes", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "8"}
    :param es_resp_other: {"description": "Response status other", "format": "counter", "type": "number", "oid": "16", "optional": true, "size": "8"}
    :param fastest_rsp_time: {"description": "Fastest response time", "format": "counter", "type": "number", "oid": "23", "optional": true, "size": "8"}
    :param total_fwd_pkts: {"description": "Forward packets", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "8"}
    :param es_req_count: {"description": "Total proxy request", "format": "counter", "type": "number", "oid": "17", "optional": true, "size": "8"}
    :param es_resp_500: {"description": "Response status 500", "format": "counter", "type": "number", "oid": "15", "optional": true, "size": "8"}
    :param peak_conn: {"description": "Peak connections", "format": "counter", "type": "number", "oid": "11", "optional": true, "size": "8"}
    :param total_req: {"description": "Total Requests", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "8"}
    :param es_resp_400: {"description": "Response status 400", "format": "counter", "type": "number", "oid": "14", "optional": true, "size": "8"}
    :param es_resp_300: {"description": "Response status 300", "format": "counter", "type": "number", "oid": "13", "optional": true, "size": "8"}
    :param curr_conn: {"description": "Current connections", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "8"}
    :param es_resp_200: {"description": "Response status 200", "format": "counter", "type": "number", "oid": "12", "optional": true, "size": "8"}
    :param total_rev_bytes: {"description": "Reverse bytes", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "8"}
    :param response_time: {"description": "Response time", "format": "counter", "type": "number", "oid": "22", "optional": true, "size": "8"}
    :param total_conn: {"description": "Total connections", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "8"}
    :param total_rev_pkts: {"description": "Reverse packets", "format": "counter", "type": "number", "oid": "8", "optional": true, "size": "8"}
    :param total_req_succ: {"description": "Total requests succ", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "8"}
    :param last_total_conn: {"description": "Last total connections", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "8"}
    :param total_rev_pkts_inspected: {"description": "Total reverse packets inspected", "format": "counter", "type": "number", "oid": "20", "optional": true, "size": "8"}
    :param slowest_rsp_time: {"description": "Slowest response time", "format": "counter", "type": "number", "oid": "24", "optional": true, "size": "8"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        self.DeviceProxy = ""
        # Initialise every counter field to an empty string, in the same
        # order as the docstring parameters above.
        counter_fields = (
            "es_resp_invalid_http", "curr_req",
            "total_rev_pkts_inspected_good_status_code", "es_resp_count",
            "total_fwd_bytes", "es_resp_other", "fastest_rsp_time",
            "total_fwd_pkts", "es_req_count", "es_resp_500", "peak_conn",
            "total_req", "es_resp_400", "es_resp_300", "curr_conn",
            "es_resp_200", "total_rev_bytes", "response_time", "total_conn",
            "total_rev_pkts", "total_req_succ", "last_total_conn",
            "total_rev_pkts_inspected", "slowest_rsp_time",
        )
        for field_name in counter_fields:
            setattr(self, field_name, "")
        # Apply any caller-supplied attribute overrides.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Port(A10BaseClass):
    """Class Description::
    Statistics for the object port.
    Class port supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param protocol: {"enum": ["tcp", "udp"], "description": "'tcp': TCP Port; 'udp': UDP Port; ", "format": "enum", "type": "string", "oid": "1002", "optional": false}
    :param port_number: {"description": "Port Number", "format": "number", "optional": false, "oid": "1001", "maximum": 65534, "minimum": 0, "type": "number"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/slb/server/{name}/port/{port_number}+{protocol}/stats`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # Both key fields must be supplied for CRUD operations.
        self.required = ["port_number", "protocol"]
        self.b_key = "port"
        self.a10_url = "/axapi/v3/slb/server/{name}/port/{port_number}+{protocol}/stats"
        self.DeviceProxy = ""
        self.stats = {}
        self.protocol = ""
        self.port_number = ""
        # Apply any caller-supplied attribute overrides.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
Ever wonder why you can stand upright with minimal effort, or why you can bend over and then intuitively know how to get back into an upright position? It’s all about balance, and balance is an amazingly complex skill. Even the simplest task, such as standing upright, requires that several body systems work together, providing continuous feedback about body position and orientation. And the fact that we (or some of us!) can learn to do a back flip on a balance beam is a testament to not only how complex these systems are, but also how much our ability to balance can be improved.
Though people often assume their balance will get worse as they age, most decline is preventable: A sedentary lifestyle and associated conditions such as obesity and muscle weakness are the main culprits.
But there’s a way to improve balance, and it doesn’t involve running marathons or even leaving your house. Research has shown that the muscles of the calf and ankle are especially important for maintaining balance and recovering lost balance quickly. Exercises to strengthen these muscles aren’t usually emphasized in workouts, so here’s a few to help you get started.
Front shin: Sitting in a supportive chair, place your right heel on top of your left foot. Push down with your right heel while pushing up with the left foot. Hold for 5 seconds; relax. Do 3 to 5 times on each side.
Outer shin: Place the same chair near a wall so that the outside of your right foot is flush with the wall. Keeping feet flat on the floor, push your right foot against the wall. Hold for 5 seconds; relax. Do 3 to 5 times on each side.
Inner shin: Stand or sit with feet flat on the floor, the insides of feet touching. Keeping your foot flat on the floor, push your right foot against your left foot. Hold for 5 seconds; relax. Do 3 to 5 times on each side.
With your eyes open and arms out to the sides at shoulder height, stand on your left leg. Keeping your right knee bent, slowly lift your right leg until the thigh is horizontal.
Hold this position for 10 seconds, then repeat on the other side. Perform at least 5 repetitions on each leg.
With eyes open, stand on your left leg. Hold your arms out to the sides at shoulder height. Bend your right knee, and slowly lift your right leg until the thigh is horizontal. Do 3 full rotations of the right ankle.
Slowly lower your right leg, and then extend it behind you until you are in a lunge position.
Hold lunge for 3 seconds.
Bring right leg up so you are standing straight; repeat sequence on other side.
Do 3 full sequences on both legs.
Jessica Smith has a master’s degree in bioengineering. She holds certifications from the Aerobics and Fitness Association of America and the American College of Sports Medicine.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import types
import sys
from subprocess import call
from tempfile import NamedTemporaryFile
from abc import ABCMeta, abstractmethod
class STYLE:
    # Text rendition identifiers understood by _color_text()
    # (mapped there to the ANSI attribute codes 0, 1 and 4).
    NORMAL = "NORMAL"
    BOLD = "BOLD"
    UNDERLINE = "UNDERLINE"
class ANSICOLOR:
    """Symbolic names for the eight basic ANSI foreground colors."""
    BLACK = "BLACK"
    RED = "RED"
    GREEN = "GREEN"
    YELLOW = "YELLOW"
    BLUE = "BLUE"
    PURPLE = "PURPLE"
    CYAN = "CYAN"
    GRAY = "GRAY"
    @staticmethod
    def translate(color):
        """Map a color name to its ANSI foreground code ("30".."37").

        Raises RuntimeError for an unknown color, like the original
        if/elif chain did.
        """
        codes = {
            ANSICOLOR.BLACK: "30",
            ANSICOLOR.RED: "31",
            ANSICOLOR.GREEN: "32",
            ANSICOLOR.YELLOW: "33",
            ANSICOLOR.BLUE: "34",
            ANSICOLOR.PURPLE: "35",
            ANSICOLOR.CYAN: "36",
            ANSICOLOR.GRAY: "37",
        }
        if color not in codes:
            raise RuntimeError("unsupported ANSI color")
        return codes[color]
def _to_256(color):
if (color < 0 or color > 255):
raise RuntimeError("8bit color must be in range [0, 255]")
return "38;5;" + str(color)
def _normal_text():
return "\033[0m"
def _color_text(color, style):
    """Build the ANSI escape prefix selecting *color* with *style*.

    *color* is either an int in [0, 255] (emitted as an 8-bit "38;5;N"
    code via _to_256) or one of the ANSICOLOR names.  Raises
    RuntimeError for an unknown style.  NOTE: Python 2 only — uses
    types.IntType/LongType for the integer check.
    """
    text = '\033['
    if (style == STYLE.NORMAL):
        text += "0;"
    elif (style == STYLE.BOLD):
        text += "1;"
    elif (style == STYLE.UNDERLINE):
        text += "4;"
    else:
        raise RuntimeError("unsupported style")
    if (isinstance(color, (types.IntType, types.LongType))):
        text += _to_256(color)
    else:
        text += ANSICOLOR.translate(color)
    text += "m";
    return text;
class ColoredText:
    """Tracks the currently active ANSI rendition and prefixes it to strings."""
    _current_text = ""
    @classmethod
    def reset(cls):
        # Return the terminal to its default rendition.
        cls._current_text = _normal_text()
        sys.stdout.write(cls._current_text)
    @classmethod
    def setup(cls, color, style=STYLE.NORMAL):
        # Switch the terminal to the given color/style and remember it.
        cls._current_text = _color_text(color, style)
        sys.stdout.write(cls._current_text)
    @classmethod
    def str(cls, msg, color, style=STYLE.NORMAL):
        # Wrap *msg* in the requested rendition, restoring the current one after.
        return _color_text(color, style) + msg + cls._current_text
# Start out with the terminal's default rendition.
ColoredText.reset()
def system(cmd, rediret=True):
    """Run *cmd* through the shell.

    With rediret=True (default), stdout/stderr are captured into a
    NamedTemporaryFile and (returncode, file) is returned with the file
    rewound to the start.  Otherwise only the returncode is returned.
    """
    if not rediret:
        return call(cmd, shell=True)
    capture = NamedTemporaryFile()
    returncode = call(cmd, shell=True, stdout=capture, stderr=capture)
    capture.seek(0)
    return (returncode, capture)
class Application:
    """Abstract base class for small command-line tools.

    Subclasses implement main(); run() invokes it and, instead of
    letting exceptions propagate, prints them prefixed with a red
    "[ERROR]" tag.
    """
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration
    def run(self):
        try:
            self.main()
        except Exception, e:  # Python 2 except syntax; catches and reports everything
            print(ColoredText.str("[ERROR] ", ANSICOLOR.RED) + str(e))
    @abstractmethod
    def main(self):
        pass
class Node:
    """A named tree node that renders its subtree as an ASCII-art tree."""
    def __init__(self, name=None, desc=None):
        """Create a node.  *desc* is accepted for API compatibility but unused."""
        self.name = name
        self.children = []
    def _serialize(self, lastones):
        """Render this node and its subtree as text.

        ``lastones[i]`` records whether the ancestor at depth *i* was the
        last child of its parent; it selects blank padding versus a "│"
        continuation bar for that column.
        """
        self.children = sorted(self.children, key=lambda x: x.name)
        out = ""  # fix: the original shadowed the builtin `str` here
        level = len(lastones)
        if level > 0:
            for i in range(level - 1):
                out += "   " if lastones[i] else "  │"
            out += " └─" if lastones[-1] else " ├─"
        out += self.name
        last_index = len(self.children) - 1
        for i in range(len(self.children)):
            out += "\n" + self.children[i]._serialize(lastones + [i == last_index])
        return out
    def str(self):
        """Return the printable tree rooted at this node.

        A node with a name is rendered as the single root; a nameless
        root renders each child as an independent top-level tree.
        """
        self.children = sorted(self.children, key=lambda x: x.name)
        if self.name is not None and self.name != "":
            # A named root is exactly a depth-0 serialization of itself.
            return self._serialize([])
        # Nameless root: the original if/else here produced identical
        # output for last and non-last children, so a plain join suffices.
        return "\n".join(child._serialize([]) for child in self.children)
|
NFP Insurance Solutions is an independent insurance advisory firm specializing in risk management and life insurance solutions for the high net worth community. We work with clients and their advisors to craft custom insurance solutions to help grow and preserve their wealth. We are completely transparent and independent and have earned a reputation within the advisor community as one of the best resources to access large, complicated insurance transactions.
|
'''
Common LISPy / Haskelly functions to use inside RIPL
Std Lib Functional stuff:
https://docs.python.org/3.4/library/itertools.html
https://docs.python.org/3.4/library/functools.html
https://docs.python.org/3.4/library/operator.html
Some info on what haskell does:
https://wiki.haskell.org/Fold
http://learnyouahaskell.com/higher-order-functions
Clojure's core reference:
https://clojuredocs.org/clojure.core
https://clojuredocs.org/quickref
'''
import functools
import itertools
import operator as op
from types import GeneratorType
from .bases import RVector
def reverse(itr):
    ''' :: Itr|Gen[*T] -> Itr[*T]
    Reverse an iterable.

    Sequences are reversed with a slice, preserving their type (str in,
    str out; list in, list out).  Non-sliceable iterables such as
    generators are materialised and a reversed list is returned —
    the original raised TypeError for those.
    '''
    try:
        return itr[::-1]
    except TypeError:
        return list(itr)[::-1]
# gen_reverse = lambda x: reversed(x)
def product(cont):
    ''' :: Itr|Gen[a] -> a
    Find the product of an iterable. Contents of the iterable must
    implement __mul__
    '''
    return functools.reduce(lambda acc, val: acc * val, cont)
def foldl(func, acc, cont):
    ''' :: f(a, a) -> a, Itr|Gen[a] -> a
    Left fold: repeatedly apply acc = func(acc, elem) over the iterable,
    starting from the given accumulator, and return the final accumulator.
    '''
    return functools.reduce(func, cont, acc)
def foldr(func, acc, cont):
    ''' :: f(a, a) -> a, Itr|Gen[a] -> a
    Right fold: repeatedly apply acc = func(elem, acc) from the right end
    of the container and return the final accumulator.

    Accepts any iterable, not only sequences and generators: inputs that
    do not support reversed() (generators, map/filter objects, plain
    iterators) are materialized first.

    WARNING: Right folds and scans will blow up for
    infinite generators!
    '''
    try:
        rev = reversed(cont)
    except TypeError:
        # Not a reversible sequence (generator, iterator, ...): buffer it.
        rev = reversed(list(cont))
    for val in rev:
        acc = func(val, acc)
    return acc
def scanl(func, acc, cont):
    ''' :: f(a, a) -> a, Itr|Gen[a] -> List[a]
    Left scan: like foldl, but return the whole list of intermediate
    accumulator values, beginning with the initial accumulator.
    '''
    results = [acc]
    for item in cont:
        acc = func(acc, item)
        results.append(acc)
    return results
def scanr(func, acc, cont):
    ''' :: f(a, a) -> a, Itr|Gen[a] -> List[a]
    Right scan: like foldr, but return the whole list of intermediate
    accumulator values, beginning with the initial accumulator.
    WARNING: Right folds and scans will blow up for
    infinite generators!
    '''
    if isinstance(cont, GeneratorType):
        # Generators cannot be traversed from the right: buffer them.
        cont = list(cont)
    results = [acc]
    for item in cont[::-1]:
        acc = func(item, acc)
        results.append(acc)
    return results
def take(num, cont):
    ''' :: Int, Itr|Gen[*T] -> List[*T]
    Return up to the first `num` elements of an iterable or generator.
    Sliceable containers keep their own type (e.g. str -> str); generators
    yield a list.
    '''
    try:
        return cont[:num]
    except TypeError:
        # Not sliceable: pull at most `num` items from the iterator.
        return list(itertools.islice(cont, num))
def drop(num, cont):
    ''' :: Int, Itr|Gen[*T] -> List[*T]
    Return everything but the first `num` elements of the container.

    BUGFIX: the previous generator branch collected and returned the
    first `num` elements (i.e. it behaved like `take`); it now discards
    them and returns the remaining items.
    '''
    try:
        return cont[num:]
    except TypeError:
        # Generator: consume and discard the first `num` items ...
        for _ in range(num):
            try:
                next(cont)
            except StopIteration:
                return []
        # ... then hand back whatever is left.
        return list(cont)
def takeWhile(predicate, container):
    ''' :: f(a) -> Bool, Itr|Gen[*T] -> Gen[*T]
    Lazily yield leading elements while the predicate holds, stopping at
    the first element for which it is false.
    (takeWhile ~(< 3) '(1 2 3 4 5)) -> '(1 2)
    '''
    taker = itertools.takewhile
    return taker(predicate, container)
def dropWhile(predicate, container):
    ''' :: f(a) -> Bool, Itr|Gen[*T] -> Gen[*T]
    Lazily skip leading elements while the predicate holds, then yield
    every remaining element.
    (dropWhile ~(< 3) '(1 2 3 4 5)) -> '(3 4 5)
    '''
    dropper = itertools.dropwhile
    return dropper(predicate, container)
def flatten(lst):
    ''' :: Itr|Gen[*T] -> List[*T]
    Recursively flatten arbitrarily nested lists into one flat list.
    Non-list elements are kept as-is.
    '''
    flat = []
    for elem in lst:
        if isinstance(elem, list):
            flat.extend(flatten(elem))
        else:
            flat.append(elem)
    return flat
def drain(gen):
    ''' :: Gen[*T] -> List[*T]
    Exhaust a generator into an RVector (the RIPL list type).
    '''
    return RVector(list(gen))
|
Need a new trencher?
Click on the “Go to offer” button below to enter the contest.
Don’t miss this golden opportunity to receive this quality trencher along with its accessories!
You have until the 7th of December to enter the draw.
|
"""File contains constants (like EnvOptions)"""
from enum import Enum
INSTRUCTOR_FEEDBACK_TAG_MSG = "Tag left here so the instructor's scripts know when feedback was uploaded"
class EnvOptions(str, Enum):
    """This lists the keys for the rcfile and command line arguments.
    Note that this is a 'mix-in' enum, which means that saying
    EnvOptions.INFILE automatically uses the str() value for that member
    (i.e., EnvOptions.INFILE is "infile", without needing a .value on
    the .INFILE"""
    ACTION = "action" # Key used to store which command-line option (addStudents, etc) was chosen
    # common options:
    SERVER = 'server'
    SERVER_IP_ADDR = 'server_ip'
    USERNAME = 'username'
    PASSWORD = 'password'
    # command line option for listing projects
    # This is mostly useful to check your configuration
    # and make sure you can connect to the server
    LIST_PROJECTS = "listProjects"
    # command line args for creating student accounts
    CREATE_STUDENTS = "addStudents"
    INFILE = "infile"
    INFILE_TYPE = "infile_type"
    SECTION = "section"
    # Removing a whole class section's accounts/projects:
    DELETE_CLASS = "deleteClass"
    # Adding a new homework project:
    NEW_HOMEWORK = "addHomework"
    HOMEWORK_NAME = 'homework_name'
    HOMEWORK_DIR = 'homework_path'
    # Downloading submitted student work:
    DOWNLOAD_HOMEWORK = "download"
    STUDENT_WORK_DIR = "student_work_dir"
    # Committing / uploading instructor feedback to student repos:
    COMMIT_FEEDBACK = "commitFeedback"
    UPLOAD_FEEDBACK = "uploadFeedback"
    # Filename pattern that identifies feedback files (default: "grade"):
    FEEDBACK_PATTERN = "pattern"
    FEEDBACK_PATTERN_DEFAULT = "grade"
    # Git tag marking graded work (see INSTRUCTOR_FEEDBACK_TAG_MSG above):
    GIT_TAG = "tag"
    GIT_TAG_DEFAULT = "GradedByInstructor-V1"
    GRADING_LIST = "gradeList"
    # Running an arbitrary git command across student repos:
    GIT_DO = "gitdo"
    GIT_COMMAND = "gitCommand"
    # .gltrc file
    KNOWN_GOOD_ACCOUNTS = "known_good_accounts"
    DATA_DIR = "data_dir"
    TEMP_DIR = "temp_dir"
    SECTION_LIST = 'section_list'
    # Template for where student homework is checked out; the uppercase
    # words look like placeholders substituted per student — confirm
    # against the code that consumes HW_LOC.
    HW_LOC = 'student_homework_location'
    HW_LOC_DEFAULT = "SECTION/ASSIGNMENT/NAME_LAST, NAME_FIRST/"
|
Note from shop owner Welcome to Mythology! Our shop is temporarily closed so we can enjoy the holidays with our family. If you need to reach us, please email mythologycandles [!at] gmail.com. Thanks!
Welcome to Mythology! Our shop is temporarily closed so we can enjoy the holidays with our family. If you need to reach us, please email mythologycandles [!at] gmail.com. Thanks!
Love the smell, it doesn't smell artificial or too overpowering.
Our FAUN soy candle marries smooth, rich sandalwood with the crisp scent of rosemary and thyme for a scent that is fresh and cleansing. One of our Etsy bestsellers!
Men love our natural soy candles just as much as women do- but you won’t find any gimmicks here. (Sorry if you love bacon- and beer- scented candles. It’s just not our thing.) Instead, you’ll find good, clean, honest scents rooted in nature.
Just a few of the things that go into making our natural soy candles- including the most important- my journal of secrets! While I keep track of our perfected formulas digitally, I need to be able to scribble down ideas when I experiment!
I make all of our candles in Richardson, TX, where I live with my husband, son, and black cat Spooky. When I'm not working, I love reading, cross-stitching, watching historical romances, and planning our next adventures.
My husband Andrew handles the legal and administrative side of Mythology LLC. He's also my chief scent guinea pig!
We're happy to discuss personalized or custom orders. Please contact us through Etsy or mythologycandles@gmail.com for more information. Please note there may be additional charges for custom work, and we generally do not do private labeling.
For information on stocking Mythology candles in your brick and mortar store please contact us through Etsy or mythologycandles@gmail.com.
We use all-natural soy wax for its clean-burning properties and ecological benefits. However, soy candles behave differently than traditional petroleum-based paraffin candles. For best results, keep these tips in mind.
-Keep the wick trimmed to ¼”. You should examine your wick every time you light your candle.
-While it's important to keep your wick trimmed, trimming it too low will result in a weak flame. If this occurs, pour out a small amount of melted wax to allow the wick to breathe.
-Burn your candle for 2-3 hours each time you use it. Candles have a memory! For the initial use, allow the melt pool to reach the edges of the container. This will prevent “tunneling,” and allow your candle to last longer with a better scent throw.
-Keep candles away from children, pets, curtains, and other flammable objects.
-If the flame in your candle is too high, blow it out and trim the wick.
We are required to collect sales tax for all orders in the state of Texas.
In order to be as eco-friendly as possible, we occasionally ship candles in recycled boxes. If this is an issue, please let us know ahead of time and we'll be happy to accommodate your request for an unused shipping box.
Many of our candles are shipped with packing peanuts. Rest-assured that we will only use eco-friendly, biodegradable peanuts. Never styrofoam.
All candles are shipped on either Monday or Tuesday through USPS Priority Mail with tracking. We do this to minimize candles melting when shipping in the summer heat.
Returns and exchanges will only be accepted if the candle is returned in its original, unused condition. You must contact us within 14 days of delivery and ship the candle back within 30 days of delivery. The buyer is responsible for all return shipping fees.
Custom orders and items bought on sale may not be returned.
Of course, we always want you to have a 5-star experience. If you are unhappy with your purchase, please reach out to us!
The health of our environment is important to us. Please recycle any packaging and re-use the container the candle came in. They make great toiletry holders or small planters! To re-use glass and aluminum containers, simply melt out the wax with hot water or a hair dryer.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_run_script
short_description: Executes the script on a given list of targets.
description:
- Executes the script on a given list of targets.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
script_name:
description:
- Script name.
type: str
script:
description:
- Script body.
type: str
targets:
description:
- On what targets to execute this command. Targets may be identified by their name, or object unique identifier.
type: list
args:
description:
- Script arguments.
type: str
comments:
description:
- Comments string.
type: str
extends_documentation_fragment: checkpoint_commands
"""
EXAMPLES = """
- name: run-script
cp_mgmt_run_script:
script: ls -l /
script_name: 'Script Example: List files under / dir'
targets:
- corporate-gateway
"""
RETURN = """
cp_mgmt_run_script:
description: The checkpoint run-script output.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command
def main():
    """Entry point: run the Check Point 'run-script' API command.

    Builds the module argument spec, merges in the shared checkpoint
    command arguments, executes the API call and exits with its result.
    """
    fields = {
        'script_name': {'type': 'str'},
        'script': {'type': 'str'},
        'targets': {'type': 'list'},
        'args': {'type': 'str'},
        'comments': {'type': 'str'},
    }
    fields.update(checkpoint_argument_spec_for_commands)
    module = AnsibleModule(argument_spec=fields)
    result = api_command(module, "run-script")
    module.exit_json(**result)
# Standard Ansible module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
Jiaxing Beyondoor Electronics Co., Ltd is a communication antenna and GPS product supplier located in Jiaxing, Zhejiang, China. Our team has more than 20 years of design experience in the field of antennas and other RF products.
Under strict quality control, our products are undoubtedly made to meet your demands of high quality. Nevertheless, to guarantee you the quality of every product from us, our products all carry one-year warranty.
In our opinion, compatibility is a key point in GPS,GSM,3G, WLAN applications, And that's why we make our products universally compatible. Products from us work well with a variety of prevailing GPS ,GSM,3G, WLAN products on the market and can be easily integrated with your existing equipment.
All of the products from Beyondoor are designed and developed by our experienced engineers, who use the most advanced microwave structure analysis software to design our products. Our engineers also utilize advanced microwave circuit and system design software to develop microwave circuits and their related products. Beyondoor has successfully developed many new and hi-tech antennas and other products which are currently deployed worldwide. Our executives at Beyondoor deeply believe in open and honest communication with our workers as well as our suppliers and customers.
Currently, Beyondoor exports very large volume of products daily to Europe, North America, Southeast Asia, and many other countries.
We intend to build on our reputation for providing the products of high quality at reasonable price and facilitating the business process for clients by continually improving our range, reaching out to more customers, Further details of our company, products, services and contact details are available on the website http://www.beyondoor.com. Please do not hesitate to contact us if you require further information or we can be of assistance.
|
# -*- coding: utf-8 -*-
"""Environmentan information and functions for filesystem operations.
.. References:
.. _appdirs:
http://github.com/ActiveState/appdirs
.. TODO::
* Add get_file for 'user_package_log', 'temp_log' etc.
"""
__author__ = 'Patrick Michl'
__email__ = 'frootlab@gmail.com'
__license__ = 'GPLv3'
__docformat__ = 'google'
import fnmatch
import os
import shutil
import string
import sys
import getpass
import locale
import platform
import re
from distutils import sysconfig
from pathlib import Path, PurePath
try:
from appdirs import AppDirs
except ImportError as err:
raise ImportError(
"requires package appdirs: "
"https://pypi.org/project/appdirs/") from err
from nemoa.base import check, this
from nemoa.types import Any, Iterable, IterAny, OptStrDict
from nemoa.types import PathLikeList, OptStr, OptStrOrBool, OptPathLike
from nemoa.types import PathLike, StrDict, Sequence, StrDictOfPaths, Union
#
# Structural Types
#
# Nested paths for tree structured path references
# TODO (patrick.michl@gmail.com): currently (Python 3.7.1) recursive type
# definition is not fully supported by the typing module. When recursive type
# definition is available replace the following lines by their respective
# recursive definitions
PathLikeSeq = Sequence[PathLike]
PathLikeSeq2 = Sequence[Union[PathLike, PathLikeSeq]]
PathLikeSeq3 = Sequence[Union[PathLike, PathLikeSeq, PathLikeSeq2]]
NestPath = Union[PathLike, PathLikeSeq, PathLikeSeq2, PathLikeSeq3]
#NestPath = Sequence[Union[str, Path, 'NestPath']]
#
# Constants
#
_DEFAULT_APPNAME = 'nemoa'
_DEFAULT_APPAUTHOR = 'frootlab'
_RECURSION_LIMIT = sys.getrecursionlimit()
#
# Public Module Functions
#
def get_var(varname: str, *args: Any, **kwds: Any) -> OptStr:
    """Look up a single environment or application variable.

    Environment variables comprise static and runtime properties of the
    operating system, like 'username' or 'hostname'. Application variables
    describe the application distribution by authorship information,
    bibliographic information, status, formal conditions and notes or
    warnings. For more information see :PEP:`345`. Typical names include
    'name', 'version', 'status', 'description', 'keywords', 'url',
    'license', 'copyright', 'author', 'email', 'maintainer', 'company',
    'organization' and 'credits'.

    Args:
        varname: Name of the environment or application variable.
        *args: Optional arguments that specify the application, as required
            by the function 'nemoa.base.env.update_vars'.
        **kwds: Optional keyword arguments that specify the application, as
            required by the function 'nemoa.base.env.update_vars'.

    Returns:
        String value of the variable, or None if it is unknown.
    """
    check.has_type("'varname'", varname, str)
    # Refresh the module-level cache when it is absent or when the caller
    # supplies application-specific arguments.
    if args or kwds or '_vars' not in globals():
        update_vars(*args, **kwds)
    return globals().get('_vars', {}).get(varname, None)
def get_vars(*args: Any, **kwds: Any) -> StrDict:
    """Return a copy of the environment and application variables.

    Environment variables comprise static and runtime properties of the
    operating system, like 'username' or 'hostname'. Application variables
    describe the application distribution by authorship information,
    bibliographic information, status, formal conditions and notes or
    warnings. For more information see :PEP:`345`.

    Args:
        *args: Optional arguments that specify the application, as required
            by :func:`~nemoa.base.env.update_vars`.
        **kwds: Optional keyword arguments that specify the application, as
            required by :func:`~nemoa.base.env.update_vars`.

    Returns:
        Dictionary containing application variables.
    """
    # Refresh the module-level cache when it is absent or when the caller
    # supplies application-specific arguments.
    if args or kwds or '_vars' not in globals():
        update_vars(*args, **kwds)
    return dict(globals().get('_vars', {}))
def update_vars(filepath: OptPathLike = None) -> None:
    """Rebuild the cached environment and application variables.

    Application variables are harvested by parsing a Python module file for
    dunder attributes of the form ``__key__ = 'value'``; platform specific
    values (encoding, hostname, OS name, username) are added on top. The
    result is stored in the module-level '_vars' cache.

    Args:
        filepath: Valid filepath to python module, that contains the
            application variables as module attributes. By default the file
            of the current top level module is used.
    """
    # Parse the target module's source for __key__ = 'value' attributes.
    source = Path(filepath or this.get_root().__file__).read_text()
    key_re = "__([a-zA-Z][a-zA-Z0-9_]*)__"
    val_re = r"['\"]([^'\"]*)['\"]"
    dunder_re = f"^[ ]*{key_re}[ ]*=[ ]*{val_re}"
    appvars = {}
    for hit in re.finditer(dunder_re, source, re.M):
        appvars[str(hit.group(1))] = str(hit.group(2))
    # Fall back to the top level module name if no __name__ was declared.
    appvars['name'] = appvars.get(
        'name', this.get_module_name().split('.', 1)[0])
    # Add platform specific runtime properties.
    appvars['encoding'] = get_encoding()
    appvars['hostname'] = get_hostname()
    appvars['osname'] = get_osname()
    appvars['username'] = get_username()
    # Publish the cache.
    globals()['_vars'] = appvars
def get_dir(dirname: str, *args: Any, **kwds: Any) -> Path:
    """Get application specific environmental directory by name.

    Returns application specific system directories by platform independent
    names, to allow platform independent storage for caching, logging,
    configuration and permanent data storage.

    Args:
        dirname: Environmental directory name. Allowed values are:
            'user_cache_dir', 'user_config_dir', 'user_data_dir',
            'user_log_dir', 'site_config_dir', 'site_data_dir',
            'site_package_dir', 'package_dir' and 'package_data_dir'.
        *args: Optional arguments that specify the application, as required
            by the function 'nemoa.base.env.update_dirs'.
        **kwds: Optional keyword arguments that specify the application, as
            required by the function 'nemoa.base.env.update_dirs'.

    Returns:
        Path of the requested environmental directory.

    Raises:
        ValueError: If 'dirname' is not a known directory name.
    """
    check.has_type("argument 'dirname'", dirname, str)
    # Refresh the module-level cache when it is absent or when the caller
    # supplies application-specific arguments.
    if args or kwds or '_dirs' not in globals():
        update_dirs(*args, **kwds)
    dirs = globals().get('_dirs', {})
    try:
        return dirs[dirname]
    except KeyError as err:
        raise ValueError(f"directory name '{dirname}' is not valid") from err
def get_dirs(*args: Any, **kwds: Any) -> StrDict:
    """Return a copy of all application specific environmental directories.

    Returns application specific system directories by platform independent
    names, to allow platform independent storage for caching, logging,
    configuration and permanent data storage.

    Args:
        *args: Optional arguments that specify the application, as required
            by the function 'nemoa.base.env.update_dirs'.
        **kwds: Optional keyword arguments that specify the application, as
            required by the function 'nemoa.base.env.update_dirs'.

    Returns:
        Dictionary containing paths of application specific environmental
        directories.
    """
    # Refresh the module-level cache when it is absent or when the caller
    # supplies application-specific arguments.
    if args or kwds or '_dirs' not in globals():
        update_dirs(*args, **kwds)
    return dict(globals().get('_dirs', {}))
def update_dirs(
        appname: OptStr = None, appauthor: OptStrOrBool = None,
        version: OptStr = None, **kwds: Any) -> None:
    """Rebuild the cached application specific directories.

    Directories are retrieved from the package `appdirs`_. Additionally
    'site_package_dir' is retrieved from distutils, while 'package_dir' and
    'package_data_dir' derive from the current top level module. The result
    is stored in the module-level '_dirs' cache.

    Args:
        appname: is the name of application. If None, just the system
            directory is returned.
        appauthor: is the name of the appauthor or distributing body for
            this application. Typically it is the owning company name. You
            may pass False to disable it. Only applied in windows.
        version: is an optional version path element to append to the path.
            You might want to use this if you want multiple versions of your
            app to be able to run independently. If used, this would
            typically be "<major>.<minor>". Only applied when appname is
            present.
        **kwds: Optional directory name specific keyword arguments. For
            more information see `appdirs`_.
    """
    dirs: StrDictOfPaths = {'home': get_home(), 'cwd': get_cwd()}
    # Resolve application identity, falling back to module defaults.
    appname = appname or get_var('name') or _DEFAULT_APPNAME
    appauthor = appauthor or get_var('author') or _DEFAULT_APPAUTHOR
    appdirs = AppDirs(
        appname=appname, appauthor=appauthor, version=version, **kwds)
    # Copy the standard appdirs locations into the cache.
    for key in (
            'user_cache_dir', 'user_config_dir', 'user_data_dir',
            'user_log_dir', 'site_config_dir', 'site_data_dir'):
        dirs[key] = Path(getattr(appdirs, key))
    # Site-wide package directory from distutils.
    dirs['site_package_dir'] = Path(sysconfig.get_python_lib(), appname)
    # Package directories of the current top level module.
    pkg_path = Path(this.get_root().__file__).parent
    dirs['package_dir'] = pkg_path
    dirs['package_data_dir'] = pkg_path / 'data'
    globals()['_dirs'] = dirs
def get_encoding() -> str:
    """Get preferred encoding used for text data.

    Thin wrapper around :func:`locale.getpreferredencoding`, which reports
    the encoding used for text data according to user preferences. Since
    user preferences may not be programmatically available on every system,
    the result is only a best-effort guess.

    Returns:
        String representing the preferred encoding used for text data.
    """
    encoding = locale.getpreferredencoding(do_setlocale=False)
    return encoding
def get_hostname() -> str:
    """Get hostname of the computer.

    Thin wrapper around the standard library, equivalent to
    :func:`platform.node`. Returns an empty string if the hostname cannot
    be determined.

    Returns:
        String representing the computer's hostname (possibly empty).
    """
    return platform.uname().node
def get_osname() -> str:
    """Get name of the Operating System.

    Thin wrapper around the standard library, equivalent to
    :func:`platform.system`. Returns e.g. 'Linux', 'Windows' or 'Java',
    or an empty string if the value cannot be determined.

    Returns:
        String representing the OS name (possibly empty).
    """
    return platform.uname().system
def get_username() -> str:
    """Login name of the current user.

    Thin wrapper around :func:`getpass.getuser`, which checks the
    environment variables LOGNAME, USER, LNAME and USERNAME in order and
    returns the first non-empty value; if none is set, the login name from
    the password database is used on systems supporting the pwd module,
    otherwise an exception is raised.

    Returns:
        String representing the login name of the current user.
    """
    login = getpass.getuser()
    return login
def get_cwd() -> Path:
    """Get path of current working directory.

    Returns:
        Path of current working directory.
    """
    return Path(os.getcwd())
def get_home() -> Path:
    """Get path of current users home directory.

    Returns:
        Path of current users home directory.
    """
    return Path('~').expanduser()
def clear_filename(fname: str) -> str:
r"""Clear filename from invalid characters.
Args:
fname: Arbitrary string, which is be cleared from invalid filename
characters.
Returns:
String containing valid path syntax.
Examples:
>>> clear_filename('3/\nE{$5}.e')
'3E5.e'
"""
valid = "-_.() " + string.ascii_letters + string.digits
fname = ''.join(c for c in fname if c in valid).replace(' ', '_')
return fname
def match_paths(paths: PathLikeList, pattern: str) -> PathLikeList:
    """Filter pathlist to matches with wildcard pattern.

    Args:
        paths: List of paths, which is filtered to matches with pattern.
        pattern: String pattern, containing Unix shell-style wildcards:
            '*' matches arbitrary strings, '?' matches single characters,
            [seq] matches any character in seq and [!seq] matches any
            character not in seq.

    Returns:
        Filtered list of paths.

    Examples:
        >>> match_paths([Path('a.b'), Path('b.a')], '*.b')
        [Path('a.b')]
    """
    # Normalize both sides to POSIX form so matching is platform agnostic,
    # keeping a mapping back to the caller's original objects.
    posix_map = {PurePath(path).as_posix(): path for path in paths}
    posix_pattern = PurePath(pattern).as_posix()
    hits = fnmatch.filter(list(posix_map), posix_pattern)
    return [posix_map[hit] for hit in hits]
def join_path(*args: NestPath) -> Path:
    r"""Join nested iterable path-like structure to single path object.

    Args:
        *args: Arguments containing nested iterable paths of strings and
            PathLike objects.

    Returns:
        Single Path comprising all arguments.

    Examples:
        >>> join_path(('a', ('b', 'c')), 'd')
        Path('a\\b\\c\\d')
    """
    # Depth-first flattening: strings and non-iterables are path tokens,
    # everything else is recursed into.
    def _flatten(tokens: Any) -> IterAny:
        for token in tokens:
            if isinstance(token, str) or not isinstance(token, Iterable):
                yield token
            else:
                yield from _flatten(token)
    try:
        return Path(*_flatten(args))
    except TypeError as err:
        raise TypeError(
            "the tokens of nested paths require to be of types "
            "str, bytes or path-like") from err
def expand(
        *args: NestPath, udict: OptStrDict = None,
        envdirs: bool = True) -> Path:
    r"""Expand path variables.

    Args:
        *args: Path like arguments, respectively given by a tree of strings,
            which can be joined to a path.
        udict: dictionary for user variables. Thereby the keys in the
            dictionary are encapsulated by the symbol '%'. The user
            variables may also include references.
        envdirs: Boolean value which determines if environmental path
            variables are expanded. For a full list of valid environmental
            path variables see 'nemoa.base.env.get_dirs'. Default is True

    Returns:
        Path with all '%var%' references (and '~') expanded.

    Raises:
        RecursionError: If the variable definitions reference each other
            cyclically.

    Examples:
        >>> expand('%var1%/c', 'd', udict = {'var1': 'a/%var2%', 'var2': 'b'})
        'a\\b\\c\\d'
    """
    path = join_path(*args)
    udict = udict or {}
    # Create mapping with path variables
    pvars = {}
    if envdirs:
        for key, val in get_dirs().items():
            pvars[key] = str(val)
    if udict:
        for key, val in udict.items():
            pvars[key] = str(join_path(val))
    # Iteratively expand variables until a fixed point is reached.
    update = True
    i = 0
    while update:
        update = False
        # BUGFIX: iterate over a snapshot, since the loop body may delete
        # entries from 'pvars'; mutating a dict during iteration raises
        # RuntimeError in Python 3.
        for key, val in list(pvars.items()):
            if '%' + key + '%' not in str(path):
                continue
            try:
                path = Path(str(path).replace('%' + key + '%', val))
            except TypeError:
                # Unusable substitution value: drop the variable entirely.
                del pvars[key]
            update = True
            i += 1
            if i > _RECURSION_LIMIT:
                raise RecursionError('cyclic dependency in variables detected')
    path = Path(path)
    # Expand unix style home path '~'
    if envdirs:
        path = path.expanduser()
    return path
def get_dirname(*args: NestPath) -> str:
    r"""Extract directory name from a path like structure.

    Args:
        *args: Path like arguments, respectively given by a tree of strings,
            which can be joined to a path.

    Returns:
        String containing normalized directory path of file.

    Examples:
        >>> get_dirname(('a', ('b', 'c'), 'd'), 'base.ext')
        'a\\b\\c\\d'
    """
    path = expand(*args)
    # Existing directories are returned as-is, otherwise drop the filename.
    return str(path if path.is_dir() else path.parent)
def filename(*args: NestPath) -> str:
    """Extract file name from a path like structure.

    Args:
        *args: Path like arguments, respectively given by a tree of strings,
            which can be joined to a path.

    Returns:
        String containing the file name, or '' if the path is a directory.

    Examples:
        >>> filename(('a', ('b', 'c')), 'base.ext')
        'base.ext'
    """
    path = expand(*args)
    return '' if path.is_dir() else str(path.name)
def basename(*args: NestPath) -> str:
    """Extract file basename (name without extension) from a path.

    Args:
        *args: Path like arguments, respectively given by a tree of strings,
            which can be joined to a path.

    Returns:
        String containing the basename, or '' if the path is a directory.

    Examples:
        >>> basename(('a', ('b', 'c')), 'base.ext')
        'base'
    """
    path = expand(*args)
    return '' if path.is_dir() else str(path.stem)
def fileext(*args: NestPath) -> str:
    """Extract the file extension (without leading dot) from a path.

    Args:
        *args: Path like arguments, respectively given by a tree of strings,
            which can be joined to a path.

    Returns:
        String containing the extension, or '' if the path is a directory.

    Examples:
        >>> fileext(('a', ('b', 'c')), 'base.ext')
        'ext'
    """
    path = expand(*args)
    return '' if path.is_dir() else str(path.suffix).lstrip('.')
def is_dir(path: NestPath) -> bool:
    """Determine if given path points to a directory.

    Extends :meth:`pathlib.Path.is_dir` by nested paths and path variable
    expansion.

    Args:
        path: Path like structure, which is expandable to a valid path

    Returns:
        True if the path points to a directory (or a symbolic link pointing
        to a directory), False if it points to another kind of file.
        (DOCFIX: the previous docstring described files, not directories.)
    """
    return expand(path).is_dir()
def is_file(path: NestPath) -> bool:
    """Determine if given path points to a file.

    Extends :meth:`pathlib.Path.is_file` by nested paths and path variable
    expansion.

    Args:
        path: Path like structure, which is expandable to a valid path.

    Returns:
        True if the path points to a regular file (or a symbolic link
        pointing to a regular file), False if it points to another kind of
        file. (DOCFIX: the previous docstring described directories.)
    """
    return expand(path).is_file()
def copytree(source: NestPath, target: NestPath) -> None:
    """Copy directory structure from given source to target directory.

    Unlike :func:`shutil.copytree` the target directory is allowed to exist
    already; files that already exist in the target are overwritten by
    :func:`shutil.copy`.

    Args:
        source: Path like structure, which comprises the path of a source folder
        target: Path like structure, which comprises the path of a destination
            folder

    Returns:
        None.
    """
    # Recursive copy function, that allows existing files
    def copy(source: Path, target: Path) -> None:
        if source.is_dir():
            if not target.is_dir():
                target.mkdir()
            for each in source.glob('*'):
                copy(each, target / each.name)
        else:
            shutil.copy(source, target)

    copy(expand(source), expand(target))
def mkdir(*args: NestPath) -> bool:
    """Create directory, including missing parents.

    Args:
        *args: Path like structure, which comprises the path of a new directory

    Returns:
        True if the directory already exists, or the operation was successful.
    """
    dirpath = expand(*args)
    if dirpath.is_dir():
        return True
    try:
        os.makedirs(dirpath)
    except Exception as err:
        raise OSError("could not create directory") from err
    return dirpath.is_dir()
def rmdir(*args: NestPath) -> bool:
    """Remove directory and its contents.

    Args:
        *args: Path like structure, which identifies the path of a directory

    Returns:
        True if the directory could be deleted, False if it did not exist or
        could not be removed completely.
    """
    dirpath = expand(*args)
    if dirpath.is_dir():
        # ignore_errors=True: a partial failure is reported via the return
        # value below instead of an exception.
        shutil.rmtree(str(dirpath), ignore_errors=True)
        return not dirpath.exists()
    return False
def touch(
        path: NestPath, parents: bool = True, mode: int = 0o666,
        exist_ok: bool = True) -> bool:
    """Create an empty file at the specified path.

    Args:
        path: Nested :term:`path-like object`, which represents a valid filename
            in the directory structure of the operating system.
        parents: Boolean value, which determines if missing parents of the path
            are created as needed.
        mode: Integer value, which specifies the properties if the file. For
            more information see :func:`os.chmod`.
        exist_ok: Boolean value which determines, if the function returns False,
            if the file already exists.

    Returns:
        True if the file could be created, else False.
    """
    filepath = expand(path)
    if not isinstance(filepath, Path):
        return False

    # Ensure the parent directory exists, creating it when allowed.
    parent = filepath.parent
    if not parent.is_dir():
        if not parents:
            return False
        parent.mkdir(parents=True, exist_ok=True)
        if not parent.is_dir():
            return False

    # Refuse to touch an existing file unless explicitly allowed.
    if not exist_ok and filepath.is_file():
        return False

    filepath.touch(mode=mode, exist_ok=exist_ok)
    return filepath.is_file()
|
A central aspect of birdiness is the ability to make vocalizations. Birds have a unique organ for doing this, the syrinx, and virtually every bird on the planet makes use of it. Bird vocalizations have a wide variety of communicative functions, including attracting mates, declaring territory, sounding alarm, or simply keeping in touch. The syrinx isn’t the only way that birds communicate aurally; they also make a variety of mechanical noises, with their wings, their tail (see the Pin-tailed Snipe Gallinago stenura tail on the right), or their bill.
Sounds aren't the only way birds communicate. They dance, for example, and they can display certain showy feathers. They can also communicate passively; in some species males can signal fitness simply by being bigger or brighter or longer-tailed.
The most obvious way birds communicate is through song, as this Orange River Francolin Scleroptila levaillantoides is doing. Photo by Josh Engel.
Male bowerbirds (found only in Australia and nearby islands) build a decorated "bower" to try to attract a mate, even though the bower itself is not used for nesting. This is a rather unique form of communication, as it has been found that "better" bowers are built by larger males. This is the bower of a Great Bowerbird Chlamydera nuchalis. Photo by Josh Engel.
Snipe "sing" with their unusual, narrow outer tail feathers. Photo by Josh Engel.
Many birds are named after their vocalizations, such as laughingthrushes. Photo by Josh Engel.
These crows have a simple message for the Cooper's Hawk: Go away. Photo by Josh Engel.
One of the U.S.'s great songsters, the appropriately named mockingbird. Photo by Josh Engel.
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import tempfile
import locale
import codecs
from weboob.core.bcall import CallErrors
from weboob.capabilities.content import ICapContent, Revision
from weboob.tools.application.repl import ReplApplication, defaultcount
__all__ = ['WebContentEdit']
class WebContentEdit(ReplApplication):
    """REPL application to fetch, edit (via $EDITOR) and push content hosted
    on websites, through any weboob backend implementing ICapContent.

    NOTE(review): this is Python 2 code (``print >>``, ``unicode``,
    ``iteritems``/``iterkeys``); keep that in mind before porting.
    """
    APPNAME = 'webcontentedit'
    VERSION = '0.h'
    COPYRIGHT = 'Copyright(C) 2010-2011 Romain Bignon'
    DESCRIPTION = "Console application allowing to display and edit contents on various websites."
    SHORT_DESCRIPTION = "manage websites content"
    CAPS = ICapContent

    def do_edit(self, line):
        """
        edit ID [ID...]

        Edit a content with $EDITOR, then push it on the website.
        """
        # Resolve every requested ID into a content object across the
        # selected backends.
        contents = []
        for id in line.split():
            _id, backend_name = self.parse_id(id, unique_backend=True)
            backend_names = (backend_name,) if backend_name is not None else self.enabled_backends
            contents += [content for backend, content in self.do('get_content', _id, backends=backend_names) if content]

        if len(contents) == 0:
            print >>sys.stderr, 'No contents found'
            return 3

        if sys.stdin.isatty():
            # Interactive mode: dump each content into a temp file, open all
            # files in $EDITOR at once, then push only what actually changed.
            paths = {}
            for content in contents:
                tmpdir = os.path.join(tempfile.gettempdir(), "weboob")
                if not os.path.isdir(tmpdir):
                    os.makedirs(tmpdir)
                with tempfile.NamedTemporaryFile(prefix='%s_' % content.id.replace(os.path.sep, '_'), dir=tmpdir, delete=False) as f:
                    data = content.content
                    if isinstance(data, unicode):
                        data = data.encode('utf-8')
                    elif data is None:
                        # Missing content: treat as empty so it can be created.
                        content.content = u''
                        data = ''
                    f.write(data)
                paths[f.name.encode('utf-8')] = content

            params = ''
            editor = os.environ.get('EDITOR', 'vim')
            # vim-specific: open one tab per edited file.
            if editor == 'vim':
                params = '-p'
            os.system("%s %s %s" % (editor, params, ' '.join(['"%s"' % path.replace('"', '\\"') for path in paths.iterkeys()])))

            # Re-read each temp file; keep only contents the user modified.
            for path, content in paths.iteritems():
                with open(path, 'r') as f:
                    data = f.read()
                    try:
                        data = data.decode('utf-8')
                    except UnicodeError:
                        pass
                if content.content != data:
                    content.content = data
                else:
                    contents.remove(content)

            if len(contents) == 0:
                print >>sys.stderr, 'No changes. Abort.'
                return 1

            print 'Contents changed:\n%s' % ('\n'.join(' * %s' % content.id for content in contents))

            message = self.ask('Enter a commit message', default='')
            minor = self.ask('Is this a minor edit?', default=False)
            if not self.ask('Do you want to push?', default=True):
                return

            errors = CallErrors([])
            for content in contents:
                path = [path for path, c in paths.iteritems() if c == content][0]
                sys.stdout.write('Pushing %s...' % content.id.encode('utf-8'))
                sys.stdout.flush()
                try:
                    self.do('push_content', content, message, minor=minor, backends=[content.backend]).wait()
                except CallErrors as e:
                    errors.errors += e.errors
                    # On failure the temp file is kept so the edit is not lost.
                    sys.stdout.write(' error (content saved in %s)\n' % path)
                else:
                    sys.stdout.write(' done\n')
                    os.unlink(path)
        else:
            # stdin is not a tty
            # Pipe mode: read the whole replacement content from stdin and
            # push it without prompting (empty commit message, non-minor).
            if len(contents) != 1:
                print >>sys.stderr, "Multiple ids not supported with pipe"
                return 2
            message, minor = '', False
            data = sys.stdin.read()
            contents[0].content = data.decode(sys.stdin.encoding or locale.getpreferredencoding())

            errors = CallErrors([])
            for content in contents:
                sys.stdout.write('Pushing %s...' % content.id.encode('utf-8'))
                sys.stdout.flush()
                try:
                    self.do('push_content', content, message, minor=minor, backends=[content.backend]).wait()
                except CallErrors as e:
                    errors.errors += e.errors
                    sys.stdout.write(' error\n')
                else:
                    sys.stdout.write(' done\n')

        # Re-raise accumulated push failures (from either mode) at the end.
        if len(errors.errors) > 0:
            raise errors

    @defaultcount(10)
    def do_log(self, line):
        """
        log ID

        Display log of a page
        """
        if not line:
            print >>sys.stderr, 'Error: please give a page ID'
            return 2

        _id, backend_name = self.parse_id(line)
        backend_names = (backend_name,) if backend_name is not None else self.enabled_backends

        _id = _id.encode('utf-8')

        self.start_format()
        for backend, revision in self.do('iter_revisions', _id, backends=backend_names):
            self.format(revision)

    def do_get(self, line):
        """
        get ID [revision]

        Get page contents
        """
        if not line:
            print >>sys.stderr, 'Error: please give a page ID'
            return 2

        # Optional trailing token is a revision identifier.
        line = line.rsplit(' ', 1)
        if len(line) > 1:
            revision = Revision(line[1])
        else:
            revision = None
        _id, backend_name = self.parse_id(line[0])

        backend_names = (backend_name,) if backend_name is not None else self.enabled_backends

        _id = _id.encode('utf-8')

        # Wrap stdout so unicode content is encoded with the terminal charset.
        output = codecs.getwriter(sys.stdout.encoding or locale.getpreferredencoding())(sys.stdout)
        for contents in [content for backend, content in self.do('get_content', _id, revision, backends=backend_names) if content]:
            output.write(contents.content)

        # add a newline unless we are writing
        # in a file or in a pipe
        if os.isatty(output.fileno()):
            output.write('\n')
|
This Website has been developed solely for use in preparing and submitting qualifications and/or proposals in connection with the Norman Manley International Airport Project (the "Project"). Neither the Government of Jamaica nor any of its partners and/or consultants, make any representations (expressed or implied) or warranties as to the accuracy or completeness of the information contained herein, or any other document made available to anyone in connection with the Project and shall have no liability for any representations contained herein or for omissions or errors in this Website or for any other written or oral communication transmitted to the recipient in the course of the recipient‘s evaluation of the Project.
In addition, the Government of Jamaica and their partners and consultants will not be liable for any loss or damage including without limitation, indirect or consequential loss or damage whatsoever arising from or in connection with this Website. These entities will not be liable to reimburse or compensate the recipient for any costs or expenses incurred by the recipient in evaluating or acting upon this Website or otherwise in connection with the Project as contemplated herein.
Generally, you will be able to visit this Site without the need to reveal any personal information about yourself. However, in order to obtain information about the Norman Manley International Airport Public-Private Partnership transaction, you will be required to give personal, business and financial information and contact details.
Only persons over the age of 18 years should complete any request for information.
Only persons so authorized should complete any request for information on behalf of a company.
All information submitted is the sole property of DBJ for the use of the Government of Jamaica. While this information must be utilized within agencies of the Government of Jamaica as a necessary aspect of the established procurement process, DBJ will do all that is feasible or commercially viable to restrict access to your data and to prevent unauthorized use.
DBJ may by use of ‘cookies’, or alphanumeric identifiers transferred to your computer’s hard drive through your Web browser, automatically gather information about which sites you visit on the Internet and which pages you visit on this site. This information is used for client management, internal research and service development. This information may also be disclosed to others in the form of aggregate data that does not describe or identify any individual user and is aimed at compiling statistics on consumer behaviour or what technologies and Internet services are being used by our public. If you wish to deactivate these ‘cookies’ consult the ‘’Help” icon on your browser’s toolbar.
DBJ welcomes your questions and comments regarding this Policy. If you believe that DBJ has not adhered to this Policy, please notify us by e-mail at our contact address and we will use all commercially reasonable efforts to identify and remedy the problem.
|
"""The tests the Graph component."""
import unittest
from homeassistant.setup import setup_component
from tests.common import init_recorder_component, get_test_home_assistant
class TestGraph(unittest.TestCase):
    """Test the history_graph component."""

    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_setup_component(self):
        """Test setup component."""
        self.init_recorder()
        config = {"history": {}, "history_graph": {"name_1": {"entities": "test.test"}}}

        assert setup_component(self.hass, "history_graph", config)
        # One state per configured graph, with defaults (hours_to_show,
        # refresh) filled in by the component.
        assert dict(self.hass.states.get("history_graph.name_1").attributes) == {
            "entity_id": ["test.test"],
            "friendly_name": "name_1",
            "hours_to_show": 24,
            "refresh": 0,
        }

    def init_recorder(self):
        """Initialize the recorder."""
        init_recorder_component(self.hass)
        self.hass.start()
|
Pre Engineered Buildings, Steel Buildings, Structural Steel Construction.
We are counted among the leading service providers of structural fabrication work in a cost-effective way.
In this fast growing world Pre Engineering Buildings have set an industrial revolution by replacing the age old conventional building structures.
Polycarbonate is a polymer with unique blending properties. Polycarbonate sheets roofings are manufactured by extrusion process.
|
from django.db import models
class ProgrammingLanguage(models.Model):
    """A programming language that projects can be associated with."""

    # Display name of the language, e.g. "Python".
    name = models.CharField(max_length=64)

    def __str__(self):
        return self.name

    class Meta:
        # Secondary index to speed up lookups by name.
        indexes = [models.Index(fields=['name'])]
        verbose_name_plural = "Programming Languages"
class Project(models.Model):
    """A tracked ("starred") repository.

    The inline ``# .attr`` comments map each field to the attribute of the
    upstream API payload it is filled from — presumably the GitHub REST API,
    given ``.stargazers_count`` / ``.html_url`` (confirm against the
    importer code).
    """

    name = models.CharField(max_length=255)  # .name
    full_name = models.CharField(max_length=255, unique=True)  # .full_name
    description = models.TextField()  # .description
    url = models.URLField(unique=True)  # .html_url
    # Star count snapshot taken when the project was first imported; the
    # current count is updated separately.
    initial_stars = models.IntegerField()  # .stargazers_count
    current_stars = models.IntegerField(default=0)  # .stargazers_count
    language = models.ForeignKey(
        ProgrammingLanguage, related_name="projects", on_delete=models.CASCADE
    )  # .language
    add_date = models.DateField(auto_now_add=True)
    notes = models.TextField(null=True, blank=True)

    def __str__(self):
        return "{}".format(
            self.name
        )

    class Meta:
        verbose_name_plural = "Stars"
        indexes = [
            models.Index(fields=[
                'name',
                'full_name',
                'description'
            ])
        ]
|
Beautiful home features lots of natural light, granite counter tops in the kitchen, newer recessed lighting and updated flooring. The spacious open floor plan invites you in as the stunning master suite shows off its large walk-in closet. Added square footage off the garage makes for a great flex room or office. This well-maintained home sits on a large .24 ac lot with mature Oak trees. A must see!
|
import os
import fnmatch
from datetime import datetime
from . import base
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
class Client(base.Client):
    """Provenance generator that walks a local directory tree.

    Emits one ``Directory`` entity per visited directory and one ``File``
    entity per file matching the configured glob pattern.
    """

    name = 'Directory'

    description = '''
        Generator for a filesystem.
    '''

    options = {
        'required': ['path'],

        'properties': {
            'path': {
                'description': 'A local filesystem directory.',
                'type': 'string',
            },

            'recurse': {
                # NOTE(review): declared but not consulted by parse() —
                # confirm whether base.Client acts on it.
                'description': 'If true, directories will be recursed into.',
                'type': 'boolean',
                'default': True,
            },

            'pattern': {
                'description': 'Glob pattern for directories and files.',
                'type': 'string',
                'default': '*',
            },

            'hidden': {
                'description': 'If true, hidden files and directories will be included.',  # noqa
                'type': 'boolean',
                'default': False,
            },

            'depth': {
                'description': 'The maximum depth to recurse into.',
                'type': 'integer',
            }
        }
    }

    def parse_directory(self, path):
        """Build the entity payload for a directory, keyed by its path
        relative to the configured root."""
        path_id = os.path.relpath(path, self.options.path)

        return {
            'origins:ident': path_id,
            'prov:type': 'Directory',
            'prov:label': path_id,
            'path': path_id,
        }

    def parse_file(self, path):
        """Build the entity payload for a file, including stat() metadata."""
        path_id = os.path.relpath(path, self.options.path)
        stats = os.stat(path)

        # Convert into datetime from timestamp floats
        atime = datetime.fromtimestamp(stats.st_atime)
        mtime = datetime.fromtimestamp(stats.st_mtime)

        # st_birthtime (true creation time) only exists on some platforms
        # (e.g. macOS/BSD); fall back to st_ctime elsewhere.
        if hasattr(stats, 'st_birthtime'):
            create_time = stats.st_birthtime
        else:
            create_time = stats.st_ctime

        ctime = datetime.fromtimestamp(create_time)

        return {
            'origins:ident': path_id,
            'prov:type': 'File',
            'prov:label': path_id,
            'path': path_id,
            'mode': stats.st_mode,
            'uid': stats.st_uid,
            'gid': stats.st_gid,
            'size': stats.st_size,
            'accessed': atime.strftime(DATETIME_FORMAT),
            'modified': mtime.strftime(DATETIME_FORMAT),
            'created': ctime.strftime(DATETIME_FORMAT),
        }

    def parse(self):
        """Walk the tree, adding directory and file entities to the document.

        Fixes over the previous version: the depth check read the
        non-existent attribute ``self.depth`` (AttributeError), and pruning
        used ``dirs.pop()``, which removes arbitrary trailing entries
        instead of the directory being inspected.
        """
        base_path = self.options.path

        for root, dirs, names in os.walk(base_path):
            if self.options.depth is not None:
                curpath = os.path.relpath(root, base_path)

                if curpath == '.':
                    depth = 0
                else:
                    depth = len(curpath.split(os.path.sep))

                if depth >= self.options.depth:
                    # Clear the list in place so os.walk does not descend
                    # further; assigning a new list would have no effect.
                    dirs[:] = []

            # Prune hidden directories (unless they were requested).
            if not self.options.hidden:
                dirs[:] = [d for d in dirs if not d.startswith('.')]

            directory = self.parse_directory(root)
            self.document.add('entity', directory)

            for f in fnmatch.filter(names, self.options.pattern):
                if not self.options.hidden and f.startswith('.'):
                    continue

                path = os.path.join(root, f)
                _file = self.parse_file(path)
                _file['directory'] = directory
                self.document.add('entity', _file)
|
Analysis of web titan Yahoo and its services.
Edit: Microsoft/Yahoo could easily end up being an all-cash deal.
In most negotiation games — including pretty much all in which money can change hands — there’s one outcome that makes the most sense for all concerned. They should agree to that outcome, and haggle about nothing except price.* In this case, the best outcome for Microsoft and Yahoo is a quick Microsoft takeover of Yahoo. That’s what I thought all along, due to a whole lot of Microsoft/Yahoo synergies. Michael Arrington reports, in confirmation, that there are no viable alternative bidders.
In such cases, the haggling over price depends a lot on each side’s “threat point” — i.e., their fallback position, and the (un)desirability of that fallback position for each side. Yahoo’s fallback position is probably one or more aggressive deals with other major internet players. Merely outsourcing its search business to Google would be stupid. Selling the search business to Google could fetch a wonderful price, because Google would be even more entrenched — but for exactly that reason, it would surely fail to pass antitrust muster. That’s why the Amazon idea that’s been floated is so crucial; a Yahoo/Amazon merger would actually be synergistic in its own way, and hence could command a price at least somewhat competitive with Microsoft’s offer.
As for Microsoft — despite successes in individual Internet areas, it has consistently failed to build a coherent Internet business. Yahoo has its own issues, obviously, but on the whole it’s maintained pretty decent Internet status even as its technological efforts have been consistently disappointing. If Microsoft doesn’t buy Yahoo, it probably needs to buy somebody else with a consistent record of Internet leadership, such as Amazon. That would also involve paying a large premium. And here’s a twist: If Amazon for any reason wants to sell to fellow Washingtonian Microsoft at a big premium, its best move may be to sabotage the Microsoft/Yahoo deal somehow.
One final note: If Yahoo outsources its search business to Google, the possibility of a Microsoft deal is gone forever. Microsoft can not be assured of winning a waiting game, the way Oracle outlasted Peoplesoft.
Bottom line: The Microsoft/Yahoo deal should and probably will happen, and Yahoo should and probably will be able to squeeze Microsoft for more money than has first been offered.
Microsoft and Yahoo were never more than a small part of the exit opportunity anyway.
A merged Microsoft/Yahoo will be so slow-moving it will create more opportunities for competition than it destroys.
Andreesen certainly knows about slow-moving behemoths making wasted acquisitions; Netscape was acquired by two companies (AOL and Sun) that both dribbled away the parts they respectively acquired.* However, I think he and a lot of other observers are missing something this time — the Microsoft/Yahoo synergies are too large to ignore.
*The legalities of the merger were a lot more complicated than that, but in essence AOL got the “internet” piece of Netscape and Sun got the enterprise side.
Microsoft is buying FAST; what about FAST’s contractual prohibition?
As you’ve probably heard by now, Microsoft is buying enterprise search vendor FAST (Fast Search & Transfer). FAST wasn’t always focused on enterprise search; in fact, FAST built alltheweb.com. And when FAST sold alltheweb.com to Inktomi, it agreed not to reenter the web search business itself. Inktomi was subsequently bought by Yahoo, a company not much inclined to do Microsoft any favors in the web search arena.
I look forward to hearing why this won’t be a problem.
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from math import tan, cos, sin, pi
from scipy.integrate import odeint, simps, cumtrapz
##############
## y0 = yk
## y1 = theta
## y2 = px
## y3 = py
##############
def model(y, t):
    """State derivatives for the spring-pendulum system (for odeint).

    State vector ``y``:
        y[0] = yk    vertical spring displacement
        y[1] = theta pendulum angle relative to vertical
        y[2] = vx    horizontal velocity
        y[3] = vy    vertical velocity

    Args:
        y: Current state vector (sequence of four floats).
        t: Time; unused because the system is autonomous, but required by
            the odeint callback signature.

    Returns:
        List of the four state derivatives [d_yk, d_theta, d_vx, d_vy].
    """
    yk, theta, vx, vy = y

    # constants
    m = 0.206  # Mass of pendulum
    k = 10     # Stiffness of Spring
    b = 0.3    # Torsional Resistance
    R = 1.5    # Friction
    L = 0.61   # Length of Pendulum
    g = 9.81   # Gravitational acceleration
    # (The unused equilibrium-position constant Y and the dead local `disp`
    # from the original have been removed.)

    # in between terms
    d_yk = vy + (tan(theta) * vx)
    d_theta = vx / (L * cos(theta))
    d_vy = g + ((-R * d_yk - k * yk) / m)

    # the derivative causality is resolved here, so adding some in between
    # terms for easier debugging
    e_21 = tan(theta) * (d_vy - g)  # comes from the left side of bg
    e_24 = d_theta * b              # torsional resistance
    e_22 = d_theta * tan(theta) * vx / (12 * (cos(theta) ** 2))
    factor = 1 / (1 + (1 / (12 * (cos(theta) ** 2))))
    d_vx = factor * (e_21 - e_22 - e_24)

    return [d_yk, d_theta, d_vx, d_vy]
# Integrate the state equations over 8 seconds and plot the results.
time = np.linspace(0.0, 8.0, 10000)

# Initial condition parameters
# yinit = [Vertical spring displacement, pendulum angle relative to vertical, horizontal velocity, vertical]
yinit = [0, pi/4, 0, 0]
y = odeint(model, yinit, time)

# the state equations give us velocity
# integrate again to get displacement
# our variable of interest
ped_y = cumtrapz(y[:,3], time, initial=0)
ped_x = cumtrapz(y[:,2], time, initial=0)

plt.figure(1)
plt.subplot(311)
plt.plot(time, y[:,0])
plt.xlabel('t [s]')
plt.ylabel('Displacement [m]')
plt.title('Displacement of Spring in Y')
plt.grid()
# NOTE(review): legend() is called without labelled artists throughout this
# figure, so matplotlib emits "no handles" warnings — presumably leftover.
plt.legend()

# plt.subplot(312)
# plt.plot(time, y[:,1])
# plt.xlabel('t [s]')
# plt.ylabel('Displacement [rad]')
# plt.title('Angle of rotation')
# plt.legend()

plt.subplot(312)
plt.plot(time, ped_x)
plt.xlabel('t [s]')
plt.ylabel('Displacement [m]')
plt.title('Displacement of Pendulum in X')
plt.grid()
plt.legend()

plt.subplot(313)
plt.plot(time, ped_y)
plt.xlabel('t [s]')
plt.ylabel('Displacement [m]')
plt.title('Displacement of Pendulum in Y')
plt.grid()
plt.legend()

plt.tight_layout()
plt.show()
|
Thanks, Tegan. Another fine crag you have offered the community!
We climbed a few routes the other day and enjoyed them very much.
I think one of my Petzl Ange quickdraws was lost at Murrin Park's Cereal Killers area on May 13th. It is white (webbing) with an orange-colored carabiner. Thanks and good karma for its return. Kindly PM here.
"A list of this year`s work can be found here: https://drive.google.com/open… "
|
import os
from pymongo import MongoClient
import argparse
# Command-line interface for the genome-import pipeline. Steps that must be
# run manually (or only occasionally) are kept as commented-out templates,
# mirroring the original workflow notes.
parser = argparse.ArgumentParser(description="Python script for going through steps of adding a new genome to the Symbionts database.")

# STEP 1: Before running this script download .gb files from NCBI. If there are any plasmids concatenate these with main chromosome file.
# Place all files in folder and use this folder name as input for this script.
parser.add_argument("dataFolder",
                    nargs=1,
                    type=str,
                    help="Path to the folder which contains the genome files.")

parser.add_argument("toolsFolder",
                    nargs=1,
                    type=str,
                    help="Path to the folder which contains the python scripts (e.g. tools folder in symbionts.org")

parser.add_argument("--host",
                    type=str,
                    default='localhost',
                    help="Hostname for the MongoDB database (default=localhost)")

# type was `str`, which made a user-supplied --port a string and broke
# MongoClient(host, port); ports must be integers.
parser.add_argument("--port",
                    type=int,
                    default=27017,
                    help="Port where MongoDB is listening (default=27017)")

parser.add_argument("--database",
                    type=str,
                    default='symbiont',
                    help="Name of the database to store the data in (default=symbiont)")

parser.add_argument("--blastHitscollection",
                    type=str,
                    default='internal_blast_hits',
                    help="Name of the collection containing internal blast hits (default = internal_blast_hits")

parser.add_argument("--orthologuescollection",
                    type=str,
                    default='orthologues',
                    help="Name of the collection containing orthologues data (default=orthologues)")

args = parser.parse_args()

client = MongoClient(args.host, args.port)
db = client[args.database]
internal_blast_hits = db[args.blastHitscollection]
orthologues = db[args.orthologuescollection]

# The positional folder arguments use nargs=1, so argparse stores them as
# one-element lists. These names are required by the active steps below, so
# they must be unpacked unconditionally (they were previously commented out,
# causing a NameError).
dataFolder = args.dataFolder[0]
toolsFolder = args.toolsFolder[0]

# STEP 2: Import the genomes into the MongoDB database using importSequence.py script
# for filename in os.listdir(dataFolder):
#     print(filename)
#     s = os.path.join(toolsFolder, "importSequence.py") + " " + os.path.join(dataFolder, filename)
#     os.system("python " + s)

# STEP 3: Create a FASTA file for all the CDS features in the database using dumpBlastDB.py (without the split file flag)
# s = os.path.join(toolsFolder, "dumpBlastDB.py") + " " + os.path.join(dataFolder, "symbionts_proteins.fasta")
# os.system("python " + s)

# STEP 4: Run blast query - every CDS in database against every other CDS in database.
# On server: makeblastdb -in 'symbionts_proteins.fasta' -dbtype prot
# blastp -db 'symbionts_proteins.fasta' -query 'symbionts_proteins.fasta' -evalue 1e-10 -outfmt 7 -out 'selfmatches.txt'

# STEP 5: delete current mongodb collections - drop current internal_blast hits and orthologues collections.
# Use the collection handles created above so the configurable
# --blastHitscollection/--orthologuescollection names are honoured (the
# previous code dropped the hard-coded default names).
internal_blast_hits.drop()
orthologues.drop()

# STEP 6: import new blast hits into internal_blast hits collection using importBLASTselfmatch.py
# os.path.join guards against folder arguments given without a trailing slash.
s = os.path.join(toolsFolder, "importBLASTselfmatch.py") + " " + os.path.join(dataFolder, "selfmatches.txt") + " --host " + args.host
os.system("python " + s)

# STEP 7: create new orthologues collection using createOrthologuesCollection.py
s = os.path.join(toolsFolder, "createOrthologuesCollection.py") + " --host " + args.host
os.system("python " + s)

# STEP 8: Create FASTA files for the new genomes and plasmids using dumpBlastDB.py (with the split file flag)
s = os.path.join(toolsFolder, "dumpBlastDB.py") + " " + os.path.join(dataFolder, "symbionts_proteins") + " --split"
# os.system("python " + s)

# STEP 9: run KAAS queries using FASTA files from each genome and plasmid (http://www.genome.jp/kaas-bin/kaas_main) and save output from each as text file.

# STEP 10: add KO details to CDS features in database by running addKONumbers.py with the FASTA file as input1 and KONumbers.txt as input2
# newFolder = "/Users/kn675/Python/NewGenomesFasta/KAASoutput"
# for filename in os.listdir(newFolder):
#     print(filename)
#     s = os.path.join(toolsFolder, "addKONumbers.py") + " " + os.path.join(newFolder, filename) + " " + os.path.join(toolsFolder, "KONumbers.txt") + " --host " + args.host
#     os.system("python " + s)

# STEP 11: add multifun numbers to CDS features
# s = os.path.join(toolsFolder, "addMultiFunNumbers.py") + " " + os.path.join(toolsFolder, "uniprot_to_go_mapping.txt") + " " + os.path.join(toolsFolder, "go_to_multifun_mapping.txt") + " --host " + args.host
# os.system("python " + s)
|
A year ago you set up a retail business that specialises in selling electronic entertainment equipment. You are still learning and regularly read trade magazines for education purposes and to keep up with developments in the industry.
In early March 2016, you read an article in one of those magazines, ‘Modern Electronics’, about a newly developed range of lightweight digital headsets that were being produced by an overseas manufacturer for use with all forms of sound delivery equipment – including digital televisions. The article, which had been written by one of the magazine’s journalists, provided a lot of technical detail and noted that scientific tests had shown that the new headsets improved sound quality by up to 50%.
The advertisement then gave a website address for the company.
You checked the website and found a catalogue and price list with a special section for the advertised headsets. The catalogue described them as ‘the very latest technology from overseas,’ as, ‘a whole new experience in sound reproduction’ and as, ‘like nothing you have ever experienced before’.
You then telephoned the company in mid-March 2016 and spoke to a salesman. You explained that you had a retail business specialising in electronic entertainment equipment and that you were interested in ordering 1,000 of the advertised headsets. You also told the salesman that you were considering running your own special promotion in July and advertising the headsets in the local newspaper.
The salesman agreed that your idea was very good and told you that he thought that it could generate significant sales. He also told you that there would be no problem providing the required 1,000 headsets in time for your proposed July promotion. You then discussed the detail of the agreement.
You agreed that the headsets would be delivered no later than Friday 15 July 2016, a week before your special promotion and advertising campaign. In the course of that conversation you also told the salesman that you would put your advertisements in the local newspaper on the morning of Saturday 16 July so you could take maximum sales advantage of the weekend trade in the shopping centre in which your shop was located.
You also agreed that 500 of the 1,000 headsets ordered would be white and 500 would be black, being the two colours that you thought would sell the best, and that the full price of $10,000 would be payable on delivery. The salesman said that this was company policy because you were not an established account holder.
You signed and returned the form to Entertainment Imports Ltd without reading it.
On Thursday 14 July 2016 you were urgently and unexpectedly called away to a family emergency from which you did not return until Saturday 16 July 2016. Before you left, and to ensure that nothing went wrong with the delivery of the headsets, you wrote a cheque for the full amount of the purchase price and couriered it to Entertainment Imports Ltd’s office that Thursday morning. It arrived at midday and Entertainment Imports Ltd banked it that afternoon.
When you finally returned on Saturday morning there was a message on your mobile phone which you had forgotten to take with you from your store manager, telling you that the headsets had not been delivered on Friday 15 July 2016.
You then tried to contact Entertainment Imports Ltd and eventually spoke to the salesman with whom you had dealt. In reply to your question about the headsets he said, ‘We had the boxes out on our loading dock for you to collect all day yesterday but no-one came to collect them and you did not answer your mobile’. You replied saying, ‘You guys were supposed to deliver them’. ‘No’, said the salesman, ‘the contract only specified the date for delivery, not that we had to deliver them to you at your shop. Whenever we have to deliver goods to a customer’s premises we charge a delivery fee and that part of the contract form was not filled in’.
You told the salesman that you were not happy but that you thought that it could still be possible to salvage the situation. You told him that you would pick the goods up that afternoon and that was arranged. You picked the boxes up just after 3pm and took them to your store – arriving just as the shopping centre was shutting for the day.
You secured the boxes in your stock room but took one full box home with you to do a quality test before putting the headsets out for sale the following day. The first thing you noticed was a notation on the box to the effect that the headsets could only be used with LCD televisions and not with all sound equipment as you had thought.
You then tested a sample of 20 headsets on your television and found that 10 of them had very poor quality sound reproduction far worse that that provided by the other headsets that you already had in stock in your shop. They were also heavier than other similar headsets and far less comfortable to wear. You also noticed that all of the headsets in that box were a black and white combination with black ear-pieces and a white cord. When you checked the remainder of the boxes the next morning, you found that the entire consignment was exactly the same: that is they all had black ear-pieces and a white cord.
You rang the salesman and told him that you were rejecting the entire consignment, that you wanted your money back, that you wanted to be reimbursed for the costs of your newspaper advertisement and that you expected to be compensated for both lost profits and the aggravation and frustration that you had suffered as a result of Entertainment Imports Ltd’s breach of contract.
The salesman told you that Entertainment Imports Ltd had done everything it was required to do under the contract law and that, if anyone was in breach, it was you as you had not paid for the headsets ‘on delivery’ as required by the contract and you had also failed to pick them up on the due date for delivery as set out in the contract.
|
from __future__ import absolute_import
import time
import logging
from types import FunctionType
from raven._compat import iteritems, get_code, text_type, string_types
from raven.utils import once
# Handlers consulted for every single logging call, regardless of logger name.
special_logging_handlers = []
# Per-logger-name handlers, keyed by logger name (see register_special_log_handler).
special_logger_handlers = {}
logger = logging.getLogger('raven')
def event_payload_considered_equal(a, b):
    """Return True when two breadcrumb payloads agree on every
    deduplication-relevant field (the timestamp is deliberately ignored).
    """
    keys = ('type', 'level', 'message', 'category', 'data')
    return all(a[key] == b[key] for key in keys)
class BreadcrumbBuffer(object):
    """Bounded buffer of breadcrumb payloads, capped at ``limit`` entries.

    Entries are stored as ``(payload_dict, processor)`` pairs; the optional
    processor callable is applied lazily in :meth:`get_buffer`, so expensive
    payload enrichment only happens when the crumbs are actually read.
    """

    def __init__(self, limit=100):
        self.buffer = []
        self.limit = limit

    def record(self, timestamp=None, level=None, message=None,
               category=None, data=None, type=None, processor=None):
        """Append one breadcrumb; at least one of `message`, `data` or
        `processor` must be truthy.  Once the buffer grows past ``limit``,
        the oldest entries are discarded.
        """
        if not (message or data or processor):
            raise ValueError('You must pass either `message`, `data`, '
                             'or `processor`')
        ts = time.time() if timestamp is None else timestamp
        payload = {
            'type': type or 'default',
            'timestamp': ts,
            'level': level,
            'message': message,
            'category': category,
            'data': data,
        }
        self.buffer.append((payload, processor))
        # Trim from the front so that only the newest `limit` entries survive.
        del self.buffer[:-self.limit]

    def clear(self):
        """Drop all buffered breadcrumbs in place."""
        del self.buffer[:]

    def get_buffer(self):
        """Run any pending processors and return the deduplicated payloads.

        A processor that raises is logged and its payload dropped; adjacent
        payloads that compare equal (ignoring timestamp) are collapsed.
        """
        rv = []
        for idx, (payload, processor) in enumerate(self.buffer):
            if processor is not None:
                try:
                    processor(payload)
                except Exception:
                    logger.exception('Failed to process breadcrumbs. Ignored')
                    payload = None
                # Cache the processed (or failed) payload; never re-run.
                self.buffer[idx] = (payload, None)
            if payload is None:
                continue
            if rv and event_payload_considered_equal(rv[-1], payload):
                continue
            rv.append(payload)
        return rv
class BlackholeBreadcrumbBuffer(BreadcrumbBuffer):
    """No-op buffer used when breadcrumb collection is disabled; drops
    everything passed to :meth:`record`."""
    def record(self, *args, **kwargs):
        pass
def make_buffer(enabled=True):
    """Create a breadcrumb buffer; a discard-everything blackhole when
    breadcrumbs are disabled."""
    buffer_cls = BreadcrumbBuffer if enabled else BlackholeBreadcrumbBuffer
    return buffer_cls()
def record_breadcrumb(type, *args, **kwargs):
    # Legacy alias
    # Kept for backwards compatibility: older callers passed `type` as the
    # first positional argument; forward everything else to record().
    kwargs['type'] = type
    return record(*args, **kwargs)
def record(message=None, timestamp=None, level=None, category=None,
           data=None, type=None, processor=None):
    """Records a breadcrumb for all active clients. This is what integration
    code should use rather than invoking the `captureBreadcrumb` method
    on a specific client.
    """
    # One shared timestamp so every active context sees the same time.
    ts = time.time() if timestamp is None else timestamp
    for ctx in raven.context.get_active_contexts():
        ctx.breadcrumbs.record(ts, level, message, category,
                               data, type, processor)
def _record_log_breadcrumb(logger, level, msg, *args, **kwargs):
    """Turn one logging call into a breadcrumb, unless a registered special
    handler claims it first.  Installed onto ``logging.Logger`` methods by
    `_wrap_logging_method` / `_patch_logger`.
    """
    # Global handlers get first refusal; a truthy return suppresses recording.
    for handler in special_logging_handlers:
        rv = handler(logger, level, msg, args, kwargs)
        if rv:
            return
    # Then the handler registered for this specific logger name, if any.
    handler = special_logger_handlers.get(logger.name)
    if handler is not None:
        rv = handler(logger, level, msg, args, kwargs)
        if rv:
            return
    # Message formatting is deferred into a processor so it only runs when
    # the breadcrumbs are actually read.
    def processor(data):
        formatted_msg = msg
        # If people log bad things, this can happen. Then just don't do
        # anything.
        try:
            formatted_msg = text_type(msg)
            if args:
                formatted_msg = msg % args
        except Exception:
            pass
        # We do not want to include exc_info as argument because it often
        # lies (set to a constant value like 1 or True) or even if it's a
        # tuple it will not be particularly useful for us as we cannot
        # process it anyways.
        kwargs.pop('exc_info', None)
        data.update({
            'message': formatted_msg,
            'category': logger.name,
            'level': logging.getLevelName(level).lower(),
            'data': kwargs,
        })
    record(processor=processor)
def _wrap_logging_method(meth, level=None):
    """Return a wrapper for a ``logging.Logger`` method that records a
    breadcrumb before delegating to the original method.

    For level-specific methods (`debug`, `info`, ...) the numeric `level`
    is baked into the wrapper; for the generic `log` method `level` stays
    None and is forwarded from the caller.  Returns None when the method
    has already been patched.
    """
    if not isinstance(meth, FunctionType):
        # Python 2 unbound method: unwrap to the underlying function.
        func = meth.im_func
    else:
        func = meth

    # We were patched for raven before
    if getattr(func, '__patched_for_raven__', False):
        return

    if level is None:
        args = ('level', 'msg')
        fwd = 'level, msg'
    else:
        args = ('msg',)
        fwd = '%d, msg' % level

    code = get_code(func)

    # This requires a bit of explanation why we're doing this. Due to how
    # logging itself works we need to pretend that the method actually was
    # created within the logging module. There are a few ways to detect
    # this and we fake all of them: we use the same function globals (the
    # one from the logging module), we create it entirely there which
    # means that also the filename is set correctly. This fools the
    # detection code in logging and it makes logging itself skip past our
    # code when determining the code location.
    #
    # Because we point the globals to the logging module we now need to
    # refer to our own functions (original and the crumb recording
    # function) through a closure instead of the global scope.
    #
    # We also add a lot of newlines in front of the code so that the
    # code location lines up again in case someone runs inspect.getsource
    # on the function.
    ns = {}
    # NOTE: logging._srcfile is a private attribute of the logging module;
    # it is used here as the fake filename of the generated code.
    eval(compile('''%(offset)sif 1:
    def factory(original, record_crumb):
        def %(name)s(self, %(args)s, *args, **kwargs):
            record_crumb(self, %(fwd)s, *args, **kwargs)
            return original(self, %(args)s, *args, **kwargs)
        return %(name)s
    \n''' % {
        'offset': '\n' * (code.co_firstlineno - 3),
        'name': func.__name__,
        'args': ', '.join(args),
        'fwd': fwd,
        'level': level,
    }, logging._srcfile, 'exec'), logging.__dict__, ns)

    new_func = ns['factory'](meth, _record_log_breadcrumb)
    new_func.__doc__ = func.__doc__

    # Sanity-check that the line-number offset and module spoofing worked.
    assert code.co_firstlineno == get_code(func).co_firstlineno
    assert new_func.__module__ == func.__module__
    assert new_func.__name__ == func.__name__
    new_func.__patched_for_raven__ = True

    return new_func
def _patch_logger():
    """Replace the level-specific logging methods and the generic ``log``
    on ``logging.Logger`` with breadcrumb-recording wrappers.
    """
    cls = logging.Logger
    level_by_method = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'warn': logging.WARN,
        'error': logging.ERROR,
        'exception': logging.ERROR,
        'critical': logging.CRITICAL,
        'fatal': logging.FATAL,
    }
    for method_name, level in iteritems(level_by_method):
        wrapped = _wrap_logging_method(getattr(cls, method_name), level)
        setattr(cls, method_name, wrapped)
    # `log` receives the level as an explicit argument, so no fixed level.
    cls.log = _wrap_logging_method(cls.log)
@once
def install_logging_hook():
    """Installs the logging hook if it was not installed yet. Otherwise
    does nothing.
    """
    # The @once decorator guarantees the patching runs at most one time.
    _patch_logger()
def ignore_logger(name_or_logger, allow_level=None):
    """Ignores a logger for the regular breadcrumb code. This is useful
    for framework integration code where some log messages should be
    specially handled.
    """
    def handler(logger, level, msg, args, kwargs):
        # Returning True suppresses the default breadcrumb handling;
        # messages at or above `allow_level` are let through.
        if allow_level is None:
            return True
        return level < allow_level
    register_special_log_handler(name_or_logger, handler)
def register_special_log_handler(name_or_logger, callback):
    """Registers a callback for log handling of one specific logger.
    The callback is invoked with five arguments: `logger`, `level`,
    `msg`, `args` and `kwargs`, which are the values passed to the
    logging system. If the callback returns `True` the default handling
    (breadcrumb recording) is disabled.
    """
    # Accept either a logger name or a logger object.
    if isinstance(name_or_logger, string_types):
        name = name_or_logger
    else:
        name = name_or_logger.name
    special_logger_handlers[name] = callback
def register_logging_handler(callback):
    """Registers a callback consulted for every log record, regardless of
    logger name. The callback is invoked with five arguments: `logger`,
    `level`, `msg`, `args` and `kwargs`, which are the values passed to
    the logging system. If the callback returns `True` the default
    handling is disabled.
    """
    special_logging_handlers.append(callback)
# Registry of library name -> run-once hook function (populated by @libraryhook).
hooked_libraries = {}
def libraryhook(name):
    """Decorator that registers `f` as the (run-at-most-once) breadcrumb
    hook for the library called `name`."""
    def decorator(f):
        hook = once(f)
        hooked_libraries[name] = hook
        return hook
    return decorator
@libraryhook('requests')
def _hook_requests():
    """Monkey-patch ``requests.Session.send`` so that every outgoing HTTP
    request records an http breadcrumb, including requests that raise."""
    try:
        from requests.sessions import Session
    except ImportError:
        # requests is not installed; nothing to hook.
        return
    real_send = Session.send
    def send(self, request, *args, **kwargs):
        def _record_request(response):
            # `response` is None when the request itself raised an exception.
            record(type='http', category='requests', data={
                'url': request.url,
                'method': request.method,
                'status_code': response and response.status_code or None,
                'reason': response and response.reason or None,
            })
        try:
            resp = real_send(self, request, *args, **kwargs)
        except Exception:
            _record_request(None)
            raise
        else:
            _record_request(resp)
        return resp
    Session.send = send
    # urllib3 logs retries/redirects itself; only surface warnings and above.
    ignore_logger('requests.packages.urllib3.connectionpool',
                  allow_level=logging.WARNING)
@libraryhook('httplib')
def _install_httplib():
    """Monkey-patch httplib/http.client so each request records an http
    breadcrumb whose status fields are filled in lazily once the matching
    response has been read."""
    try:
        from httplib import HTTPConnection
    except ImportError:
        # Python 3 renamed the module to http.client.
        from http.client import HTTPConnection
    real_putrequest = HTTPConnection.putrequest
    real_getresponse = HTTPConnection.getresponse
    def putrequest(self, method, url, *args, **kwargs):
        # Stash a dict on the connection that getresponse() fills in later.
        self._raven_status_dict = status = {}
        host = self.host
        port = self.port
        default_port = self.default_port
        def processor(data):
            # Reconstruct an absolute URL when putrequest got only a path.
            real_url = url
            if not real_url.startswith(('http://', 'https://')):
                real_url = '%s://%s%s%s' % (
                    default_port == 443 and 'https' or 'http',
                    host,
                    port != default_port and ':%s' % port or '',
                    url,
                )
            data['data'] = {
                'url': real_url,
                'method': method,
            }
            # Merge in whatever status info getresponse() recorded by now.
            data['data'].update(status)
            return data
        record(type='http', category='requests', processor=processor)
        return real_putrequest(self, method, url, *args, **kwargs)
    def getresponse(self, *args, **kwargs):
        rv = real_getresponse(self, *args, **kwargs)
        status = getattr(self, '_raven_status_dict', None)
        # Only fill the status once, for the request recorded by putrequest.
        if status is not None and 'status_code' not in status:
            status['status_code'] = rv.status
            status['reason'] = rv.reason
        return rv
    HTTPConnection.putrequest = putrequest
    HTTPConnection.getresponse = getresponse
def hook_libraries(libraries):
    """Activate the breadcrumb hooks for the given library names; when
    `libraries` is None, every known hook is activated."""
    names = hooked_libraries.keys() if libraries is None else libraries
    for name in names:
        hook = hooked_libraries.get(name)
        if hook is None:
            raise RuntimeError('Unknown library %r for hooking' % name)
        hook()
import raven.context
|
I'm using gsub to substitute values in my data pill, it does not work correctly. Nothing gets replaced even though there is a match. How can I substitute data in my data pills with some other values?
Hi Saul, if you're working with data pills as your pattern, the data pills will need to be enclosed within brackets. They should not be enclosed in quotes or // unlike in cases where you define a text pattern.
|
import socket
import paramiko
import os, sys
import getpass
from time import sleep
# Module-level connection state shared by the run*Command helpers below;
# set up by get_command_execute() depending on the chosen transport.
client = None
ssh = None
address = None
def runSocketCommand(comm):
    """Run `comm` on the remote shell reachable through the global `client`
    socket and return its output.

    Two random hex markers are appended via `&& echo` / `|| echo` so we can
    tell when the output ends and whether the command succeeded.
    (Python 2 only: relies on ``str.encode('hex')``.)
    """
    fail_marker = os.urandom(4).encode('hex')
    ok_marker = os.urandom(4).encode('hex')
    command = ' ' + comm + ' && echo %s || echo %s \n' % (ok_marker, fail_marker)
    client.send(command)
    resp = ''
    # Poll until one of the markers shows up in the accumulated output.
    while ok_marker not in resp and fail_marker not in resp:
        sleep(0.1)
        resp += client.recvfrom(4096 * 4)[0]
    resp = resp.strip()
    if ok_marker in resp:
        return resp.replace(ok_marker, '')
    if fail_marker in resp:
        return ''
    return resp
def runLocalhostCommand(comm):
    """Execute `comm` through a local shell and return its stdout as a string.

    A space is prepended to the command, matching the convention used by the
    other run*Command helpers in this module.
    """
    pipe = os.popen(" " + comm)
    return pipe.read()
def runSSHCommand(comm):
    """Execute `comm` over the global paramiko SSH session; when the command
    produced nothing on stdout, return its stderr output instead."""
    stdin, stdout, stderr = ssh.exec_command(comm)
    output = stdout.read()
    return output if output else stderr.read()
def get_command_execute ( args ) :
    """Set up the requested transport ("bind", "reverse", "local" or "ssh")
    and return the matching run*Command function.

    Side effects: initializes the module globals `client`, `ssh` and
    `address`.  NOTE: Python 2 only (print statements, raw_input).
    """
    global client
    global ssh
    global address
    if args.command == "bind" :
        # Connect out to a shell already listening on args.IP:args.port.
        if args.udp :
            client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        else :
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        address = (args.IP, args.port )
        client.connect( address )
        runCommand = runSocketCommand
    elif args.command == "reverse" :
        # Wait for the remote shell to connect back to us.
        if args.udp :
            server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            client = server
            ip, port = raw_input("IP and port of the remote host [IP:address] : ").strip().split(':')
            address = ( ip.strip(), int( port.strip()) )
        else :
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind( ("0.0.0.0", args.port ) )
        print "Waiting for the Reverse Shell at port %d" % args.port
        try :
            if not args.udp :
                server.listen(5)
                client, address = server.accept()
        except KeyboardInterrupt :
            print "Aborted by user..."
            sys.exit(-2)
        runCommand = runSocketCommand
    elif args.command == "local" :
        runCommand = runLocalhostCommand
    elif args.command == "ssh" :
        # Open a paramiko session; prompt for the password if not provided.
        user, host = args.connection.split('@')[:2]
        password = args.password
        if not password :
            password = getpass.getpass("(SSH) Password for user '%s': " % user)
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy( paramiko.AutoAddPolicy() )
        try :
            ssh.connect( host , username = user, password = password, port = args.port )
        except paramiko.ssh_exception.AuthenticationException :
            print "Authentication Failed"
            sys.exit(-1)
        except paramiko.ssh_exception.NoValidConnectionsError :
            print "No SSH server found on port %s:%d" % (host, args.port)
            sys.exit(-2)
        runCommand = runSSHCommand
    return runCommand
|
I seem to have located a used Nelo 550 L in a great condition and reasonably priced. I have sat in a 550 L (not the same one that's for sale) and found that the bucket fits me about perfect but the foot plate cannot be moved close enough for me. By the looks of it there's enough flat space of the hull past the rails to move it an inch or two towards me. That would make a great fitting boat.
I know that Nelo can do that on new boats as per a customer request, but is that a relatively simple thing to do myself? Is it just a matter of drilling a new holes and sealing up the old ones? I don't have access to the boat for sale at the moment and haven't taken a close enough look at that ski I test-sat in.
Wesley Echols, you have a 550L modified like that I believe?
Replied by Bitemekaos on topic Nelo 550L - move the foot plate rails?
Not sure if this helps but I was keen to get a 550 and, being vertically challenged, the foot plates in the L and ML were about 30mm too far away for me. I got a quote to move the rails from a good boat and ski builder of $350 Australian dollars, which was quite reasonable, or the other option was to use the Nelo K1 pedal system, which would have extended the extra distance I needed. In the end, with the cost of the ski and the mucking around involved, I bought a Think Ion 3G and couldn't be happier. Good luck!
Replied by wesley on topic Nelo 550L - move the foot plate rails?
Got your text. I paddled the 550L with the regular rails but needed another half inch for it to fit with my 29.5 inch inseam. So I ordered the 550L with the rail moved closer to me and that was fine. I now have the 560ML as one of my flat water skis and the leg length is fine, but I no longer have my 550L. You could switch the footplate around, but it would not adjust once you did that, as I was told. I believe there was a reason I could not do what you are suggesting with the Nelo 550L, due to something with how the footplate was designed or how the rails are attached. I don't remember the specifics though. That's when I decided to order one with the rails moved back to get the fit I wanted. A lot of good skis out there fit short paddlers, so get one that fits correctly. A good deal is no deal if it does not fit, no matter what the cost is.
Replied by SpaceSputnik on topic Nelo 550L - move the foot plate rails?
Nelo says the rails could be moved two inches fairly easily as a DIY job. It bugs me having to do this sort of thing, but I do like the overall bucket fit of the L.
ML would be fantastic for summer, but with winter gear on, it would be definitely too tight.
Last edit: 3 months 9 hours ago by SpaceSputnik.
|
import numpy
import xmlstore.util
import xmlplot.common
# Map from HDF4 numeric type codes (as returned by SDS.info()) to the numpy
# dtypes used when reading data; presumably these correspond to pyhdf's
# DFNT_* constants -- verify against the pyhdf documentation.
datatypes = {3:numpy.ubyte,
             4:numpy.byte,
             5:numpy.float32,
             6:numpy.float64,
             20:numpy.int8,
             21:numpy.uint8,
             22:numpy.int16,
             23:numpy.uint16,
             24:numpy.int32,
             25:numpy.uint32}
class HDF4Store(xmlplot.common.VariableStore,xmlstore.util.referencedobject):
    """Variable store backed by an HDF4 file, read through pyhdf's SD API."""

    class Variable(xmlplot.common.Variable):
        """Wraps a single HDF4 scientific data set (SDS)."""

        def __init__(self,store,hdfvar):
            xmlplot.common.Variable.__init__(self,store)
            self.hdfvar = hdfvar
            # SDS.info() returns (name, rank, dims, type code, n attributes).
            self.info = self.hdfvar.info()

        def getName_raw(self):
            """Return the original short variable name."""
            return self.info[0]

        def getDimensions_raw(self):
            """Return the names of the variable's dimensions, in order."""
            dimnames = []
            for idim in range(self.info[1]):
                dim = self.hdfvar.dim(idim)
                dimnames.append(dim.info()[0])
            return dimnames

        def getLongName(self):
            """Return the descriptive name (long_name attribute if present)."""
            atts = self.getProperties()
            if 'long_name' in atts: return atts['long_name']
            return xmlplot.common.Variable.getLongName(self)

        def getUnit(self):
            """Return the unit (units attribute if present)."""
            atts = self.getProperties()
            if 'units' in atts: return atts['units']
            return xmlplot.common.Variable.getUnit(self)

        def getShape(self):
            """Return the shape as a tuple (HDF4 reports a bare int for rank 1)."""
            shape = self.info[2]
            if isinstance(shape,int): shape = (shape,)
            return shape

        def getDataType(self):
            """Return the numpy dtype matching the HDF4 type code, or None."""
            return datatypes.get(self.info[3],None)

        def getProperties(self):
            """Return the variable attributes as a dictionary."""
            return self.hdfvar.attributes()

        def getSlice(self,bounds=None,dataonly=False,transfercoordinatemask=True):
            """Read a (masked, scaled) slice of the variable.

            bounds may contain integers, slices and/or Ellipsis, one entry
            per dimension.  If dataonly is True only the data array is
            returned; otherwise a Slice object with coordinate arrays is
            built.  When transfercoordinatemask is set, masked coordinate
            values also mask the corresponding data points.
            """
            dimnames = self.getDimensions_raw()
            shape = self.getShape()

            # Normalize the requested bounds: expand Ellipsis, make negative
            # indices positive, and turn slices into explicit start/stop/step.
            if bounds is None: bounds = (Ellipsis,)
            newbounds = []
            for bound,dimlen,dimname in zip(xmlplot.common.processEllipsis(bounds,shape),shape,dimnames):
                if isinstance(bound,int):
                    # Integer value provided as index.
                    assert bound>=-dimlen, 'Slice index %i lies below the lowest possible index for dimension %s (%i).' % (bound,dimname,-dimlen )
                    assert bound< dimlen, 'Slice index %i exceeds the highest possible index for dimension %s (%i).' % (bound,dimname, dimlen-1)
                    if bound<0: bound += dimlen
                elif isinstance(bound,slice):
                    start,stop,step = bound.indices(dimlen)
                    bound = slice(start,stop,step)
                newbounds.append(bound)
            bounds = tuple(newbounds)

            # Read the raw data.
            dat = numpy.asarray(self.hdfvar[bounds])

            # Mask fill values (CF-style _FillValue, or HDF4 "Fill").
            fillvalue = self.hdfvar.attributes().get('_FillValue',None)
            if fillvalue is None: fillvalue = self.hdfvar.attributes().get('Fill',None)
            if fillvalue is not None: dat = numpy.ma.array(dat,mask=(dat==fillvalue),copy=False)

            # Apply scale factor and offset, casting to float first if needed.
            # Bug fixes: parentheses correct the original operator-precedence
            # error ("a or b and c"), and numpy.float64 replaces the
            # numpy.float alias removed in numpy 1.24.
            scale = self.hdfvar.attributes().get('scale_factor',None)
            offset = self.hdfvar.attributes().get('add_offset', None)
            if (scale is not None or offset is not None) and dat.dtype!=numpy.float64:
                dat = dat.astype(numpy.float64)
            if scale is not None: dat *= scale
            if offset is not None: dat += offset

            if dataonly: return dat

            # Dimensions indexed by a scalar integer drop out of the result.
            newdimnames = [d for d,b in zip(dimnames,bounds) if not isinstance(b,int)]
            varslice = self.Slice(newdimnames)

            # Start from the data's own mask; coordinate masks may be or-ed
            # in below.  (Bug fix: `datamask` was previously referenced
            # without ever being assigned, raising NameError.)
            datamask = numpy.ma.getmask(dat)

            inewdim = 0
            for dimname,bound in zip(dimnames,bounds):
                # Get the coordinate variable for this dimension, if any.
                coordvar = self.store.getVariable_raw(dimname)
                if coordvar is None:
                    # No coordinate variable available: use indices.
                    if not isinstance(bound,slice): continue
                    coorddims = [dimname]
                    coords = numpy.arange(bound.start,bound.stop,bound.step,dtype=numpy.float64)
                else:
                    # Coordinate variable present: use it.
                    coorddims = list(coordvar.getDimensions())

                    # Debug check: all coordinate dimensions must be used by
                    # the variable itself.
                    for cd in coorddims:
                        assert cd in dimnames, 'Coordinate dimension %s is not used by this variable (it uses %s).' % (cd,', '.join(dimnames))

                    # Slice the coordinate variable consistently with the data.
                    coordslice = [bounds[dimnames.index(cd)] for cd in coorddims]
                    coords = coordvar.getSlice(coordslice, dataonly=True)

                # Coordinate dimensions remaining after integer indices are
                # sliced out.
                newcoorddims = [cd for cd in coorddims if isinstance(bounds[dimnames.index(cd)],slice)]

                # Transfer the coordinate mask to the data if desired.
                coordmask = numpy.ma.getmask(coords)
                if transfercoordinatemask and coordmask is not numpy.ma.nomask:
                    coordmask = xmlplot.common.broadcastSelective(coordmask,newcoorddims,dat.shape,newdimnames)
                    if datamask is numpy.ma.nomask:
                        datamask = coordmask
                    else:
                        datamask = numpy.logical_or(datamask,coordmask)

                # A single-index dimension is not part of the output.
                if not isinstance(bound,slice): continue

                # Coordinates themselves should not carry a mask - undo it.
                if coordmask is not numpy.ma.nomask:
                    coords = numpy.ma.getdata(coords)

                # Auto-generate staggered (cell-boundary) coordinates.
                coords_stag = xmlplot.common.stagger(coords)

                # Insert data dimensions where the coordinate lacks them.
                coords = xmlplot.common.broadcastSelective(coords, (dimname,),dat.shape, newdimnames)
                coords_stag = xmlplot.common.broadcastSelective(coords_stag,(dimname,),[l+1 for l in dat.shape],newdimnames)

                # Assign coordinate values.
                varslice.coords [inewdim] = coords
                varslice.coords_stag[inewdim] = coords_stag
                inewdim += 1

            # Apply the combined data+coordinate mask to the returned data.
            if datamask is not numpy.ma.nomask:
                dat = numpy.ma.masked_where(datamask,dat,copy=False)
            varslice.data = dat

            return varslice

    def __init__(self,path):
        xmlplot.common.VariableStore.__init__(self)
        xmlstore.util.referencedobject.__init__(self)
        # Import lazily so pyhdf is only required when HDF4 files are opened.
        from pyhdf.SD import SD, SDC
        self.file = SD(str(path),SDC.READ)

    def getVariable_raw(self,varname):
        """Returns a Variable object for the given original short variable name.
        The method must be implemented by derived classes.
        """
        if varname not in self.file.datasets().keys(): return None
        return self.Variable(self,self.file.select(varname))

    def getVariableNames_raw(self):
        """Returns a list of original short names for all variables present in the store.
        The method must be implemented by derived classes.
        """
        return self.file.datasets().keys()

    def getProperties(self):
        """Return the global (file-level) attributes as a dictionary."""
        return self.file.attributes()

    def unlink(self):
        """Close the underlying HDF4 file."""
        self.file.end()
|
Kelly Marie Tran began her career around 2011, starring in various online videos and little-seen pilots for shows that never got full-season orders. Her first major role came in the short-lived NBC sitcom About a Boy, playing the recurring character Marguerite. She continued scoring small roles in pilots and TV movies until her regular spot in CollegeHumor’s slate of original web sketches in 2014.
And here you can see Tran in the skit “If You’re Only 20-Something, Stop Saying You’re Old,” which hits a little too close to home for our taste.
Tran also had a small role in the web series Ladies Like Us, a single-camera sitcom about struggling female comedians living in Los Angeles.
In terms of movie work, Tran has appeared in projects such as Pub Quiz and XOXO, both released in 2016. Tran fits into Lucasfilm’s penchant for going with mostly unknown actors, even for the biggest movie series of all time (see: Daisy Ridley). We know Tran will be playing a member of the Resistance named Rose who teams up with Finn in the next movie (I’ll bet any amount of money she’s a spy for The First Order). Other than that, not much is known about her role. I guess we’ll just have to wait and see.
|
"""
This module contains very high-level helpers for selecting hyperparameters
for machine learning models using a train-validation-test strategy. Typical
usage looks as follows:
```
# create the hyperparameter grid
hp_grid = sklearn.model_selection.ParameterGrid({
...
})
# create an iterator over the hyperparameter grid and folds
hp_fold_it = hp_utils.get_hp_fold_iterator(hp_grid, num_folds)
# distribute training to the dask cluster
f_res = dask_utils.apply_iter(
hp_fold_it,
dask_client,
hp_utils.evaluate_hyperparameters_helper,
args=args,
...,
return_futures=True
)
# collect the results from dask
all_res = dask_utils.collect_results(f_res)
# parse the results
df_results = hp_utils.get_hp_results(all_res)
# select the best hyperparameters using the validation set results
evaluation_metric = 'micro_roc_auc_score'
best_val_hps = hp_utils.get_best_hyperparameters(
df_results,
evaluation_metric=evaluation_metric,
selection_function=np.argmax # **this depends on the evaluation metric**
)
# pull out the results on those folds
m_val_best = (df_results['hyperparameters_str'] == val_best)
```
"""
import logging
logger = logging.getLogger(__name__)
import itertools
import json
import pyllars.ml_utils as ml_utils
import pyllars.pandas_utils as pd_utils
def get_hp_fold_iterator(hp_grid, num_folds):
    """ Create an iterator over all combinations of hyperparameters and folds

    Parameters
    ----------
    hp_grid : iterable of dict
        The hyperparameter combinations (e.g. an sklearn ParameterGrid).
    num_folds : int
        The number of cross-validation folds.

    Returns
    -------
    hp_fold_it : list of (hyperparameters, fold) tuples
        The Cartesian product of `hp_grid` and `range(num_folds)`.
    """
    # Materialize the grid so it can be iterated multiple times.
    # (Bug fix: this previously assigned to an undefined name
    # `hyperparam_grid`, which raised NameError.)
    hp_grid = list(hp_grid)
    folds = list(range(num_folds))
    hp_fold_it = itertools.product(hp_grid, folds)
    hp_fold_it = list(hp_fold_it)
    return hp_fold_it
def evaluate_hyperparameters_helper(hv, *args, **kwargs):
    """Unpack one (hyperparameters, validation_folds, test_folds) work item
    and delegate to ml_utils.evaluate_hyperparameters.

    NOTE(review): this unpacks three elements from `hv`, while
    get_hp_fold_iterator in this module yields 2-tuples of
    (hyperparameters, fold) -- confirm which iterator callers actually use.
    """
    # these come from our iterator
    hyperparameters = hv[0]
    validation_folds = hv[1]
    test_folds = hv[2]
    res = ml_utils.evaluate_hyperparameters(
        hyperparameters=hyperparameters,
        validation_folds=validation_folds,
        test_folds=test_folds,
        *args,
        **kwargs
    )
    return res
def _get_res(res):
ret_val = {
'validation_{}'.format(k): v
for k,v in res.metrics_val.items()
}
ret_test = {
'test_{}'.format(k): v
for k,v in res.metrics_test.items()
}
ret = ret_val
ret.update(ret_test)
hp_string = json.dumps(res.hyperparameters)
ret['hyperparameters_str'] = hp_string
ret['hyperparameters'] = res.hyperparameters
ret['validation_fold'] = res.fold_val
ret['test_fold'] = res.fold_test
return ret
def get_hp_results(all_res):
    """ Create the results data frame

    Parameters
    ----------
    all_res : iterable
        The collected evaluation results (objects with metrics_val,
        metrics_test, hyperparameters, fold_val and fold_test attributes).

    Returns
    -------
    df_results : pandas.DataFrame
        One row per result, with flattened metric columns (see _get_res).
    """
    # Bug fix: `pd` was never imported at module level (only
    # pyllars.pandas_utils as pd_utils), so pd.DataFrame raised NameError.
    import pandas as pd

    results = [
        _get_res(res) for res in all_res
    ]
    df_results = pd.DataFrame(results)
    return df_results
def get_best_hyperparameters(df_results, evaluation_metric, selection_function):
    """ Based on the performance on the validation, select the best hyperparameters

    Parameters
    ----------
    df_results : pandas.DataFrame
        The results frame produced by get_hp_results.
    evaluation_metric : str
        Base name of the metric, e.g. "micro_roc_auc_score"; the
        "validation_" prefix is added internally.
    selection_function : callable
        Applied to the per-hyperparameter mean validation scores to pick
        the best entry (e.g. np.argmax when higher is better).

    Returns
    -------
    The value returned by `selection_function`, typically the
    `hyperparameters_str` label of the best combination.
    """
    hp_groups = df_results.groupby('hyperparameters_str')
    validation_evaluation_metric = "validation_{}".format(evaluation_metric)
    # find the mean of each set of hp's across all validation folds
    # (removed an unused `test_evaluation_metric` local)
    val_performance = hp_groups[validation_evaluation_metric].mean()
    # now, select the best according to the caller's criterion
    val_best = selection_function(val_performance)
    return val_best
|
As a family owned and operated company in Nashville for six generations, we are invested in supporting and furthering our communities, Nashville and Middle Tennessee.
"The H.G. Hill Company Charitable Contributions Committee will work to identify and support non-profit organizations that strive to improve the quality of life in the Nashville community at large. The committee will support initiatives in health and human services, education and community development."
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Frédéric Magniette, Miguel Rubio-Roy
# This file is part of Pyrame.
#
# Pyrame is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrame is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrame. If not, see <http://www.gnu.org/licenses/>
import sys,os,subprocess,re
# Locate the /dev/tty* node of a USB serial device given its vendor and
# product IDs (and optionally its serial number).  Prints the device path on
# stdout; progress and diagnostics go to stderr.
# Requires at least vendor and product; a serial of "undef" means "not given".
if len(sys.argv)<3:
    print("usage %s vendor product [serial_number]"%(sys.argv[0]))
    sys.exit(1)
vendor = sys.argv[1]
product = sys.argv[2]
if len(sys.argv)>3 and sys.argv[3]!="undef":
    serialnum = sys.argv[3]
else:
    serialnum = None
# List all USB devices.  NOTE(review): under Python 3 `res` would be bytes
# and re.findall with a str pattern would fail; the shebang declares python2.
result = subprocess.Popen(["/usr/bin/lsusb"],stdout=subprocess.PIPE)
res,_ = result.communicate()
if result.returncode!=0:
    print("error getting USB list: %s"%(res))
    sys.exit(1)
# Extract the (bus, device) pairs matching the requested vendor:product ID.
buses_devs=re.findall("Bus (.*?) Device (.*?): ID %s:%s"%(vendor,product),res)
if len(buses_devs)==0:
    print("vendor and/or product id's not found")
    sys.exit(1)
sys.stderr.write("found %d devices\n"%(len(buses_devs)))
# With several identical devices, a serial number is required to pick one.
if not serialnum and len(buses_devs)!=1:
    print("multiple devices with same vendor and product id and serial number not provided")
    sys.exit(1)
devnames=[]
errors=[]
for bus_dev in buses_devs:
    # Resolve the sysfs path of the USB device through udevadm.
    result = subprocess.Popen(("udevadm info -q path -n /dev/bus/usb/%s/%s"%(bus_dev[0],bus_dev[1])).split(" "),stdout=subprocess.PIPE)
    res,_ = result.communicate()
    if result.returncode!=0:
        errors.append("error getting USB device path for bus %s dev %s"%(bus_dev[0],bus_dev[1]))
        sys.stderr.write(errors[-1]+"\n")
        continue
    path = "/sys"+res.strip()
    sys.stderr.write("\nchecking out %s\n"%(path))
    # Look for a tty* node below the device: that is its serial interface.
    result = subprocess.Popen(("find %s -name tty*"%(path)).split(" "),stdout=subprocess.PIPE)
    res,_ = result.communicate()
    if result.returncode!=0 or res.strip()=="":
        errors.append("error getting ttyUSB device path for %s"%(path))
        sys.stderr.write(errors[-1]+"\n")
        continue
    # If a serial number was requested, check it against the sysfs entry.
    if serialnum:
        if os.path.exists(path+"/serial"):
            with open(path+"/serial","r") as f: s = f.read()
            if s.strip()!=serialnum:
                errors.append("invalid serial number for %s"%(path))
                sys.stderr.write(errors[-1]+"\n")
                continue
        else:
            errors.append("no serial number on %s"%(path))
            sys.stderr.write(errors[-1]+"\n")
            continue
    # Keep only the node name of the first tty entry found.
    devnames.append("/dev/"+res.split("\n")[0].split("/")[-1])
    sys.stderr.write("found device at %s\n"%(devnames[-1]))
sys.stderr.write("\n")
# Report the outcome: exactly one matching device is expected.
if len(devnames)>1:
    print("multiple matches found")
if len(errors)!=0:
    print(":"+";".join(errors))
    sys.exit(1)
if len(devnames)==0:
    print("no device found")
    sys.exit(1)
print(devnames[0])
sys.exit(0)
|
1. A team of 25 staff members of Lebenshilfe, together with 7 well-trained mentally challenged people, traveled to 6 revenue districts of Andhra Pradesh, presenting a humorous street play, Soodi Mandu, under the stewardship of Saraswathi Devi Tallapragada, to educate the masses against 6 child-killing or child-crippling dangerous diseases: POLIO, TUBERCULOSIS, PERTUSSIS, DIPHTHERIA, TETANUS AND MEASLES. It is a mass education effort, emphasizing the importance of preventive measures against disability. Utilizing the limited talents of the retarded, they are being taken into the society to bring them into the mainstream of life through theatre. Beneficiaries: over two lakhs of population.
2. The humanitarian activity taken up by the mentally challenged and their families together with the professionals, collecting used clothes, utensils, candles, match-boxes, bread etc. to help the cyclone victims of the neighboring state ORISSA’ caught the attention of everybody in the city. Mentally retarded went round in the streets with begging bowls and bags in hand, creating a sensation among the crowds which indeed touched every human heart in the city. .
3. A 3-hour cultural show with classical, semi classical and folk dance items, organized by the mentally challenged of Lebenshilfe at Hampi Open Air Theatre, in Shivaji Park, ( public park belongs to Municipal Corporation) in aid of Gujarat Earth Quake Victims, moved the hearts of one and all in the city. A spontaneous donation of rupees 12, 250/- collected from the audience was handed over by the retarded to District Collector for onward transfer to Chief Minister’s fund. This activity proved that retarded are also part of the society and they are prepared to share the happiness as well as grief with the fellow human beings.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from pydrive.auth import GoogleAuth
from googleapiclient.errors import HttpError
gdrive_support = True
except ImportError:
gdrive_support = False
import mimetypes
import logging
from logging.handlers import RotatingFileHandler
import textwrap
from flask import (Flask, render_template, request, Response, redirect,
url_for, send_from_directory, make_response, g, flash,
abort, Markup, stream_with_context)
from flask import __version__ as flaskVersion
import ub
from ub import config
import helper
import os
import errno
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.expression import false
from sqlalchemy.exc import IntegrityError
from sqlalchemy import __version__ as sqlalchemyVersion
from math import ceil
from flask_login import (LoginManager, login_user, logout_user,
login_required, current_user)
from flask_principal import Principal
from flask_principal import __version__ as flask_principalVersion
from flask_babel import Babel
from flask_babel import gettext as _
import requests
import zipfile
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.datastructures import Headers
from babel import Locale as LC
from babel import negotiate_locale
from babel import __version__ as babelVersion
from babel.dates import format_date
from functools import wraps
import base64
from sqlalchemy.sql import *
import json
import datetime
from iso639 import languages as isoLanguages
from iso639 import __version__ as iso639Version
from uuid import uuid4
import os.path
import sys
import subprocess
import re
import db
from shutil import move, copyfile
from tornado.ioloop import IOLoop
import shutil
import gdriveutils
import tempfile
import hashlib
from tornado import version as tornadoVersion
try:
from urllib.parse import quote
from imp import reload
except ImportError:
from urllib import quote
try:
from flask_login import __version__ as flask_loginVersion
except ImportError:
from flask_login.__about__ import __version__ as flask_loginVersion
import time
current_milli_time = lambda: int(round(time.time() * 1000))
try:
from wand.image import Image
use_generic_pdf_cover = False
except ImportError:
use_generic_pdf_cover = True
# Global variables
# Token included in Google Drive watch registrations so incoming push
# notifications can be recognized as our own -- see the gdrive handling code.
gdrive_watch_callback_token = 'target=calibreweb-watch_files'
# Presumably tracks the currently running background task; verify usage.
global_task = None
# File extensions accepted for book uploads.
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'epub', 'mobi', 'azw', 'azw3', 'cbr', 'cbz', 'cbt', 'djvu', 'prc', 'doc', 'docx', 'fb2'])
def md5(fname):
    """Return the hexadecimal MD5 digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        chunk = stream.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = stream.read(4096)
    return digest.hexdigest()
class Singleton:
    """
    Non-thread-safe singleton helper, used as a *decorator* (not a
    metaclass) on the class that should be a singleton.

    The decorated class may define an `__init__` taking only `self` and
    cannot be inherited from.  Obtain the single instance via `Instance()`;
    calling the decorated name directly raises `TypeError`.
    """

    def __init__(self, decorated):
        self._decorated = decorated

    def Instance(self):
        """Create the wrapped instance on first use, then always return it."""
        if not hasattr(self, '_instance'):
            self._instance = self._decorated()
        return self._instance

    def __call__(self):
        raise TypeError('Singletons must be accessed through `Instance()`.')

    def __instancecheck__(self, inst):
        # Make isinstance(obj, DecoratedName) work despite the wrapping.
        return isinstance(inst, self._decorated)
@Singleton
class Gauth:
    # Process-wide Google OAuth handle, created lazily via Gauth.Instance().
    # NOTE(review): `GoogleAuth` is imported elsewhere in this file; it reads
    # its configuration from a local settings.yaml — confirm working directory.
    def __init__(self):
        self.auth = GoogleAuth(settings_file='settings.yaml')
@Singleton
class Gdrive:
    # Process-wide Google Drive client, created lazily from the Gauth singleton.
    def __init__(self):
        self.drive = gdriveutils.getDrive(Gauth.Instance().auth)
class ReverseProxied(object):
    """WSGI middleware that lets the app live behind a reverse proxy.

    The front-end server announces the external prefix and scheme via
    `X-Script-Name`, `X-Scheme` and `X-Forwarded-Server` headers; this
    wrapper rewrites the WSGI environ accordingly so the app can be bound
    to a URL other than / and to a different HTTP scheme than the local one.
    Code courtesy of: http://flask.pocoo.org/snippets/35/

    In nginx:
    location /myprefix {
        proxy_pass http://127.0.0.1:8083;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Scheme $scheme;
        proxy_set_header X-Script-Name /myprefix;
    }
    """

    def __init__(self, application):
        self.app = application

    def __call__(self, environ, start_response):
        prefix = environ.get('HTTP_X_SCRIPT_NAME', '')
        if prefix:
            environ['SCRIPT_NAME'] = prefix
            # Strip the external prefix from PATH_INFO so routing still matches.
            path = environ.get('PATH_INFO', '')
            if path and path.startswith(prefix):
                environ['PATH_INFO'] = path[len(prefix):]
        forwarded_scheme = environ.get('HTTP_X_SCHEME', '')
        if forwarded_scheme:
            environ['wsgi.url_scheme'] = forwarded_scheme
        forwarded_host = environ.get('HTTP_X_FORWARDED_SERVER', '')
        if forwarded_host:
            environ['HTTP_HOST'] = forwarded_host
        return self.app(environ, start_response)
# Main code
# Register MIME types for e-book/comic formats that Python's default table
# may not know, so downloads are served with a sensible Content-Type.
mimetypes.init()
mimetypes.add_type('application/xhtml+xml', '.xhtml')
mimetypes.add_type('application/epub+zip', '.epub')
mimetypes.add_type('application/x-mobipocket-ebook', '.mobi')
mimetypes.add_type('application/x-mobipocket-ebook', '.prc')
mimetypes.add_type('application/vnd.amazon.ebook', '.azw')
mimetypes.add_type('application/x-cbr', '.cbr')
mimetypes.add_type('application/x-cbz', '.cbz')
mimetypes.add_type('application/x-cbt', '.cbt')
mimetypes.add_type('image/vnd.djvu', '.djvu')
app = (Flask(__name__))
# Honor X-Script-Name/X-Scheme headers when running behind a reverse proxy.
app.wsgi_app = ReverseProxied(app.wsgi_app)
gevent_server = None  # populated later when the server is started
formatter = logging.Formatter(
    "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
# Rotating application log in the program directory (50 kB, 2 backups).
file_handler = RotatingFileHandler(os.path.join(config.get_main_dir, "calibre-web.log"), maxBytes=50000, backupCount=2)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
app.logger.setLevel(config.config_log_level)
app.logger.info('Starting Calibre Web...')
# Route the book_formats module's logging into the same file.
logging.getLogger("book_formats").addHandler(file_handler)
logging.getLogger("book_formats").setLevel(config.config_log_level)
Principal(app)  # flask-principal: role/permission handling
babel = Babel(app)
import uploader  # imported here so it sees the configured logging/app state
lm = LoginManager(app)
lm.init_app(app)
lm.login_view = 'login'
lm.anonymous_user = ub.Anonymous  # anonymous browsing uses a guest user object
# NOTE(review): hard-coded secret key makes sessions forgeable since the
# source is public — consider loading it from configuration instead.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
db.setup_db()
# Verbose SQLAlchemy logging only when the app itself runs at DEBUG level.
if config.config_log_level == logging.DEBUG:
    logging.getLogger("sqlalchemy.engine").addHandler(file_handler)
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
    logging.getLogger("sqlalchemy.pool").addHandler(file_handler)
    logging.getLogger("sqlalchemy.pool").setLevel(config.config_log_level)
    logging.getLogger("sqlalchemy.orm").addHandler(file_handler)
    logging.getLogger("sqlalchemy.orm").setLevel(config.config_log_level)
def is_gdrive_ready():
    """True when both Google Drive config files exist in the working directory."""
    required = ('settings.yaml', 'gdrive_credentials')
    return all(os.path.exists(name) for name in required)
@babel.localeselector
def get_locale():
    """Pick the UI locale: the logged-in user's setting when present, else the
    best match between the request's Accept-Language and our translations."""
    # if a user is logged in, use the locale from the user settings
    user = getattr(g, 'user', None)
    if user is not None and hasattr(user, "locale"):
        return user.locale
    # 'en' is the source language and always available even without a catalog
    translations = [item.language for item in babel.list_translations()] + ['en']
    preferred = [x.replace('-', '_') for x in request.accept_languages.values()]
    return negotiate_locale(preferred, translations)
@babel.timezoneselector
def get_timezone():
    """Return the logged-in user's timezone for Babel, or None when anonymous."""
    user = getattr(g, 'user', None)
    if user is not None:
        return user.timezone
@lm.user_loader
def load_user(user_id):
    """flask-login hook: resolve a session's user id to a ub.User (or None)."""
    return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
@lm.header_loader
def load_user_from_header(header_val):
    """Authenticate a user from an 'Authorization: Basic ...' header value.

    Returns the matching ub.User when the decoded username/password pair is
    valid, otherwise None.
    """
    if header_val.startswith('Basic '):
        header_val = header_val.replace('Basic ', '', 1)
    basic_username = basic_password = ''
    try:
        # b64decode returns bytes on Python 3; decode before splitting.
        header_val = base64.b64decode(header_val).decode('utf-8')
        # partition never raises when the colon is missing; the original
        # split(':')[1] raised an uncaught IndexError on malformed credentials.
        basic_username, __, basic_password = header_val.partition(':')
    except (TypeError, ValueError):
        # TypeError: bad padding on Python 2; ValueError covers binascii.Error
        # (invalid base64 on Python 3) and UnicodeDecodeError for bad UTF-8.
        pass
    user = ub.session.query(ub.User).filter(ub.User.nickname == basic_username).first()
    if user and check_password_hash(user.password, basic_password):
        return user
    return
def check_auth(username, password):
    """Return True when *username* exists and *password* matches its hash."""
    user = ub.session.query(ub.User).filter(ub.User.nickname == username).first()
    if user is None:
        return False
    return bool(check_password_hash(user.password, password))
def authenticate():
    """Build the 401 response that asks the client for HTTP Basic credentials."""
    message = ('Could not verify your access level for that URL.\n'
               'You have to login with proper credentials')
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(message, 401, challenge)
def updateGdriveCalibreFromLocal():
    """Upload the local Calibre library to Google Drive, then delete the
    uploaded book folders locally (top-level files such as metadata.db remain)."""
    gdriveutils.backupCalibreDbAndOptionalDownload(Gdrive.Instance().drive)
    gdriveutils.copyToDrive(Gdrive.Instance().drive, config.config_calibre_dir, False, True)
    # Destructive: removes every subdirectory of the library after the copy.
    for x in os.listdir(config.config_calibre_dir):
        if os.path.isdir(os.path.join(config.config_calibre_dir, x)):
            shutil.rmtree(os.path.join(config.config_calibre_dir, x))
def requires_basic_auth_if_no_ano(f):
    """Enforce HTTP Basic auth (used on OPDS endpoints) unless anonymous
    browsing is enabled in the configuration."""
    @wraps(f)
    def decorated(*args, **kwargs):
        if config.config_anonbrowse == 1:
            return f(*args, **kwargs)
        credentials = request.authorization
        if credentials and check_auth(credentials.username, credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
# simple pagination for the feed
class Pagination(object):
    """Lightweight pagination helper for OPDS feeds and HTML book lists.

    Stores the 1-based current page, the page size and the total item count,
    and derives offsets and page numbers from them.
    """

    def __init__(self, page, per_page, total_count):
        self.page = int(page)
        self.per_page = int(per_page)
        self.total_count = int(total_count)

    @property
    def next_offset(self):
        # Offset of the first item on the following page.
        return int(self.page * self.per_page)

    @property
    def previous_offset(self):
        # Offset of the first item on the preceding page
        # (negative on page 1; callers guard with has_prev).
        return int((self.page - 2) * self.per_page)

    @property
    def last_offset(self):
        # Offset of the first item on the last page, clamped at 0.
        last = int(self.total_count) - int(self.per_page)
        if last < 0:
            last = 0
        return int(last)

    @property
    def pages(self):
        return int(ceil(self.total_count / float(self.per_page)))

    @property
    def has_prev(self):
        return self.page > 1

    @property
    def has_next(self):
        return self.page < self.pages

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a pager widget, with None marking gaps.

        Fix: the original looped over range(self.pages, self.pages + 1) and
        therefore only ever emitted the last page (the old ToDo flagged it);
        the iteration must cover every page from 1 to self.pages.
        """
        last = 0
        for num in range(1, self.pages + 1):
            if num <= left_edge or (num > self.page - left_current - 1 and num < self.page + right_current) \
                    or num > self.pages - right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num
# pagination links in jinja
def url_for_other_page(page):
    """Rebuild the current endpoint's URL with only the page number swapped."""
    args = request.view_args.copy()
    args['page'] = page
    return url_for(request.endpoint, **args)
# Expose the helper to all Jinja templates for rendering pager links.
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
def login_required_if_no_ano(func):
    """Apply flask-login's login_required unless anonymous browsing is enabled."""
    return func if config.config_anonbrowse == 1 else login_required(func)
# custom jinja filters
@app.template_filter('shortentitle')
def shortentitle_filter(s):
    """Jinja filter: shorten titles longer than 60 chars, first by dropping
    the subtitle after ':', then by word-wrapping and appending ' [...]'."""
    if len(s) <= 60:
        return s
    s = s.split(':', 1)[0]
    if len(s) <= 60:
        return s
    return textwrap.wrap(s, 60, break_long_words=False)[0] + ' [...]'
@app.template_filter('mimetype')
def mimetype_filter(val):
    """Jinja filter: map a file extension (without dot) to its MIME type,
    falling back to application/octet-stream for unknown extensions."""
    try:
        return mimetypes.types_map['.' + val]
    except Exception:
        return 'application/octet-stream'
@app.template_filter('formatdate')
def formatdate(val):
    """Jinja filter: render a Calibre timestamp string as a localized date.

    Strips colons and non-timezone dashes from *val*, parses the first 15
    characters as 'YYYYMMDD HHMMSS', and formats the result with Babel in
    the current UI locale.
    """
    conformed_timestamp = re.sub(r"[:]|([-](?!((\d{2}[:]\d{2})|(\d{4}))$))", '', val)
    # Fix: the parsed value used to be bound to `formatdate`, shadowing the
    # filter function's own name inside its body.
    parsed = datetime.datetime.strptime(conformed_timestamp[:15], "%Y%m%d %H%M%S")
    return format_date(parsed, format='medium', locale=get_locale())
@app.template_filter('strftime')
def timestamptodate(date, fmt=None):
    """Jinja filter: convert a millisecond UNIX timestamp to a date string,
    using *fmt* when given, else a default day/month/year-time layout."""
    date = datetime.datetime.fromtimestamp(
        int(date)/1000
    )
    native = date.replace(tzinfo=None)
    if fmt:
        time_format = fmt
    else:
        # NOTE(review): '%H:%S' renders hour:second — this likely meant
        # '%H:%M' (hour:minute); confirm before changing visible output.
        time_format = '%d %m %Y - %H:%S'
    return native.strftime(time_format)
def admin_required(f):
    """Decorator: abort with 403 unless the current user has the admin role."""
    @wraps(f)
    def inner(*args, **kwargs):
        if not current_user.role_admin():
            abort(403)
        return f(*args, **kwargs)
    return inner
def unconfigured(f):
    """
    Decorator: allow access only while the application database is NOT yet
    configured (initial-setup views); otherwise abort with 403.
    (The previous docstring was copied from admin_required and was wrong.)
    """
    @wraps(f)
    def inner(*args, **kwargs):
        if not config.db_configured:
            return f(*args, **kwargs)
        abort(403)
    return inner
def download_required(f):
    """Decorator: require the download role (admins implicitly qualify)."""
    @wraps(f)
    def inner(*args, **kwargs):
        if not (current_user.role_download() or current_user.role_admin()):
            abort(403)
        return f(*args, **kwargs)
    return inner
def upload_required(f):
    """Decorator: require the upload role (admins implicitly qualify)."""
    @wraps(f)
    def inner(*args, **kwargs):
        if not (current_user.role_upload() or current_user.role_admin()):
            abort(403)
        return f(*args, **kwargs)
    return inner
def edit_required(f):
    """Decorator: require the edit role (admins implicitly qualify)."""
    @wraps(f)
    def inner(*args, **kwargs):
        if not (current_user.role_edit() or current_user.role_admin()):
            abort(403)
        return f(*args, **kwargs)
    return inner
# Fill indexpage with all requested data from database
def fill_indexpage(page, database, db_filter, order):
    """Query one page of *database* rows plus pagination and random picks.

    Applies the user's language filter and the caller-supplied *db_filter*,
    and returns the tuple (entries, random, pagination).
    """
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    if current_user.show_detail_random():
        random = db.session.query(db.Books).filter(lang_filter).order_by(func.random()).limit(config.config_random_books)
    else:
        # Fix: the original assigned the sqlalchemy `false` *function* itself,
        # which is a truthy object; it must be called to yield a false value.
        random = false()
    off = int(int(config.config_books_per_page) * (page - 1))
    pagination = Pagination(page, config.config_books_per_page,
                            len(db.session.query(database).filter(db_filter).filter(lang_filter).all()))
    entries = db.session.query(database).filter(db_filter).filter(lang_filter).order_by(order).offset(off).limit(
        config.config_books_per_page)
    return entries, random, pagination
def modify_database_object(input_elements, db_book_object, db_object, db_session, db_type):
    """Synchronize a book's linked objects with a new list of input names.

    input_elements: wanted element names (empty strings are ignored)
    db_book_object: the book's current relationship collection (mutated in place)
    db_object:      the mapped class of the element (tags, authors, series, ...)
    db_session:     session used for lookups, inserts and deletes
    db_type:        'languages', 'custom', 'author', 'series', or tag-like
    """
    input_elements = [x for x in input_elements if x != '']
    # we have all input element (authors, series, tags) names now
    # 1. search for elements to remove
    del_elements = []
    for c_elements in db_book_object:
        found = False
        # each mapped class stores its display value on a different attribute
        if db_type == 'languages':
            type_elements = c_elements.lang_code
        elif db_type == 'custom':
            type_elements = c_elements.value
        else:
            type_elements = c_elements.name
        for inp_element in input_elements:
            if inp_element == type_elements:
                found = True
                break
        # if the element was not found in the new list, add it to remove list
        if not found:
            del_elements.append(c_elements)
    # 2. search for elements that need to be added
    add_elements = []
    for inp_element in input_elements:
        found = False
        for c_elements in db_book_object:
            if db_type == 'languages':
                type_elements = c_elements.lang_code
            elif db_type == 'custom':
                type_elements = c_elements.value
            else:
                type_elements = c_elements.name
            if inp_element == type_elements:
                found = True
                break
        if not found:
            add_elements.append(inp_element)
    # if there are elements to remove, we remove them now
    if len(del_elements) > 0:
        for del_element in del_elements:
            db_book_object.remove(del_element)
            # drop the element entirely once no book references it any more
            if len(del_element.books) == 0:
                db_session.delete(del_element)
    # if there are elements to add, we add them now!
    if len(add_elements) > 0:
        if db_type == 'languages':
            db_filter = db_object.lang_code
        elif db_type == 'custom':
            db_filter = db_object.value
        else:
            db_filter = db_object.name
        for add_element in add_elements:
            # check if a element with that name exists
            new_element = db_session.query(db_object).filter(db_filter == add_element).first()
            # if no element is found add it
            if new_element is None:
                if db_type == 'author':
                    new_element = db_object(add_element, add_element, "")
                elif db_type == 'series':
                    new_element = db_object(add_element, add_element)
                elif db_type == 'custom':
                    new_element = db_object(value=add_element)
                else:  # db_type should be tag, or languages
                    new_element = db_object(add_element)
                db_session.add(new_element)
                # Fix: re-read through the session that was passed in; the
                # original used the global db.session here, which breaks when
                # the caller works on a different session.
                new_element = db_session.query(db_object).filter(db_filter == add_element).first()
            # add element to book
            db_book_object.append(new_element)
def render_title_template(*args, **kwargs):
    """render_template wrapper that always injects the configured site title
    as the `instance` template variable."""
    return render_template(*args, instance=config.config_calibre_web_title, **kwargs)
@app.before_request
def before_request():
    """Populate per-request globals and force initial setup when unconfigured."""
    if ub.DEVELOPMENT:
        reload(ub)  # pick up code changes without restarting (dev mode only)
    g.user = current_user
    g.allow_registration = config.config_public_reg
    g.allow_upload = config.config_uploading
    g.public_shelfes = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1).all()
    # Until the database location is configured, every non-static request is
    # redirected to the basic configuration page (login stays reachable).
    if not config.db_configured and request.endpoint not in ('basic_configuration', 'login') and '/static/' not in request.path:
        return redirect(url_for('basic_configuration'))
# Routing functions
@app.route("/opds")
@requires_basic_auth_if_no_ano
def feed_index():
    """Serve the OPDS catalog root feed."""
    response = make_response(render_title_template('index.xml'))
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/osd")
@requires_basic_auth_if_no_ano
def feed_osd():
    """Serve the OpenSearch description document for OPDS clients."""
    response = make_response(render_title_template('osd.xml', lang='de-DE'))
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/search/<query>")
@requires_basic_auth_if_no_ano
def feed_cc_search(query):
    """OPDS search with the term embedded in the path (Calibre Companion style)."""
    return feed_search(query.strip())
@app.route("/opds/search", methods=["GET"])
@requires_basic_auth_if_no_ano
def feed_normal_search():
    """OPDS search with the term in the ?query= parameter.

    Fix: request.args.get("query") returns None when the parameter is
    missing, so .strip() raised AttributeError; default to ''.
    """
    return feed_search(request.args.get("query", "").strip())
def feed_search(term):
    """Render the OPDS search result feed for *term* (empty feed when blank).

    Matches the term as a substring against tags, series, authors, publishers
    and titles, restricted to the user's language filter.
    """
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    if term:
        entries = db.session.query(db.Books).filter(db.or_(db.Books.tags.any(db.Tags.name.like("%" + term + "%")),
                                                    db.Books.series.any(db.Series.name.like("%" + term + "%")),
                                                    db.Books.authors.any(db.Authors.name.like("%" + term + "%")),
                                                    db.Books.publishers.any(db.Publishers.name.like("%" + term + "%")),
                                                    db.Books.title.like("%" + term + "%"))).filter(lang_filter).all()
        # Single page containing all hits (count floor of 1 avoids a 0-sized page).
        entriescount = len(entries) if len(entries) > 0 else 1
        pagination = Pagination(1, entriescount, entriescount)
        xml = render_title_template('feed.xml', searchterm=term, entries=entries, pagination=pagination)
    else:
        xml = render_title_template('feed.xml', searchterm="")
    response = make_response(xml)
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/new")
@requires_basic_auth_if_no_ano
def feed_new():
    """OPDS feed of the most recently added books (?offset= pages through)."""
    off = request.args.get("offset") or 0
    page = int(off) / int(config.config_books_per_page) + 1
    entries, __, pagination = fill_indexpage(page, db.Books, True,
                                             db.Books.timestamp.desc())
    response = make_response(render_title_template('feed.xml', entries=entries, pagination=pagination))
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/discover")
@requires_basic_auth_if_no_ano
def feed_discover():
    """OPDS feed with one page worth of randomly ordered books."""
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    entries = db.session.query(db.Books).filter(lang_filter).order_by(func.random()).limit(config.config_books_per_page)
    # A random view has no stable ordering, so pagination is fixed to page 1.
    pagination = Pagination(1, config.config_books_per_page, int(config.config_books_per_page))
    xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
    response = make_response(xml)
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/rated")
@requires_basic_auth_if_no_ano
def feed_best_rated():
    """OPDS feed of books with a rating greater than 9, newest first."""
    off = request.args.get("offset") or 0
    page = int(off) / int(config.config_books_per_page) + 1
    entries, __, pagination = fill_indexpage(page, db.Books,
                                             db.Books.ratings.any(db.Ratings.rating > 9),
                                             db.Books.timestamp.desc())
    response = make_response(render_title_template('feed.xml', entries=entries, pagination=pagination))
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/hot")
@requires_basic_auth_if_no_ano
def feed_hot():
    """OPDS feed of the most-downloaded books, ordered by download count."""
    off = request.args.get("offset")
    if not off:
        off = 0
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    # Download counters live in the app database (ub); book records in the
    # Calibre database (db), so the two are joined manually below.
    all_books = ub.session.query(ub.Downloads, ub.func.count(ub.Downloads.book_id)).order_by(
        ub.func.count(ub.Downloads.book_id).desc()).group_by(ub.Downloads.book_id)
    hot_books = all_books.offset(off).limit(config.config_books_per_page)
    entries = list()
    for book in hot_books:
        downloadBook = db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first()
        if downloadBook:
            entries.append(
                db.session.query(db.Books).filter(lang_filter).filter(db.Books.id == book.Downloads.book_id).first())
        else:
            # The book no longer exists in the library: purge its download rows.
            ub.session.query(ub.Downloads).filter(book.Downloads.book_id == ub.Downloads.book_id).delete()
            ub.session.commit()
    numBooks = entries.__len__()
    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, numBooks)
    xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
    response = make_response(xml)
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/author")
@requires_basic_auth_if_no_ano
def feed_authorindex():
    """OPDS index of authors who have books in the user's language filter."""
    off = request.args.get("offset")
    if not off:
        off = 0
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    entries = db.session.query(db.Authors).join(db.books_authors_link).join(db.Books).filter(lang_filter)\
        .group_by('books_authors_link.author').order_by(db.Authors.sort).limit(config.config_books_per_page).offset(off)
    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
                            len(db.session.query(db.Authors).all()))
    xml = render_title_template('feed.xml', listelements=entries, folder='feed_author', pagination=pagination)
    response = make_response(xml)
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/author/<int:book_id>")
@requires_basic_auth_if_no_ano
def feed_author(book_id):
    """OPDS feed listing the books of one author, newest first."""
    off = request.args.get("offset") or 0
    page = int(off) / int(config.config_books_per_page) + 1
    entries, __, pagination = fill_indexpage(page, db.Books,
                                             db.Books.authors.any(db.Authors.id == book_id),
                                             db.Books.timestamp.desc())
    response = make_response(render_title_template('feed.xml', entries=entries, pagination=pagination))
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/category")
@requires_basic_auth_if_no_ano
def feed_categoryindex():
    """OPDS index of tags/categories present in the user's language filter."""
    off = request.args.get("offset")
    if not off:
        off = 0
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    entries = db.session.query(db.Tags).join(db.books_tags_link).join(db.Books).filter(lang_filter).\
        group_by('books_tags_link.tag').order_by(db.Tags.name).offset(off).limit(config.config_books_per_page)
    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
                            len(db.session.query(db.Tags).all()))
    xml = render_title_template('feed.xml', listelements=entries, folder='feed_category', pagination=pagination)
    response = make_response(xml)
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/category/<int:book_id>")
@requires_basic_auth_if_no_ano
def feed_category(book_id):
    """OPDS feed of the books carrying one tag/category, newest first."""
    off = request.args.get("offset") or 0
    page = int(off) / int(config.config_books_per_page) + 1
    entries, __, pagination = fill_indexpage(page, db.Books,
                                             db.Books.tags.any(db.Tags.id == book_id),
                                             db.Books.timestamp.desc())
    response = make_response(render_title_template('feed.xml', entries=entries, pagination=pagination))
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/series")
@requires_basic_auth_if_no_ano
def feed_seriesindex():
    """OPDS index of the series present in the user's language filter."""
    off = request.args.get("offset")
    if not off:
        off = 0
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    # NOTE(review): unlike the author/category indexes this query has no
    # .limit(config.config_books_per_page) — all remaining series are
    # returned from the offset; confirm whether that is intended.
    entries = db.session.query(db.Series).join(db.books_series_link).join(db.Books).filter(lang_filter).\
        group_by('books_series_link.series').order_by(db.Series.sort).offset(off).all()
    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
                            len(db.session.query(db.Series).all()))
    xml = render_title_template('feed.xml', listelements=entries, folder='feed_series', pagination=pagination)
    response = make_response(xml)
    response.headers["Content-Type"] = "application/xml"
    return response
@app.route("/opds/series/<int:book_id>")
@requires_basic_auth_if_no_ano
def feed_series(book_id):
    """OPDS feed of the books in one series, ordered by series index."""
    off = request.args.get("offset") or 0
    page = int(off) / int(config.config_books_per_page) + 1
    entries, __, pagination = fill_indexpage(page, db.Books,
                                             db.Books.series.any(db.Series.id == book_id),
                                             db.Books.series_index)
    response = make_response(render_title_template('feed.xml', entries=entries, pagination=pagination))
    response.headers["Content-Type"] = "application/xml"
    return response
def partial(total_byte_len, part_size_limit):
    """Split *total_byte_len* into inclusive [start, end] byte-range pairs of
    at most *part_size_limit* bytes each (for ranged HTTP downloads)."""
    return [[start, min(total_byte_len - 1, start + part_size_limit - 1)]
            for start in range(0, total_byte_len, part_size_limit)]
def do_gdrive_download(df, headers):
    """Stream a Google Drive file to the client in ranged chunks.

    df:      drive file object whose metadata provides fileSize/downloadUrl
    headers: response headers (e.g. Content-Disposition) passed through
    """
    total_size = int(df.metadata.get('fileSize'))
    download_url = df.metadata.get('downloadUrl')
    s = partial(total_size, 1024 * 1024)  # 1 MiB byte ranges per upstream request
    def stream():
        for byte in s:
            # This local `headers` shadows the outer parameter on purpose:
            # the Range header is only for the request to Google Drive.
            headers = {"Range": 'bytes=%s-%s' % (byte[0], byte[1])}
            resp, content = df.auth.Get_Http_Object().request(download_url, headers=headers)
            if resp.status == 206:  # 206 Partial Content = requested range served
                yield content
            else:
                app.logger.info('An error occurred: %s' % resp)
                return
    return Response(stream_with_context(stream()), headers=headers)
@app.route("/opds/download/<book_id>/<book_format>/")
@requires_basic_auth_if_no_ano
@download_required
def get_opds_download_link(book_id, book_format):
    """Send one format of a book, either from Google Drive or the local library."""
    startTime = time.time()
    book_format = book_format.split(".")[0]
    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
    data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == book_format.upper()).first()
    app.logger.info(data.name)
    if current_user.is_authenticated:
        # Record the download so it counts toward the hot-books statistics.
        helper.update_download(book_id, int(current_user.id))
    file_name = book.title
    if len(book.authors) > 0:
        file_name = book.authors[0].name + '_' + file_name
    file_name = helper.get_valid_filename(file_name)
    headers = Headers()
    # RFC 5987 encoded filename so non-ASCII titles survive the download dialog.
    headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (quote(file_name.encode('utf8')), book_format)
    # NOTE(review): the elapsed-time logging below looks like leftover profiling.
    app.logger.info(time.time()-startTime)
    startTime = time.time()
    if config.config_use_google_drive:
        app.logger.info(time.time() - startTime)
        df = gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, book.path, data.name + "." + book_format)
        return do_gdrive_download(df, headers)
    else:
        response = make_response(send_from_directory(os.path.join(config.config_calibre_dir, book.path), data.name + "." + book_format))
        response.headers = headers
        return response
@app.route("/ajax/book/<string:uuid>")
@requires_basic_auth_if_no_ano
def get_metadata_calibre_companion(uuid):
    """Return book metadata as JSON for Calibre Companion; '' for unknown uuid."""
    entry = db.session.query(db.Books).filter(db.Books.uuid.like("%" + uuid + "%")).first()
    if entry is None:
        return ""
    response = make_response(render_template('json.txt', entry=entry))
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
@app.route("/get_authors_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_authors_json():
    """Typeahead helper: authors whose name contains ?q=, as a JSON list."""
    if request.method == "GET":
        query = request.args.get('q')
        matches = db.session.query(db.Authors).filter(db.Authors.name.like("%" + query + "%")).all()
        return json.dumps([dict(name=author.name) for author in matches])
@app.route("/get_tags_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_tags_json():
    """Typeahead helper: tags whose name contains ?q=, as a JSON list."""
    if request.method == "GET":
        query = request.args.get('q')
        matches = db.session.query(db.Tags).filter(db.Tags.name.like("%" + query + "%")).all()
        return json.dumps([dict(name=tag.name) for tag in matches])
@app.route("/get_update_status", methods=['GET'])
@login_required_if_no_ano
def get_update_status():
    """Report whether a newer upstream commit exists than the running one."""
    status = {}
    if request.method == "GET":
        # should be automatically replaced by git with current commit hash
        commit_id = '$Format:%H$'
        commit = requests.get('https://api.github.com/repos/ajurcevic/calibre-web/git/refs/heads/master').json()
        if "object" in commit and commit['object']['sha'] != commit_id:
            status['status'] = True
            commitdate = requests.get('https://api.github.com/repos/ajurcevic/calibre-web/git/commits/'+commit['object']['sha']).json()
            if "committer" in commitdate:
                status['commit'] = commitdate['committer']['date']
            else:
                status['commit'] = u'Unknown'
        else:
            status['status'] = False
    return json.dumps(status)
@app.route("/get_updater_status", methods=['GET', 'POST'])
@login_required
@admin_required
def get_updater_status():
    """POST with start=True launches the updater thread (and returns the
    step-number -> message table); GET polls the current update step."""
    status = {}
    if request.method == "POST":
        commit = request.form.to_dict()
        if "start" in commit and commit['start'] == 'True':
            # Human-readable labels for the updater's numeric progress states.
            text = {
                "1": _(u'Requesting update package'),
                "2": _(u'Downloading update package'),
                "3": _(u'Unzipping update package'),
                "4": _(u'Files are replaced'),
                "5": _(u'Database connections are closed'),
                "6": _(u'Server is stopped'),
                "7": _(u'Update finished, please press okay and reload page')
            }
            status['text']=text
            helper.updater_thread = helper.Updater()
            helper.updater_thread.start()
            status['status']=helper.updater_thread.get_update_status()
    elif request.method == "GET":
        try:
            status['status']=helper.updater_thread.get_update_status()
        except Exception:
            # No updater thread exists (e.g. after restart): report final state.
            status['status'] = 7
    return json.dumps(status)
@app.route("/get_languages_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_languages_json():
    """Typeahead helper: known book languages whose localized name contains ?q=."""
    if request.method == "GET":
        query = request.args.get('q').lower()
        languages = db.session.query(db.Languages).all()
        for lang in languages:
            try:
                cur_l = LC.parse(lang.lang_code)
                lang.name = cur_l.get_language_name(get_locale())
            except Exception:
                # Babel cannot parse the code: fall back to the ISO-639 name.
                lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
        entries = [s for s in languages if query in s.name.lower()]
        json_dumps = json.dumps([dict(name=r.name) for r in entries])
        return json_dumps
@app.route("/get_series_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_series_json():
    """Typeahead helper: series whose name contains ?q=, as a JSON list."""
    if request.method == "GET":
        query = request.args.get('q')
        matches = db.session.query(db.Series).filter(db.Series.name.like("%" + query + "%")).all()
        return json.dumps([dict(name=series.name) for series in matches])
@app.route("/get_matching_tags", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_matching_tags():
    """Return {'tags': [...]} — ids of tags still reachable under the current
    advanced-search constraints (author, title, included/excluded tags)."""
    tag_dict = {'tags': []}
    if request.method == "GET":
        q = db.session.query(db.Books)
        author_input = request.args.get('author_name')
        title_input = request.args.get('book_title')
        include_tag_inputs = request.args.getlist('include_tag')
        exclude_tag_inputs = request.args.getlist('exclude_tag')
        q = q.filter(db.Books.authors.any(db.Authors.name.like("%" + author_input + "%")),
                     db.Books.title.like("%" + title_input + "%"))
        if len(include_tag_inputs) > 0:
            for tag in include_tag_inputs:
                q = q.filter(db.Books.tags.any(db.Tags.id == tag))
        if len(exclude_tag_inputs) > 0:
            for tag in exclude_tag_inputs:
                q = q.filter(not_(db.Books.tags.any(db.Tags.id == tag)))
        # Collect the distinct tag ids of every book still matching the query.
        for book in q:
            for tag in book.tags:
                if tag.id not in tag_dict['tags']:
                    tag_dict['tags'].append(tag.id)
    json_dumps = json.dumps(tag_dict)
    return json_dumps
@app.route("/", defaults={'page': 1})
@app.route('/page/<int:page>')
@login_required_if_no_ano
def index(page):
    """Landing page: newest books first, plus an optional random selection."""
    books, random, pages = fill_indexpage(page, db.Books, True, db.Books.timestamp.desc())
    return render_title_template('index.html', random=random, entries=books,
                                 pagination=pages, title=_(u"Latest Books"))
@app.route("/hot", defaults={'page': 1})
@app.route('/hot/page/<int:page>')
@login_required_if_no_ano
def hot_books(page):
    """Show the most-downloaded books, ordered by download count."""
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    if current_user.show_detail_random():
        random = db.session.query(db.Books).filter(lang_filter).order_by(func.random()).limit(config.config_random_books)
    else:
        # Fix: the original assigned the sqlalchemy `false` *function* itself,
        # which is a truthy object; it must be called to yield a false value.
        random = false()
    off = int(int(config.config_books_per_page) * (page - 1))
    # Download counters live in the app database (ub); book records in the
    # Calibre database (db), so the two are joined manually below.
    all_books = ub.session.query(ub.Downloads, ub.func.count(ub.Downloads.book_id)).order_by(
        ub.func.count(ub.Downloads.book_id).desc()).group_by(ub.Downloads.book_id)
    hot_books = all_books.offset(off).limit(config.config_books_per_page)
    entries = list()
    for book in hot_books:
        downloadBook = db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first()
        if downloadBook:
            entries.append(
                db.session.query(db.Books).filter(lang_filter).filter(db.Books.id == book.Downloads.book_id).first())
        else:
            # The book no longer exists in the library: purge its download rows.
            ub.session.query(ub.Downloads).filter(book.Downloads.book_id == ub.Downloads.book_id).delete()
            ub.session.commit()
    numBooks = entries.__len__()
    pagination = Pagination(page, config.config_books_per_page, numBooks)
    return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
                                 title=_(u"Hot Books (most downloaded)"))
@app.route("/rated", defaults={'page': 1})
@app.route('/rated/page/<int:page>')
@login_required_if_no_ano
def best_rated_books(page):
    """Show books with a rating greater than 9, newest first."""
    high_rated = db.Books.ratings.any(db.Ratings.rating > 9)
    books, random, pages = fill_indexpage(page, db.Books, high_rated,
                                          db.Books.timestamp.desc())
    return render_title_template('index.html', random=random, entries=books,
                                 pagination=pages, title=_(u"Best rated books"))
@app.route("/discover", defaults={'page': 1})
@app.route('/discover/page/<int:page>')
@login_required_if_no_ano
def discover(page):
    """Show one page of randomly ordered books."""
    # func.randomblob(2) serves as the random sort key here (SQLite builtin).
    entries, __, pagination = fill_indexpage(page, db.Books, True, func.randomblob(2))
    # NOTE(review): the pagination returned by fill_indexpage is discarded and
    # replaced with a fixed single page — presumably intentional for a random
    # view; confirm.
    pagination = Pagination(1, config.config_books_per_page,config.config_books_per_page)
    return render_title_template('discover.html', entries=entries, pagination=pagination, title=_(u"Random Books"))
@app.route("/author")
@login_required_if_no_ano
def author_list():
    """List all authors (with book counts) within the user's language filter."""
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    entries = db.session.query(db.Authors, func.count('books_authors_link.book').label('count')).join(
        db.books_authors_link).join(db.Books).filter(
        lang_filter).group_by('books_authors_link.author').order_by(db.Authors.sort).all()
    return render_title_template('list.html', entries=entries, folder='author', title=_(u"Author list"))
@app.route("/author/<int:book_id>", defaults={'page': 1})
@app.route("/author/<int:book_id>/<int:page>")
@login_required_if_no_ano
def author(book_id, page):
    """Show one author's books, newest first, paged.

    Fix: the paged route string previously ended with a stray apostrophe
    ("...<int:page>'"), so the URL only matched with a literal quote appended.
    """
    entries, random, pagination = fill_indexpage(page, db.Books, db.Books.authors.any(db.Authors.id == book_id),
                                                 db.Books.timestamp.desc())
    name = db.session.query(db.Authors).filter(db.Authors.id == book_id).first().name
    if entries:
        return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
                                     title=_(u"Author: %(name)s", name=name))
    else:
        flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
        return redirect(url_for("index"))
@app.route("/series")
@login_required_if_no_ano
def series_list():
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Series, func.count('books_series_link.book').label('count')).join(
db.books_series_link).join(db.Books).filter(
lang_filter).group_by('books_series_link.series').order_by(db.Series.sort).all()
return render_title_template('list.html', entries=entries, folder='series', title=_(u"Series list"))
@app.route("/series/<int:book_id>/", defaults={'page': 1})
@app.route("/series/<int:book_id>/<int:page>'")
@login_required_if_no_ano
def series(book_id, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.series.any(db.Series.id == book_id),
db.Books.series_index)
name = db.session.query(db.Series).filter(db.Series.id == book_id).first().name
if entries:
return render_title_template('index.html', random=random, pagination=pagination, entries=entries,
title=_(u"Series: %(serie)s", serie=name))
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/language")
@login_required_if_no_ano
def language_overview():
if current_user.filter_language() == u"all":
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
else:
try:
langfound = 1
cur_l = LC.parse(current_user.filter_language())
except Exception:
langfound = 0
languages = db.session.query(db.Languages).filter(
db.Languages.lang_code == current_user.filter_language()).all()
if langfound:
languages[0].name = cur_l.get_language_name(get_locale())
else:
languages[0].name = _(isoLanguages.get(part3=languages[0].lang_code).name)
lang_counter = db.session.query(db.books_languages_link,
func.count('books_languages_link.book').label('bookcount')).group_by(
'books_languages_link.lang_code').all()
return render_title_template('languages.html', languages=languages, lang_counter=lang_counter,
title=_(u"Available languages"))
@app.route("/language/<name>", defaults={'page': 1})
@app.route('/language/<name>/page/<int:page>')
@login_required_if_no_ano
def language(name, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.languages.any(db.Languages.lang_code == name),
db.Books.timestamp.desc())
try:
cur_l = LC.parse(name)
name = cur_l.get_language_name(get_locale())
except Exception:
name = _(isoLanguages.get(part3=name).name)
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Language: %(name)s", name=name))
@app.route("/category")
@login_required_if_no_ano
def category_list():
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Tags, func.count('books_tags_link.book').label('count')).join(
db.books_tags_link).join(db.Books).filter(
lang_filter).group_by('books_tags_link.tag').all()
return render_title_template('list.html', entries=entries, folder='category', title=_(u"Category list"))
@app.route("/category/<int:book_id>", defaults={'page': 1})
@app.route('/category/<int:book_id>/<int:page>')
@login_required_if_no_ano
def category(book_id, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.tags.any(db.Tags.id == book_id),
db.Books.timestamp.desc())
name = db.session.query(db.Tags).filter(db.Tags.id == book_id).first().name
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Category: %(name)s", name=name))
@app.route("/ajax/toggleread/<int:book_id>", methods=['POST'])
@login_required
def toggle_read(book_id):
book = ub.session.query(ub.ReadBook).filter(ub.and_(ub.ReadBook.user_id == int(current_user.id),
ub.ReadBook.book_id == book_id)).first()
if book:
book.is_read = not book.is_read
else:
readBook = ub.ReadBook()
readBook.user_id = int(current_user.id)
readBook.book_id = book_id
readBook.is_read = True
book = readBook
ub.session.merge(book)
ub.session.commit()
return ""
@app.route("/book/<int:book_id>")
@login_required_if_no_ano
def show_book(book_id):
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Books).filter(db.Books.id == book_id).filter(lang_filter).first()
if entries:
for index in range(0, len(entries.languages)):
try:
entries.languages[index].language_name = LC.parse(entries.languages[index].lang_code).get_language_name(
get_locale())
except Exception:
entries.languages[index].language_name = _(
isoLanguages.get(part3=entries.languages[index].lang_code).name)
tmpcc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
if config.config_columns_to_ignore:
cc = []
for col in tmpcc:
r = re.compile(config.config_columns_to_ignore)
if r.match(col.label):
cc.append(col)
else:
cc = tmpcc
book_in_shelfs = []
shelfs = ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == book_id).all()
for entry in shelfs:
book_in_shelfs.append(entry.shelf)
if not current_user.is_anonymous():
matching_have_read_book = ub.session.query(ub.ReadBook).filter(ub.and_(ub.ReadBook.user_id == int(current_user.id),
ub.ReadBook.book_id == book_id)).all()
have_read = len(matching_have_read_book) > 0 and matching_have_read_book[0].is_read
else:
have_read = None
return render_title_template('detail.html', entry=entries, cc=cc,
title=entries.title, books_shelfs=book_in_shelfs, have_read=have_read)
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/admin")
@login_required
def admin_forbidden():
abort(403)
@app.route("/stats")
@login_required
def stats():
counter = len(db.session.query(db.Books).all())
authors = len(db.session.query(db.Authors).all())
categorys = len(db.session.query(db.Tags).all())
series = len(db.session.query(db.Series).all())
versions = uploader.book_formats.get_versions()
vendorpath = os.path.join(config.get_main_dir, "vendor")
if sys.platform == "win32":
kindlegen = os.path.join(vendorpath, u"kindlegen.exe")
else:
kindlegen = os.path.join(vendorpath, u"kindlegen")
versions['KindlegenVersion'] = _('not installed')
if os.path.exists(kindlegen):
p = subprocess.Popen(kindlegen, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
for lines in p.stdout.readlines():
if isinstance(lines, bytes):
lines = lines.decode('utf-8')
if re.search('Amazon kindlegen\(', lines):
versions['KindlegenVersion'] = lines
versions['PythonVersion'] = sys.version
versions['babel'] = babelVersion
versions['sqlalchemy'] = sqlalchemyVersion
versions['flask'] = flaskVersion
versions['flasklogin'] = flask_loginVersion
versions['flask_principal'] = flask_principalVersion
versions['tornado'] = tornadoVersion
versions['iso639'] = iso639Version
versions['requests'] = requests.__version__
versions['pysqlite'] = db.engine.dialect.dbapi.version
versions['sqlite'] = db.engine.dialect.dbapi.sqlite_version
return render_title_template('stats.html', bookcounter=counter, authorcounter=authors, versions=versions,
categorycounter=categorys, seriecounter=series, title=_(u"Statistics"))
@app.route("/delete/<int:book_id>/")
@login_required
def delete_book(book_id):
if current_user.role_delete_books():
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if book:
if config.config_use_google_drive:
helper.delete_book_gdrive(book) # ToDo really delete file
else:
helper.delete_book(book,config.config_calibre_dir)
# check if only this book links to:
# author, language, series, tags, custom columns
modify_database_object([u''], book.authors, db.Authors, db.session, 'author')
modify_database_object([u''], book.tags, db.Tags, db.session, 'tags')
modify_database_object([u''], book.series, db.Series, db.session, 'series')
modify_database_object([u''], book.languages, db.Languages, db.session, 'languages')
modify_database_object([u''], book.publishers, db.Publishers, db.session, 'series')
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
for c in cc:
cc_string = "custom_column_" + str(c.id)
if not c.is_multiple:
if len(getattr(book, cc_string)) > 0:
if c.datatype == 'bool':
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
db.session.delete(del_cc)
elif c.datatype == 'rating':
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
if len(del_cc.books) == 0:
db.session.delete(del_cc)
else:
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
db.session.delete(del_cc)
else:
modify_database_object([u''], getattr(book, cc_string),db.cc_classes[c.id], db.session, 'custom')
db.session.query(db.Books).filter(db.Books.id == book_id).delete()
db.session.commit()
else:
# book not found
app.logger.info('Book with id "'+book_id+'" could not be deleted')
return redirect(url_for('index'))
@app.route("/gdrive/authenticate")
@login_required
@admin_required
def authenticate_google_drive():
authUrl = Gauth.Instance().auth.GetAuthUrl()
return redirect(authUrl)
@app.route("/gdrive/callback")
def google_drive_callback():
auth_code = request.args.get('code')
credentials = Gauth.Instance().auth.flow.step2_exchange(auth_code)
with open('gdrive_credentials', 'w') as f:
f.write(credentials.to_json())
return redirect(url_for('configuration'))
@app.route("/gdrive/watch/subscribe")
@login_required
@admin_required
def watch_gdrive():
if not config.config_google_drive_watch_changes_response:
address = '%sgdrive/watch/callback' % config.config_google_drive_calibre_url_base
notification_id = str(uuid4())
result = gdriveutils.watchChange(Gdrive.Instance().drive, notification_id,
'web_hook', address, gdrive_watch_callback_token, current_milli_time() + 604800*1000)
print (result)
settings = ub.session.query(ub.Settings).first()
settings.config_google_drive_watch_changes_response = json.dumps(result)
ub.session.merge(settings)
ub.session.commit()
settings = ub.session.query(ub.Settings).first()
config.loadSettings()
print (settings.config_google_drive_watch_changes_response)
return redirect(url_for('configuration'))
@app.route("/gdrive/watch/revoke")
@login_required
@admin_required
def revoke_watch_gdrive():
last_watch_response = config.config_google_drive_watch_changes_response
if last_watch_response:
try:
gdriveutils.stopChannel(Gdrive.Instance().drive, last_watch_response['id'], last_watch_response['resourceId'])
except HttpError:
pass
settings = ub.session.query(ub.Settings).first()
settings.config_google_drive_watch_changes_response = None
ub.session.merge(settings)
ub.session.commit()
config.loadSettings()
return redirect(url_for('configuration'))
@app.route("/gdrive/watch/callback", methods=['GET', 'POST'])
def on_received_watch_confirmation():
app.logger.info(request.headers)
if request.headers.get('X-Goog-Channel-Token') == gdrive_watch_callback_token \
and request.headers.get('X-Goog-Resource-State') == 'change' \
and request.data:
data = request.data
def updateMetaData():
app.logger.info('Change received from gdrive')
app.logger.info(data)
try:
j = json.loads(data)
app.logger.info('Getting change details')
response = gdriveutils.getChangeById(Gdrive.Instance().drive, j['id'])
app.logger.info(response)
if response:
dbpath = os.path.join(config.config_calibre_dir, "metadata.db")
if not response['deleted'] and response['file']['title'] == 'metadata.db' and response['file']['md5Checksum'] != md5(dbpath):
tmpDir = tempfile.gettempdir()
app.logger.info('Database file updated')
copyfile(dbpath, os.path.join(tmpDir, "metadata.db_" + str(current_milli_time())))
app.logger.info('Backing up existing and downloading updated metadata.db')
gdriveutils.downloadFile(Gdrive.Instance().drive, None, "metadata.db", os.path.join(tmpDir, "tmp_metadata.db"))
app.logger.info('Setting up new DB')
os.rename(os.path.join(tmpDir, "tmp_metadata.db"), dbpath)
db.setup_db()
except Exception as e:
app.logger.exception(e)
updateMetaData()
return ''
@app.route("/shutdown")
@login_required
@admin_required
def shutdown():
# global global_task
task = int(request.args.get("parameter").strip())
helper.global_task = task
if task == 1 or task == 0: # valid commandos received
# close all database connections
db.session.close()
db.engine.dispose()
ub.session.close()
ub.engine.dispose()
# stop tornado server
server = IOLoop.instance()
server.add_callback(server.stop)
showtext = {}
if task == 0:
showtext['text'] = _(u'Server restarted, please reload page')
else:
showtext['text'] = _(u'Performing shutdown of server, please close window')
return json.dumps(showtext)
else:
if task == 2:
db.session.close()
db.engine.dispose()
db.setup_db()
return json.dumps({})
abort(404)
@app.route("/update")
@login_required
@admin_required
def update():
helper.updater_thread = helper.Updater()
flash(_(u"Update done"), category="info")
return abort(404)
@app.route("/search", methods=["GET"])
@login_required_if_no_ano
def search():
term = request.args.get("query").strip()
if term:
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Books).filter(db.or_(db.Books.tags.any(db.Tags.name.like("%" + term + "%")),
db.Books.series.any(db.Series.name.like("%" + term + "%")),
db.Books.authors.any(db.Authors.name.like("%" + term + "%")),
db.Books.publishers.any(db.Publishers.name.like("%" + term + "%")),
db.Books.title.like("%" + term + "%"))).filter(lang_filter).all()
return render_title_template('search.html', searchterm=term, entries=entries)
else:
return render_title_template('search.html', searchterm="")
@app.route("/advanced_search", methods=["GET"])
@login_required_if_no_ano
def advanced_search():
if request.method == 'GET':
q = db.session.query(db.Books)
include_tag_inputs = request.args.getlist('include_tag')
exclude_tag_inputs = request.args.getlist('exclude_tag')
include_series_inputs = request.args.getlist('include_serie')
exclude_series_inputs = request.args.getlist('exclude_serie')
include_languages_inputs = request.args.getlist('include_language')
exclude_languages_inputs = request.args.getlist('exclude_language')
author_name = request.args.get("author_name")
book_title = request.args.get("book_title")
publisher = request.args.get("publisher")
if author_name: author_name = author_name.strip()
if book_title: book_title = book_title.strip()
if publisher: publisher = publisher.strip()
if include_tag_inputs or exclude_tag_inputs or include_series_inputs or exclude_series_inputs or \
include_languages_inputs or exclude_languages_inputs or author_name or book_title or publisher:
searchterm = []
searchterm.extend((author_name, book_title, publisher))
tag_names = db.session.query(db.Tags).filter(db.Tags.id.in_(include_tag_inputs)).all()
searchterm.extend(tag.name for tag in tag_names)
# searchterm = " + ".join(filter(None, searchterm))
serie_names = db.session.query(db.Series).filter(db.Series.id.in_(include_series_inputs)).all()
searchterm.extend(serie.name for serie in serie_names)
language_names = db.session.query(db.Languages).filter(db.Languages.id.in_(include_languages_inputs)).all()
for lang in language_names:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
searchterm.extend(language.name for language in language_names)
searchterm = " + ".join(filter(None, searchterm))
q = q.filter(db.Books.authors.any(db.Authors.name.like("%" + author_name + "%")),
db.Books.title.like("%" + book_title + "%"),
db.Books.publishers.any(db.Publishers.name.like("%" + publisher + "%")))
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
for tag in exclude_tag_inputs:
q = q.filter(not_(db.Books.tags.any(db.Tags.id == tag)))
for serie in include_series_inputs:
q = q.filter(db.Books.series.any(db.Series.id == serie))
for serie in exclude_series_inputs:
q = q.filter(not_(db.Books.series.any(db.Series.id == serie)))
if current_user.filter_language() != "all":
q = q.filter(db.Books.languages.any(db.Languages.lang_code == current_user.filter_language()))
else:
for language in include_languages_inputs:
q = q.filter(db.Books.languages.any(db.Languages.id == language))
for language in exclude_languages_inputs:
q = q.filter(not_(db.Books.series.any(db.Languages.id == language)))
q = q.all()
return render_title_template('search.html', searchterm=searchterm, entries=q, title=_(u"search"))
tags = db.session.query(db.Tags).order_by(db.Tags.name).all()
series = db.session.query(db.Series).order_by(db.Series.name).all()
if current_user.filter_language() == u"all":
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
else:
languages = None
return render_title_template('search_form.html', tags=tags, languages=languages, series=series, title=_(u"search"))
def get_cover_via_gdrive(cover_path):
    """Return a public web link for a cover stored on Google Drive.

    On first access the file is made world-readable and that grant is recorded
    in the local PermissionAdded table so it is only issued once per file.
    """
    df = gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, cover_path, 'cover.jpg')
    already_shared = gdriveutils.session.query(gdriveutils.PermissionAdded).filter(
        gdriveutils.PermissionAdded.gdrive_id == df['id']).first()
    if not already_shared:
        df.GetPermissions()
        df.InsertPermission({
            'type': 'anyone',
            'value': 'anyone',
            'role': 'reader',
            'withLink': True})
        record = gdriveutils.PermissionAdded()
        record.gdrive_id = df['id']
        gdriveutils.session.add(record)
        gdriveutils.session.commit()
    return df.metadata.get('webContentLink')
@app.route("/cover/<path:cover_path>")
@login_required_if_no_ano
def get_cover(cover_path):
if config.config_use_google_drive:
return redirect(get_cover_via_gdrive(cover_path))
else:
return send_from_directory(os.path.join(config.config_calibre_dir, cover_path), "cover.jpg")
@app.route("/opds/thumb_240_240/<path:book_id>")
@app.route("/opds/cover_240_240/<path:book_id>")
@app.route("/opds/cover_90_90/<path:book_id>")
@app.route("/opds/cover/<path:book_id>")
@requires_basic_auth_if_no_ano
def feed_get_cover(book_id):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if config.config_use_google_drive:
return redirect(get_cover_via_gdrive(book.path))
else:
return send_from_directory(os.path.join(config.config_calibre_dir, book.path), "cover.jpg")
def render_read_books(page, are_read, as_xml=False):
    """Render the books the current user has (are_read=True) or has not marked as read.

    With as_xml=True an OPDS feed is returned instead of the HTML index page.
    """
    read_records = ub.session.query(ub.ReadBook).filter(ub.ReadBook.user_id == int(current_user.id)).filter(ub.ReadBook.is_read == True).all()
    read_ids = [record.book_id for record in read_records]
    db_filter = db.Books.id.in_(read_ids) if are_read else ~db.Books.id.in_(read_ids)
    entries, random, pagination = fill_indexpage(page, db.Books, db_filter, db.Books.timestamp.desc())
    if as_xml:
        response = make_response(render_title_template('feed.xml', entries=entries, pagination=pagination))
        response.headers["Content-Type"] = "application/xml"
        return response
    name = u'Read Books' if are_read else u'Unread Books'
    return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
                                 title=_(name, name=name))
@app.route("/opds/readbooks/")
@login_required_if_no_ano
def feed_read_books():
off = request.args.get("offset")
if not off:
off = 0
return render_read_books(int(off) / (int(config.config_books_per_page)) + 1, True, True)
@app.route("/readbooks/", defaults={'page': 1})
@app.route("/readbooks/<int:page>'")
@login_required_if_no_ano
def read_books(page):
return render_read_books(page, True)
@app.route("/opds/unreadbooks/")
@login_required_if_no_ano
def feed_unread_books():
off = request.args.get("offset")
if not off:
off = 0
return render_read_books(int(off) / (int(config.config_books_per_page)) + 1, False, True)
@app.route("/unreadbooks/", defaults={'page': 1})
@app.route("/unreadbooks/<int:page>'")
@login_required_if_no_ano
def unread_books(page):
return render_read_books(page, False)
@app.route("/read/<int:book_id>/<book_format>")
@login_required_if_no_ano
def read_book(book_id, book_format):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if book:
book_dir = os.path.join(config.get_main_dir, "cps", "static", str(book_id))
if not os.path.exists(book_dir):
os.mkdir(book_dir)
if book_format.lower() == "epub":
# check if mimetype file is exists
mime_file = str(book_id) + "/mimetype"
if not os.path.exists(mime_file):
epub_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".epub"
if not os.path.isfile(epub_file):
raise ValueError('Error opening eBook. File does not exist: ', epub_file)
zfile = zipfile.ZipFile(epub_file)
for name in zfile.namelist():
(dirName, fileName) = os.path.split(name)
newDir = os.path.join(book_dir, dirName)
if not os.path.exists(newDir):
try:
os.makedirs(newDir)
except OSError as exception:
if not exception.errno == errno.EEXIST:
raise
if fileName:
fd = open(os.path.join(newDir, fileName), "wb")
fd.write(zfile.read(name))
fd.close()
zfile.close()
return render_title_template('read.html', bookid=book_id, title=_(u"Read a Book"))
elif book_format.lower() == "pdf":
all_name = str(book_id) + "/" + book.data[0].name + ".pdf"
tmp_file = os.path.join(book_dir, book.data[0].name) + ".pdf"
if not os.path.exists(tmp_file):
pdf_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".pdf"
copyfile(pdf_file, tmp_file)
return render_title_template('readpdf.html', pdffile=all_name, title=_(u"Read a Book"))
elif book_format.lower() == "txt":
all_name = str(book_id) + "/" + book.data[0].name + ".txt"
tmp_file = os.path.join(book_dir, book.data[0].name) + ".txt"
if not os.path.exists(all_name):
txt_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".txt"
copyfile(txt_file, tmp_file)
return render_title_template('readtxt.html', txtfile=all_name, title=_(u"Read a Book"))
elif book_format.lower() == "cbr":
all_name = str(book_id) + "/" + book.data[0].name + ".cbr"
tmp_file = os.path.join(book_dir, book.data[0].name) + ".cbr"
if not os.path.exists(all_name):
cbr_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".cbr"
copyfile(cbr_file, tmp_file)
return render_title_template('readcbr.html', comicfile=all_name, title=_(u"Read a Book"))
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/download/<int:book_id>/<book_format>")
@login_required_if_no_ano
@download_required
def get_download_link(book_id, book_format):
book_format = book_format.split(".")[0]
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == book_format.upper()).first()
if data:
# collect downloaded books only for registered user and not for anonymous user
if current_user.is_authenticated:
helper.update_download(book_id, int(current_user.id))
file_name = book.title
if len(book.authors) > 0:
file_name = book.authors[0].name + '_' + file_name
file_name = helper.get_valid_filename(file_name)
headers = Headers()
try:
headers["Content-Type"] = mimetypes.types_map['.' + book_format]
except KeyError:
headers["Content-Type"] = "application/octet-stream"
headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (quote(file_name.encode('utf-8')), book_format)
if config.config_use_google_drive:
df = gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, book.path, '%s.%s' % (data.name, book_format))
return do_gdrive_download(df, headers)
else:
response = make_response(send_from_directory(os.path.join(config.config_calibre_dir, book.path), data.name + "." + book_format))
response.headers = headers
return response
else:
abort(404)
@app.route("/download/<int:book_id>/<book_format>/<anyname>")
@login_required_if_no_ano
@download_required
def get_download_link_ext(book_id, book_format, anyname):
return get_download_link(book_id, book_format)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Self-registration form; only available when public registration is enabled.

    i18n fix: the success message was the only user-facing string not wrapped
    in gettext, so it could never be translated; now wrapped in _().
    """
    if not config.config_public_reg:
        abort(404)
    if current_user is not None and current_user.is_authenticated:
        return redirect(url_for('index'))

    if request.method == "POST":
        to_save = request.form.to_dict()
        if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
            flash(_(u"Please fill out all fields!"), category="error")
            return render_title_template('register.html', title=_(u"register"))

        existing_user = ub.session.query(ub.User).filter(ub.User.nickname == to_save["nickname"]).first()
        existing_email = ub.session.query(ub.User).filter(ub.User.email == to_save["email"]).first()
        if not existing_user and not existing_email:
            content = ub.User()
            content.password = generate_password_hash(to_save["password"])
            content.nickname = to_save["nickname"]
            content.email = to_save["email"]
            content.role = config.config_default_role
            try:
                ub.session.add(content)
                ub.session.commit()
            except Exception:
                ub.session.rollback()
                flash(_(u"An unknown error occured. Please try again later."), category="error")
                return render_title_template('register.html', title=_(u"register"))
            flash(_(u"Your account has been created. Please login."), category="success")
            return redirect(url_for('login'))
        else:
            flash(_(u"This username or email address is already in use."), category="error")
            return render_title_template('register.html', title=_(u"register"))

    return render_title_template('register.html', title=_(u"register"))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate POSTed credentials."""
    if not config.db_configured:
        # first-run: the Calibre database has not been configured yet
        return redirect(url_for('basic_configuration'))
    if current_user is not None and current_user.is_authenticated:
        return redirect(url_for('index'))
    if request.method == "POST":
        form = request.form.to_dict()
        user = ub.session.query(ub.User).filter(ub.User.nickname == form['username'].strip()).first()
        if user and check_password_hash(user.password, form['password']):
            login_user(user, remember=True)
            flash(_(u"you are now logged in as: '%(nickname)s'", nickname=user.nickname), category="success")
            return redirect(url_for("index"))
        app.logger.info('Login failed for user "' + form['username'] + '"')
        flash(_(u"Wrong Username or Password"), category="error")
    return render_title_template('login.html', title=_(u"login"))
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the login page."""
    if current_user is not None and current_user.is_authenticated:
        logout_user()
    return redirect(url_for('login'))
@app.route('/send/<int:book_id>')
@login_required
@download_required
def send_to_kindle(book_id):
    """E-mail the book to the current user's configured kindle address."""
    settings = ub.get_mail_settings()
    if settings.get("mail_server", "mail.example.com") == "mail.example.com":
        # SMTP still carries the placeholder default -- nothing configured yet
        flash(_(u"Please configure the SMTP mail settings first..."), category="error")
    elif not current_user.kindle_mail:
        flash(_(u"Please configure your kindle email address first..."), category="error")
    else:
        result = helper.send_mail(book_id, current_user.kindle_mail, config.config_calibre_dir)
        if result is None:
            flash(_(u"Book successfully send to %(kindlemail)s", kindlemail=current_user.kindle_mail),
                  category="success")
            helper.update_download(book_id, int(current_user.id))
        else:
            flash(_(u"There was an error sending this book: %(res)s", res=result), category="error")
    return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/add/<int:shelf_id>/<int:book_id>")
@login_required
def add_to_shelf(shelf_id, book_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if not shelf.is_public and not shelf.user_id == int(current_user.id):
app.logger.info("Sorry you are not allowed to add a book to the the shelf: %s" % shelf.name)
return redirect(url_for('index'))
maxOrder = ub.session.query(func.max(ub.BookShelf.order)).filter(ub.BookShelf.shelf == shelf_id).first()
book_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id,
ub.BookShelf.book_id == book_id).first()
if book_in_shelf:
app.logger.info("Book is already part of the shelf: %s" % shelf.name)
return redirect(url_for('index'))
if maxOrder[0] is None:
maxOrder = 0
else:
maxOrder = maxOrder[0]
if (shelf.is_public and current_user.role_edit_shelfs()) or not shelf.is_public:
ins = ub.BookShelf(shelf=shelf.id, book_id=book_id, order=maxOrder + 1)
ub.session.add(ins)
ub.session.commit()
flash(_(u"Book has been added to shelf: %(sname)s", sname=shelf.name), category="success")
return redirect(request.environ["HTTP_REFERER"])
else:
app.logger.info("User is not allowed to edit public shelfs")
return redirect(url_for('index'))
@app.route("/shelf/remove/<int:shelf_id>/<int:book_id>")
@login_required
def remove_from_shelf(shelf_id, book_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if not shelf.is_public and not shelf.user_id == int(current_user.id) \
or (shelf.is_public and current_user.role_edit_shelfs()):
app.logger.info("Sorry you are not allowed to remove a book from this shelf: %s" % shelf.name)
return redirect(url_for('index'))
book_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id,
ub.BookShelf.book_id == book_id).first()
ub.session.delete(book_shelf)
ub.session.commit()
flash(_(u"Book has been removed from shelf: %(sname)s", sname=shelf.name), category="success")
return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/create", methods=["GET", "POST"])
@login_required
def create_shelf():
shelf = ub.Shelf()
if request.method == "POST":
to_save = request.form.to_dict()
if "is_public" in to_save:
shelf.is_public = 1
shelf.name = to_save["title"]
shelf.user_id = int(current_user.id)
existing_shelf = ub.session.query(ub.Shelf).filter(
or_((ub.Shelf.name == to_save["title"]) & (ub.Shelf.is_public == 1),
(ub.Shelf.name == to_save["title"]) & (ub.Shelf.user_id == int(current_user.id)))).first()
if existing_shelf:
flash(_(u"A shelf with the name '%(title)s' already exists.", title=to_save["title"]), category="error")
else:
try:
ub.session.add(shelf)
ub.session.commit()
flash(_(u"Shelf %(title)s created", title=to_save["title"]), category="success")
except Exception:
flash(_(u"There was an error"), category="error")
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"create a shelf"))
else:
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"create a shelf"))
@app.route("/shelf/edit/<int:shelf_id>", methods=["GET", "POST"])
@login_required
def edit_shelf(shelf_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if request.method == "POST":
to_save = request.form.to_dict()
existing_shelf = ub.session.query(ub.Shelf).filter(
or_((ub.Shelf.name == to_save["title"]) & (ub.Shelf.is_public == 1),
(ub.Shelf.name == to_save["title"]) & (ub.Shelf.user_id == int(current_user.id)))).filter(
ub.Shelf.id != shelf_id).first()
if existing_shelf:
flash(_(u"A shelf with the name '%(title)s' already exists.", title=to_save["title"]), category="error")
else:
shelf.name = to_save["title"]
if "is_public" in to_save:
shelf.is_public = 1
else:
shelf.is_public = 0
try:
ub.session.commit()
flash(_(u"Shelf %(title)s changed", title=to_save["title"]), category="success")
except Exception:
flash(_(u"There was an error"), category="error")
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"Edit a shelf"))
else:
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"Edit a shelf"))
@app.route("/shelf/delete/<int:shelf_id>")
@login_required
def delete_shelf(shelf_id):
    """Delete a shelf together with its book links.

    Admins may delete any shelf; other users may delete their own private
    shelves, or public shelves when they hold the edit-shelf role.
    """
    cur_shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
    if cur_shelf is None:
        # Unknown shelf id: nothing to delete (previously raised AttributeError).
        return redirect(url_for('index'))
    deleted = False  # was previously unbound when no permission branch ran -> NameError
    if current_user.role_admin():
        deleted = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).delete()
    else:
        # Owners may delete their own private shelf; public shelves need the
        # edit-shelf role.  (The delete query below enforces the same rule;
        # the original guard had the ownership test inverted.)
        if (not cur_shelf.is_public and cur_shelf.user_id == int(current_user.id)) \
                or (cur_shelf.is_public and current_user.role_edit_shelfs()):
            deleted = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id),
                                                                       ub.Shelf.id == shelf_id),
                                                               ub.and_(ub.Shelf.is_public == 1,
                                                                       ub.Shelf.id == shelf_id))).delete()
    if deleted:
        # Remove the shelf's book links as well, then persist everything.
        ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).delete()
        ub.session.commit()
        # The stray category="success" kwarg (a flash() argument) was removed
        # from the gettext call; it was silently ignored by string formatting.
        app.logger.info(_(u"successfully deleted shelf %(name)s", name=cur_shelf.name))
    return redirect(url_for('index'))
@app.route("/shelf/<int:shelf_id>")
@login_required_if_no_ano
def show_shelf(shelf_id):
    """List the books on a shelf, honouring shelf visibility rules."""
    # Anonymous visitors may only see public shelves; logged-in users may
    # additionally see their own shelves.
    if current_user.is_anonymous():
        shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1, ub.Shelf.id == shelf_id).first()
    else:
        shelf = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id),
                                                                 ub.Shelf.id == shelf_id),
                                                         ub.and_(ub.Shelf.is_public == 1,
                                                                 ub.Shelf.id == shelf_id))).first()
    books = []
    if shelf:
        # Resolve each shelf entry to its library record, keeping the stored
        # manual order.
        for entry in ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).order_by(
                ub.BookShelf.order.asc()).all():
            books.append(db.session.query(db.Books).filter(db.Books.id == entry.book_id).first())
    return render_title_template('shelf.html', entries=books, title=_(u"Shelf: '%(name)s'", name=shelf.name),
                                 shelf=shelf)
@app.route("/shelf/order/<int:shelf_id>", methods=["GET", "POST"])
@login_required
def order_shelf(shelf_id):
    """Display and persist the manual ordering of books on a shelf."""
    if request.method == "POST":
        form_data = request.form.to_dict()
        # Store the submitted position for every book currently on the shelf;
        # form keys are the book ids.
        for entry in ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).order_by(
                ub.BookShelf.order.asc()).all():
            setattr(entry, 'order', form_data[str(entry.book_id)])
        ub.session.commit()
    # Same visibility rules as show_shelf: anonymous users only see public
    # shelves, logged-in users additionally see their own.
    if current_user.is_anonymous():
        shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1, ub.Shelf.id == shelf_id).first()
    else:
        shelf = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id),
                                                                 ub.Shelf.id == shelf_id),
                                                         ub.and_(ub.Shelf.is_public == 1,
                                                                 ub.Shelf.id == shelf_id))).first()
    books = []
    if shelf:
        for entry in ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id) \
                .order_by(ub.BookShelf.order.asc()).all():
            books.append(db.session.query(db.Books).filter(db.Books.id == entry.book_id).first())
    return render_title_template('shelf_order.html', entries=books,
                                 title=_(u"Change order of Shelf: '%(name)s'", name=shelf.name), shelf=shelf)
@app.route("/me", methods=["GET", "POST"])
@login_required
def profile():
    """Show and update the current user's own profile settings.

    GET renders the profile form including the user's download history;
    POST applies password/e-mail/locale changes and rebuilds the sidebar
    bitmask from the submitted checkboxes.
    """
    content = ub.session.query(ub.User).filter(ub.User.id == int(current_user.id)).first()
    downloads = list()
    languages = db.session.query(db.Languages).all()
    # Localized language names for the form; fall back to the ISO-639 table
    # when the locale lookup fails.
    for lang in languages:
        try:
            cur_l = LC.parse(lang.lang_code)
            lang.name = cur_l.get_language_name(get_locale())
        except Exception:
            lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
    translations = babel.list_translations() + [LC('en')]
    # Collect the download history, pruning entries whose book no longer
    # exists in the library.
    for book in content.downloads:
        downloadBook = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
        if downloadBook:
            downloads.append(downloadBook)  # reuse the fetched row instead of re-querying
        else:
            ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
            ub.session.commit()
    if request.method == "POST":
        to_save = request.form.to_dict()
        content.random_books = 0
        if current_user.role_passwd() or current_user.role_admin():
            if to_save["password"]:
                content.password = generate_password_hash(to_save["password"])
        if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
            content.kindle_mail = to_save["kindle_mail"]
        if to_save["email"] and to_save["email"] != content.email:
            content.email = to_save["email"]
        if "show_random" in to_save and to_save["show_random"] == "on":
            content.random_books = 1
        if "default_language" in to_save:
            content.default_language = to_save["default_language"]
        if to_save["locale"]:
            content.locale = to_save["locale"]
        # Rebuild the sidebar bitmask from the submitted checkboxes.
        content.sidebar_view = 0
        if "show_random" in to_save:
            content.sidebar_view += ub.SIDEBAR_RANDOM
        if "show_language" in to_save:
            content.sidebar_view += ub.SIDEBAR_LANGUAGE
        if "show_series" in to_save:
            content.sidebar_view += ub.SIDEBAR_SERIES
        if "show_category" in to_save:
            content.sidebar_view += ub.SIDEBAR_CATEGORY
        if "show_hot" in to_save:
            content.sidebar_view += ub.SIDEBAR_HOT
        if "show_best_rated" in to_save:
            content.sidebar_view += ub.SIDEBAR_BEST_RATED
        if "show_author" in to_save:
            content.sidebar_view += ub.SIDEBAR_AUTHOR
        if "show_read_and_unread" in to_save:
            content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
        if "show_detail_random" in to_save:
            content.sidebar_view += ub.DETAIL_RANDOM
        # (A duplicated "default_language" assignment that repeated the
        # handling above was removed.)
        try:
            ub.session.commit()
        except IntegrityError:
            ub.session.rollback()
            flash(_(u"Found an existing account for this email address."), category="error")
            return render_title_template("user_edit.html", content=content, downloads=downloads,
                                         title=_(u"%(name)s's profile", name=current_user.nickname))
        flash(_(u"Profile updated"), category="success")
    return render_title_template("user_edit.html", translations=translations, profile=1, languages=languages,
                                 content=content,
                                 downloads=downloads, title=_(u"%(name)s's profile", name=current_user.nickname))
@app.route("/admin/view")
@login_required
@admin_required
def admin():
    """Render the admin overview: all user accounts plus the global mail settings."""
    commit = '$Format:%cI$'  # placeholder substituted by git-archive export
    users = ub.session.query(ub.User).all()
    mail_settings = ub.session.query(ub.Settings).first()
    return render_title_template("admin.html", content=users, email=mail_settings, config=config, commit=commit,
                                 development=ub.DEVELOPMENT, title=_(u"Admin page"))
@app.route("/admin/config", methods=["GET", "POST"])
@login_required
@admin_required
def configuration():
    # Admin-only entry point; origin=0 marks the request as coming from the
    # regular admin configuration page (affects the rendered template state).
    return configuration_helper(0)
@app.route("/config", methods=["GET", "POST"])
@unconfigured
def basic_configuration():
    # First-run setup entry point (only reachable while the app is not yet
    # configured); origin=1 marks the request as coming from the setup wizard.
    return configuration_helper(1)
def configuration_helper(origin):
    """Render and process the configuration form.

    Shared backend for the admin configuration page (origin=0) and the
    initial setup wizard (origin=1).  Applies posted settings, rewrites the
    Google-Drive settings.yaml when the client credentials change, reloads
    the book database on a library-path change, and schedules a server
    restart when a restart-only option (port, title regex) was modified.
    """
    # global global_task
    reboot_required = False
    db_change = False
    success = False
    if request.method == "POST":
        to_save = request.form.to_dict()
        content = ub.session.query(ub.Settings).first()
        if "config_calibre_dir" in to_save:
            if content.config_calibre_dir != to_save["config_calibre_dir"]:
                content.config_calibre_dir = to_save["config_calibre_dir"]
                db_change = True
        # Google drive setup
        create_new_yaml = False
        if "config_google_drive_client_id" in to_save:
            if content.config_google_drive_client_id != to_save["config_google_drive_client_id"]:
                content.config_google_drive_client_id = to_save["config_google_drive_client_id"]
                create_new_yaml = True
        if "config_google_drive_client_secret" in to_save:
            if content.config_google_drive_client_secret != to_save["config_google_drive_client_secret"]:
                content.config_google_drive_client_secret = to_save["config_google_drive_client_secret"]
                create_new_yaml = True
        if "config_google_drive_calibre_url_base" in to_save:
            if content.config_google_drive_calibre_url_base != to_save["config_google_drive_calibre_url_base"]:
                content.config_google_drive_calibre_url_base = to_save["config_google_drive_calibre_url_base"]
                create_new_yaml = True
        # Toggle gdrive usage when the checkbox state differs from the stored flag.
        if ("config_use_google_drive" in to_save and not content.config_use_google_drive) or ("config_use_google_drive" not in to_save and content.config_use_google_drive):
            content.config_use_google_drive = "config_use_google_drive" in to_save
            db_change = True
            if not content.config_use_google_drive:
                create_new_yaml = False
        if create_new_yaml:
            # Regenerate the PyDrive settings file from the template with the
            # new credentials.
            with open('settings.yaml', 'w') as f:
                with open('gdrive_template.yaml', 'r') as t:
                    f.write(t.read() % {'client_id': content.config_google_drive_client_id, 'client_secret': content.config_google_drive_client_secret,
                            "redirect_uri": content.config_google_drive_calibre_url_base + 'gdrive/callback'})
        if "config_google_drive_folder" in to_save:
            if content.config_google_drive_folder != to_save["config_google_drive_folder"]:
                content.config_google_drive_folder = to_save["config_google_drive_folder"]
                db_change = True
        ##
        if "config_port" in to_save:
            if content.config_port != int(to_save["config_port"]):
                content.config_port = int(to_save["config_port"])
                reboot_required = True
        if "config_calibre_web_title" in to_save:
            content.config_calibre_web_title = to_save["config_calibre_web_title"]
        if "config_columns_to_ignore" in to_save:
            content.config_columns_to_ignore = to_save["config_columns_to_ignore"]
        if "config_title_regex" in to_save:
            if content.config_title_regex != to_save["config_title_regex"]:
                content.config_title_regex = to_save["config_title_regex"]
                reboot_required = True
        if "config_log_level" in to_save:
            content.config_log_level = int(to_save["config_log_level"])
        if "config_random_books" in to_save:
            content.config_random_books = int(to_save["config_random_books"])
        if "config_books_per_page" in to_save:
            content.config_books_per_page = int(to_save["config_books_per_page"])
        content.config_uploading = 0
        content.config_anonbrowse = 0
        content.config_public_reg = 0
        if "config_uploading" in to_save and to_save["config_uploading"] == "on":
            content.config_uploading = 1
        if "config_anonbrowse" in to_save and to_save["config_anonbrowse"] == "on":
            content.config_anonbrowse = 1
        if "config_public_reg" in to_save and to_save["config_public_reg"] == "on":
            content.config_public_reg = 1
        # Rebuild the default-role bitmask for newly registered users.
        content.config_default_role = 0
        if "admin_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_ADMIN
        if "download_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_DOWNLOAD
        if "upload_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_UPLOAD
        if "edit_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_EDIT
        if "delete_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_DELETE_BOOKS
        if "passwd_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_PASSWD
        # Was a duplicated "passwd_role" check; the edit-shelf role has its
        # own checkbox, consistent with new_user()/edit_user().
        if "edit_shelf_role" in to_save:
            content.config_default_role = content.config_default_role + ub.ROLE_EDIT_SHELFS
        try:
            if content.config_use_google_drive and is_gdrive_ready() and not os.path.exists(config.config_calibre_dir + "/metadata.db"):
                gdriveutils.downloadFile(Gdrive.Instance().drive, None, "metadata.db", config.config_calibre_dir + "/metadata.db")
            if db_change:
                if config.db_configured:
                    db.session.close()
                    db.engine.dispose()
            ub.session.commit()
            flash(_(u"Calibre-web configuration updated"), category="success")
            config.loadSettings()
            app.logger.setLevel(config.config_log_level)
            logging.getLogger("book_formats").setLevel(config.config_log_level)
        except Exception as e:  # was "except e:", which raises NameError instead of handling
            flash(e, category="error")
            return render_title_template("config_edit.html", content=config, origin=origin, gdrive=gdrive_support,
                                         title=_(u"Basic Configuration"))
        if db_change:
            reload(db)
            if not db.setup_db():
                flash(_(u'DB location is not valid, please enter correct path'), category="error")
                return render_title_template("config_edit.html", content=config, origin=origin, gdrive=gdrive_support,
                                             title=_(u"Basic Configuration"))
        if reboot_required:
            # db.engine.dispose() # ToDo verify correct
            ub.session.close()
            ub.engine.dispose()
            # stop tornado server
            server = IOLoop.instance()
            server.add_callback(server.stop)
            helper.global_task = 0
            app.logger.info('Reboot required, restarting')
        if origin:
            success = True
    return render_title_template("config_edit.html", origin=origin, success=success, content=config,
                                 show_authenticate_google_drive=not is_gdrive_ready(), gdrive=gdrive_support,
                                 title=_(u"Basic Configuration"))
@app.route("/admin/user/new", methods=["GET", "POST"])
@login_required
@admin_required
def new_user():
    """Admin form to create a user account.

    GET pre-fills the default role from the configuration; POST validates
    the mandatory fields and creates the account with the selected roles
    and sidebar elements.
    """
    content = ub.User()
    languages = db.session.query(db.Languages).all()
    # Localized language names for the form; fall back to the ISO-639 table.
    for lang in languages:
        try:
            cur_l = LC.parse(lang.lang_code)
            lang.name = cur_l.get_language_name(get_locale())
        except Exception:
            lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
    translations = [LC('en')] + babel.list_translations()
    if request.method == "POST":
        to_save = request.form.to_dict()
        if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
            flash(_(u"Please fill out all fields!"), category="error")
            return render_title_template("user_edit.html", new_user=1, content=content, translations=translations,
                                         title=_(u"Add new user"))
        content.password = generate_password_hash(to_save["password"])
        content.nickname = to_save["nickname"]
        content.email = to_save["email"]
        content.default_language = to_save["default_language"]
        if "locale" in to_save:
            content.locale = to_save["locale"]
        # Build the sidebar bitmask from the submitted checkboxes.
        content.sidebar_view = 0
        if "show_random" in to_save:
            content.sidebar_view += ub.SIDEBAR_RANDOM
        if "show_language" in to_save:
            content.sidebar_view += ub.SIDEBAR_LANGUAGE
        if "show_series" in to_save:
            content.sidebar_view += ub.SIDEBAR_SERIES
        if "show_category" in to_save:
            content.sidebar_view += ub.SIDEBAR_CATEGORY
        if "show_hot" in to_save:
            content.sidebar_view += ub.SIDEBAR_HOT
        if "show_read_and_unread" in to_save:
            content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
        if "show_best_rated" in to_save:
            content.sidebar_view += ub.SIDEBAR_BEST_RATED
        if "show_author" in to_save:
            content.sidebar_view += ub.SIDEBAR_AUTHOR
        if "show_detail_random" in to_save:
            content.sidebar_view += ub.DETAIL_RANDOM
        # Build the role bitmask; each checkbox grants its matching role bit
        # (the edit/delete constants were previously swapped, inconsistent
        # with edit_user()).
        content.role = 0
        if "admin_role" in to_save:
            content.role = content.role + ub.ROLE_ADMIN
        if "download_role" in to_save:
            content.role = content.role + ub.ROLE_DOWNLOAD
        if "upload_role" in to_save:
            content.role = content.role + ub.ROLE_UPLOAD
        if "edit_role" in to_save:
            content.role = content.role + ub.ROLE_EDIT
        if "delete_role" in to_save:
            content.role = content.role + ub.ROLE_DELETE_BOOKS
        if "passwd_role" in to_save:
            content.role = content.role + ub.ROLE_PASSWD
        if "edit_shelf_role" in to_save:
            content.role = content.role + ub.ROLE_EDIT_SHELFS
        try:
            ub.session.add(content)
            ub.session.commit()
            flash(_(u"User '%(user)s' created", user=content.nickname), category="success")
            return redirect(url_for('admin'))
        except IntegrityError:
            ub.session.rollback()
            flash(_(u"Found an existing account for this email address or nickname."), category="error")
    else:
        content.role = config.config_default_role
    return render_title_template("user_edit.html", new_user=1, content=content, translations=translations,
                                 languages=languages, title=_(u"Add new user"))
@app.route("/admin/mailsettings", methods=["GET", "POST"])
@login_required
@admin_required
def edit_mailsettings():
    """Admin form for the SMTP settings, with an optional test e-mail.

    When the "test" checkbox is set, a test mail is sent to the admin's
    own kindle address after saving.
    """
    content = ub.session.query(ub.Settings).first()
    if request.method == "POST":
        to_save = request.form.to_dict()
        content.mail_server = to_save["mail_server"]
        content.mail_port = int(to_save["mail_port"])
        content.mail_login = to_save["mail_login"]
        content.mail_password = to_save["mail_password"]
        content.mail_from = to_save["mail_from"]
        content.mail_use_ssl = int(to_save["mail_use_ssl"])
        try:
            ub.session.commit()
            flash(_(u"Mail settings updated"), category="success")
        except Exception as e:  # was "except e:", which raises NameError instead of handling
            flash(e, category="error")
        if "test" in to_save and to_save["test"]:
            if current_user.kindle_mail:
                result = helper.send_test_mail(current_user.kindle_mail)
                if result is None:
                    flash(_(u"Test E-Mail successfully send to %(kindlemail)s", kindlemail=current_user.kindle_mail),
                          category="success")
                else:
                    flash(_(u"There was an error sending the Test E-Mail: %(res)s", res=result), category="error")
            else:
                flash(_(u"Please configure your kindle email address first..."), category="error")
        else:
            flash(_(u"E-Mail settings updated"), category="success")
    return render_title_template("email_edit.html", content=content, title=_(u"Edit mail settings"))
@app.route("/admin/user/<int:user_id>", methods=["GET", "POST"])
@login_required
@admin_required
def edit_user(user_id):
    """Admin form to edit or delete a user account.

    POSTing with "delete" removes the account; otherwise each role and
    sidebar-element checkbox toggles the corresponding bit on the user's
    role/sidebar bitmasks so that the stored state matches the form.
    """
    content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
    downloads = list()
    languages = db.session.query(db.Languages).all()
    # Localized language names for the form; fall back to the ISO-639 table.
    for lang in languages:
        try:
            cur_l = LC.parse(lang.lang_code)
            lang.name = cur_l.get_language_name(get_locale())
        except Exception:
            lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
    translations = babel.list_translations() + [LC('en')]
    # Download history, pruning entries whose book no longer exists.
    for book in content.downloads:
        downloadBook = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
        if downloadBook:
            downloads.append(downloadBook)  # reuse the fetched row instead of re-querying
        else:
            ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
            ub.session.commit()
    if request.method == "POST":
        to_save = request.form.to_dict()
        if "delete" in to_save:
            ub.session.delete(content)
            flash(_(u"User '%(nick)s' deleted", nick=content.nickname), category="success")
            ub.session.commit()  # was missing: the deletion was never persisted
            return redirect(url_for('admin'))
        else:
            if "password" in to_save and to_save["password"]:
                content.password = generate_password_hash(to_save["password"])
            # Toggle each role bit to match its checkbox state.
            if "admin_role" in to_save and not content.role_admin():
                content.role = content.role + ub.ROLE_ADMIN
            elif "admin_role" not in to_save and content.role_admin():
                content.role = content.role - ub.ROLE_ADMIN
            if "download_role" in to_save and not content.role_download():
                content.role = content.role + ub.ROLE_DOWNLOAD
            elif "download_role" not in to_save and content.role_download():
                content.role = content.role - ub.ROLE_DOWNLOAD
            if "upload_role" in to_save and not content.role_upload():
                content.role = content.role + ub.ROLE_UPLOAD
            elif "upload_role" not in to_save and content.role_upload():
                content.role = content.role - ub.ROLE_UPLOAD
            if "edit_role" in to_save and not content.role_edit():
                content.role = content.role + ub.ROLE_EDIT
            elif "edit_role" not in to_save and content.role_edit():
                content.role = content.role - ub.ROLE_EDIT
            if "delete_role" in to_save and not content.role_delete_books():
                content.role = content.role + ub.ROLE_DELETE_BOOKS
            elif "delete_role" not in to_save and content.role_delete_books():
                content.role = content.role - ub.ROLE_DELETE_BOOKS
            if "passwd_role" in to_save and not content.role_passwd():
                content.role = content.role + ub.ROLE_PASSWD
            elif "passwd_role" not in to_save and content.role_passwd():
                content.role = content.role - ub.ROLE_PASSWD
            if "edit_shelf_role" in to_save and not content.role_edit_shelfs():
                content.role = content.role + ub.ROLE_EDIT_SHELFS
            elif "edit_shelf_role" not in to_save and content.role_edit_shelfs():
                content.role = content.role - ub.ROLE_EDIT_SHELFS
            # Toggle each sidebar-element bit to match its checkbox state.
            if "show_random" in to_save and not content.show_random_books():
                content.sidebar_view += ub.SIDEBAR_RANDOM
            elif "show_random" not in to_save and content.show_random_books():
                content.sidebar_view -= ub.SIDEBAR_RANDOM
            if "show_language" in to_save and not content.show_language():
                content.sidebar_view += ub.SIDEBAR_LANGUAGE
            elif "show_language" not in to_save and content.show_language():
                content.sidebar_view -= ub.SIDEBAR_LANGUAGE
            if "show_series" in to_save and not content.show_series():
                content.sidebar_view += ub.SIDEBAR_SERIES
            elif "show_series" not in to_save and content.show_series():
                content.sidebar_view -= ub.SIDEBAR_SERIES
            if "show_category" in to_save and not content.show_category():
                content.sidebar_view += ub.SIDEBAR_CATEGORY
            elif "show_category" not in to_save and content.show_category():
                content.sidebar_view -= ub.SIDEBAR_CATEGORY
            if "show_hot" in to_save and not content.show_hot_books():
                content.sidebar_view += ub.SIDEBAR_HOT
            elif "show_hot" not in to_save and content.show_hot_books():
                content.sidebar_view -= ub.SIDEBAR_HOT
            if "show_best_rated" in to_save and not content.show_best_rated_books():
                content.sidebar_view += ub.SIDEBAR_BEST_RATED
            elif "show_best_rated" not in to_save and content.show_best_rated_books():
                content.sidebar_view -= ub.SIDEBAR_BEST_RATED
            if "show_read_and_unread" in to_save and not content.show_read_and_unread():
                content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
            elif "show_read_and_unread" not in to_save and content.show_read_and_unread():
                content.sidebar_view -= ub.SIDEBAR_READ_AND_UNREAD
            if "show_author" in to_save and not content.show_author():
                content.sidebar_view += ub.SIDEBAR_AUTHOR
            elif "show_author" not in to_save and content.show_author():
                content.sidebar_view -= ub.SIDEBAR_AUTHOR
            if "show_detail_random" in to_save and not content.show_detail_random():
                content.sidebar_view += ub.DETAIL_RANDOM
            elif "show_detail_random" not in to_save and content.show_detail_random():
                content.sidebar_view -= ub.DETAIL_RANDOM
            if "default_language" in to_save:
                content.default_language = to_save["default_language"]
            if "locale" in to_save and to_save["locale"]:
                content.locale = to_save["locale"]
            if to_save["email"] and to_save["email"] != content.email:
                content.email = to_save["email"]
            if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
                content.kindle_mail = to_save["kindle_mail"]
        try:
            ub.session.commit()
            flash(_(u"User '%(nick)s' updated", nick=content.nickname), category="success")
        except IntegrityError:
            ub.session.rollback()
            flash(_(u"An unknown error occured."), category="error")
    return render_title_template("user_edit.html", translations=translations, languages=languages, new_user=0,
                                 content=content, downloads=downloads,
                                 title=_(u"Edit User %(nick)s", nick=content.nickname))
@app.route("/admin/book/<int:book_id>", methods=['GET', 'POST'])
@login_required_if_no_ano
@edit_required
def edit_book(book_id):
    """Show (GET) or apply (POST) metadata edits for a single book.

    Handles title, authors, cover URL, series, description, tags,
    languages, rating and all configured custom columns.  Books whose
    title or first author changed get their on-disk folder renamed
    afterwards (locally or on Google Drive).
    """
    # create the function for sorting...
    db.session.connection().connection.connection.create_function("title_sort", 1, db.title_sort)
    cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
    # Respect the user's language filter: books outside it are invisible here.
    if current_user.filter_language() != "all":
        lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = True
    book = db.session.query(db.Books).filter(db.Books.id == book_id).filter(lang_filter).first()
    author_names = []
    if book:
        # Localized display names for the book's languages; fall back to the
        # ISO-639 table when the locale lookup fails.
        for index in range(0, len(book.languages)):
            try:
                book.languages[index].language_name = LC.parse(book.languages[index].lang_code).get_language_name(
                    get_locale())
            except Exception:
                book.languages[index].language_name = _(isoLanguages.get(part3=book.languages[index].lang_code).name)
        for author in book.authors:
            author_names.append(author.name)
        if request.method == 'POST':
            # Ids collected here need their folder renamed after the commit.
            edited_books_id = set()
            to_save = request.form.to_dict()
            if book.title != to_save["book_title"]:
                book.title = to_save["book_title"]
                edited_books_id.add(book.id)
            # Authors arrive as a single '&'-separated string.
            input_authors = to_save["author_name"].split('&')
            input_authors = map(lambda it: it.strip(), input_authors)
            # we have all author names now
            if input_authors == ['']:
                input_authors = [_(u'unknown')] # prevent empty Author
            if book.authors:
                author0_before_edit = book.authors[0].name
            else:
                author0_before_edit = db.Authors(_(u'unknown'),'',0)
            modify_database_object(input_authors, book.authors, db.Authors, db.session, 'author')
            if book.authors:
                # A changed first author also changes the folder name.
                if author0_before_edit != book.authors[0].name:
                    edited_books_id.add(book.id)
                book.author_sort = helper.get_sorted_author(input_authors[0])
            # Fetch a new cover when a .jpg URL was supplied.
            if to_save["cover_url"] and os.path.splitext(to_save["cover_url"])[1].lower() == ".jpg":
                img = requests.get(to_save["cover_url"])
                if config.config_use_google_drive:
                    tmpDir = tempfile.gettempdir()
                    f = open(os.path.join(tmpDir, "uploaded_cover.jpg"), "wb")
                    f.write(img.content)
                    f.close()
                    # NOTE(review): f.name is already an absolute path here, so
                    # joining it with tmpDir again looks suspicious -- verify.
                    gdriveutils.uploadFileToEbooksFolder(Gdrive.Instance().drive, os.path.join(book.path, 'cover.jpg'), os.path.join(tmpDir, f.name))
                else:
                    f = open(os.path.join(config.config_calibre_dir, book.path, "cover.jpg"), "wb")
                    f.write(img.content)
                    f.close()
                book.has_cover = 1
            if book.series_index != to_save["series_index"]:
                book.series_index = to_save["series_index"]
            # Replace (or create) the description comment record.
            if len(book.comments):
                book.comments[0].text = to_save["description"]
            else:
                book.comments.append(db.Comments(text=to_save["description"], book=book.id))
            input_tags = to_save["tags"].split(',')
            input_tags = map(lambda it: it.strip(), input_tags)
            modify_database_object(input_tags, book.tags, db.Tags, db.session, 'tags')
            input_series = [to_save["series"].strip()]
            input_series = [x for x in input_series if x != '']
            modify_database_object(input_series, book.series, db.Series, db.session, 'series')
            input_languages = to_save["languages"].split(',')
            input_languages = map(lambda it: it.strip().lower(), input_languages)
            # retranslate displayed text to language codes
            languages = db.session.query(db.Languages).all()
            input_l = []
            for lang in languages:
                try:
                    lang.name = LC.parse(lang.lang_code).get_language_name(get_locale()).lower()
                except Exception:
                    lang.name = _(isoLanguages.get(part3=lang.lang_code).name).lower()
                for inp_lang in input_languages:
                    if inp_lang == lang.name:
                        input_l.append(lang.lang_code)
            modify_database_object(input_l, book.languages, db.Languages, db.session, 'languages')
            # Ratings are stored doubled so that half stars become integers.
            if to_save["rating"].strip():
                old_rating = False
                if len(book.ratings) > 0:
                    old_rating = book.ratings[0].rating
                ratingx2 = int(float(to_save["rating"]) * 2)
                if ratingx2 != old_rating:
                    is_rating = db.session.query(db.Ratings).filter(db.Ratings.rating == ratingx2).first()
                    if is_rating:
                        book.ratings.append(is_rating)
                    else:
                        new_rating = db.Ratings(rating=ratingx2)
                        book.ratings.append(new_rating)
                    if old_rating:
                        book.ratings.remove(book.ratings[0])
            else:
                if len(book.ratings) > 0:
                    book.ratings.remove(book.ratings[0])
            # Custom columns: single-value columns are handled field by field,
            # multi-value columns go through modify_database_object below.
            for c in cc:
                cc_string = "custom_column_" + str(c.id)
                if not c.is_multiple:
                    if len(getattr(book, cc_string)) > 0:
                        cc_db_value = getattr(book, cc_string)[0].value
                    else:
                        cc_db_value = None
                    if to_save[cc_string].strip():
                        if c.datatype == 'bool':
                            # Booleans arrive as the strings 'None'/'True'/'False'.
                            if to_save[cc_string] == 'None':
                                to_save[cc_string] = None
                            else:
                                to_save[cc_string] = 1 if to_save[cc_string] == 'True' else 0
                            if to_save[cc_string] != cc_db_value:
                                if cc_db_value is not None:
                                    if to_save[cc_string] is not None:
                                        setattr(getattr(book, cc_string)[0], 'value', to_save[cc_string])
                                    else:
                                        del_cc = getattr(book, cc_string)[0]
                                        getattr(book, cc_string).remove(del_cc)
                                        db.session.delete(del_cc)
                                else:
                                    cc_class = db.cc_classes[c.id]
                                    new_cc = cc_class(value=to_save[cc_string], book=book_id)
                                    db.session.add(new_cc)
                        else:
                            if c.datatype == 'rating':
                                to_save[cc_string] = str(int(float(to_save[cc_string]) * 2))
                            if to_save[cc_string].strip() != cc_db_value:
                                if cc_db_value is not None:
                                    # remove old cc_val
                                    del_cc = getattr(book, cc_string)[0]
                                    getattr(book, cc_string).remove(del_cc)
                                    if len(del_cc.books) == 0:
                                        db.session.delete(del_cc)
                                cc_class = db.cc_classes[c.id]
                                new_cc = db.session.query(cc_class).filter(
                                    cc_class.value == to_save[cc_string].strip()).first()
                                # if no cc val is found add it
                                if new_cc is None:
                                    new_cc = cc_class(value=to_save[cc_string].strip())
                                    db.session.add(new_cc)
                                    new_cc = db.session.query(cc_class).filter(
                                        cc_class.value == to_save[cc_string].strip()).first()
                                # add cc value to book
                                getattr(book, cc_string).append(new_cc)
                    else:
                        if cc_db_value is not None:
                            # remove old cc_val
                            del_cc = getattr(book, cc_string)[0]
                            getattr(book, cc_string).remove(del_cc)
                            if len(del_cc.books) == 0:
                                db.session.delete(del_cc)
                else:
                    input_tags = to_save[cc_string].split(',')
                    input_tags = map(lambda it: it.strip(), input_tags)
                    modify_database_object(input_tags, getattr(book, cc_string),db.cc_classes[c.id], db.session, 'custom')
            db.session.commit()
            author_names = []
            for author in book.authors:
                author_names.append(author.name)
            # Rename folders for books whose title/first author changed.
            for b in edited_books_id:
                if config.config_use_google_drive:
                    helper.update_dir_structure_gdrive(b)
                else:
                    helper.update_dir_stucture(b, config.config_calibre_dir)
            if config.config_use_google_drive:
                updateGdriveCalibreFromLocal()
            if "detail_view" in to_save:
                return redirect(url_for('show_book', book_id=book.id))
            else:
                return render_title_template('book_edit.html', book=book, authors=author_names, cc=cc,
                                             title=_(u"edit metadata"))
        else:
            return render_title_template('book_edit.html', book=book, authors=author_names, cc=cc,
                                         title=_(u"edit metadata"))
    else:
        flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
        return redirect(url_for("index"))
@app.route("/upload", methods=["GET", "POST"])
@login_required_if_no_ano
@upload_required
def upload():
    """Accept an uploaded ebook file and register it in the Calibre library.

    Extracts metadata via the uploader module, copies the file into
    <calibre_dir>/<author>/<title>/, creates cover/author/language/book
    database rows, then shows the edit page (or the detail page for users
    without edit rights) for the new book.
    """
    if not config.config_uploading:
        abort(404)
    # create the function for sorting...
    db.session.connection().connection.connection.create_function("title_sort", 1, db.title_sort)
    db.session.connection().connection.connection.create_function('uuid4', 0, lambda: str(uuid4()))
    if request.method == 'POST' and 'btn-upload' in request.files:
        requested_file = request.files['btn-upload']
        # Only allow whitelisted file extensions.
        if '.' in requested_file.filename:
            file_ext = requested_file.filename.rsplit('.', 1)[-1].lower()
            if file_ext not in ALLOWED_EXTENSIONS:
                flash(
                    _('File extension "%s" is not allowed to be uploaded to this server' %
                      file_ext),
                    category="error"
                )
                return redirect(url_for('index'))
        else:
            flash(_('File to be uploaded must have an extension'), category="error")
            return redirect(url_for('index'))
        meta = uploader.upload(requested_file)
        title = meta.title
        author = meta.author
        # Build filesystem-safe author/title directory names.
        title_dir = helper.get_valid_filename(title, False)
        author_dir = helper.get_valid_filename(author, False)
        data_name = title_dir
        filepath = config.config_calibre_dir + os.sep + author_dir + os.sep + title_dir
        saved_filename = filepath + os.sep + data_name + meta.extension
        if not os.path.exists(filepath):
            try:
                os.makedirs(filepath)
            except OSError:
                flash(_(u"Failed to create path %s (Permission denied)." % filepath), category="error")
                return redirect(url_for('index'))
        try:
            copyfile(meta.file_path, saved_filename)
        except OSError:
            flash(_(u"Failed to store file %s (Permission denied)." % saved_filename), category="error")
            return redirect(url_for('index'))
        try:
            # Remove the temporary upload once it is safely copied.
            os.unlink(meta.file_path)
        except OSError:
            flash(_(u"Failed to delete file %s (Permission denied)." % meta.file_path), category="warning")
        file_size = os.path.getsize(saved_filename)
        # Use the extracted cover, or fall back to the generic one.
        if meta.cover is None:
            has_cover = 0
            basedir = os.path.dirname(__file__)
            copyfile(os.path.join(basedir, "static/generic_cover.jpg"), os.path.join(filepath, "cover.jpg"))
        else:
            has_cover = 1
            move(meta.cover, os.path.join(filepath, "cover.jpg"))
        # Reuse an existing author row when one matches by name.
        is_author = db.session.query(db.Authors).filter(db.Authors.name == author).first()
        if is_author:
            db_author = is_author
        else:
            db_author = db.Authors(author, helper.get_sorted_author(author), "")
            db.session.add(db_author)
        # add language actually one value in list
        input_language = meta.languages
        db_language = None
        if input_language != "":
            input_language = isoLanguages.get(name=input_language).part3
            hasLanguage = db.session.query(db.Languages).filter(db.Languages.lang_code == input_language).first()
            if hasLanguage:
                db_language = hasLanguage
            else:
                db_language = db.Languages(input_language)
                db.session.add(db_language)
        # combine path and normalize path from windows systems
        path = os.path.join(author_dir, title_dir).replace('\\', '/')
        db_book = db.Books(title, "", db_author.sort, datetime.datetime.now(), datetime.datetime(101, 1, 1), 1,
                           datetime.datetime.now(), path, has_cover, db_author, [], db_language)
        db_book.authors.append(db_author)
        if db_language is not None:
            db_book.languages.append(db_language)
        db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, data_name)
        db_book.data.append(db_data)
        db.session.add(db_book)
        db.session.flush()  # flush so that db_book.id becomes available below
        # add comment
        upload_comment = Markup(meta.description).unescape()
        if upload_comment != "":
            db.session.add(db.Comments(upload_comment, db_book.id))
        db.session.commit()
        if db_language is not None:  # display Full name instead of iso639.part3
            db_book.languages[0].language_name = _(meta.languages)
        author_names = []
        for author in db_book.authors:
            author_names.append(author.name)
        if config.config_use_google_drive:
            updateGdriveCalibreFromLocal()
        cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
        # Users with edit rights land on the metadata edit form directly.
        if current_user.role_edit() or current_user.role_admin():
            return render_title_template('book_edit.html', book=db_book, authors=author_names, cc=cc,
                                         title=_(u"edit metadata"))
        book_in_shelfs = []
        return render_title_template('detail.html', entry=db_book, cc=cc, title=db_book.title,
                                     books_shelfs=book_in_shelfs, )
    else:
        return redirect(url_for("index"))
def start_gevent():
    """Start the application under a gevent WSGI server (blocks forever).

    The server instance is stored in the module-global ``gevent_server`` so
    other code (e.g. a shutdown handler) can stop it later.
    """
    # gevent renamed ``gevent.wsgi`` to ``gevent.pywsgi`` and removed the old
    # alias in gevent 1.3; try the modern name first, fall back for old envs.
    try:
        from gevent.pywsgi import WSGIServer
    except ImportError:
        from gevent.wsgi import WSGIServer
    global gevent_server
    # Bind on all interfaces at the configured port; serve_forever() blocks.
    gevent_server = WSGIServer(('', ub.config.config_port), app)
    gevent_server.serve_forever()
|
Gain Potential Leads with Our Websense Users Mailing List!!
Businesses and government organizations all over the world have placed their faith in Websense for protection against cybercrime, malware, network security threats, data theft and more. Our Websense Users Email List is a well-segmented and focused list, which helps marketers get in touch with the right person without difficulty. The Websense Users Mailing List has been created to make marketing campaigns much more appealing to the decision makers of this industry.
The Websense Users Email List will assist you to recognize the appropriate audience for your services and products that you are interested in proffering. We link you to thousands of Websense network experts and professionals with the optimum contact details available. This is a wide set of data that has essential information of those involved with the Websense. Owing to our Websense Users List you can simply reach out to the top officials and decision makers of this industry.
The Websense Users Email List provided by Optin Contacts is an impending and efficient compilation of database. The objective is to make the database a precise one for multi-channel campaign purposes. The much focused Websense Users Mailing List is beneficial for gaining new customers and retaining the old ones.
We agree that the more repertoire of information you contain, the better the business grows. We provide you every bit of contact details of websense professionals and organizations from all over the world.
Whether it’s a large or small sized organization, we will always aid you in gaining high profits. We guarantee that our Websense Users List is routinely updated and tailored as per professional demands. Our profoundly built databases will free you of undelivered emails and email bounces.
Our email and mailing List is a lucrative investment that will aid you in making the healthier bond with the target audiences.
Purchase our Websense Users Email List now and provide your business the needful resources to flourish. To know more contact us today!
Hurry Contact Optin Contacts and Optimize Your Business Leads More than Before!
|
# -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
    """PostgreSQL command line interface.

    Opens a psycopg2 connection with the given credentials and stores it on
    the click context (``ctx.obj``); subcommands decorated with
    ``@click.pass_obj`` receive that connection as their first argument.
    """
    click.echo('Connecting %s@%s' % (user, db))
    # DEBUG level only when --verbose is given; otherwise INFO.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
    # NOTE(review): the connection is not closed here; each subcommand below
    # closes it itself when done.
    ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
    """Build PostgreSQL database."""
    # All DDL/DML needed to build the RDKit molecule and fingerprint tables,
    # executed in order on one cursor and committed as a single transaction.
    statements = (
        'create extension if not exists rdkit;',
        'create schema rdk;',
        'drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;',
        'select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;',
        'create index molidx on rdk.mols using gist(m);',
        'alter table rdk.mols add primary key (molregno);',
        'select molregno, m into rdk.fps from rdk.mols;',
        'alter table rdk.fps add column m2l512 bfp;',
        'alter table rdk.fps add column m2l2048 bfp;',
        'alter table rdk.fps add column m2 sfp;',
        'alter table rdk.fps add column m3 sfp;',
        'update rdk.fps set m2 = morgan_fp(m);',
        'update rdk.fps set m3 = morgan_fp(m, 3);',
        'set rdkit.morgan_fp_size=2048;',
        'update rdk.fps set m2l2048 = morganbv_fp(m);',
        'set rdkit.morgan_fp_size=512;',
        'update rdk.fps set m2l512 = morganbv_fp(m);',
        'alter table rdk.fps drop column m;',
        'create index fps_m2_idx on rdk.fps using gist(m2);',
        'create index fps_m3_idx on rdk.fps using gist(m3);',
        'create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);',
        'create index fps_m2l512_idx on rdk.fps using gist(m2l512);',
        'alter table rdk.fps add primary key (molregno);',
    )
    cursor = conn.cursor()
    for statement in statements:
        cursor.execute(statement)
    conn.commit()
    cursor.close()
    conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def profile(conn, sample, fp, threshold):
    """Benchmark similarity searches for the first 100 sample ids and log
    the median/mean per-query time."""
    cur = conn.cursor()
    mol_ids = sample.read().strip().split('\n')
    times = []
    # The threshold applies to every subsequent %% similarity operator.
    cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
    for i, mol_id in enumerate(mol_ids[:100]):
        log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
        # ARGH! The CHEMBL ID vs. molregno thing is a nightmare
        cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
        molregno = cur.fetchone()[0]
        #cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
        #smiles = cur.fetchone()[0]
        # AsIs interpolates the column name unquoted (it is constrained by
        # click.Choice above, so this is not an injection risk).
        cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
        qfp = cur.fetchone()[0]
        log.debug(mol_id)
        # Time only the similarity search itself; %% is the escaped RDKit
        # tanimoto similarity operator.
        start = time.time()
        cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
        #cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
        results = cur.fetchall()
        end = time.time()
        times.append(end - start)
    # Save results
    result = {
        'median_time': np.median(times),
        'mean_time': np.mean(times),
        'fp': fp,
        'threshold': threshold
    }
    log.info(result)
    cur.close()
    conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
    """Perform a similarity search on every molecule in sample and print results."""
    click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
    cursor = conn.cursor()
    query_ids = sample.read().strip().split('\n')
    # The threshold applies to every subsequent %% similarity operator.
    cursor.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
    for index, query_id in enumerate(query_ids[:100], start=1):
        click.echo('Query: %s (%s of %s)' % (query_id, index, len(query_ids)))
        # Translate the public CHEMBL identifier into the internal molregno.
        cursor.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (query_id,))
        molregno = cursor.fetchone()[0]
        # Fetch the stored query fingerprint of the requested type.
        cursor.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
        qfp = cursor.fetchone()[0]
        # Similarity search: %% is the escaped RDKit tanimoto operator.
        cursor.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
        hits = [row[0] for row in cursor.fetchall()]
        # Map each hit back to its public CHEMBL identifier for display.
        matched_ids = []
        for hit in hits:
            cursor.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (hit,))
            matched_ids.append(cursor.fetchone()[0])
        click.echo(matched_ids)
    cursor.close()
    conn.close()
|
Citation Example: Editorial Board Index of volume 24 of „Ukrainian Mathematical Journal” // Ukr. Mat. Zh. - 1972. - 24, № 6. - pp. 856-859.
|
import sqlite3
import time
class Client(object):
    def __init__(self, conn_string):
        """
        A SQLite-based ``Client``.

        Args:
            conn_string (str): The DSN. The host/port/db are parsed out of it.
                Should be of the format ``sqlite:///path/to/db/file.db``
        """
        # This is actually the filepath to the DB file.
        self.conn_string = conn_string
        # Kill the 'sqlite://' portion.
        path = self.conn_string.split("://", 1)[1]
        self.conn = sqlite3.connect(path)

    def _run_query(self, query, args):
        """
        Executes a query (optionally with bound args) and commits.

        Args:
            query (str): The SQL statement to run.
            args (list|None): Bound parameters; a falsy value means none.

        Returns:
            sqlite3.Cursor: The cursor, so callers can fetch results.
        """
        cur = self.conn.cursor()

        if not args:
            cur.execute(query)
        else:
            cur.execute(query, args)

        self.conn.commit()
        return cur

    def setup_tables(self, queue_name="all"):
        """
        Allows for manual creation of the needed tables.

        Idempotent: ``IF NOT EXISTS`` makes re-running this on an existing
        queue a no-op instead of an OperationalError.

        Args:
            queue_name (str): Optional. The name of the queue. Default is
                `all`.
        """
        query = (
            "CREATE TABLE IF NOT EXISTS `queue_{}` "
            "(task_id text, data text, delay_until integer)"
        ).format(queue_name)
        self._run_query(query, None)

    def len(self, queue_name):
        """
        Returns the length of the queue.

        Args:
            queue_name (str): The name of the queue. Usually handled by the
                ``Gator`` instance.

        Returns:
            int: The length of the queue
        """
        query = "SELECT COUNT(task_id) FROM `queue_{}`".format(queue_name)
        cur = self._run_query(query, [])
        res = cur.fetchone()
        return res[0]

    def drop_all(self, queue_name):
        """
        Drops all the tasks in the queue.

        Args:
            queue_name (str): The name of the queue. Usually handled by the
                ``Gator`` instance.
        """
        query = "DELETE FROM `queue_{}`".format(queue_name)
        self._run_query(query, [])

    def push(self, queue_name, task_id, data, delay_until=None):
        """
        Pushes a task onto the queue.

        Args:
            queue_name (str): The name of the queue. Usually handled by the
                ``Gator`` instance.
            task_id (str): The identifier of the task.
            data (str): The relevant data for the task.
            delay_until (float): Optional. The Unix timestamp to delay
                processing of the task until. Default is `None`.

        Returns:
            str: The task ID.
        """
        if delay_until is None:
            # No delay requested: make the task available immediately.
            delay_until = time.time()

        query = (
            "INSERT INTO `queue_{}` "
            "(task_id, data, delay_until) "
            "VALUES (?, ?, ?)"
        ).format(queue_name)
        self._run_query(query, [task_id, data, int(delay_until)])
        return task_id

    def pop(self, queue_name):
        """
        Pops a task off the queue.

        Args:
            queue_name (str): The name of the queue. Usually handled by the
                ``Gator`` instance.

        Returns:
            str|None: The data for the task, or ``None`` when no task is
            ready (queue empty or all tasks still delayed).
        """
        now = int(time.time())
        query = (
            "SELECT task_id, data, delay_until "
            "FROM `queue_{}` "
            "WHERE delay_until <= ? "
            "LIMIT 1"
        ).format(queue_name)
        cur = self._run_query(query, [now])
        res = cur.fetchone()

        if res:
            query = "DELETE FROM `queue_{}` WHERE task_id = ?".format(
                queue_name
            )
            self._run_query(query, [res[0]])
            return res[1]

    def get(self, queue_name, task_id):
        """
        Pops a specific task off the queue by identifier.

        Args:
            queue_name (str): The name of the queue. Usually handled by the
                ``Gator`` instance.
            task_id (str): The identifier of the task.

        Returns:
            str|None: The data for the task, or ``None`` if no such task
            exists.
        """
        query = (
            "SELECT task_id, data "
            "FROM `queue_{}` "
            "WHERE task_id = ?"
        ).format(queue_name)
        cur = self._run_query(query, [task_id])
        res = cur.fetchone()

        # BUG FIX: the old code deleted unconditionally and then indexed
        # ``res[1]``, raising TypeError whenever the task was missing.
        if res is None:
            return None

        query = "DELETE FROM `queue_{}` WHERE task_id = ?".format(queue_name)
        self._run_query(query, [task_id])
        return res[1]
|
Description: Simply start to lift your hood and let the Hood Lift system take over. Your hood will lift and hold firmly at the raised position -no wind or bumping will knock your hood down on your head. These gas struts are rated at 90 pounds to hold up OEM and specialty hoods.
*Fits 2015-2018 Mustangs - does not fit GT350.
*Ford Performance Hood Lift Kit w/ Laser Engraved Ford Performance Logo.
*Designed for all OEM base Mustang, Mustang GT and many aftermarket hoods.
*These hood lifts utilize two gas struts that lift and hold the hood without the need for a prop-rod.
*Made from lightweight steel and aluminum, they feature a durable black powder-coated finish to give your engine bay a much cleaner appearance.
*If your aftermarket hood is lighter than the factory hood, use caution when opening the hood due to the power of the strut.
* Click <HERE> for the installation video.
|
# -*- coding: utf-8 -*-
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required
from websterton.user.models import User
from random import randint
from forismatic import Forismatic
import os
import praw
import json
# Absolute path of this package directory; used to locate theme image
# folders under the sibling ``static`` directory.
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
blueprint = Blueprint("user_manager", __name__, url_prefix='/user_manager',
                      static_folder="../static")
@blueprint.route("/get_new_background")
@login_required
def get_new_background():
    """Return the static URL of a randomly chosen background image from the
    logged-in user's current theme directory."""
    user = load_user(session['user_id'])
    theme = user.current_theme
    # Theme images live under static/<theme>/ relative to this package.
    path = os.path.join(SITE_ROOT, "../static", theme)
    print path
    # Skips the first directory entry -- presumably a hidden/system file
    # such as .DS_Store; NOTE(review): fragile, confirm the intent.
    backgrounds = os.listdir(path)[1:]
    new_background_num = randint(0,len(backgrounds)-1)
    return url_for('static', filename='%s/%s' % (theme, backgrounds[new_background_num]))
@blueprint.route("/save_user_settings", methods=["GET", "POST"])
@login_required
def save_user_settings():
    """Persist theme, location and news-feed settings from the query args."""
    user_info = request.args.to_dict()
    user = load_user(session['user_id'])
    news_feed = {}
    # 'theme' and 'location' go to dedicated columns; whatever remains in
    # user_info is treated as a news-feed entry.
    user.current_theme = user_info.pop('theme')
    user.location = user_info.pop('location')
    print user_info
    for i, k in user_info.iteritems():
        news_feed.update({i:k})
    user.news_feed = news_feed
    user.save()
    return "theme changed"
@blueprint.route("/save_new_reddit", methods=["GET", "POST"])
@login_required
def save_new_reddit():
    """Add a subreddit (query arg name) with its upvote limit (arg value)
    to the user's monitored set; 404s if it is already monitored."""
    info = request.args.to_dict()
    user = load_user(session['user_id'])
    new_key = ""
    # A single query arg is expected; the loop just captures its key/value.
    for i, k in info.iteritems():
        new_key = i
        upvote_limit = k
    monitored_reddits = json.loads(user.monitored_reddits)
    # NOTE(review): upvote_limit is a string here, so on Python 2 the
    # comparison 'upvote_limit > 0' is always True -- confirm whether an
    # int() conversion was intended.
    if monitored_reddits.has_key(new_key) and upvote_limit > 0:
        return "failed", 404
    else:
        for i, k in info.iteritems():
            monitored_reddits.update({i : k})
        user.monitored_reddits = json.dumps(monitored_reddits)
        user.save()
        return "success"
@blueprint.route("/remove_reddit", methods=["GET", "POST"])
@login_required
def remove_reddit():
    """Remove the subreddit(s) named by the query args from the user's
    monitored set and persist the change."""
    info = request.args.to_dict()
    user = load_user(session['user_id'])
    monitored_reddits = json.loads(user.monitored_reddits)
    for i, k in info.iteritems():
        # Only the arg name matters; its value is ignored.
        monitored_reddits.pop(i.strip())
    user.monitored_reddits = json.dumps(monitored_reddits)
    user.save()
    return "deleted"
@blueprint.route("/get_user_location", methods=["GET", "POST"])
@login_required
def get_user_location():
    """Return the stored location string for the logged-in user."""
    return load_user(session['user_id']).location
@blueprint.route("/get_quote", methods=["GET", "POST"])
@login_required
def get_quote():
    """Fetch a random quote from the forismatic.com API and return it as
    JSON: {"quote": ..., "author": ...}."""
    # Initializing manager
    f = Forismatic()
    q = f.get_quote()
    quote = {'quote':q.quote, 'author': q.author}
    print quote
    return json.dumps(quote)
def load_user(id):
    """Look up a User by primary key; `id` may be a string from the session.

    NOTE: the parameter shadows the builtin id(); kept for compatibility.
    """
    return User.get_by_id(int(id))
|
Do you have a damaged or junk car in Camarillo you're trying to get rid of? Perfect! We want to buy your damaged or junk car. Give us some basic information about your damaged or junk car, and we'll give you an offer instantly. Seriously, you could get cash for that junk car in your driveway! We want to buy all the junk, salvage, scrap, or damaged cars in Camarillo, CA.
|
#
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014 Antonino Ingargiola <tritemio@gmail.com>
#
"""
This module implements a list of arrays stored into a file with pytables.
The list is created empty (if the file does not exist) and must be populated
with the `append()` method.
If the file-name exists the list is populated with arrays stored
in the file.
Each list element is a reference to a pytable array. To read the array in
memory use the slicing notation (like pytable_array[:]).
"""
from __future__ import print_function
from builtins import range, zip
import os
import tables
# Default pytables filter settings: mid-level (6) compression with the fast
# blosc compressor; used unless the caller overrides ``compression``.
_default_compression = dict(complevel=6, complib='blosc')
class PyTablesList(list):
    def __init__(self, file, overwrite=False, parent_node='/',
                 group_name='array_list', group_descr='List of arrays',
                 prefix='data', compression=_default_compression,
                 load_array=False):
        """List of arrays stored in a pytables file.

        The list is initialized empty and populated with `.append()`.

        Arguments:
            file: an open `tables.File` instance, or a file name.
            overwrite (bool): if True and `file` is a name, truncate any
                existing file instead of appending to it.
            parent_node (str): node under which the group is looked up/created.
            group_name (str): name of the group that holds the arrays.
            load_array (bool): if True, read the data and put numpy arrays
                in the list. If False, put only pytable arrays.

        `group_descr`, `prefix`, `compression` are only used if a new group is
        created (for example for a new file).
        """
        super(PyTablesList, self).__init__()
        self.parent_node = parent_node
        self.group_name = group_name
        self.load_array = load_array

        # Defaults; overwritten below if the group already exists.
        self.size = 0
        self.prefix = prefix
        self.compression = compression

        ## Retrieve the file reference
        # isinstance (rather than the old exact `type(...) is` check) also
        # accepts subclasses of tables.file.File.
        if isinstance(file, tables.file.File):
            self.data_file = file
        elif os.path.exists(file) and not overwrite:
            self.data_file = tables.open_file(file, mode="a")
        else:
            self.data_file = tables.open_file(
                file, mode="w", title="Container for lists of arrays")

        ## Create the group if it does not exist yet
        if group_name not in self.data_file.get_node(parent_node):
            self.data_file.create_group(parent_node, group_name,
                                        title=group_descr)
        self.group = self.data_file.get_node(parent_node, group_name)

        if 'size' in self.group._v_attrs:
            ## The group was already present: read back the stored arrays
            self.size = self.group._v_attrs.size
            self.prefix = self.group._v_attrs.prefix
            for i in range(self.group._v_attrs.size):
                array_ = self.group._f_get_child(self.get_name(i))
                if self.load_array:
                    # Materialize in memory as a numpy array.
                    array_ = array_[:]
                super(PyTablesList, self).append(array_)
        else:
            ## New group: persist the list metadata as group attributes
            self.group._v_attrs.size = self.size
            self.group._v_attrs.prefix = self.prefix
            self.group._v_attrs.load_array = self.load_array

    def get_name(self, i=None):
        """Return the node name for element `i` (defaults to the next slot)."""
        if i is None:
            i = self.size
        return self.prefix + str(i)

    def append(self, ndarray):
        """Store `ndarray` as a new compressed carray and append a reference."""
        name = self.get_name()
        comp_filter = tables.Filters(**self.compression)
        tarray = self.data_file.create_carray(self.group, name, obj=ndarray,
                                              filters=comp_filter)
        self.data_file.flush()
        super(PyTablesList, self).append(tarray)
        # Keep the persisted size in sync so a later reload sees the element.
        self.size += 1
        self.group._v_attrs.size = self.size

    def get_array_list(self):
        """Return all elements read into memory (list of numpy arrays)."""
        return [array_[:] for array_ in self]
|
Let your representative know that as a secular constituent you support the National Day of Reason resolution and would like them to join you by co-sponsoring the bill.
Let our simple automated contact system contact your representative for you. The automated wording in the comment section is editable. We encourage you to add your own thoughts to personalize the message text, subject heading or signature. This will make your message more effective.
As your secular constituent, I urge you to support the resolution to designate May 3 as the National Day of Reason. This judicious, commonsense resolution asserts that our nation is governed by the powers of reason and logic rather than by dogma and superstition. Please stand up for a day to honor these enlightened ideals on which our democracy was founded.
Please encourage others to act by sharing on social media or in a letter to the editor.
|
#!/usr/local/bin/python
#fix freebayes header
import os
import glob
from optparse import OptionParser
# -------------------------------------------------
parser = OptionParser()
# NOTE: the default is False (a bool), not a path; downstream code must
# check that the option was actually supplied before using it.
parser.add_option("--vcfdir", dest="vcfdir", help="Path to directory containing VCF files", default=False)
(options, args) = parser.parse_args()
SAMPLEHEADER="#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT %s %s\n"
# -------------------------------------------------
# CHECK AND GENERATE GZ AND TBI
def fix_header(vcffile):
    """Rewrite ``vcffile`` as ``<name>_fixed.vcf`` with a proper #CHROM line.

    freebayes emits a column-header line without sample names; the names are
    recovered from the two BAM paths at the end of the ``##commandline=``
    meta line (second '_'-separated token of each path) and substituted into
    SAMPLEHEADER in place of the first non-'##' line.
    """
    outfile = vcffile.replace(".vcf", "_fixed.vcf")
    samples = []
    header_written = False
    # ``with`` guarantees both handles are closed even if a line is bad.
    with open(vcffile, 'r') as freader, open(outfile, 'w') as fwriter:
        for line in freader:
            if line.startswith("##"):
                # Meta lines are copied through unchanged.
                fwriter.write(line)
                if line.startswith("##commandline="):
                    # The last two whitespace-separated tokens are the BAM
                    # paths; the sample name is each path's second '_' field.
                    items = line.strip().split(" ")[-2:]
                    samples = [k.split("_")[1] for k in items]
            elif not header_written:
                # First non-meta line is the sample-less column header;
                # replace it with one naming both samples.
                fwriter.write(SAMPLEHEADER % (samples[0], samples[1]))
                header_written = True
            else:
                fwriter.write(line)
# -------------------------------------------------
# Entry point: fix every VCF in --vcfdir, then collect the fixed copies
# into a local 'fixed' subdirectory.
if not options.vcfdir:
    # Fail fast with a usage message instead of a TypeError from os.path.join.
    parser.error("--vcfdir is required")

file_list = glob.glob(os.path.join(options.vcfdir, "*.vcf"))
for vcf_file in file_list:
    fix_header(vcf_file)

# Portable replacements for os.system("mkdir ...") / os.system("mv ...").
# Note: fix_header writes the *_fixed.vcf next to its source, so we collect
# from the vcf directory (the old shell 'mv' only worked when cwd == vcfdir).
if not os.path.isdir("fixed"):
    os.mkdir("fixed")
for fixed_file in glob.glob(os.path.join(options.vcfdir, "*_fixed.vcf")):
    os.rename(fixed_file, os.path.join("fixed", os.path.basename(fixed_file)))
# -------------------------------------------------
|
At Stonybrook Water, we believe that everyone should have access to clean water. Anywhere. That’s why Stonybrook Water is a proud partner of Water For People, an organization that helps people in developing countries improve their quality of life by supporting the development of locally sustainable drinking water resources, sanitation facilities and health and hygiene education programs.
Every day, more than 6,000 people in our world die from water-related illnesses and nearly 2 million die each year… and most of them are young children. Uncollected garbage, overflowing latrines and non-functional residential and municipal drainage pipes plague poor people in urban areas. Children play in “latrine streams” filled with disease. Epidemics of cholera, dysentery and hepatitis are common in the developing world and these preventable diseases can be traced directly to inadequate water and/or sanitation. The combination of unsafe water and poor sanitation arguably comprises the greatest humanitarian crisis of our time.
The water crisis is hitting the most vulnerable populations – the world’s poor. Half the world, nearly 3 billion people, lives in abject poverty on less than $2 per day. Water-related diseases are killing them at an alarming rate. Women are most impacted by the lack of access to water and frequently travel many miles from their homes to collect water for their families, preventing them from engaging in more productive activities like going to school or contributing to their family income.
The time and physical energy spent collecting water, poor health and short life expectancy caused by unsafe water and sanitation limit the opportunities available to people in the developing world. Water-related diseases force families to focus on basic survival and prevent them from moving forward economically, locking them in an endless cycle of poverty. Without access to clean water, adequate sanitation and proper hygiene, no other development goals can be met.
Stonybrook Water is committed to helping these developing countries and donates five percent of its profits to this worthy cause. Please take a minute to review their website. We think you’ll be inspired.
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ResCompany(models.Model):
    _inherit = 'res.company'

    # All working-time calendars owned by this company.
    resource_calendar_ids = fields.One2many(
        'resource.calendar', 'company_id', 'Working Hours')
    # Calendar applied by default to new resources; ondelete='restrict'
    # prevents deleting a calendar while it is still a company's default.
    resource_calendar_id = fields.Many2one(
        'resource.calendar', 'Default Working Hours', ondelete='restrict')

    @api.model
    def _init_data_resource_calendar(self):
        """Init hook: give every company without a default calendar one."""
        self.search([('resource_calendar_id', '=', False)])._create_resource_calendar()

    def _create_resource_calendar(self):
        """Create and assign a 'Standard 40 hours/week' calendar for each
        company in the recordset."""
        for company in self:
            company.resource_calendar_id = self.env['resource.calendar'].create({
                'name': _('Standard 40 hours/week'),
                'company_id': company.id
            }).id

    @api.model
    def create(self, values):
        """Ensure every newly created company has a default calendar that is
        linked back to the company."""
        company = super(ResCompany, self).create(values)
        if not company.resource_calendar_id:
            # sudo(): calendar creation may exceed the creating user's rights.
            company.sudo()._create_resource_calendar()
        # calendar created from form view: no company_id set because record was still not created
        if not company.resource_calendar_id.company_id:
            company.resource_calendar_id.company_id = company.id
        return company
|
This entry was posted on Monday, July 2nd, 2012 at 11:07 am and tagged with 1000 views, blogging, Brian Melican, google, milestone, search terms, website and posted in Musings. You can follow any responses to this entry through the RSS 2.0 feed.
Well done! Very impressive work.
|
#!/usr/bin/env python
'''
* Description: This code is a bridge between Arduino and Linux in an Intel Galileo Gen 1 board
used for send data via MQTT. The user must specify the host and topic in order to send data.
* Author: Gustavo Adrián Jiménez González (Ruxaxup)
* Date: 03 - 27 - 2017
* Version: 1.0.0
* Contact: gustavo.jim.gonz@gmail.com
'''
import sys
import os
import paho.mqtt.publish as publish
import errno
from socket import error as socket_error
# MQTT broker hostname and topic to publish to; must be filled in before
# running (left empty in this copy of the script).
host = ""
topic = ""
# Bit position (in the sensors bitmask from argv[2]) -> sensor name.
idSensor = {4:"temperature",5:"pressure",6:"light",3:"noise",0:"power",2:"gas",1:"humidity"}
# Sensor name -> unit string published alongside each reading.
readings = {"temperature":"C",
            "pressure":"kPa",
            "light":"lumens",
            "noise":"dB",
            "power":"W",
            "gas":"ppm",
            "humidity":"%"}
# Gas-sensor channel index -> gas name.
gases = {0:"NH3",1:"CO",2:"NO2",3:"C3H8",4:"C4H10",5:"CH4",6:"H2",7:"C2H5OH"}
# Status codes accepted as argv[1].
statusDict = {"U":"update","S":"start"}
# Field delimiter used in the published message.
deli = ';'
def buildMetaData():
binary = open('binary.txt','w')
binario = str(bin(int(sys.argv[2]))[2:])
binary.write(binario)
binary.close()
binSize = len(binario)
diferencia = 7 - binSize
#Llena la cadena con ceros
for i in range(0,diferencia):
binario = '0' + binario
print "Binary string: " + binario
sensorsString = ""
for x in range(0,7):
print str(x) + " " + idSensor[x] + " -- " + binario[x]
if binario[x] != '0':
if idSensor[x] == "gas":
sensorsString = sensorsString + "setIni" + deli
for gas in range(0,8):
sensorsString = sensorsString + gases[gas] + deli + readings[idSensor[x]] + deli
sensorsString = sensorsString + "setEnd" + deli
else:
sensorsString = sensorsString + idSensor[x] + deli + readings[idSensor[x]] + deli
return sensorsString
# Entry point: expects argv[1] = status code ('U'/'S') and argv[2] = the
# sensor bitmask (decimal integer).
if len(sys.argv) == 3:
    mensaje = buildMetaData()
    # Append this device's MAC address (read from eth0) and the run status.
    f = open('/sys/class/net/eth0/address','r')
    mac = f.readline()
    mensaje = mensaje + "macAddress" + deli + mac.strip('\n') + deli + "status" + deli + statusDict[sys.argv[1]]
    print mensaje
    f.close()
    try:
        publish.single(topic, mensaje, hostname=host)
    except socket_error as serr:
        # Broker unreachable; message is dropped (no retry logic).
        print "No internet connection."
else:
    print "3 arguments are needed."
|
Shay Schuessler, B.S., L.M.T., C.A.
If you have any questions, concerns, or comments regarding Pro-Active Medical Center, LLC, please fill out the short contact form below.
- Select One -Brian Schuessler, D.C.Brandy Lackey, APRNLanae Grover, L.M.T.Shay Schuessler, B.S., L.M.T., C.A.Chester Cyrus, L.M.T.Nancy Rosenblatt, L.M.T.
"Dr. Brian has been my chiropractor for over 6 years and I couldn't be happier with the treatment and service his office provides."
"Brian and Shea were great. Resolved my issue in a very short time. Informative and pleasant. I will continue to see them on a regular basis."
"I always look forward to my visits. I go every six weeks or so for an adjustment, but if I’m ever having pain, it’s always a huge difference after I go!"
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 PPN features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
# TF 1.x contrib slim alias (tf.contrib was removed in TF 2.x).
slim = tf.contrib.slim
class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
  """SSD Feature Extractor using MobilenetV1 PPN features."""

  def preprocess(self, resized_inputs):
    """SSD preprocessing.

    Maps pixel values to the range [-1, 1].

    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    # Linear rescale: [0, 255] -> [-1, 1].
    return (2.0 / 255.0) * resized_inputs - 1.0

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    # Reject images smaller than 33px on a side -- presumably the minimum
    # for MobilenetV1's stride-32 downsampling; TODO(review) confirm.
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)
    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        # Optionally override the base extractor's hyperparams with the SSD
        # config's conv hyperparams; otherwise use a no-op context manager.
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        # Pooling Pyramid Network: derive 6 feature maps by pooling from
        # the Conv2d_11_pointwise endpoint.
        feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
            base_feature_map_depth=0,
            num_layers=6,
            image_features={
                'image_features': image_features['Conv2d_11_pointwise']
            })
    # NOTE(review): dict .values() is a view in Python 3, while the
    # docstring promises a list -- confirm downstream handling.
    return feature_maps.values()
|
Beautiful Miami Lakes Lakefront Townhouse for Rent. 2 Master Bedrooms, 2 Master Bathrooms, Large Family/Dining Area, Tile Thru-Out, Nice remodeled Kitchen W Brand New Stainless Steel Appliances, NEW Central AC, Washer/Dryer, Large Covered Patio overlooking Lake.
Tastefully Upgraded and Remodeled Pool Home in Pembroke Pines. 3 Bed 2 Baths with Travertine Circular Driveway and around entire house and Backyard. PGT Impact Windows. New Impact Double Front Door. Granite Kitchen w/Stainless Steel Appliance Package.
1,350 SF Office/Warehouse in best Area of Hialeah. Gated and Secure community in the I-75/826 Corridor of West Hialeah.2 Offices,20' High Ceilings, 3 Parking Spaces, 3-Phase Electricity, Gated and Secure Community. Garbage Included in Rent.
2,221 SF Office/Warehouse with Excellent Office area. Open office plan with approx. 1,200 SF Office and 1,000 SF Warehouse.
1,359 SF Lakefront Office/Warehouse for Rent. Great for many uses. Excellent Location with I-75, 826 and FLA Turnpike within 1 min.
Office/Warehouse in best area of Hialeah. 1,475 SF with mezzanine. Great Location within 1 minute to 826 & I-75. Great for any use.
2,214 SF Office/Showroom/Warehouse on busy NW 138 St Corner Unit. High Traffic Count with great visibility. Nicely Built out Office/Showroom space with Reception, Large Kitchen & Breakfast Area, Conference Room, ADA Bathroom and Open Warehouse area on 1st Floor.
6,000 SF Warehouse with outside storage yard. 1,100 SF offices with reception and 4 offices, 2 bathrooms 1 with Shower. 22' High Ceilings, 2 Overhead doors 1 in front and 1 in back.
2592 W 78 St Warehouse JUST SOLD!!
Office/Showroom/Warehouse FOR LEASE on NW 138 St. & Okeechobee Rd.
2205 W 80 St #5 Hialeah, FL 33016 Warehouse Office FOR SALE!!
7860 W 25 Ave. 10,000 SF Warehouse Hialeah, FL JUST CLOSED!!
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
import math, argparse
class CGError(Exception):
    """Base error type for the convolution-kernel generator."""

    def __init__(self, value):
        # Keep the payload on .value; callers render it via str()/repr().
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)


class CGBadArg(CGError):
    """Raised when a user-supplied argument is invalid."""


class CGInternal(CGError):
    """Raised on internal inconsistencies of the generator."""
def mbuild(width, height):
    """Build a height x width matrix filled with 0.0.

    (The old docstring said "NxN", but the function has always produced
    `height` rows of `width` columns.)  Rows are independent lists, so
    mutating one row never aliases another.
    """
    return [[0.0] * width for _ in range(height)]
def mdump(matrix):
    """Print a matrix row by row in a human-readable bracketed format."""
    for row in matrix:
        # Each cell: 13-wide general float, then ",  " (comma + two spaces).
        cells = "".join(format(value, "13.6g") + ",  " for value in row)
        print("[ " + cells + "],")
def mdumpcompton(matrix):
    """Print a matrix as a compton blur-kernel string.

    Format: "width,height," followed by every weight as "%.6f," — except
    the center element, which compton supplies implicitly.
    """
    height = len(matrix)
    width = len(matrix[0])
    center_i = int(height / 2)
    center_j = int(width / 2)
    parts = ["{},{},".format(width, height)]
    for i, row in enumerate(matrix):
        for j, value in enumerate(row):
            if i == center_i and j == center_j:
                continue  # center weight is implied by compton
            parts.append(format(value, ".6f") + ",")
    print("".join(parts))
def mnormalize(matrix):
    """Scale *matrix* in place so its center element becomes exactly 1.0.

    Returns the same matrix object.  Raises ZeroDivisionError when the
    center weight is zero (same as the original behavior).
    """
    height = len(matrix)
    width = len(matrix[0])
    scale = 1.0 / matrix[int(height / 2)][int(width / 2)]
    if scale == 1.0:
        return matrix  # already normalized; skip the pass
    for row in matrix:
        for j in range(width):
            row[j] *= scale
    return matrix
def mmirror4(matrix):
    """Mirror the top-left quadrant into the other three quadrants, in place.

    Every cell (i, j) is overwritten with the top-left-quadrant cell at
    (min(i, h-1-i), min(j, w-1-j)); the quadrant itself maps to identity,
    so the source values are never clobbered before being read.
    """
    height = len(matrix)
    width = len(matrix[0])
    for i in range(height):
        src_i = min(i, height - 1 - i)
        for j in range(width):
            src_j = min(j, width - 1 - j)
            matrix[i][j] = matrix[src_i][src_j]
    return matrix
def gen_gaussian(width, height, factors):
    """Build a square Gaussian blur kernel, normalized to 1.0 at the center.

    factors may carry 'sigma' (default 0.84089642).  Only the top-left
    quadrant (center included) is computed; mmirror4() fills the rest by
    symmetry.  The absolute Gaussian prefactor is irrelevant here because
    mnormalize() rescales the kernel so the center weight is exactly 1.0.

    Raises CGBadArg when width != height.
    """
    if width != height:
        raise CGBadArg("Cannot build an uneven Gaussian blur kernel.")
    size = width
    half = size // 2
    sigma = float(factors.get('sigma', 0.84089642))
    kernel = mbuild(size, size)
    for i in range(half + 1):
        for j in range(half + 1):
            dx = i - half
            dy = j - half
            kernel[i][j] = (1.0 / (2 * math.pi * sigma)
                            * pow(math.e, - (dx * dx + dy * dy) / (2 * sigma * sigma)))
    mnormalize(kernel)
    mmirror4(kernel)
    return kernel
def gen_box(width, height, factors):
    """Build a box blur kernel: every tap has weight 1.0.

    `factors` is accepted only for signature parity with the other
    generators and is ignored.  Building the rows directly makes the
    function self-contained (the old version allocated a zero matrix via
    mbuild() and immediately overwrote every cell).
    """
    return [[1.0] * width for _ in range(height)]
def gen_invalid(width, height, factors):
    # Fallback generator used by the type-dispatch table for unknown kernel
    # types; it unconditionally rejects the request.
    raise CGBadArg("Unknown kernel type.")
def args_readfactors(lst):
    """Parse "name=value" strings into a {name: float} dict.

    Accepts None or an empty list (returns {}).  Raises CGBadArg when a
    string lacks a key or a value; float() raises ValueError for a
    non-numeric value, as before.
    """
    factors = dict()
    for item in lst or ():
        key, _sep, value = item.partition('=')
        if not key:
            raise CGBadArg("Factor has no key.")
        if not value:
            raise CGBadArg("Factor has no value.")
        factors[key] = float(value)
    return factors
# ---- command-line driver (runs at import; this file is a script) ----
parser = argparse.ArgumentParser(description='Build a convolution kernel.')
parser.add_argument('type', help='Type of convolution kernel. May be "gaussian" (factor sigma = 0.84089642) or "box".')
parser.add_argument('width', type=int, help='Width of convolution kernel. Must be an odd number.')
parser.add_argument('height', nargs='?', type=int, help='Height of convolution kernel. Must be an odd number. Equals to width if omitted.')
parser.add_argument('-f', '--factor', nargs='+', help='Factors of the convolution kernel, in name=value format.')
parser.add_argument('--dump-compton', action='store_true', help='Dump in compton format.')
args = parser.parse_args()
width = args.width
height = args.height
# Height is optional; a square kernel is the default.
if not height:
    height = width
if not (width > 0 and height > 0):
    raise CGBadArg("Invalid width/height.")
# NOTE(review): only positivity is validated although the help text says the
# sizes must be odd; an even size makes the "center" cell of
# mnormalize()/mdumpcompton() ambiguous -- consider rejecting even sizes.
factors = args_readfactors(args.factor)
# Dispatch on kernel type; unknown types fall through to gen_invalid,
# which raises CGBadArg.
funcs = dict(gaussian = gen_gaussian, box = gen_box)
matrix = (funcs.get(args.type, gen_invalid))(width, height, factors)
if args.dump_compton:
    mdumpcompton(matrix)
else:
    mdump(matrix)
|
Puto, a recipe whose main ingredients are rice (what else) and sugar, cooked by steaming, has many variants including the simple puto Calasiao (Pangasinan), puto Polo (of Valenzuela), puto Biñan (Laguna) and the puto of Marikina (origin related to Polo).
Puto Manapla though is quite similar to the sayongsong of Surigao where the aroma and flavor of banana leaves is evident.
question: why is puto and dinuguan always paired together?
Mind if I share this on Facebook?
|
import pika
import json
import redis
from datetime import timedelta, datetime
from consumer import HRConsumer
# Module-level globals: initialized for real in the __main__ block below,
# declared here so callback()/categorize()/store() can reference them.
red = None  # redis.StrictRedis client used by store()
expiryTTL = timedelta(minutes=5)  # how long a stored tweet stays relevant
candidate_dict = None  # lowercase hashtag -> candidate display name
party_dict = None  # lowercase hashtag -> party affiliation
def callback(ch, method, properties, body):
    """RabbitMQ delivery handler: geolocate, categorize and store one tweet.

    Tweets with neither exact coordinates nor a place bounding box, and
    tweets whose hashtags match no candidate/party, are dropped silently.
    """
    data = json.loads(body)
    geoCoords = None
    if data.get('coordinates') is not None:
        # Exact GeoJSON point attached to the tweet itself.
        geoCoords = {'type': 'Point',
                     'coordinates': data['coordinates']['coordinates']}
    elif data.get('place') is not None:
        bbox = data['place'].get('bounding_box')
        if bbox is not None:
            # Approximate a point by averaging the bounding-box corners.
            # Note: faster to do one div at the end but may lose
            # some precision because floating-points are more
            # accurate closer to 0
            corners = bbox['coordinates'][0]
            count = len(corners)
            lon = 0.0
            lat = 0.0
            for point in corners:
                lon += point[0]
                lat += point[1]
            geoCoords = {'type': 'Point', 'coordinates': [lon / count, lat / count]}
    if geoCoords is not None:
        tweet = {'geometry': geoCoords,
                 'properties': categorize(data)}
        # Ignore people with no direct hashtags, very rare
        if tweet['properties']:
            # NOTE(review): assumes data['id'] is a string (Python 2 era);
            # confirm the upstream producer, int ids would break .encode().
            tweet['properties']['id'] = data['id'].encode('ascii')
            store(tweet)
def categorize(data):
    """Map a tweet's hashtags to a candidate and/or party.

    Returns a dict with optional 'candidate' and 'party' keys; empty when
    no hashtag matched.  When several hashtags match, the last one wins
    (same as the original loop).

    Fixes: the old version shadowed the builtins `dict` and `hash`, and
    lowered each hashtag's text up to three times per iteration.
    """
    result = {}
    for tag in data['hashtags']:
        text = tag['text'].lower()  # hoisted: computed once per hashtag
        if text in candidate_dict:
            result['candidate'] = candidate_dict[text]
        if text in party_dict:
            result['party'] = party_dict[text]
    return result
def store(tweet):
    """Add the tweet, tagged with its expiry timestamp, to the Redis 'tweets' set.

    The member string is repr-of-dict + ":\\:" + expiry datetime, matching
    whatever downstream code parses these entries.
    """
    expiry = datetime.now() + expiryTTL
    red.sadd("tweets", str(tweet) + ":\\:" + str(expiry))
if __name__ == "__main__":
    # Redis client used by store(); rebinds the module-level `red` global.
    red = redis.StrictRedis(host='localhost', port=6379, db=0)
    # Lowercased campaign hashtags -> candidate display name (2016 US primaries).
    candidate_dict = {'hillary2016': 'Hillary',
                      'hillaryforpresident': 'Hillary',
                      'clinton2016': 'Hillary',
                      'imwithher': 'Hillary',
                      'bernie2016': 'Bernie',
                      'bernieforpresident': 'Bernie',
                      'sanders2016': 'Bernie',
                      'voteberniesanders': 'Bernie',
                      'feelthebern': 'Bernie',
                      'debatewithbernie': 'Bernie',
                      'trump2016': 'Trump',
                      'donaldtrumpforpresident': 'Trump',
                      'trumpforpresident2016': 'Trump',
                      'votetrump2016': 'Trump',
                      'votetrump': 'Trump',
                      'makeamericagreatagain': 'Trump',
                      'bencarsonforprez': 'Carson',
                      'carson2016': 'Carson',
                      'omalley2016': 'OMalley',
                      'newleadership': 'OMalley',
                      'actionsnotwords': 'OMalley'}
    # Same hashtags -> party affiliation.
    party_dict = {'hillary2016': 'democrat',
                  'hillaryforpresident': 'democrat',
                  'clinton2016': 'democrat',
                  'imwithher': 'democrat',
                  'bernie2016': 'democrat',
                  'bernieforpresident': 'democrat',
                  'sanders2016': 'democrat',
                  'voteberniesanders': 'democrat',
                  'feelthebern':'democrat',
                  'debatewithbernie': 'democrat',
                  'omalley2016': 'democrat',
                  'newleadership': 'democrat',
                  'actionsnotwords': 'democrat',
                  'donaldtrumpforpresident': 'republican',
                  'trump2016': 'republican',
                  'trumpforpresident2016': 'republican',
                  'votetrump2016': 'republican',
                  'votetrump': 'republican',
                  'makeamericagreatagain': 'republican',
                  'bencarsonforprez': 'republican',
                  'carson2016': 'republican'}
    # Blocking RabbitMQ consumer; callback() handles each delivered tweet.
    rmq_connection = pika.BlockingConnection(
        pika.ConnectionParameters('localhost'))
    rmq_consumer = HRConsumer(rmq_connection, callback)
    rmq_consumer.consume()
|
Srinagar, Sep 15 (IANS) A massive fire broke out at a hotel here on Saturday, with fire tenders trying to keep it from spreading.
The blaze started at the top floor of the multi-storeyed Pamposh Hotel on Residency Road.
The building houses offices of some news channels and business establishments.
There were no immediate reports of casualties and the cause of the fire remains unknown.
|
# -*- coding: utf-8 *-*
import httplib
import json
import common
def getTicker(coin):
    """Return a summary (ticker) of the exchange's current state.

    The result is a dict shaped as:
    {
        "ticker": {
            "high": Highest traded price (BRL) today,
            "low": Lowest traded price (BRL) today,
            "vol": Amount of coins (LTC or BTC) traded today,
            "last": Price (BRL) of the last transaction,
            "buy": Current highest price (BRL) offered by people buying,
            "sell": Current lowest price (BRL) offered by people selling,
            "date": timestamp of the last ticker update
        }
    }

    Arguments:
    coin -- "btc" or "ltc", defines which coin the info is about
    """
    # Pick the endpoint explicitly instead of splicing "_litecoin" into the
    # default path; the resulting URLs are identical.
    if coin == 'ltc':
        address = "/api/ticker_litecoin/"
    else:
        address = "/api/ticker/"
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    connection = httplib.HTTPSConnection(common.mbDomain)
    connection.request("GET", address, "", headers)
    response = connection.getresponse()
    return common.convert(json.load(response))
def getOrderBook(coin):
    """Return the active orders for the given coin.

    The orders are given as a dict of lists of lists:
    {
        "asks": list of the selling offers available.
        "bids": list of the buying offers available.
    }
    Each offer is a list of two elements [price per unit, amount].

    Arguments:
    coin -- "btc" or "ltc", defines which coin the info is about
    """
    # Choose the endpoint up front; same URLs as the old string splicing.
    if coin == 'ltc':
        address = "/api/orderbook_litecoin/"
    else:
        address = "/api/orderbook/"
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    connection = httplib.HTTPSConnection(common.mbDomain)
    connection.request("GET", address, "", headers)
    response = connection.getresponse()
    return common.convert(json.load(response))
def getTrades(coin, timeBegin=None, timeEnd=None):
    """Return the history of trades of a given coin in a period of time.

    The history is given as a list of dicts:
    [
        {
            "date": Timestamp of the transaction,
            "price": Price (BRL) per unit of coin (LTC or BTC),
            "amount": Amount of coin (LTC or BTC),
            "tid": Transaction ID,
            "type": 'buy' or 'sell'
        }
    ]

    Arguments:
    coin -- "btc" or "ltc", defines which coin the info is about
    timeBegin -- (optional) Timestamp of the beginning of the wanted history
    timeEnd -- (optional) Timestamp of the end of the wanted history
    """
    if coin == 'ltc':
        address = "/api/trades_litecoin/"
    else:
        address = "/api/trades/"
    # BUG FIX: the old splice (address[:-1] + str(ts) + address[-1:]) dropped
    # the "/" between the path and the timestamp, producing e.g.
    # "/api/trades1500000000/". The API expects "/api/trades/<from>/<to>/".
    if timeBegin is not None:
        address += str(timeBegin) + "/"
    if timeEnd is not None:
        address += str(timeEnd) + "/"
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    connection = httplib.HTTPSConnection(common.mbDomain)
    connection.request("GET", address, "", headers)
    response = connection.getresponse()
    return common.convert(json.load(response))
|
In my opinion it's not a very good puzzle. Even after you see the solution, White's victory isn't obvious.
The solution takes careful planning but the majority of enthusiastic puzzlers reached the controversial solution without flaw ! The solved flag indicates correctly that white will win this one without a blunder ! After pawn to e6 + Check only two moves are required to convert it to Queen status The Black King must back up ( g8 is best ) or risk Rook f2+ check holding f1-f8 file ! Black must attempt if possible to exchange Rooks and begin a long balanced outer pawn rush ! I'll play white anytime ! Cheers !
What is good about gambits?
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.