code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
from json import loads, dumps
from oauthlib.common import to_unicode
def weibo_compliance_fix(session):
    """Patch an OAuth2 session so it interoperates with Weibo.

    Weibo's token endpoint omits the required ``token_type`` field from
    its access-token response; the registered hook injects
    ``token_type = 'Bearer'`` so oauthlib accepts the token.  The session
    is also switched to sending the token as a query parameter, which is
    what Weibo's API expects.
    """
    def _add_bearer_token_type(response):
        # Decode the JSON body, fill in the missing field, re-encode.
        payload = loads(response.text)
        payload['token_type'] = 'Bearer'
        response._content = to_unicode(dumps(payload)).encode('UTF-8')
        return response

    # Weibo wants the access token in the query string, not a header.
    session._client.default_token_placement = 'query'
    session.register_compliance_hook('access_token_response',
                                     _add_bearer_token_type)
    return session
|
py-geek/City-Air
|
venv/lib/python2.7/site-packages/requests_oauthlib/compliance_fixes/weibo.py
|
Python
|
mit
| 482
|
#!/usr/bin/env python
"""
* ultrasonic.py
* A library for ultrasonic sensor at RP
*
* Copyright (c) 2012 seeed technology inc.
* Website : www.seeed.cc
* Author : seeed fellow
* Create Time:
* Change Log :
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
"""
import RPi.GPIO as GPIO
import time
GPIO_SIG = 11
def getAndPrint():
    """Take 100 ultrasonic distance measurements and print each result."""
    # print() with a single argument is valid on both Python 2 and 3;
    # the original bare print statement was Python-2-only.
    print("SeeedStudio Grove Ultrasonic get data and print")
    # test 100 times
    for _ in range(100):
        measurementInCM()
    # Reset GPIO settings so the pin is released cleanly
    GPIO.cleanup()
def measurementInCM():
    """Trigger the Grove ultrasonic sensor once and report the distance.

    The single SIG pin is first driven as an output to emit the trigger
    pulse, then switched to an input to time the echo pulse.  The pulse
    interval is handed to measurementPulse() for conversion and printing.
    """
    # setup the GPIO_SIG as output and send the trigger pulse
    GPIO.setup(GPIO_SIG, GPIO.OUT)
    GPIO.output(GPIO_SIG, GPIO.LOW)
    time.sleep(0.2)
    GPIO.output(GPIO_SIG, GPIO.HIGH)
    time.sleep(0.5)
    GPIO.output(GPIO_SIG, GPIO.LOW)
    start = time.time()
    # setup GPIO_SIG as input to listen for the echo
    GPIO.setup(GPIO_SIG, GPIO.IN)
    # BUG FIX: initialise 'stop' so it cannot be unbound (NameError) if
    # the echo pulse never arrives and the second loop body never runs
    stop = start
    # get duration from Ultrasonic SIG pin: wait for the echo to start,
    # then record the time until it ends
    while GPIO.input(GPIO_SIG) == 0:
        start = time.time()
    while GPIO.input(GPIO_SIG) == 1:
        stop = time.time()
    measurementPulse(start, stop)
def measurementPulse(start, stop):
    """Convert an echo pulse interval to a distance in cm and print it.

    Arguments:
    start -- time in seconds when the echo pulse started
    stop  -- time in seconds when the echo pulse ended

    Returns the computed distance in centimetres (the original returned
    None; returning the value is a backward-compatible addition).
    """
    # print() is valid on both Python 2 and 3; the bare statement was not.
    print("Ultrasonic Measurement")
    # Calculate pulse length
    elapsed = stop - start
    # Distance travelled is time multiplied by the speed of sound
    # (34300 cm/s), halved because the pulse goes there and back.
    distance = elapsed * 34300 / 2
    print("Distance : %.1f CM" % distance)
    return distance
if __name__ == '__main__':
    # Use physical board pin numbering (rpi board gpio, not bcm gpio);
    # GPIO_SIG = 11 refers to the physical header pin in this scheme.
    GPIO.setmode(GPIO.BOARD)
    # loop method: take and print 100 measurements, then clean up
    getAndPrint()
|
Seeed-Studio/Grove-RaspberryPi
|
Grove - Ultrasonic Ranger/ultrasonic.py
|
Python
|
mit
| 2,644
|
from get_store_data import *
from pprint import pprint
import traceback
# generate_log_file function used as the default logging fucntion for cdp_runner
def generate_log_file(log_name, log_type, log_object, log_directory):
    '''Generate a log file after a block or system run.

    Arguments:
    log_name -- the actual storage name of the file to be stored. Default interaction: created and passed by the cdp_runner method.
    log_type -- the type of log desired for storage; 'block', 'system', or 'consolidated'. Default interaction: created and passed by the cdp_runner method.
    log_object -- the dictionary object to store. Default interaction: created and passed by the cdp_runner method.
    log_directory -- the directory or folder os path for where to store the log file.
    '''
    # ensure path safety
    log_directory = check_path_safety(log_directory)
    # check if the log folder exists, if not, create it
    if not os.path.exists(log_directory):
        os.mkdir(log_directory)
    # a 'system' log is a list of block logs; additionally emit a
    # consolidated summary with totals and per-block averages
    if log_type == 'system':
        consolidated = dict()
        consolidated['completed_feeds'] = 0
        consolidated['avg_feeds_duration'] = 0
        consolidated['completed_videos'] = 0
        consolidated['avg_videos_duration'] = 0
        consolidated['completed_audios'] = 0
        consolidated['avg_audios_duration'] = 0
        consolidated['completed_transcripts'] = 0
        consolidated['avg_transcripts_duration'] = 0
        consolidated['tfidf_duration'] = 0
        consolidated['search_duration'] = 0
        consolidated['avg_data_pull_duration'] = 0
        consolidated['avg_block_duration'] = 0
        # either keep the most recent duration as current or accumulate
        # the duration for later averaging
        for block in log_object:
            consolidated['system_start'] = block['system_start']
            consolidated['completed_feeds'] += block['completed_feeds']
            consolidated['avg_feeds_duration'] += block['feeds_duration']
            consolidated['completed_videos'] += block['completed_videos']
            consolidated['avg_videos_duration'] += block['videos_duration']
            consolidated['completed_audios'] += block['completed_audios']
            consolidated['avg_audios_duration'] += block['audios_duration']
            consolidated['completed_transcripts'] += block['completed_transcripts']
            consolidated['avg_transcripts_duration'] += block['transcripts_duration']
            consolidated['tfidf_duration'] = block['tfidf_duration']
            consolidated['search_duration'] = block['search_duration']
            consolidated['avg_block_duration'] += block['block_duration']
            consolidated['system_runtime'] = block['system_runtime']
            consolidated['combination_duration'] = block['combination_duration']
            consolidated['database_duration'] = block['database_duration']
            consolidated['avg_data_pull_duration'] += block['data_pull_duration']
        # BUG FIX: was len(blocks) -- 'blocks' is not defined in this
        # function; the list of block logs here is log_object. Guard
        # against an empty list to avoid ZeroDivisionError / KeyError.
        num_blocks = len(log_object)
        if num_blocks > 0:
            # average specific values
            consolidated['avg_feeds_duration'] = consolidated['avg_feeds_duration'] / num_blocks
            consolidated['avg_videos_duration'] = consolidated['avg_videos_duration'] / num_blocks
            consolidated['avg_audios_duration'] = consolidated['avg_audios_duration'] / num_blocks
            consolidated['avg_transcripts_duration'] = consolidated['avg_transcripts_duration'] / num_blocks
            consolidated['avg_block_duration'] = consolidated['avg_block_duration'] / num_blocks
            consolidated['avg_data_pull_duration'] = consolidated['avg_data_pull_duration'] / num_blocks
            # print the consolidated log
            pprint(log_object)
            # generate a log with consolidated data via a recursive call;
            # BUG FIX: use a separate local for the consolidated file name
            # so the system log below is not written over the consolidated
            # file (the original reassigned log_name here)
            consolidated_name = 'consolidated_' + str(datetime.datetime.fromtimestamp(consolidated['system_start'])).replace(' ', '_').replace(':', '-')[:-7]
            generate_log_file(log_name=consolidated_name, log_type='consolidated', log_object=consolidated, log_directory=log_directory)
    # store the log_object in the specified path; the with-statement
    # closes the file, so no explicit close() is needed
    with open(log_directory + log_name + '.json', 'w', encoding='utf-8') as logfile:
        json.dump(log_object, logfile)
    # brief pause to let the write settle (original behavior)
    time.sleep(1)
# run_cdp function used to collect, transcribe, and store videos
def transcription_runner(project_directory, json_directory, log_directory, video_routes, scraping_function, pull_from_database, database_head, versioning_path, relevant_tfidf_storage_key, ignore_files_path, commit_to_database, delete_videos=False, delete_splits=False, test_search_term='bicycle infrastructure', prints=True, block_sleep_duration=900, run_duration=-1, logging=True):
    '''Run the backend transcription, local, and database storage system.

    Arguments:
    project_directory -- the directory or folder os path for where all created and generated non-storage (JSON) files will be stored.
        example path: 'C:/transcription_runner/seattle/'
    json_directory -- the directory or folder os path for where all created and generated storage (JSON) files will be stored.
        example path: 'C:/transcription_runner/seattle/json/'
    log_directory -- the directory or folder os path for where all created and generated log files will be stored.
        example path: 'C:/transcription_runner/seattle/logs/'
    video_routes -- the packed_routes dictionary object that contains labeling/ pathing as each key, and a list of page url and a more specific naming target.
        formatted:
            video_routes = {
                label/ path_one: [url_one, specific_name_one],
                label/ path_two: [url_two, specific_name_two],
                ...
                label/ path_n: [url_n, specific_name_n]
            }
    scraping_function -- the function to return a completed list of information dictionaries regarding each video present in the video_routes provided.
        example function: scrape_seattle_channel in get_store_data.py
    pull_from_database -- the function used to retrieve data from a database storage system.
        example function: get_firebase_data in get_store_data.py
    database_head -- the part/ path of the database you would like to pull from and store with.
    versioning_path -- the path to where versioning data is stored in the database.
    relevant_tfidf_storage_key -- the key/ path for navigating the database information to retrieve specifically the tfidf information.
    ignore_files_path -- the os file path to a json file containing an array of ignorable file names
    commit_to_database -- the function used to push data from the local JSON storage to a database.
        example function: commit_to_firebase in get_store_data.py
    delete_videos -- boolean value to determine to keep or delete videos after audio has been stripped. Default: False (keep videos)
    delete_splits -- boolean value to determine to keep or delete audio splits after transcript has been created. Default: False (keep audio splits)
    test_search_term -- string value to act as a test search word or phrase to calculate search time of the tfidf tree. Default: 'bicycle infrastructure'
    prints -- boolean value to determine to show helpful print statements during the course of the run to indicate where the runner is at in the process. Default: True (show prints)
    block_sleep_duration -- integer value for time in seconds for how long the system should wait after checking for needed videos. Default: 900 (0.25 hours)
    run_duration -- integer value for time in seconds for how long the system should run for. Default: -1 (endless)
    logging -- boolean value to determine if the system should create log files after each block run and system run. Default: True (create log files)
    '''
    # ensure safety of paths
    project_directory = check_path_safety(project_directory)
    log_directory = check_path_safety(log_directory)
    # create system logging information
    system_start = time.time()
    time_elapsed = 0
    # create blocks list for logging information
    blocks = list()
    # check to see if the runner should continue:
    # run_duration == -1 means run forever; otherwise stop once another
    # block plus its sleep would exceed the requested run duration
    while (((time_elapsed + block_sleep_duration) <= run_duration) or run_duration == -1):
        # create block logging information
        block_start = time.time()
        block = dict()
        block['system_start'] = system_start
        block['block_start'] = block_start
        # @RUN
        # Run for video feeds: poll for new feeds up to 12 times,
        # sleeping between attempts until new content appears
        feeds_start = time.time()
        noNewFeedsAvailable = True
        checkCounter = 0
        block['completed_feeds'] = 0
        while (noNewFeedsAvailable and (checkCounter < 12)):
            feed_results = get_video_feeds(packed_routes=video_routes, storage_directory=json_directory, scraping_function=scraping_function, prints=prints)
            block['completed_feeds'] = len(feed_results['feeds'])
            noNewFeedsAvailable = not feed_results['difference']
            time_elapsed = time.time() - system_start
            checkCounter += 1
            # sleep the system if it wont overflow into system downtime
            if (noNewFeedsAvailable and (checkCounter < 12)):
                print('collecting feeds again in:', (float(block_sleep_duration) / 60.0 / 60.0), 'HOURS...')
                print('-------------------------------------------------------')
                time.sleep(block_sleep_duration)
        feeds_duration = time.time() - feeds_start
        # NOTE: stage durations are stored in hours throughout this block
        block['feeds_duration'] = (float(feeds_duration) / 60.0 / 60.0)
        # @RUN
        # Run for mass video collection
        videos_start = time.time()
        block['completed_videos'] = get_video_sources(objects_file=(json_directory + 'video_feeds.json'), storage_directory=(project_directory + 'video/'), throughput_directory=(project_directory + 'audio/'), prints=prints)
        videos_duration = time.time() - videos_start
        block['videos_duration'] = (float(videos_duration) / 60.0 / 60.0)
        # @RUN
        # Run for mass audio stripping
        audios_start = time.time()
        block['completed_audios'] = strip_audio_from_directory(video_directory=(project_directory + 'video/'), audio_directory=(project_directory + 'audio/'), delete_videos=delete_videos, prints=prints)
        audios_duration = time.time() - audios_start
        block['audios_duration'] = (float(audios_duration) / 60.0 / 60.0)
        # @RUN
        # Run for mass transcripts
        transcripts_start = time.time()
        block['completed_transcripts'] = generate_transcripts_from_directory(audio_directory=(project_directory + 'audio/'), transcripts_directory=(project_directory + 'transcripts/'), ignore_files=ignore_files_path, delete_splits=delete_splits, prints=prints)
        transcripts_duration = time.time() - transcripts_start
        block['transcripts_duration'] = (float(transcripts_duration) / 60.0 / 60.0)
        # @RUN
        # Run for tfidf safety: pull prior versioning data so tfidf
        # generation can skip already-processed transcripts
        data_pull_start = time.time()
        prior_stored_data = pull_from_database(db_root=database_head, path=versioning_path)
        data_pull_duration = time.time() - data_pull_start
        block['data_pull_duration'] = (float(data_pull_duration) / 60.0 / 60.0)
        # @RUN
        # Run for mass tfidf; only reuse prior version data when the
        # database actually returned a mapping
        tfidf_start = time.time()
        if (type(prior_stored_data) is collections.OrderedDict) or (type(prior_stored_data) is dict):
            generate_tfidf_from_directory(transcript_directory=(project_directory + 'transcripts/'), storage_directory=json_directory, stored_versions=prior_stored_data, prints=prints)
        else:
            generate_tfidf_from_directory(transcript_directory=(project_directory + 'transcripts/'), storage_directory=json_directory, stored_versions=None, prints=prints)
        tfidf_duration = time.time() - tfidf_start
        block['tfidf_duration'] = (float(tfidf_duration) / 60.0 / 60.0)
        # @RUN
        # Run for testing speed of search against the generated tfidf store
        search_start = time.time()
        if prints:
            print('highest relevancy found:', predict_relevancy(search=test_search_term, tfidf_store=(json_directory + 'tfidf.json'))[0][1]['relevancy'])
        else:
            predict_relevancy(search=test_search_term, tfidf_store=(json_directory + 'tfidf.json'))
        print('-------------------------------------------------------')
        search_duration = time.time() - search_start
        block['search_duration'] = (float(search_duration) / 60.0 / 60.0)
        # @RUN
        # Run for data combination
        combination_start = time.time()
        combine_data_sources(feeds_store=(json_directory + 'video_feeds.json'), tfidf_store=(json_directory + 'tfidf.json'), versioning_store=(json_directory + 'events_versioning.json'), storage_directory=json_directory, prints=prints)
        combination_duration = time.time() - combination_start
        block['combination_duration'] = (float(combination_duration) / 60.0 / 60.0)
        # @RUN
        # Run for database storage
        database_start = time.time()
        commit_to_database(data_store=(json_directory + 'combined_data.json'), db_root=database_head, prints=prints)
        database_duration = time.time() - database_start
        block['database_duration'] = (float(database_duration) / 60.0 / 60.0)
        # NOTE: block_duration and system_runtime are stored in seconds,
        # unlike the per-stage durations above (hours)
        block_duration = time.time() - block_start
        block['block_duration'] = block_duration
        time_elapsed = time.time() - system_start
        block['system_runtime'] = time_elapsed
        # check logging to log the block information
        if logging:
            log_name = 'block_' + str(datetime.datetime.fromtimestamp(block_start)).replace(' ', '_').replace(':', '-')[:-7]
            generate_log_file(log_name=log_name, log_type='block', log_object=block, log_directory=log_directory)
        # append the block to system log
        blocks.append(block)
    # check logging to log the system information
    if logging:
        log_name = 'system_' + str(datetime.datetime.fromtimestamp(system_start)).replace(' ', '_').replace(':', '-')[:-7]
        generate_log_file(log_name=log_name, log_type='system', log_object=blocks, log_directory=log_directory)
    # return the basic block information
    return blocks
|
CouncilDataProject/transcription_runner
|
v1.0/cdp_runner.py
|
Python
|
gpl-3.0
| 14,332
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# http://blog.dokenzy.com/archives/986
# https://pythonhosted.org/setuptools/setuptools.html#building-and-distributing-packages-with-setuptools
# http://python-packaging-user-guide.readthedocs.org/en/latest/distributing/#name
# http://stackoverflow.com/questions/7522250/how-to-include-package-data-with-setuptools-distribute
# Classifier strings describing the supported environment and language.
PACKAGE_CLASSIFIERS = [
    "Environment :: Win32 (MS Windows)",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3.5",
    "Natural Language :: Korean",
]

# Runtime dependencies required by the package.
PACKAGE_REQUIREMENTS = ["pandas>=0.17.0", "ta-lib>=0.4.9"]

setup(
    name="xing-plus",
    version="1.0.2",
    license="MIT License",
    author="sculove",
    author_email="sculove@gmail.com",
    url="https://github.com/sculove/xing-plus",
    keywords=["xing", "stock", "systemtrading"],
    description="more easy eBEST INVESTMENT API",
    long_description="xing plus supports more easy api for eBEST INVESTMENT",
    classifiers=PACKAGE_CLASSIFIERS,
    packages=find_packages(),
    package_dir={"xing": "xing", "res": "xing/res"},
    # ship the .res resource files alongside the package code
    package_data={"xing": ["res/*.res"]},
    install_requires=PACKAGE_REQUIREMENTS,
)
|
sculove/xing-plus
|
setup.py
|
Python
|
mit
| 1,187
|
from horizon import tables
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.admin.datacenter \
import tables as datacenter_tables
from openstack_dashboard.openstack.common.requestapi import RequestApi
from openstack_dashboard.openstack.common.dictutils import DictList2ObjectList
from horizon import exceptions
from horizon import messages
class IndexView(tables.DataTableView):
    """Admin datacenter index view backed by the heterogeneous platform API."""

    template_name = 'admin/datacenter/index.html'
    table_class = datacenter_tables.DataCenterTable

    def get_data(self):
        """Return the datacenter list as table row objects.

        Fetches the datacenter list from the heterogeneous-platforms API,
        sorts it case-insensitively by name, and converts the dicts to
        objects for the table. Shows an error message and returns [] when
        the API reports failure or raises.
        """
        request = self.request
        plats = []
        try:
            request_api = RequestApi()
            plats = request_api.getRequestInfo('api/heterogeneous/platforms/datacenters')
            # isinstance is the idiomatic form of the original
            # type(plats) == type([]) / type({}) comparisons
            if plats and isinstance(plats, list):
                # sort case-insensitively by datacenter name for stable display
                plats.sort(key=lambda aggregate: aggregate['name'].lower())
                res = DictList2ObjectList(plats)
                return res
            elif plats and isinstance(plats, dict) and plats.get('action') == 'failed':
                messages.error(request,
                               _('Unable to retrieve datacenters list.'))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve datacenters list.'))
        return []
|
ChinaMassClouds/copenstack-server
|
openstack/src/horizon-2014.2/openstack_dashboard/dashboards/admin/datacenter/views.py
|
Python
|
gpl-2.0
| 1,339
|
"""
Swiss-specific Form helpers
"""
from __future__ import absolute_import
import re
from django.contrib.localflavor.ch.ch_states import STATE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
# Swiss identity card number: 8 alphanumeric chars, a 9th position that is
# a digit or '<', and a final checksum digit.
id_re = re.compile(r"^(?P<idnumber>\w{8})(?P<pos9>(\d{1}|<))(?P<checksum>\d{1})$")
# Local Swiss phone number: leading 0, a non-zero second digit, 8 more digits.
phone_digits_re = re.compile(r'^0([1-9]{1})\d{8}$')
class CHZipCodeField(RegexField):
    """A form field that validates a four-digit Swiss zip (postal) code."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXX.'),
    }
    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # Delegate validation to RegexField with the 4-digit pattern.
        super(CHZipCodeField, self).__init__(r'^\d{4}$',
            max_length, min_length, *args, **kwargs)
class CHPhoneNumberField(Field):
    """
    Validate local Swiss phone number (not international ones)
    The correct format is '0XX XXX XX XX'.
    '0XX.XXX.XX.XX' and '0XXXXXXXXX' validate but are corrected to
    '0XX XXX XX XX'.
    """
    default_error_messages = {
        'invalid': 'Phone numbers must be in 0XX XXX XX XX format.',
    }

    def clean(self, value):
        """Normalize *value* to '0XX XXX XX XX' or raise ValidationError.

        Empty values are returned as u''.
        """
        super(CHPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip accepted separator characters before validating.
        # FIX: raw string avoids the invalid-escape-sequence warning the
        # original non-raw '(\.|\s|/|-)' literal triggers on modern
        # Python; the pattern bytes are identical.
        value = re.sub(r'(\.|\s|/|-)', '', smart_unicode(value))
        m = phone_digits_re.search(value)
        if m:
            # Re-insert the canonical 3-3-2-2 spacing.
            return u'%s %s %s %s' % (value[0:3], value[3:6], value[6:8], value[8:10])
        raise ValidationError(self.error_messages['invalid'])
class CHStateSelect(Select):
    """
    A Select widget that uses a list of CH states as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from django.contrib.localflavor.ch.ch_states.
        super(CHStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class CHIdentityCardNumberField(Field):
    """
    A Swiss identity card number.
    Checks the following rules to determine whether the number is valid:
        * Conforms to the X1234567<0 or 1234567890 format.
        * Included checksums match calculated checksums
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swiss identity or passport card number in X1234567<0 or 1234567890 format.'),
    }
    def has_valid_checksum(self, number):
        """Return True if the last digit of *number* matches the weighted
        checksum computed over the preceding characters."""
        given_number, given_checksum = number[:-1], number[-1]
        new_number = given_number
        calculated_checksum = 0
        fragment = ""
        parameter = 7
        # A leading letter (passport-style number) is converted to its
        # alphabet position (A=0 ... I=8); letters past 'I' are invalid.
        first = str(number[:1])
        if first.isalpha():
            num = ord(first.upper()) - 65
            if num < 0 or num > 8:
                return False
            new_number = str(num) + new_number[1:]
            # Position 9 is forced to '0' for the checksum computation.
            new_number = new_number[:8] + '0'
        if not new_number.isdigit():
            return False
        # Weight each digit with the repeating 7, 3, 1 cycle and sum.
        for i in range(len(new_number)):
            fragment = int(new_number[i])*parameter
            calculated_checksum += fragment
            if parameter == 1:
                parameter = 7
            elif parameter == 3:
                parameter = 1
            elif parameter ==7:
                parameter = 3
        # Only the last digit of the sum is compared to the given checksum.
        return str(calculated_checksum)[-1] == given_checksum
    def clean(self, value):
        """Validate *value* against the ID format and checksum.

        Returns the value unchanged on success, u'' for empty values,
        and raises ValidationError otherwise.
        """
        super(CHIdentityCardNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = re.match(id_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        idnumber, pos9, checksum = match.groupdict()['idnumber'], match.groupdict()['pos9'], match.groupdict()['checksum']
        # All-zero numbers match the pattern but are not real IDs.
        if idnumber == '00000000' or \
            idnumber == 'A0000000':
            raise ValidationError(self.error_messages['invalid'])
        all_digits = "%s%s%s" % (idnumber, pos9, checksum)
        if not self.has_valid_checksum(all_digits):
            raise ValidationError(self.error_messages['invalid'])
        return u'%s%s%s' % (idnumber, pos9, checksum)
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/contrib/localflavor/ch/forms.py
|
Python
|
bsd-3-clause
| 3,952
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
class FirefoxWhatsNewIndiaPage(BasePage):
    """Page object for the /firefox/whatsnew/india/ page."""

    URL_TEMPLATE = '/{locale}/firefox/whatsnew/india/'

    # QR code image inside the Firefox Lite QR-code container.
    _qr_code_locator = (By.CSS_SELECTOR, '.lite-qrcode-container > img')

    @property
    def is_qr_code_displayed(self):
        # True when the QR code element is rendered and visible.
        return self.is_element_displayed(*self._qr_code_locator)
|
ericawright/bedrock
|
tests/pages/firefox/whatsnew/whatsnew_india.py
|
Python
|
mpl-2.0
| 567
|
#!/bin/env python3
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plot
import argparse
def plot_injection(t1, t2, iw, r, c, ir):
    """Render a 2-D map of the jet injection region.

    t1, t2 -- upper/lower jet opening angles (radians)
    iw -- initial width of the jet injection cone
    r -- grid cell count per axis
    c -- cone type, 'cap' or 'nocap'
    ir -- injection radius
    """
    half_extent = ir + 1
    xs = np.linspace(-half_extent, half_extent, num=r)
    zs = np.linspace(-half_extent, half_extent, num=r)
    field = np.zeros((xs.shape[0], zs.shape[0]))
    for i in range(field.shape[0]):
        for k in range(field.shape[1]):
            radius = np.sqrt(xs[i]**2 + zs[k]**2)
            # upper half uses the first opening angle, lower half the second
            if zs[k] > 0:
                offset = iw / np.tan(t1)
                cap_height = ir * np.cos(t1)
            else:
                offset = iw / np.tan(np.pi - t2)
                cap_height = ir * np.cos(t2)
            theta = np.arccos((zs[k] + offset) / np.sqrt(xs[i]**2 + (zs[k] + offset)**2))
            height = np.abs(radius * np.cos(theta))
            # inside the injection region when within the (capped or
            # uncapped) injection radius
            if (radius <= ir and c == 'cap') or (height <= cap_height and c == 'nocap'):
                if zs[k] > 0 and theta < t1:
                    field[i, k] = 1
                elif zs[k] < 0 and theta > np.pi - t2:
                    field[i, k] = -1
    fig, ax = plot.subplots(1, 1, subplot_kw={'aspect': 'equal'})
    ax.pcolormesh(xs, zs, field.T)
    plot.show()
    return
def main():
    """Entry point: parse CLI arguments and render the injection plot."""
    arg_parser = argparse.ArgumentParser(
        description = 'Plot jet injection region'
    )
    # the three required geometry parameters, all floats
    for arg_name, arg_help in (
        ('theta1', 'First jet opening angle (degrees)'),
        ('theta2', 'Second jet opening angle (degrees)'),
        ('initial_width', 'Initial width of jet injection cone'),
    ):
        arg_parser.add_argument(arg_name, type=float, help=arg_help)
    arg_parser.add_argument('-r', '--resolution', help='Grid cell count (default: %(default)s)', type=int, default=100)
    arg_parser.add_argument('-c', '--conetype', help='Cone type (with cap or without)', choices=['cap', 'nocap'], default='cap')
    arg_parser.add_argument('-i', '--injectionradius', help='Injection radius (default: %(default)s)', type=float, default=1.0)
    opts = arg_parser.parse_args()
    # angles arrive in degrees; plot_injection works in radians
    plot_injection(np.deg2rad(opts.theta1), np.deg2rad(opts.theta2), opts.initial_width, opts.resolution, opts.conetype, opts.injectionradius)


if __name__ == "__main__":
    main()
|
opcon/plutokore
|
scripts/plot-jet-injection.py
|
Python
|
mit
| 2,019
|
"""Test Z-Wave cover devices."""
from homeassistant.components.cover import SUPPORT_CLOSE, SUPPORT_OPEN
from homeassistant.components.zwave import (
CONF_INVERT_OPENCLOSE_BUTTONS,
CONF_INVERT_PERCENT,
const,
cover,
)
from tests.async_mock import MagicMock
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_none(hass, mock_openzwave):
    """Test device returns none."""
    node = MockNode()
    # A value with no recognized command class maps to no cover entity.
    value = MockValue(data=0, node=node)
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    assert device is None
def test_get_device_detects_rollershutter(hass, mock_openzwave):
    """Test device returns rollershutter."""
    hass.data[const.DATA_NETWORK] = MagicMock()
    node = MockNode()
    # The multilevel-switch command class identifies a roller shutter.
    value = MockValue(
        data=0, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    values = MockEntityValues(primary=value, open=None, close=None, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    assert isinstance(device, cover.ZwaveRollershutter)
def test_get_device_detects_garagedoor_switch(hass, mock_openzwave):
    """Test device returns garage door."""
    node = MockNode()
    # The binary-switch command class identifies a switch-style garage door.
    value = MockValue(
        data=False, node=node, command_class=const.COMMAND_CLASS_SWITCH_BINARY
    )
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    assert isinstance(device, cover.ZwaveGarageDoorSwitch)
    assert device.device_class == "garage"
    assert device.supported_features == SUPPORT_OPEN | SUPPORT_CLOSE
def test_get_device_detects_garagedoor_barrier(hass, mock_openzwave):
    """Test device returns garage door."""
    node = MockNode()
    # The barrier-operator command class identifies a barrier garage door.
    value = MockValue(
        data="Closed", node=node, command_class=const.COMMAND_CLASS_BARRIER_OPERATOR
    )
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    assert isinstance(device, cover.ZwaveGarageDoorBarrier)
    assert device.device_class == "garage"
    assert device.supported_features == SUPPORT_OPEN | SUPPORT_CLOSE
def test_roller_no_position_workaround(hass, mock_openzwave):
    """Test that a device on the no-position workaround list reports no position."""
    hass.data[const.DATA_NETWORK] = MagicMock()
    # manufacturer/product ids of a device that does not report position
    node = MockNode(manufacturer_id="0047", product_type="5a52")
    value = MockValue(
        data=45, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    values = MockEntityValues(primary=value, open=None, close=None, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    # despite data=45, the workaround suppresses the position
    assert device.current_cover_position is None
def test_roller_value_changed(hass, mock_openzwave):
    """Test position changed."""
    hass.data[const.DATA_NETWORK] = MagicMock()
    node = MockNode()
    value = MockValue(
        data=None, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    values = MockEntityValues(primary=value, open=None, close=None, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    # unknown data -> no position, unknown closed state
    assert device.current_cover_position is None
    assert device.is_closed is None
    # values at or below 2 clamp to fully closed (position 0)
    value.data = 2
    value_changed(value)
    assert device.current_cover_position == 0
    assert device.is_closed
    # mid-range values are reported as-is
    value.data = 35
    value_changed(value)
    assert device.current_cover_position == 35
    assert not device.is_closed
    # values at or above 97 clamp to fully open (position 100)
    value.data = 97
    value_changed(value)
    assert device.current_cover_position == 100
    assert not device.is_closed
def test_roller_commands(hass, mock_openzwave):
    """Test roller shutter open/close/stop/position commands."""
    mock_network = hass.data[const.DATA_NETWORK] = MagicMock()
    node = MockNode()
    value = MockValue(
        data=50, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    open_value = MockValue(data=False, node=node)
    close_value = MockValue(data=False, node=node)
    values = MockEntityValues(
        primary=value, open=open_value, close=close_value, node=node
    )
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    # set_cover_position maps directly onto the dimmer level
    device.set_cover_position(position=25)
    assert node.set_dimmer.called
    value_id, brightness = node.set_dimmer.mock_calls[0][1]
    assert value_id == value.value_id
    assert brightness == 25
    # open presses the open button
    device.open_cover()
    assert mock_network.manager.pressButton.called
    (value_id,) = mock_network.manager.pressButton.mock_calls.pop(0)[1]
    assert value_id == open_value.value_id
    # close presses the close button
    device.close_cover()
    assert mock_network.manager.pressButton.called
    (value_id,) = mock_network.manager.pressButton.mock_calls.pop(0)[1]
    assert value_id == close_value.value_id
    # stop releases the (open) button
    device.stop_cover()
    assert mock_network.manager.releaseButton.called
    (value_id,) = mock_network.manager.releaseButton.mock_calls.pop(0)[1]
    assert value_id == open_value.value_id
def test_roller_invert_percent(hass, mock_openzwave):
    """Test roller shutter commands with the invert-percent option set."""
    mock_network = hass.data[const.DATA_NETWORK] = MagicMock()
    node = MockNode()
    value = MockValue(
        data=50, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    open_value = MockValue(data=False, node=node)
    close_value = MockValue(data=False, node=node)
    values = MockEntityValues(
        primary=value, open=open_value, close=close_value, node=node
    )
    device = cover.get_device(
        hass=hass, node=node, values=values, node_config={CONF_INVERT_PERCENT: True}
    )
    # with CONF_INVERT_PERCENT, position 25 becomes dimmer level 75
    device.set_cover_position(position=25)
    assert node.set_dimmer.called
    value_id, brightness = node.set_dimmer.mock_calls[0][1]
    assert value_id == value.value_id
    assert brightness == 75
    # button mapping is unaffected by the percent inversion
    device.open_cover()
    assert mock_network.manager.pressButton.called
    (value_id,) = mock_network.manager.pressButton.mock_calls.pop(0)[1]
    assert value_id == open_value.value_id
def test_roller_reverse_open_close(hass, mock_openzwave):
    """Test roller shutter commands with open/close buttons inverted."""
    mock_network = hass.data[const.DATA_NETWORK] = MagicMock()
    node = MockNode()
    value = MockValue(
        data=50, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    open_value = MockValue(data=False, node=node)
    close_value = MockValue(data=False, node=node)
    values = MockEntityValues(
        primary=value, open=open_value, close=close_value, node=node
    )
    device = cover.get_device(
        hass=hass,
        node=node,
        values=values,
        node_config={CONF_INVERT_OPENCLOSE_BUTTONS: True},
    )
    # with inverted buttons, open presses the close button...
    device.open_cover()
    assert mock_network.manager.pressButton.called
    (value_id,) = mock_network.manager.pressButton.mock_calls.pop(0)[1]
    assert value_id == close_value.value_id
    # ...and close presses the open button
    device.close_cover()
    assert mock_network.manager.pressButton.called
    (value_id,) = mock_network.manager.pressButton.mock_calls.pop(0)[1]
    assert value_id == open_value.value_id
    # stop releases the (inverted) close button
    device.stop_cover()
    assert mock_network.manager.releaseButton.called
    (value_id,) = mock_network.manager.releaseButton.mock_calls.pop(0)[1]
    assert value_id == close_value.value_id
def test_switch_garage_value_changed(hass, mock_openzwave):
    """Test garage door switch state changes."""
    node = MockNode()
    value = MockValue(
        data=False, node=node, command_class=const.COMMAND_CLASS_SWITCH_BINARY
    )
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    # binary False means the door is closed
    assert device.is_closed
    value.data = True
    value_changed(value)
    assert not device.is_closed
def test_switch_garage_commands(hass, mock_openzwave):
    """Test garage door switch open/close commands."""
    node = MockNode()
    value = MockValue(
        data=False, node=node, command_class=const.COMMAND_CLASS_SWITCH_BINARY
    )
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    # open/close toggle the binary switch value
    assert value.data is False
    device.open_cover()
    assert value.data is True
    device.close_cover()
    assert value.data is False
def test_barrier_garage_value_changed(hass, mock_openzwave):
    """Test barrier-operator state strings mapping to cover states."""
    node = MockNode()
    value = MockValue(
        data="Closed", node=node, command_class=const.COMMAND_CLASS_BARRIER_OPERATOR
    )
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    # Initial "Closed" state.
    assert device.is_closed
    assert not device.is_opening
    assert not device.is_closing
    # "Opening" -> in motion towards open.
    value.data = "Opening"
    value_changed(value)
    assert not device.is_closed
    assert device.is_opening
    assert not device.is_closing
    # "Opened" -> fully open, no motion flags.
    value.data = "Opened"
    value_changed(value)
    assert not device.is_closed
    assert not device.is_opening
    assert not device.is_closing
    # "Closing" -> in motion towards closed.
    value.data = "Closing"
    value_changed(value)
    assert not device.is_closed
    assert not device.is_opening
    assert device.is_closing
def test_barrier_garage_commands(hass, mock_openzwave):
    """Test open/close commands on a barrier-operator garage cover."""
    node = MockNode()
    value = MockValue(
        data="Closed", node=node, command_class=const.COMMAND_CLASS_BARRIER_OPERATOR
    )
    values = MockEntityValues(primary=value, node=node)
    device = cover.get_device(hass=hass, node=node, values=values, node_config={})
    assert value.data == "Closed"
    # Commands write the target state string straight to the value.
    device.open_cover()
    assert value.data == "Opened"
    device.close_cover()
    assert value.data == "Closed"
|
tchellomello/home-assistant
|
tests/components/zwave/test_cover.py
|
Python
|
apache-2.0
| 9,716
|
import datetime
import json
import os
import re
import tempfile
from collections import OrderedDict
from pprint import pformat
from infra_buddy.aws import s3
from infra_buddy.context.artifact_definition import ArtifactDefinition
from infra_buddy.context.monitor_definition import MonitorDefinition
from infra_buddy.context.service_definition import ServiceDefinition
from infra_buddy.notifier.datadog_notifier import DataDogNotifier
from infra_buddy.template.template_manager import TemplateManager
from infra_buddy.utility import print_utility
# Keys for well-known deployment parameters used throughout the context.
STACK_NAME = 'STACK_NAME'
DOCKER_REGISTRY = 'DOCKER_REGISTRY_URL'
ROLE = 'ROLE'
IMAGE = 'IMAGE'
APPLICATION = 'APPLICATION'
ENVIRONMENT = 'ENVIRONMENT'
REGION = 'REGION'
SKIP_ECS = 'SKIP_ECS'
# Parameters mirrored as lowercase attributes on DeployContext
# (see DeployContext._initialize_environment_variables).
built_in = [DOCKER_REGISTRY, ROLE, APPLICATION, ENVIRONMENT, REGION, SKIP_ECS]
# Template strings expanded in insertion order: later entries may reference
# variables produced by earlier ones (e.g. ${STACK_NAME}, ${CF_BUCKET_NAME}),
# so the OrderedDict ordering is significant.
env_variables = OrderedDict()
env_variables['VPCAPP'] = "${VPCAPP}"
env_variables['DEPLOY_DATE'] = "${DEPLOY_DATE}"
env_variables[STACK_NAME] = "${ENVIRONMENT}-${APPLICATION}-${ROLE}"
env_variables['EnvName'] = "${STACK_NAME}"  # alias
env_variables['ECS_SERVICE_STACK_NAME'] = "${STACK_NAME}"  # alias
env_variables['VPC_STACK_NAME'] = "${ENVIRONMENT}-${VPCAPP}-vpc"
env_variables['CF_BUCKET_NAME'] = "${ENVIRONMENT}-${VPCAPP}-cloudformation-deploy-resources"
env_variables['TEMPLATE_BUCKET'] = "${ENVIRONMENT}-${VPCAPP}-cloudformation-deploy-resources"  # alias
env_variables['CF_DEPLOY_RESOURCE_PATH'] = "${STACK_NAME}/${DEPLOY_DATE}"
env_variables['CONFIG_TEMPLATES_URL'] = "https://s3-${REGION}.amazonaws.com/${CF_BUCKET_NAME}/${CF_DEPLOY_RESOURCE_PATH}"
env_variables['CONFIG_TEMPLATES_EAST_URL'] = "https://s3.amazonaws.com/${CF_BUCKET_NAME}/${CF_DEPLOY_RESOURCE_PATH}"
env_variables['CLUSTER_STACK_NAME'] = "${ENVIRONMENT}-${APPLICATION}-cluster"
env_variables['RESOURCE_STACK_NAME'] = "${ENVIRONMENT}-${APPLICATION}-${ROLE}-resources"
env_variables['ECS_SERVICE_RESOURCE_STACK_NAME'] = "${RESOURCE_STACK_NAME}"  # alias
env_variables['KEY_NAME'] = "${ENVIRONMENT}-${APPLICATION}"
env_variables['CHANGE_SET_NAME'] = "${STACK_NAME}-deploy-cloudformation-change-set"
class DeployContext(dict):
    """Mutable mapping of variables driving a single deployment.

    Holds the merged view of file defaults, OS environment and service
    definition values, expands ``$VAR``/``${VAR}`` templates, and tracks
    the active stack name while nested deploys push/pop contexts.
    Expanded variables are additionally mirrored as lowercase instance
    attributes.
    """

    def __init__(self, defaults, environment):
        super(DeployContext, self).__init__()
        # The deploy currently executing (see push_deploy_ctx/pop_deploy_ctx).
        self.current_deploy = None
        # Files created by render_template; removed again in __del__.
        self.temp_files = []
        self._initalize_defaults(defaults,environment)

    @classmethod
    def create_deploy_context_artifact(cls, artifact_directory, environment, defaults=None):
        # type: (str, str) -> DeployContext
        """
        :rtype DeployContext
        :param artifact_directory: Path to directory containing service definition.
                May be a s3 URL pointing at a zip archive
        :param defaults: Path to json file containing default environment settings
        """
        ret = DeployContext(defaults=defaults, environment=environment)
        ret._initialize_artifact_directory(artifact_directory)
        ret._initialize_environment_variables()
        return ret

    @classmethod
    def create_deploy_context(cls, application, role, environment, defaults=None):
        # type: (str, str, str, str) -> DeployContext
        """
        :rtype DeployContext
        :param application: Application name
        :param role: Role of service
        :param environment: Environment to deploy
        :param defaults: Path to json file containing default environment settings
        """
        ret = DeployContext(defaults=defaults, environment=environment)
        ret['APPLICATION'] = application
        ret['ROLE'] = role
        ret._initialize_environment_variables()
        return ret

    def print_self(self):
        """Print the current stack, deploy defaults and full environment."""
        print_utility.warn("Context:")
        print_utility.warn("Stack: {}".format(self.stack_name))
        if len(self.stack_name_cache)>0:
            print_utility.warn("Depth: {}".format(self.stack_name_cache))
        if self.current_deploy:
            print_utility.banner_info("Deploy Defaults:",pformat(self.current_deploy.defaults))
        print_utility.banner_info("Environment:",pformat(self))

    def _initialize_artifact_directory(self, artifact_directory):
        # type: (str) -> None
        """Load service/artifact/monitor definitions from a directory or s3 zip URL."""
        if artifact_directory.startswith("s3://"):
            # Fetch and unpack the archive into a scratch directory first.
            tmp_dir = tempfile.mkdtemp()
            s3.download_zip_from_s3_url(artifact_directory, destination=tmp_dir)
            artifact_directory = tmp_dir
        service_definition = ServiceDefinition(artifact_directory, self['ENVIRONMENT'])
        self[APPLICATION] = service_definition.application
        self[ROLE] = service_definition.role
        self[DOCKER_REGISTRY] = service_definition.docker_registry
        self.update(service_definition.deployment_parameters)
        self.service_definition = service_definition
        self.artifact_definition = ArtifactDefinition.create_from_directory(artifact_directory)
        self.monitor_definition = MonitorDefinition.create_from_directory(artifact_directory)
        self.artifact_definition.register_env_variables(self)

    def _initialize_environment_variables(self):
        """Expand the env_variables templates and mirror them as attributes."""
        application = self['APPLICATION']
        # VPCAPP is the application name truncated at the first dash.
        self['VPCAPP'] = application if not application or '-' not in application else application[:application.find('-')]
        # allow for partial stack names for validation and introspection usecases
        stack_template = "${ENVIRONMENT}"
        if application:
            stack_template += "-${APPLICATION}"
            if self['ROLE']:
                stack_template += "-${ROLE}"
        # NOTE(review): this mutates the module-level env_variables table, so
        # the stack-name template leaks across DeployContext instances — confirm
        # that only one context is live at a time.
        env_variables[STACK_NAME] = stack_template
        self['DEPLOY_DATE'] = datetime.datetime.now().strftime("%b_%d_%Y_Time_%H_%M")
        for property_name in built_in:
            self.__dict__[property_name.lower()] = self.get(property_name, None)
        # Expansion is order-dependent: later templates may reference variables
        # produced by earlier iterations of this loop.
        for variable, template in env_variables.items():
            evaluated_template = self.expandvars(template)
            self[variable] = evaluated_template
            self.__dict__[variable.lower()] = evaluated_template
        #s3 has non-standardized behavior in us-east-1 you can not use the region in the url
        if self['REGION'] == 'us-east-1':
            self['CONFIG_TEMPLATES_URL'] = self['CONFIG_TEMPLATES_EAST_URL']
            self.__dict__['CONFIG_TEMPLATES_URL'.lower()] = self['CONFIG_TEMPLATES_EAST_URL']
        # Debug dump of every derived attribute.
        print_utility.info("deploy_ctx = {}".format(repr(self.__dict__)))

    def _initalize_defaults(self, defaults,environment):
        """Seed the context from defaults, then overlay the OS environment.

        (Note: the method name's spelling matches its caller in __init__.)
        """
        self['DATADOG_KEY'] = ""
        # Environment names are normalized to lowercase; "dev" is the fallback.
        self['ENVIRONMENT'] = environment.lower() if environment else "dev"
        if defaults:
            self.update(defaults)
        # OS environment variables override file-provided defaults.
        self.update(os.environ)
        if 'REGION' not in self:
            print_utility.warn("Region not configured using default 'us-west-1'. "
                               "This is probably not what you want - N. California is slow, like real slow."
                               " Set the environment variable 'REGION' or pass a default configuration file to override. ")
            self['REGION'] = 'us-west-1'
        self.template_manager = TemplateManager(self.get_deploy_templates(),self.get_service_modification_templates())
        self.stack_name_cache = []
        # Only wire up DataDog when a key was provided via defaults/environment.
        if self.get('DATADOG_KEY','') != '':
            self.notifier = DataDogNotifier(key=self['DATADOG_KEY'],deploy_context=self)
        else:
            self.notifier = None

    def get_deploy_templates(self):
        """Service template definitions from the defaults, if any."""
        return self.get('service-templates', {})

    def get_service_modification_templates(self):
        """Service modification template definitions from the defaults, if any."""
        return self.get('service-modification-templates', {})

    def generate_modification_stack_name(self, mod_name):
        """Stack name for a service modification: env-app-role-<mod_name>."""
        return "{ENVIRONMENT}-{APPLICATION}-{ROLE}-{mod_name}".format(mod_name=mod_name, **self)

    def generate_modification_resource_stack_name(self, mod_name):
        """Resource-stack name for a service modification."""
        return "{ENVIRONMENT}-{APPLICATION}-{ROLE}-{mod_name}-resources".format(mod_name=mod_name, **self)

    def get_region(self):
        """The configured AWS region, raising if it was never set."""
        return self._get_required_default_configuration(REGION)

    def _get_required_default_configuration(self, key):
        """Look up *key* in the context then the OS environment; raise if unset."""
        # NOTE(review): the local is named 'region' but this is a generic lookup.
        region = self.get(key, os.environ.get(key, None))
        if not region:
            raise Exception("Required default not set {key}.\n"
                            "Configure --configuration-defaults or set ENVIRONMENT variable {key}".format(
                key=key))
        return region

    def notify_event(self, title, type, message=None):
        """Send a deploy event to DataDog when configured, else log a warning."""
        if self.notifier:
            self.notifier.notify_event(title,type,message)
        else:
            print_utility.warn("Notify {type}: {title} - {message}".format(type=type,title=title,message=message))

    def get_service_modifications(self):
        """Service modifications declared by the loaded service definition."""
        return self.service_definition.service_modifications

    def should_skip_ecs_trivial_update(self):
        """True unless SKIP_ECS is explicitly set to something other than "True"."""
        return self.get(SKIP_ECS, os.environ.get(SKIP_ECS, "True")) == "True"

    def render_template(self, file,destination):
        """Expand ${VAR} templates in *file* into *destination* dir; return the new path.

        The rendered file drops a trailing '.tmpl' suffix and is tracked in
        temp_files for cleanup in __del__.
        """
        with open(file, 'r') as source:
            # NOTE(review): the 'destination' parameter (a directory) is
            # shadowed here by the output file handle.
            with open(os.path.join(destination,os.path.basename(file).replace('.tmpl','')),'w+') as destination:
                temp_file_path = os.path.abspath(destination.name)
                print_utility.info("Rendering template to path: {}".format(temp_file_path))
                self.temp_files.append(temp_file_path)
                for line in source:
                    destination.write(self.expandvars(line))
        return temp_file_path

    def __del__(self):
        # Best-effort cleanup of rendered temp files.
        # NOTE(review): os.remove raises OSError if a file was already
        # deleted — confirm whether errors here should be suppressed.
        for file in self.temp_files:
            os.remove(file)

    def get_execution_plan(self):
        # type: () -> list(Deploy)
        """Build the ordered deploy list: service, then artifact, then monitor."""
        execution_plan = self.service_definition.generate_execution_plan(self.template_manager, self)
        artifact_plan = self.artifact_definition.generate_execution_plan(self)
        if artifact_plan:
            execution_plan.extend(artifact_plan)
        monitor_plan = self.monitor_definition.generate_execution_plan(self)
        if monitor_plan:
            execution_plan.extend(monitor_plan)
        print_utility.progress("Execution Plan:")
        for deploy in execution_plan:
            print_utility.info_banner("\t"+str(deploy))
        return execution_plan

    def expandvars(self, template_string, aux_dict=None):
        if not template_string: return template_string #if you pass none, return none
        """Expand ENVIRONMENT variables of form $var and ${var}.
        """
        # NOTE(review): the string literal above follows a statement, so it is
        # an expression, not this method's docstring.
        def replace_var(m):
            # Precedence: aux_dict, then the active deploy's defaults, then
            # self; unknown variables are left verbatim (m.group(0)).
            if aux_dict:
                val = aux_dict.get(m.group(2) or m.group(1), None)
                if val is not None:return transform(val)
            # if we are in a deployment values set in that context take precedent
            if self.current_deploy is not None:
                val = self.current_deploy.defaults.get(m.group(2) or m.group(1), None)
                if val is not None:return transform(val)
            return transform(self.get(m.group(2) or m.group(1), m.group(0)))
        def transform( val):
            # Booleans render as lowercase "true"/"false".
            if isinstance(val, bool):
                return str(val).lower()
            return str(val)
        # Matches $name or ${name}, skipping backslash-escaped dollars.
        reVar = r'(?<!\\)\$(\w+|\{([^}]*)\})'
        sub = re.sub(reVar, replace_var, template_string)
        return sub

    def recursive_expand_vars(self,source):
        """Recursively apply expandvars through nested dicts, lists and strings."""
        if isinstance(source,dict):
            ret = {}
            for key,value in source.items():
                ret[key] = self.recursive_expand_vars(value)
            return ret
        elif isinstance(source, list):
            ret = []
            for item in source:
                ret.append(self.recursive_expand_vars(item))
            return ret
        elif isinstance(source, str):
            return self.expandvars(source)
        else:
            return source

    def push_deploy_ctx(self, deploy_):
        # type: (CloudFormationDeploy) -> None
        """Make *deploy_* current, saving the prior stack name if it sets one."""
        if deploy_.stack_name:
            self.stack_name_cache.append(self[STACK_NAME])
            self._update_stack_name(deploy_.stack_name)
        self.current_deploy = deploy_

    def _update_stack_name(self, new_val):
        # Keep the dict entry and the attribute mirror in sync.
        self[STACK_NAME] = new_val
        self.stack_name = new_val

    def pop_deploy_ctx(self):
        """Restore the previous stack name and clear the current deploy."""
        if self.current_deploy.stack_name:
            new_val = self.stack_name_cache.pop()
            self._update_stack_name(new_val)
        self.current_deploy = None
|
AlienVault-Engineering/infra-buddy
|
src/main/python/infra_buddy/context/deploy_ctx.py
|
Python
|
apache-2.0
| 12,201
|
"""
Enumerations.
"""
from collections.abc import Sequence
from uqbar.enums import IntEnumeration, StrictEnumeration
class AddAction(IntEnumeration):
    """
    An enumeration of scsynth node add actions.
    """

    ### CLASS VARIABLES ###

    # Integer values mirror scsynth's add-action codes; do not renumber.
    ADD_TO_HEAD = 0
    ADD_TO_TAIL = 1
    ADD_BEFORE = 2
    ADD_AFTER = 3
    REPLACE = 4
class BinaryOperator(IntEnumeration):
    """
    An enumeration of scsynth binary operator opcodes.

    Values are the server's operator indices; do not renumber.
    """

    ### CLASS VARIABLES ###

    ABSOLUTE_DIFFERENCE = 38  # |a - b|
    ADDITION = 0
    AMCLIP = 40
    ATAN2 = 22
    BIT_AND = 14
    BIT_OR = 15
    BIT_XOR = 16
    CLIP2 = 42
    DIFFERENCE_OF_SQUARES = 34  # a*a - b*b
    EQUAL = 6
    EXCESS = 43
    EXPRANDRANGE = 48
    FLOAT_DIVISION = 4
    FILL = 29
    FIRST_ARG = 46
    FOLD2 = 44
    GREATEST_COMMON_DIVISOR = 18
    GREATER_THAN_OR_EQUAL = 11
    GREATER_THAN = 9
    HYPOT = 23
    HYPOTX = 24
    INTEGER_DIVISION = 3
    LEAST_COMMON_MULTIPLE = 17
    LESS_THAN_OR_EQUAL = 10
    LESS_THAN = 8
    MAXIMUM = 13
    MINIMUM = 12
    MODULO = 5
    MULTIPLICATION = 2
    NOT_EQUAL = 7
    POWER = 25
    RANDRANGE = 47
    RING1 = 30  # a * (b + 1) == a * b + a
    RING2 = 31  # a * b + a + b
    RING3 = 32  # a*a*b
    RING4 = 33  # a*a*b - a*b*b
    ROUND = 19
    ROUND_UP = 20
    SCALE_NEG = 41
    SHIFT_LEFT = 26
    SHIFT_RIGHT = 27
    SQUARE_OF_DIFFERENCE = 37  # (a - b)^2
    SQUARE_OF_SUM = 36  # (a + b)^2
    SUBTRACTION = 1
    SUM_OF_SQUARES = 35  # a*a + b*b
    THRESHOLD = 39
    TRUNCATION = 21
    UNSIGNED_SHIFT = 28
    WRAP2 = 45
class CalculationRate(IntEnumeration):
    """
    An enumeration of scsynth calculation-rates.

    ::

        >>> import supriya.synthdefs
        >>> supriya.CalculationRate.AUDIO
        CalculationRate.AUDIO

    ::

        >>> supriya.CalculationRate.from_expr("demand")
        CalculationRate.DEMAND

    """

    ### CLASS VARIABLES ###

    AUDIO = 2
    CONTROL = 1
    DEMAND = 3
    SCALAR = 0
    # Short aliases matching SuperCollider's rate suffixes (ar/kr/dr/ir).
    AR = 2
    KR = 1
    DR = 3
    IR = 0

    ### PUBLIC METHODS ###

    @classmethod
    def from_expr(cls, expr):
        """
        Gets calculation-rate.

        ::

            >>> import supriya.synthdefs
            >>> import supriya.ugens

        ::

            >>> supriya.CalculationRate.from_expr(1)
            CalculationRate.SCALAR

        ::

            >>> supriya.CalculationRate.from_expr("demand")
            CalculationRate.DEMAND

        ::

            >>> collection = []
            >>> collection.append(supriya.ugens.DC.ar(0))
            >>> collection.append(supriya.ugens.DC.kr(1))
            >>> collection.append(2.0)
            >>> supriya.CalculationRate.from_expr(collection)
            CalculationRate.AUDIO

        ::

            >>> collection = []
            >>> collection.append(supriya.ugens.DC.kr(1))
            >>> collection.append(2.0)
            >>> supriya.CalculationRate.from_expr(collection)
            CalculationRate.CONTROL

        Return calculation-rate.
        """
        import supriya.synthdefs
        import supriya.ugens
        # Bare numbers are scalars (but enum members fall through below).
        if isinstance(expr, (int, float)) and not isinstance(expr, cls):
            return CalculationRate.SCALAR
        elif isinstance(expr, (supriya.synthdefs.OutputProxy, supriya.synthdefs.UGen)):
            return expr.calculation_rate
        elif isinstance(expr, supriya.synthdefs.Parameter):
            name = expr.parameter_rate.name
            # Trigger-rate parameters are treated as control-rate.
            if name == "TRIGGER":
                return CalculationRate.CONTROL
            return CalculationRate.from_expr(name)
        elif isinstance(expr, str):
            return super().from_expr(expr)
        elif isinstance(expr, Sequence):
            # max() relies on the numeric ordering
            # SCALAR < CONTROL < AUDIO < DEMAND.
            return max(CalculationRate.from_expr(item) for item in expr)
        elif hasattr(expr, "calculation_rate"):
            return cls.from_expr(expr.calculation_rate)
        return super().from_expr(expr)

    ### PUBLIC PROPERTIES ###

    @property
    def token(self):
        """The constructor-method suffix for this rate (ir/kr/ar)."""
        if self == CalculationRate.SCALAR:
            return "ir"
        elif self == CalculationRate.CONTROL:
            return "kr"
        elif self == CalculationRate.AUDIO:
            return "ar"
        # DEMAND (and anything else) falls through to "new".
        return "new"
class DoneAction(IntEnumeration):
    """
    An enumeration of ``scsynth`` UGen "done" actions.

    ::

        >>> import supriya.synthdefs
        >>> supriya.DoneAction(2)
        DoneAction.FREE_SYNTH

    ::

        >>> supriya.DoneAction.from_expr("pause synth")
        DoneAction.PAUSE_SYNTH

    """

    ### CLASS VARIABLES ###

    # Values are the doneAction codes scsynth expects; names describe the effect.
    NOTHING = 0
    PAUSE_SYNTH = 1
    FREE_SYNTH = 2
    FREE_SYNTH_AND_PRECEDING_NODE = 3
    FREE_SYNTH_AND_FOLLOWING_NODE = 4
    FREE_SYNTH_AND_FREEALL_PRECEDING_NODE = 5
    FREE_SYNTH_AND_FREEALL_FOLLOWING_NODE = 6
    FREE_SYNTH_AND_ALL_PRECEDING_NODES_IN_GROUP = 7
    FREE_SYNTH_AND_ALL_FOLLOWING_NODES_IN_GROUP = 8
    FREE_SYNTH_AND_PAUSE_PRECEDING_NODE = 9
    FREE_SYNTH_AND_PAUSE_FOLLOWING_NODE = 10
    FREE_SYNTH_AND_DEEPFREE_PRECEDING_NODE = 11
    FREE_SYNTH_AND_DEEPFREE_FOLLOWING_NODE = 12
    FREE_SYNTH_AND_ALL_SIBLING_NODES = 13
    FREE_SYNTH_AND_ENCLOSING_GROUP = 14
class EnvelopeShape(IntEnumeration):
    """
    An enumeration of envelope segment shapes.
    """

    ### CLASS VARIABLES ###

    CUBED = 7
    CUSTOM = 5
    EXPONENTIAL = 2
    LINEAR = 1
    SINE = 3
    SQUARED = 6
    STEP = 0
    WELCH = 4
class HeaderFormat(IntEnumeration):
    """
    An enumeration of soundfile header formats.

    ::

        >>> supriya.HeaderFormat.AIFF
        HeaderFormat.AIFF

    ::

        >>> supriya.HeaderFormat.from_expr("wav")
        HeaderFormat.WAV

    ::

        >>> header_format = supriya.HeaderFormat.from_expr("wav")
        >>> header_format.name.lower()
        'wav'

    """

    ### CLASS VARIABLES ###

    # Soundfile container formats.
    AIFF = 0
    IRCAM = 1
    NEXT = 2
    RAW = 3
    WAV = 4
class NodeAction(IntEnumeration):
    """
    An enumeration of scsynth node notification actions.
    """

    ### CLASS VARIABLES ###

    NODE_CREATED = 0
    NODE_REMOVED = 1
    NODE_ACTIVATED = 2
    NODE_DEACTIVATED = 3
    NODE_MOVED = 4
    NODE_QUERIED = 5

    ### PUBLIC METHODS ###

    @classmethod
    def from_address(cls, address):
        """Map an OSC notification address (e.g. ``/n_go``) to its action.

        Raises KeyError for an unrecognized address.
        """
        return {
            "/n_go": cls.NODE_CREATED,
            "/n_end": cls.NODE_REMOVED,
            "/n_on": cls.NODE_ACTIVATED,
            "/n_off": cls.NODE_DEACTIVATED,
            "/n_move": cls.NODE_MOVED,
            "/n_info": cls.NODE_QUERIED,
        }[address]
class ParameterRate(IntEnumeration):
    """
    An enumeration of synthdef control rates.
    """

    ### CLASS VARIABLES ###

    # NOTE: numeric values differ from CalculationRate (CONTROL is 3 here).
    AUDIO = 2
    CONTROL = 3
    SCALAR = 0
    TRIGGER = 1
    # Short aliases mirroring the long names above.
    AR = 2
    KR = 3
    IR = 0
    TR = 1
class RequestId(IntEnumeration):
    """
    An enumeration of scsynth request ids.
    """

    ### CLASS VARIABLES ###

    # Values are the server's numeric command ids; do not renumber.
    BUFFER_ALLOCATE = 28
    BUFFER_ALLOCATE_READ = 29
    BUFFER_ALLOCATE_READ_CHANNEL = 54
    BUFFER_CLOSE = 33
    BUFFER_FILL = 37
    BUFFER_FREE = 32
    BUFFER_GENERATE = 38
    BUFFER_GET = 42
    BUFFER_GET_CONTIGUOUS = 43
    BUFFER_QUERY = 47
    BUFFER_READ = 30
    BUFFER_READ_CHANNEL = 55
    BUFFER_SET = 35
    BUFFER_SET_CONTIGUOUS = 36
    BUFFER_WRITE = 31
    BUFFER_ZERO = 34
    CLEAR_SCHEDULE = 51
    COMMAND = 4
    CONTROL_BUS_FILL = 27
    CONTROL_BUS_GET = 40
    CONTROL_BUS_GET_CONTIGUOUS = 41
    CONTROL_BUS_SET = 25
    CONTROL_BUS_SET_CONTIGUOUS = 26
    DUMP_OSC = 39
    ERROR = 58
    GROUP_DEEP_FREE = 50
    GROUP_DUMP_TREE = 56
    GROUP_FREE_ALL = 24
    GROUP_HEAD = 22
    GROUP_NEW = 21
    GROUP_QUERY_TREE = 57
    GROUP_TAIL = 23
    NODE_AFTER = 19
    NODE_BEFORE = 18
    NODE_COMMAND = 13
    NODE_FILL = 17
    NODE_FREE = 11
    NODE_MAP_TO_CONTROL_BUS = 14
    NODE_MAP_TO_AUDIO_BUS = 60
    NODE_MAP_TO_AUDIO_BUS_CONTIGUOUS = 61
    NODE_MAP_TO_CONTROL_BUS_CONTIGUOUS = 48
    NODE_ORDER = 62
    NODE_QUERY = 46
    NODE_RUN = 12
    NODE_SET = 15
    NODE_SET_CONTIGUOUS = 16
    NODE_TRACE = 10
    NOTHING = 0
    NOTIFY = 1
    PARALLEL_GROUP_NEW = 63
    QUIT = 3
    STATUS = 2
    SYNC = 52
    SYNTHDEF_FREE = 53
    SYNTHDEF_FREE_ALL = 8
    SYNTHDEF_LOAD = 6
    SYNTHDEF_LOAD_DIR = 7
    SYNTHDEF_RECEIVE = 5
    SYNTH_GET = 44
    SYNTH_GET_CONTIGUOUS = 45
    SYNTH_NEW = 9
    SYNTH_NEWARGS = 59
    SYNTH_NOID = 49
    SYNTH_QUERY = 65
    UGEN_COMMAND = 20
    VERSION = 64

    @property
    def request_name(self):
        """The RequestName member sharing this member's name."""
        return RequestName.from_expr(self.name)
class RequestName(StrictEnumeration):
    """
    An enumeration of scsynth request names.
    """

    ### CLASS VARIABLES ###

    # Values are the OSC addresses sent to the server; commented-out members
    # have a RequestId but no OSC address form.
    BUFFER_ALLOCATE = "/b_alloc"
    BUFFER_ALLOCATE_READ = "/b_allocRead"
    BUFFER_ALLOCATE_READ_CHANNEL = "/b_allocReadChannel"
    BUFFER_CLOSE = "/b_close"
    BUFFER_FILL = "/b_fill"
    BUFFER_FREE = "/b_free"
    BUFFER_GENERATE = "/b_gen"
    BUFFER_GET = "/b_get"
    BUFFER_GET_CONTIGUOUS = "/b_getn"
    BUFFER_QUERY = "/b_query"
    BUFFER_READ = "/b_read"
    BUFFER_READ_CHANNEL = "/b_readChannel"
    BUFFER_SET = "/b_set"
    BUFFER_SET_CONTIGUOUS = "/b_setn"
    BUFFER_WRITE = "/b_write"
    BUFFER_ZERO = "/b_zero"
    CLEAR_SCHEDULE = "/clearSched"
    COMMAND = "/cmd"
    CONTROL_BUS_FILL = "/c_fill"
    CONTROL_BUS_GET = "/c_get"
    CONTROL_BUS_GET_CONTIGUOUS = "/c_getn"
    CONTROL_BUS_SET = "/c_set"
    CONTROL_BUS_SET_CONTIGUOUS = "/c_setn"
    DUMP_OSC = "/dumpOSC"
    ERROR = "/error"
    GROUP_DEEP_FREE = "/g_deepFree"
    GROUP_DUMP_TREE = "/g_dumpTree"
    GROUP_FREE_ALL = "/g_freeAll"
    GROUP_HEAD = "/g_head"
    GROUP_NEW = "/g_new"
    GROUP_QUERY_TREE = "/g_queryTree"
    GROUP_TAIL = "/g_tail"
    NODE_AFTER = "/n_after"
    NODE_BEFORE = "/n_before"
    # NODE_COMMAND = None
    NODE_FILL = "/n_fill"
    NODE_FREE = "/n_free"
    NODE_MAP_TO_AUDIO_BUS = "/n_mapa"
    NODE_MAP_TO_AUDIO_BUS_CONTIGUOUS = "/n_mapan"
    NODE_MAP_TO_CONTROL_BUS = "/n_map"
    NODE_MAP_TO_CONTROL_BUS_CONTIGUOUS = "/n_mapn"
    NODE_ORDER = "/n_order"
    NODE_QUERY = "/n_query"
    NODE_RUN = "/n_run"
    NODE_SET = "/n_set"
    NODE_SET_CONTIGUOUS = "/n_setn"
    NODE_TRACE = "/n_trace"
    # NOTHING = None
    NOTIFY = "/notify"
    PARALLEL_GROUP_NEW = "/p_new"
    QUIT = "/quit"
    STATUS = "/status"
    SYNC = "/sync"
    SYNTHDEF_FREE = "/d_free"
    # SYNTHDEF_FREE_ALL = None
    SYNTHDEF_LOAD = "/d_load"
    SYNTHDEF_LOAD_DIR = "/d_loadDir"
    SYNTHDEF_RECEIVE = "/d_recv"
    SYNTH_GET = "/s_get"
    SYNTH_GET_CONTIGUOUS = "/s_getn"
    SYNTH_NEW = "/s_new"
    SYNTH_QUERY = "/s_query"
    # SYNTH_NEWARGS = None
    SYNTH_NOID = "/s_noid"
    UGEN_COMMAND = "/u_cmd"
    VERSION = "/version"

    ### PUBLIC PROPERTIES ###

    @property
    def request_id(self):
        """The RequestId member sharing this member's name."""
        return RequestId.from_expr(self.name)
class SampleFormat(IntEnumeration):
    """
    An enumeration of soundfile sample formats.

    ::

        >>> supriya.SampleFormat.INT24
        SampleFormat.INT24

    ::

        >>> supriya.SampleFormat.from_expr("float")
        SampleFormat.FLOAT

    ::

        >>> sample_format = supriya.SampleFormat.INT24
        >>> sample_format.name.lower()
        'int24'

    """

    ### CLASS VARIABLES ###

    # NOTE: the ordering is deliberate (INT24 is 0); do not renumber.
    INT24 = 0
    ALAW = 1
    DOUBLE = 2
    FLOAT = 3
    INT8 = 4
    INT16 = 5
    INT32 = 6
    MULAW = 7
class SignalRange(IntEnumeration):
    """
    An enumeration of scsynth UGen signal ranges.

    ::

        >>> supriya.SignalRange.UNIPOLAR
        SignalRange.UNIPOLAR

    ::

        >>> supriya.SignalRange.from_expr("bipolar")
        SignalRange.BIPOLAR

    """

    ### CLASS VARIABLES ###

    UNIPOLAR = 0  # non-negative output
    BIPOLAR = 1  # output crosses zero
class UnaryOperator(IntEnumeration):
    """
    An enumeration of scsynth unary operator opcodes.

    Values are the server's operator indices; do not renumber.
    """

    ### CLASS VARIABLES ###

    ABSOLUTE_VALUE = 5
    AMPLITUDE_TO_DB = 22
    ARCCOS = 32
    ARCSIN = 31
    ARCTAN = 33
    AS_FLOAT = 6
    AS_INT = 7
    BILINRAND = 40
    BIT_NOT = 4
    CEILING = 8
    COIN = 44
    COS = 29
    COSH = 35
    CUBED = 13
    DB_TO_AMPLITUDE = 21
    DIGIT_VALUE = 45
    DISTORT = 42
    EXPONENTIAL = 15
    FLOOR = 9
    FRACTIONAL_PART = 10
    HZ_TO_MIDI = 18
    HZ_TO_OCTAVE = 24
    HANNING_WINDOW = 49
    IS_NIL = 2
    LINRAND = 39
    LOG = 25
    LOG10 = 27
    LOG2 = 26
    MIDI_TO_HZ = 17
    SEMITONES_TO_RATIO = 19
    NEGATIVE = 0
    NOT = 1
    NOT_NIL = 3
    OCTAVE_TO_HZ = 23
    RAMP = 52
    RAND = 37
    RAND2 = 38
    RATIO_TO_SEMITONES = 20
    RECIPROCAL = 16
    RECTANGLE_WINDOW = 48
    S_CURVE = 53
    SIGN = 11
    SILENCE = 46
    SIN = 28
    SINH = 34
    SOFTCLIP = 43
    SQUARE_ROOT = 14
    SQUARED = 12
    SUM3RAND = 41
    TAN = 30
    TANH = 36
    THRU = 47
    TRIANGLE_WINDOW = 51
    WELCH_WINDOW = 50
class Unit(IntEnumeration):
    """
    An enumeration of parameter unit hints.
    """

    ### CLASS VARIABLES ###

    UNDEFINED = 0
    DECIBELS = 1
    AMPLITUDE = 2
    SECONDS = 3
    MILLISECONDS = 4
    HERTZ = 5
    SEMITONES = 6
|
josiah-wolf-oberholtzer/supriya
|
supriya/enums.py
|
Python
|
mit
| 12,735
|
"""Utility functions for Certbot plugin tests."""
import argparse
import copy
import contextlib
import os
import re
import shutil
import socket
import tarfile
from acme import jose
from acme import test_util
from certbot import constants
from certbot_compatibility_test import errors
# Test-vector RSA key fixtures shared across the compatibility tests.
_KEY_BASE = "rsa1024_key.pem"
KEY_PATH = test_util.vector_path(_KEY_BASE)
KEY = test_util.load_pyopenssl_private_key(_KEY_BASE)
JWK = jose.JWKRSA(key=test_util.load_rsa_private_key(_KEY_BASE))
# Matches dotted-quad IPv4 literals; note each octet is not range-checked
# (e.g. "999.0.0.1" also matches).
IP_REGEX = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
def create_le_config(parent_dir):
    """Set up LE dirs under parent_dir and return the config as a Namespace."""
    config = copy.deepcopy(constants.CLI_DEFAULTS)
    le_dir = os.path.join(parent_dir, "certbot")
    config["config_dir"] = os.path.join(le_dir, "config")
    config["work_dir"] = os.path.join(le_dir, "work")
    # NOTE(review): the on-disk directory is named "logs_dir", unlike
    # "config"/"work" above — confirm this naming is intentional.
    config["logs_dir"] = os.path.join(le_dir, "logs_dir")
    # makedirs creates le_dir as well; the remaining mkdirs only need one level.
    os.makedirs(config["config_dir"])
    os.mkdir(config["work_dir"])
    os.mkdir(config["logs_dir"])
    config["domains"] = None
    return argparse.Namespace(**config)  # pylint: disable=star-args
def extract_configs(configs, parent_dir):
    """Extracts configs to a new dir under parent_dir and returns it.

    *configs* may be a directory (copied, preserving symlinks) or a tarball
    (unpacked); anything else raises errors.Error.
    """
    config_dir = os.path.join(parent_dir, "configs")
    if os.path.isdir(configs):
        shutil.copytree(configs, config_dir, symlinks=True)
    elif tarfile.is_tarfile(configs):
        with tarfile.open(configs, "r") as tar:
            # NOTE(review): extractall performs no member-path sanitization,
            # so a malicious archive could write outside config_dir — only
            # use with trusted test archives.
            tar.extractall(config_dir)
    else:
        raise errors.Error("Unknown configurations file type")
    return config_dir
def get_two_free_ports():
    """Return a pair of TCP port numbers that were free at call time.

    Both sockets are bound simultaneously, so the two ports are distinct.
    They are released before returning, leaving a small window before the
    caller re-binds them.
    """
    first = socket.socket()
    second = socket.socket()
    try:
        first.bind(("", 0))
        second.bind(("", 0))
        return first.getsockname()[1], second.getsockname()[1]
    finally:
        second.close()
        first.close()
|
brentdax/letsencrypt
|
certbot-compatibility-test/certbot_compatibility_test/util.py
|
Python
|
apache-2.0
| 1,932
|
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import subnet
from neutron_lib import constants
from neutron_lib.db import constants as db_constants
# Attribute names exposed by the segment API resources.
SEGMENT_ID = 'segment_id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
# Field-size limits shared with the DB layer.
NAME_LEN = db_constants.NAME_FIELD_SIZE
DESC_LEN = db_constants.DESCRIPTION_FIELD_SIZE
# Extension metadata consumed by the neutron extension framework.
ALIAS = 'segment'
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Segment'
API_PREFIX = ''
DESCRIPTION = 'Segments extension.'
UPDATED_TIMESTAMP = '2016-02-24T17:00:00-00:00'
RESOURCE_NAME = 'segment'
COLLECTION_NAME = RESOURCE_NAME + 's'
# API attribute map: the 'segments' collection plus the segment_id attribute
# this extension adds to subnets. All attributes are create-only except name.
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {
                'type:uuid': None
            },
            'is_filter': True,
            'is_sort_key': True,
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {
                'type:string': db_constants.PROJECT_ID_FIELD_SIZE
            },
            'is_visible': False
        },
        'network_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {
                'type:uuid': None
            },
            'is_filter': True,
            'is_sort_key': True,
            'is_visible': True
        },
        PHYSICAL_NETWORK: {
            'allow_post': True,
            'allow_put': False,
            'default': constants.ATTR_NOT_SPECIFIED,
            'validate': {
                'type:string': provider_net.PHYSICAL_NETWORK_MAX_LEN
            },
            'is_filter': True,
            'is_sort_key': True,
            'is_visible': True
        },
        NETWORK_TYPE: {
            'allow_post': True,
            'allow_put': False,
            'validate': {
                'type:string': provider_net.NETWORK_TYPE_MAX_LEN
            },
            'is_filter': True,
            'is_sort_key': True,
            'is_visible': True
        },
        # NOTE(review): segmentation_id has no 'validate' entry beyond int
        # conversion and is not marked 'is_filter' — confirm this is intended.
        SEGMENTATION_ID: {
            'allow_post': True,
            'allow_put': False,
            'default': constants.ATTR_NOT_SPECIFIED,
            'convert_to': converters.convert_to_int,
            'is_sort_key': True,
            'is_visible': True
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'default': constants.ATTR_NOT_SPECIFIED,
            'validate': {
                'type:string_or_none': NAME_LEN
            },
            'is_filter': True,
            'is_sort_key': True,
            'is_visible': True
        }
    },
    subnet.COLLECTION_NAME: {
        SEGMENT_ID: {
            'allow_post': True,
            'allow_put': False,
            'default': None,
            'validate': {
                'type:uuid_or_none': None
            },
            'is_filter': True,
            'is_sort_key': True,
            'is_visible': True
        }
    }
}
# This extension defines no sub-resources or custom actions.
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = [
    'standard-attr-description'
]
OPTIONAL_EXTENSIONS = [
    # Use string instead of constant to avoid circular import
    'standard-attr-segment'
]
ACTION_STATUS = {}
|
openstack/neutron-lib
|
neutron_lib/api/definitions/segment.py
|
Python
|
apache-2.0
| 4,049
|
from pypom import Region
from selenium.webdriver.common.by import By
from pages.desktop.base import Base
class Home(Base):
    """Addons Home page"""

    # Locators for the hero banner and the shelves on the home page.
    _extensions_category_locator = (By.CLASS_NAME, 'Home-CuratedCollections')
    _featured_extensions_locator = (By.CLASS_NAME, 'Home-FeaturedExtensions')
    _featured_themes_locator = (By.CLASS_NAME, 'Home-FeaturedThemes')
    _hero_locator = (By.CLASS_NAME, 'Hero-name-HomeHeroGuides')
    _popular_extensions_locator = (By.CLASS_NAME, 'Home-PopularExtensions')
    _popular_themes_locator = (By.CLASS_NAME, 'Home-PopularThemes')
    _themes_category_locator = (By.CLASS_NAME, 'Home-CuratedThemes')
    _toprated_themes_locator = (By.CLASS_NAME, 'Home-TopRatedThemes')

    def wait_for_page_to_load(self):
        """Block until the hero banner is displayed, then return self."""
        self.wait.until(
            lambda _: self.is_element_displayed(*self._hero_locator)
        )
        return self

    @property
    def hero_banner(self):
        return self.find_element(*self._hero_locator)

    @property
    def popular_extensions(self):
        el = self.find_element(*self._popular_extensions_locator)
        return self.Extensions(self, el)

    @property
    def featured_extensions(self):
        el = self.find_element(*self._featured_extensions_locator)
        return self.Extensions(self, el)

    @property
    def featured_themes(self):
        el = self.find_element(*self._featured_themes_locator)
        return self.Themes(self, el)

    @property
    def popular_themes(self):
        el = self.find_element(*self._popular_themes_locator)
        return self.Themes(self, el)

    @property
    def toprated_themes(self):
        el = self.find_element(*self._toprated_themes_locator)
        return self.Themes(self, el)

    @property
    def extension_category(self):
        el = self.find_element(*self._extensions_category_locator)
        return self.Category(self, el)

    @property
    def theme_category(self):
        el = self.find_element(*self._themes_category_locator)
        return self.Category(self, el)

    class Category(Region):
        """A curated shelf of category links."""

        _extensions_locator = (By.CLASS_NAME, 'Home-SubjectShelf-list-item')

        @property
        def list(self):
            items = self.find_elements(*self._extensions_locator)
            return [self.CategoryDetail(self.page, el) for el in items]

        class CategoryDetail(Region):
            """One category link inside a curated shelf."""

            _extension_link_locator = (By.CLASS_NAME, 'Home-SubjectShelf-link')
            _extension_name_locator = (
                By.CSS_SELECTOR, '.Home-SubjectShelf-link span')

            @property
            def name(self):
                return self.find_element(*self._extension_name_locator).text

            def click(self):
                self.root.click()
                # Local import — presumably avoids a circular import; confirm.
                from pages.desktop.extensions import Extensions
                return Extensions(self.selenium, self.page.base_url)

    class Extensions(Region):
        """An extensions shelf: result cards plus a browse-all footer link."""

        _browse_all_locator = (By.CSS_SELECTOR, '.Card-footer-link > a')
        _extensions_locator = (By.CLASS_NAME, 'SearchResult')
        _extension_card_locator = (By.CSS_SELECTOR, '.Home-category-li')

        @property
        def list(self):
            items = self.find_elements(*self._extensions_locator)
            return [Home.ExtensionsList(self.page, el) for el in items]

        @property
        def browse_all(self):
            """Click the footer link and return the loaded Search page."""
            self.find_element(*self._browse_all_locator).click()
            from pages.desktop.search import Search
            search = Search(self.selenium, self.page.base_url)
            return search.wait_for_page_to_load()

    class Themes(Region):
        """A themes shelf; items reuse the ExtensionsList region."""

        _browse_all_locator = (By.CSS_SELECTOR, '.Card-footer-link > a')
        _themes_locator = (By.CLASS_NAME, 'SearchResult--theme')
        _theme_card_locator = (By.CSS_SELECTOR, '.Home-category-li')

        @property
        def list(self):
            items = self.find_elements(*self._themes_locator)
            return [Home.ExtensionsList(self.page, el) for el in items]

        @property
        def browse_all(self):
            """Click the footer link and return the loaded Search page."""
            self.find_element(*self._browse_all_locator).click()
            from pages.desktop.search import Search
            search = Search(self.selenium, self.page.base_url)
            return search.wait_for_page_to_load()

    class ExtensionsList(Region):
        """One search-result card (used for both extensions and themes)."""

        _extension_link_locator = (By.CLASS_NAME, 'SearchResult-link')
        _extension_name_locator = (By.CLASS_NAME, 'SearchResult-name')

        @property
        def name(self):
            return self.find_element(*self._extension_name_locator).text

        def click(self):
            self.find_element(*self._extension_link_locator).click()
            from pages.desktop.extensions import Extensions
            return Extensions(self.selenium, self.page.base_url)
|
aviarypl/mozilla-l10n-addons-server
|
tests/ui/pages/desktop/home.py
|
Python
|
bsd-3-clause
| 4,776
|
# Copyright (C) 2001-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
import base64
import binascii

import dns.exception
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
_hex_chunksize = 32

def _hexify(data, chunksize=None):
    """Convert a binary string into its hex encoding, broken up into chunks
    of I{chunksize} characters separated by a space.

    @param data: the binary string
    @type data: string
    @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
    @rtype: string
    """
    if chunksize is None:
        chunksize = _hex_chunksize
    # binascii.hexlify produces the same text as the 'hex_codec' str codec
    # on Python 2, and (unlike the codec) also exists on Python 3.
    # Renamed from 'hex' to avoid shadowing the builtin.
    hexed = binascii.hexlify(data)
    if not isinstance(hexed, str):
        # Python 3: hexlify returns bytes; callers expect text.
        hexed = hexed.decode('ascii')
    if len(hexed) > chunksize:
        hexed = ' '.join([hexed[i : i + chunksize]
                          for i in range(0, len(hexed), chunksize)])
    return hexed
_base64_chunksize = 32

def _base64ify(data, chunksize=None):
    """Convert a binary string into its base64 encoding, broken up into chunks
    of I{chunksize} characters separated by a space.

    @param data: the binary string
    @type data: string
    @param chunksize: the chunk size. Default is
    L{dns.rdata._base64_chunksize}
    @rtype: string
    """
    if chunksize is None:
        chunksize = _base64_chunksize
    # base64.b64encode emits no newlines, so it equals the original
    # data.encode('base64_codec') followed by newline stripping; unlike
    # the codec form it also works on Python 3.
    b64 = base64.b64encode(data)
    if not isinstance(b64, str):
        # Python 3: b64encode returns bytes; callers expect text.
        b64 = b64.decode('ascii')
    if len(b64) > chunksize:
        b64 = ' '.join([b64[i : i + chunksize]
                        for i in range(0, len(b64), chunksize)])
    return b64
# Characters that must be backslash-escaped inside a quoted string.
__escaped = {
    '"' : True,
    '\\' : True,
}

def _escapify(qstring):
    """Escape the characters in a quoted string which need it.

    @param qstring: the string
    @type qstring: string
    @returns: the escaped string
    @rtype: string
    """
    pieces = []
    for ch in qstring:
        code = ord(ch)
        if ch in __escaped:
            pieces.append('\\' + ch)
        elif 0x20 <= code < 0x7F:
            # Printable ASCII passes through unchanged.
            pieces.append(ch)
        else:
            # Everything else becomes a decimal \DDD escape.
            pieces.append('\\%03d' % code)
    return ''.join(pieces)
def _truncate_bitmap(what):
    """Determine the index of greatest byte that isn't all zeros, and
    return the bitmap that contains all the bytes less than that index.

    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """
    # Default i to 0: the original left i unbound for empty input and
    # raised NameError.  With the default, empty input returns '' and an
    # all-zero bitmap keeps its first octet, exactly as before.
    i = 0
    for i in range(len(what) - 1, -1, -1):
        if what[i] != '\x00':
            break
    return ''.join(what[0 : i + 1])
class Rdata(object):
    """Base class for all DNS rdata types.
    """

    # __slots__ keeps per-instance memory low; rdata objects can exist in
    # very large numbers in a zone or cache.
    __slots__ = ['rdclass', 'rdtype']

    def __init__(self, rdclass, rdtype):
        """Initialize an rdata.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        """
        self.rdclass = rdclass
        self.rdtype = rdtype

    def covers(self):
        """DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function. If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).
        @rtype: int
        """
        return dns.rdatatype.NONE

    def extended_rdatatype(self):
        """Return a 32-bit type value, the least significant 16 bits of
        which are the ordinary DNS type, and the upper 16 bits of which are
        the "covered" type, if any.
        @rtype: int
        """
        return self.covers() << 16 | self.rdtype

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.
        @rtype: string
        """
        # Subclasses must implement text rendering.
        raise NotImplementedError

    def to_wire(self, file, compress = None, origin = None):
        """Convert an rdata to wire format.
        @rtype: string
        """
        # Subclasses must implement wire rendering.
        raise NotImplementedError

    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid. If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.
        """
        # Round-trip through the text form; from_text raises if the text
        # produced by the current field values cannot be parsed back.
        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())

    def __repr__(self):
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
               str(self) + '>'

    def __str__(self):
        return self.to_text()

    def _cmp(self, other):
        """Compare an rdata with another rdata of the same rdtype and
        rdclass. Return < 0 if self < other in the DNSSEC ordering,
        0 if self == other, and > 0 if self > other.
        """
        raise NotImplementedError

    def __eq__(self, other):
        # Rdatas of different class or type are never equal; only delegate
        # to the subclass _cmp() when class and type both match.
        if not isinstance(other, Rdata):
            return False
        if self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        if not isinstance(other, Rdata):
            return True
        if self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype:
            return True
        return self._cmp(other) != 0

    # Ordering comparisons are only defined between rdatas of the same
    # class and type; otherwise NotImplemented defers to the other operand.
    def __lt__(self, other):
        if not isinstance(other, Rdata) or \
           self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Rdata) or \
           self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Rdata) or \
           self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Rdata) or \
           self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) > 0

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Build an rdata object from text format.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param tok: The tokenizer
        @type tok: dns.tokenizer.Tokenizer
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @param relativize: should names be relativized?
        @type relativize: bool
        @rtype: dns.rdata.Rdata instance
        """
        raise NotImplementedError

    # Pre-decorator (Python 2.3 era) spelling of @classmethod.
    from_text = classmethod(from_text)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Build an rdata object from wire format
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param rdlen: The length of the wire-format rdata
        @type rdlen: int
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @rtype: dns.rdata.Rdata instance
        """
        raise NotImplementedError

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        """Convert any domain names in the rdata to the specified
        relativization.
        """
        # Default no-op: subclasses containing names override this.
        pass
class GenericRdata(Rdata):
    """Generate Rdata Class

    This class is used for rdata types for which we have no better
    implementation. It implements the DNS "unknown RRs" scheme.
    """

    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        # Raw, uninterpreted wire-format rdata.
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # Generic ("unknown RR") presentation: \# <length> <hex data>.
        return r'\# %d ' % len(self.data) + _hexify(self.data)

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Expects the generic syntax: a literal \# token, a decimal
        # length, then hex data possibly split across several tokens.
        # (Python 2 only: comma raise syntax and the 'hex_codec' codec.)
        if tok.get_string() != r'\#':
            raise dns.exception.SyntaxError, \
                  r'generic rdata does not start with \#'
        length = tok.get_int()
        chunks = []
        while 1:
            (ttype, value) = tok.get()
            if ttype == dns.tokenizer.EOL or ttype == dns.tokenizer.EOF:
                break
            chunks.append(value)
        hex = ''.join(chunks)
        data = hex.decode('hex_codec')
        # The decoded data must match the declared length exactly.
        if len(data) != length:
            raise dns.exception.SyntaxError, \
                  'generic rdata hex data has wrong length'
        return cls(rdclass, rdtype, data)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # The stored data is already in wire format.
        file.write(self.data)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        return cls(rdclass, rdtype, wire[current : current + rdlen])

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Python 2 cmp(): byte-wise comparison of the raw rdata.
        return cmp(self.data, other.data)
# Cache mapping (rdclass, rdtype) -> implementing module.  Class-agnostic
# implementations are cached under (dns.rdataclass.ANY, rdtype).
_rdata_modules = {}
# Prefix used to form implementation module names; changing it breaks
# the library (see the module docstring).
_module_prefix = 'dns.rdtypes'

def get_rdata_class(rdclass, rdtype):
    """Return the class implementing (rdclass, rdtype), importing and
    caching its module on first use.

    Falls back to a class-agnostic module under the ANY class, and
    finally to GenericRdata when no implementation module exists.
    """

    def import_module(name):
        # __import__ returns the top-level package; walk down to the leaf.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    # Type mnemonics like NSEC3-PARAM are not valid module names.
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Fix: the original looked this up under dns.rdatatype.ANY while
        # storing under dns.rdataclass.ANY; that only worked because both
        # constants are 255.  Use the class constant consistently.
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        # The implementing class is named after the type mnemonic.
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin = None, relativize = True):
    """Build an rdata object from text format.

    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.

    Once a class is chosen, its from_text() class method is called
    with the parameters to this function.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param tok: The tokenizer
    @type tok: dns.tokenizer.Tokenizer
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @param relativize: Should names be relativized?
    @type relativize: bool
    @rtype: dns.rdata.Rdata instance"""

    if isinstance(tok, str):
        # Accept plain text for convenience; wrap it in a tokenizer.
        tok = dns.tokenizer.Tokenizer(tok)
    implementation = get_rdata_class(rdclass, rdtype)
    if implementation != GenericRdata:
        # Peek at the first token: a known type may still be written in
        # the generic ("\#") syntax.
        peeked = tok.get()
        tok.unget(peeked)
        if peeked[0] == dns.tokenizer.IDENTIFIER and peeked[1] == r'\#':
            # Extract the wire form from the generic syntax, then let the
            # type-specific wire parser interpret it.
            generic = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                             relativize)
            return from_wire(rdclass, rdtype, generic.data, 0,
                             len(generic.data), origin)
    return implementation.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin = None):
    """Build an rdata object from wire format

    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.

    Once a class is chosen, its from_wire() class method is called
    with the parameters to this function.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""

    implementation = get_rdata_class(rdclass, rdtype)
    return implementation.from_wire(rdclass, rdtype, wire, current, rdlen,
                                    origin)
|
liyongyue/dnsspider
|
dns/rdata.py
|
Python
|
isc
| 14,671
|
#!/usr/bin/env python
import sys
import csv
import json
if len(sys.argv) != 3:
print 'Incorrect number of arguments.'
print 'Usage: csv_to_json.py path_to_csv path_to_json'
exit()
print 'Argument List:', str(sys.argv)
csvFileName = sys.argv[1]
jsonFileName = sys.argv[2]
csvFile = open (csvFileName, 'rU')
myReader = csv.reader(csvFile)
header = myReader.next()
print "Header fields:", header
myReader = csv.DictReader( csvFile, fieldnames = header)
parsedJson = json.dumps( [ row for row in myReader ] )
print "JSON parsed."
jsonFile = open( jsonFileName, 'w')
jsonFile.write(parsedJson)
print "JSON saved to: ", jsonFileName
|
stacybird/CS510CouchDB
|
scripts/csv_to_json.py
|
Python
|
apache-2.0
| 651
|
"""Reads vehicle status from BMW connected drive portal."""
import logging
from bimmer_connected.state import ChargingState, LockState
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from homeassistant.const import LENGTH_KILOMETERS
from . import DOMAIN as BMW_DOMAIN, BMWConnectedDriveBaseEntity
from .const import CONF_ACCOUNT, DATA_ENTRIES
_LOGGER = logging.getLogger(__name__)
# Binary sensors available on every vehicle.
# Maps attribute key -> [entity name suffix, device class, icon]; the
# three items are consumed positionally when entities are created.
SENSOR_TYPES = {
    "lids": ["Doors", DEVICE_CLASS_OPENING, "mdi:car-door-lock"],
    "windows": ["Windows", DEVICE_CLASS_OPENING, "mdi:car-door"],
    "door_lock_state": ["Door lock state", "lock", "mdi:car-key"],
    "lights_parking": ["Parking lights", "light", "mdi:car-parking-lights"],
    "condition_based_services": [
        "Condition based services",
        DEVICE_CLASS_PROBLEM,
        "mdi:wrench",
    ],
    "check_control_messages": [
        "Control messages",
        DEVICE_CLASS_PROBLEM,
        "mdi:car-tire-alert",
    ],
}

# Extra sensors for vehicles with a high voltage battery; extended below
# so it also contains every common sensor type.
SENSOR_TYPES_ELEC = {
    "charging_status": ["Charging status", "power", "mdi:ev-station"],
    "connection_status": ["Connection status", DEVICE_CLASS_PLUG, "mdi:car-electric"],
}

SENSOR_TYPES_ELEC.update(SENSOR_TYPES)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the BMW ConnectedDrive binary sensors from config entry."""
    account = hass.data[BMW_DOMAIN][DATA_ENTRIES][config_entry.entry_id][CONF_ACCOUNT]
    entities = []

    for vehicle in account.account.vehicles:
        # Electrified vehicles get the charging/connection sensors on top
        # of the common set; pure combustion vehicles get only the common
        # set; any other drivetrain gets no binary sensors.  The two
        # original branches duplicated an identical loop body.
        if vehicle.has_hv_battery:
            _LOGGER.debug("BMW with a high voltage battery")
            sensor_types = SENSOR_TYPES_ELEC
        elif vehicle.has_internal_combustion_engine:
            _LOGGER.debug("BMW with an internal combustion engine")
            sensor_types = SENSOR_TYPES
        else:
            continue
        for key, value in sorted(sensor_types.items()):
            # Only expose attributes this particular vehicle reports.
            if key in vehicle.available_attributes:
                entities.append(
                    BMWConnectedDriveSensor(
                        account, vehicle, key, value[0], value[1], value[2]
                    )
                )
    async_add_entities(entities, True)
class BMWConnectedDriveSensor(BMWConnectedDriveBaseEntity, BinarySensorEntity):
    """Representation of a BMW vehicle binary sensor."""

    def __init__(
        self, account, vehicle, attribute: str, sensor_name, device_class, icon
    ):
        """Initialize sensor.

        :param account: the ConnectedDrive account wrapper.
        :param vehicle: the bimmer_connected vehicle this entity reports on.
        :param attribute: vehicle attribute key this entity exposes (one of
            the SENSOR_TYPES / SENSOR_TYPES_ELEC keys, e.g. "lids").
        :param sensor_name: human-readable sensor name (stored, not
            otherwise used in this class).
        :param device_class: Home Assistant binary sensor device class.
        :param icon: Material Design icon name.
        """
        super().__init__(account, vehicle)
        self._attribute = attribute
        self._attr_name = f"{vehicle.name} {attribute}"
        self._attr_unique_id = f"{vehicle.vin}-{attribute}"
        self._sensor_name = sensor_name
        self._attr_device_class = device_class
        self._attr_icon = icon

    def update(self):
        """Read new state data from the library.

        Dispatches on self._attribute to compute the on/off state and the
        extra state attributes for this entity.
        """
        vehicle_state = self._vehicle.state
        # Start from the shared attributes and add per-sensor details.
        result = self._attrs.copy()

        # device class opening: On means open, Off means closed
        if self._attribute == "lids":
            _LOGGER.debug("Status of lid: %s", vehicle_state.all_lids_closed)
            self._attr_is_on = not vehicle_state.all_lids_closed
            for lid in vehicle_state.lids:
                result[lid.name] = lid.state.value
        elif self._attribute == "windows":
            self._attr_is_on = not vehicle_state.all_windows_closed
            for window in vehicle_state.windows:
                result[window.name] = window.state.value
        # device class lock: On means unlocked, Off means locked
        elif self._attribute == "door_lock_state":
            # Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
            self._attr_is_on = vehicle_state.door_lock_state not in [
                LockState.LOCKED,
                LockState.SECURED,
            ]
            result["door_lock_state"] = vehicle_state.door_lock_state.value
            result["last_update_reason"] = vehicle_state.last_update_reason
        # device class light: On means light detected, Off means no light
        elif self._attribute == "lights_parking":
            self._attr_is_on = vehicle_state.are_parking_lights_on
            result["lights_parking"] = vehicle_state.parking_lights.value
        # device class problem: On means problem detected, Off means no problem
        elif self._attribute == "condition_based_services":
            self._attr_is_on = not vehicle_state.are_all_cbs_ok
            for report in vehicle_state.condition_based_services:
                result.update(self._format_cbs_report(report))
        elif self._attribute == "check_control_messages":
            self._attr_is_on = vehicle_state.has_check_control_messages
            check_control_messages = vehicle_state.check_control_messages
            has_check_control_messages = vehicle_state.has_check_control_messages
            if has_check_control_messages:
                cbs_list = []
                for message in check_control_messages:
                    cbs_list.append(message["ccmDescriptionShort"])
                result["check_control_messages"] = cbs_list
            else:
                result["check_control_messages"] = "OK"
        # device class power: On means power detected, Off means no power
        elif self._attribute == "charging_status":
            self._attr_is_on = vehicle_state.charging_status in [ChargingState.CHARGING]
            result["charging_status"] = vehicle_state.charging_status.value
            result["last_charging_end_result"] = vehicle_state.last_charging_end_result
        # device class plug: On means device is plugged in,
        # Off means device is unplugged
        elif self._attribute == "connection_status":
            self._attr_is_on = vehicle_state.connection_status == "CONNECTED"
            result["connection_status"] = vehicle_state.connection_status

        self._attr_extra_state_attributes = result

    def _format_cbs_report(self, report):
        """Flatten one condition-based-service report into state attributes.

        Emits "<service type> status" plus, when present, the due date and
        the due distance converted to the configured unit system.
        """
        result = {}
        service_type = report.service_type.lower().replace("_", " ")
        result[f"{service_type} status"] = report.state.value
        if report.due_date is not None:
            result[f"{service_type} date"] = report.due_date.strftime("%Y-%m-%d")
        if report.due_distance is not None:
            distance = round(
                self.hass.config.units.length(report.due_distance, LENGTH_KILOMETERS)
            )
            result[
                f"{service_type} distance"
            ] = f"{distance} {self.hass.config.units.length_unit}"
        return result
|
sander76/home-assistant
|
homeassistant/components/bmw_connected_drive/binary_sensor.py
|
Python
|
apache-2.0
| 6,857
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from wikiciteparser.parser import parse_citation_template
from urllib import urlencode
import mwparserfromhell
import requests
import json
import codecs
import sys
import urllib
from unidecode import unidecode
import re
from datetime import datetime
from copy import deepcopy
import os
from arguments import template_arg_mappings, get_value
from ranking import sort_links, is_blacklisted
from settings import *
from ondiskcache import OnDiskCache
from classifier import AcademicPaperFilter
import md5
from time import sleep
# Module-level singletons shared by a bot run.
# NOTE(review): presumably a persistent cache of URL lookups between runs,
# and a classifier for academic-paper detection -- neither is referenced
# elsewhere in this module; confirm usage at the call sites.
urls_cache = OnDiskCache('urls_cache.pkl')
paper_filter = AcademicPaperFilter()
class TemplateEdit(object):
    """
    This represents a proposed change (possibly empty)
    on a citation template
    """
    def __init__(self, tpl, page):
        """
        :param tpl: a mwparserfromhell template: the original template
                    that we want to change
        :param page: the wiki page the template was found on (stored
                     as-is; presumably the page title -- confirm with
                     callers)
        """
        self.template = tpl
        # Snapshot the template's wikicode and an md5 of it so the
        # proposal can later be matched against the (possibly edited)
        # page.  (Python 2: unicode() and the md5 module.)
        self.orig_string = unicode(self.template)
        r = md5.md5()
        r.update(self.orig_string.encode('utf-8'))
        self.orig_hash = r.hexdigest()
        # Set by propose_change(): one of 'ignored', 'already_open',
        # 'registration_subscription', 'not_found', 'already_present'
        # or 'link_added'.
        self.classification = None
        # Value of the pre-existing free-link parameter, if any.
        self.conflicting_value = ''
        # 'param=value' string to add to the template, if any.
        self.proposed_change = ''
        self.proposed_link = None
        # Position of the template on the page; set by the caller.
        self.index = None
        self.page = page
        self.proposed_link_policy = None
        self.issn = None

    def is_https(self):
        """Return a truthy value when the proposed link uses HTTPS."""
        return self.proposed_link and self.proposed_link.startswith('https')

    def json(self):
        """Return a JSON-serializable dict describing this proposal."""
        return {
            'orig_string': self.orig_string,
            'orig_hash': self.orig_hash,
            'classification': self.classification,
            'conflicting_value': self.conflicting_value,
            'proposed_change': self.proposed_change,
            'proposed_link': self.proposed_link,
            'index': self.index,
            'policy': self.proposed_link_policy,
            'issn': self.issn,
        }

    def propose_change(self):
        """
        Fetches open urls for that template and proposes a change
        """
        reference = parse_citation_template(self.template)
        tpl_name = unicode(self.template.name).lower().strip()
        if not reference or tpl_name in excluded_templates:
            self.classification = 'ignored'
            return

        # Progress indicator for long runs.
        sys.stdout.write('.')
        sys.stdout.flush()

        # First check if there is already a link to a full text
        # in the citation.
        already_oa_param = None
        already_oa_value = None
        for argmap in template_arg_mappings:
            if argmap.present_and_free(self.template):
                already_oa_param = argmap.name
                already_oa_value = argmap.get(self.template)
        # NOTE(review): `change` is filled below but never read afterwards.
        change = {}

        # If so, we just skip it - no need for more free links
        if already_oa_param:
            self.classification = 'already_open'
            self.conflicting_value = already_oa_value
            return

        # --- Disabled for now ----
        # If the template is marked with |registration= or
        # |subscription= , let's assume that the editor tried to find
        # a better version themselves so it's not worth trying.
        if ((get_value(self.template, 'subscription')
             or get_value(self.template, 'registration')) in
                ['yes','y','true']):
            self.classification = 'registration_subscription'
            # return

        dissemin_paper_object = get_dissemin_paper(reference)

        # Otherwise, try to get a free link
        link = get_oa_link(dissemin_paper_object)
        if not link:
            self.classification = 'not_found'
            return

        # We found an OA link!
        self.proposed_link = link
        self.proposed_link_policy = get_paper_values(dissemin_paper_object, 'policy')
        self.issn = get_paper_values(dissemin_paper_object, 'issn')

        # Try to match it with an argument
        argument_found = False
        for argmap in template_arg_mappings:
            # Did the link we have got match that argument place?
            match = argmap.extract(link)
            if not match:
                continue
            argument_found = True

            # If this parameter is already present in the template:
            current_value = argmap.get(self.template)
            if current_value:
                change['new_'+argmap.name] = (match,link)
                #if argmap.custom_access:
                #    stats['changed'] += 1
                #    template.add(argmap.custom_access, 'free')
                #else:
                self.classification = 'already_present'
                # don't change anything
                break

            # If the parameter is not present yet, add it
            self.classification = 'link_added'
            if argmap.is_id:
                # id parameters are wrapped in their identifier template.
                self.proposed_change = 'id={{%s|%s}}' % (argmap.name,match)
            else:
                self.proposed_change = '%s=%s' % (argmap.name,match)
            break

    def update_template(self, change):
        """
        Given a change of the form "param=value", add it to the template
        """
        bits = re.split('=', change, maxsplit=1)
        if len(bits) != 2:
            raise ValueError('invalid change')
        param = bits[0].lower().strip()
        value = bits[1].strip()

        # Escape various characters in Wikicode
        value = value.replace(' ', '%20')
        value = value.replace('|', '{{!}}')
        self.template.add(param, value)
def remove_diacritics(s):
    """Transliterate unicode text to plain ASCII via unidecode; any
    non-unicode value (e.g. an already-encoded bytestring) is returned
    unchanged."""
    # isinstance (rather than the original type(s) == unicode) also
    # accepts unicode subclasses; behavior is otherwise identical.
    return unidecode(s) if isinstance(s, unicode) else s
def get_dissemin_paper(reference):
    """
    Given a citation template (as parsed by wikiciteparser and a proposed link)
    get dissemin API information for that link

    Returns the 'paper' object from the dissemin API response, or {}
    when all five attempts fail.
    """
    doi = reference.get('ID_list', {}).get('DOI')
    title = reference.get('Title')
    authors = reference.get('Authors', [])
    date = reference.get('Date')

    # CS1 represents unparsed authors as {'last':'First Last'}
    for i in range(len(authors)):
        if 'first' not in authors[i]:
            authors[i] = {'plain':authors[i].get('last','')}

    args = {
        'title':title,
        'authors':authors,
        'date':date,
        'doi':doi,
        }

    # Retry up to five times: the API occasionally returns non-JSON
    # (ValueError from .json()) or fails at the transport level; back
    # off for five seconds between attempts.
    for retry in range(5):
        try:
            req = requests.post('https://dissem.in/api/query',
                                json=args,
                                headers={'User-Agent':OABOT_USER_AGENT})
            resp = req.json()
            paper_object = resp.get('paper', {})
            return paper_object
        except (ValueError, requests.exceptions.RequestException) as e:
            sleep(5)
            continue
    return {}
def get_paper_values(paper, attribute):
    """Return the first truthy *attribute* value found among the paper's
    records, or None when no record carries one."""
    for rec in paper.get('records', []):
        value = rec.get(attribute)
        if value:
            return value
    return None
def get_oa_link(paper):
    """Return a free-to-read URL for a dissemin paper object, or None.

    Tries the pdf_url of the paper's own records first, then falls back
    to the OAdoi API when the paper has a DOI.  Returns None implicitly
    when nothing suitable is found.
    """
    doi = paper.get('doi')
    if doi is not None:
        # Keep only 'prefix/suffix', stripping any resolver prefix.
        doi = "/".join(doi.split("/")[-2:])

    # Dissemin's full text detection is not always accurate, so
    # we manually go through each url for the paper and check
    # if it is free to read.
    # if we want more accurate (but slower) results
    # we can check availability manually:

    candidate_urls = sort_links([
        record.get('pdf_url') for record in
        paper.get('records',[]) if record.get('pdf_url')
    ])
    # NOTE(review): candidate_urls is already sorted above; the second
    # sort_links call is redundant but harmless.
    for url in sort_links(candidate_urls):
        if url:
            if not is_blacklisted(url):
                return url

    # then, try OAdoi
    # (OAdoi finds full texts that dissemin does not, so it's always good to have!)
    if doi:
        resp = None
        attempts = 0
        # Retry on malformed JSON responses, up to three times.
        while resp is None:
            email = '{}@{}.in'.format('contact', 'dissem')
            try:
                req = requests.get('https://api.oadoi.org/v2/:{}'.format(doi), {'email':email})
                resp = req.json()
            except ValueError:
                sleep(10)
                attempts += 1
                if attempts >= 3:
                    return None
                else:
                    continue
        best_oa = (resp.get('best_oa_location') or {})
        # Publisher-hosted copies are skipped (only repository copies
        # are proposed).
        if best_oa.get('host_type') == 'publisher':
            return None
        if best_oa.get('url'):
            # try to HEAD the url just to check it's still there
            try:
                url = best_oa['url']
                head = requests.head(url)
                head.raise_for_status()
                if not is_blacklisted(url):
                    return url
            except requests.exceptions.RequestException:
                return None
def add_oa_links_in_references(text, page):
    """
    Main function of the bot.

    :param text: the wikicode of the page to edit
    :returns: a generator of TemplateEdit objects, one per template on
        the page, each with its proposal already computed
    """
    parsed = mwparserfromhell.parse(text)
    for position, tpl in enumerate(parsed.filter_templates()):
        edit = TemplateEdit(tpl, page)
        edit.index = position
        edit.propose_change()
        yield edit
def get_page_over_api(page_name):
    """Fetch the current wikitext of an English Wikipedia page via the
    MediaWiki API.

    Raises ValueError when the page does not exist, and lets requests'
    HTTPError propagate on transport failure.
    """
    r = requests.get('https://en.wikipedia.org/w/api.php', params={
        'action':'query',
        'titles':page_name,
        'prop':'revisions',
        'rvprop':'content',
        'format':'json',},
        headers={'User-Agent':OABOT_USER_AGENT})
    r.raise_for_status()
    js = r.json()
    # Python 2: dict.values() is a list, so [0] picks the single page
    # entry returned for the one title we queried.
    page = js.get('query',{}).get('pages',{}).values()[0]
    # Missing pages come back without a 'pageid'.
    pagid = page.get('pageid', -1)
    if pagid == -1:
        raise ValueError("Invalid page.")
    text = page.get('revisions',[{}])[0]['*']
    return text
def bot_is_allowed(text, user):
    """
    Taken from https://en.wikipedia.org/wiki/Template:Bots
    For bot exclusion compliance.
    """
    user = user.lower().strip()
    text = mwparserfromhell.parse(text)
    # Find the first {{bots}}/{{nobots}} template, if any.
    for tl in text.filter_templates():
        if tl.name in ('bots', 'nobots'):
            break
    else:
        # No exclusion template on the page: bots are allowed.
        return True
    for param in tl.params:
        bots = [x.lower().strip() for x in param.value.split(",")]
        if param.name == 'allow':
            # |allow=none forbids everyone; otherwise allow only listed
            # bots (or everyone via 'all').
            if ''.join(bots) == 'none': return False
            for bot in bots:
                if bot in (user, 'all'):
                    return True
        elif param.name == 'deny':
            # |deny=none explicitly allows everyone.
            if ''.join(bots) == 'none': return True
            for bot in bots:
                if bot in (user, 'all'):
                    return False
    # No parameter decided either way: default to allowed.
    return True
|
CristianCantoro/oabot
|
src/oabot/main.py
|
Python
|
mit
| 10,570
|
"""Constants for the Kostal Plenticore Solar Inverter integration."""
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
)
# Integration domain identifier.
DOMAIN = "kostal_plenticore"

# Key used inside the sensor-properties dicts below; its value controls
# whether the entity is enabled by default in the entity registry.
ATTR_ENABLED_DEFAULT = "entity_registry_enabled_default"
# Defines all entities for process data.
#
# Each entry is defined with a tuple of these values:
# - module id (str)
# - process data id (str)
# - entity name suffix (str)
# - sensor properties (dict)
# - value formatter (str)
SENSOR_PROCESS_DATA = [
(
"devices:local",
"Inverter:State",
"Inverter State",
{ATTR_ICON: "mdi:state-machine"},
"format_inverter_state",
),
(
"devices:local",
"Dc_P",
"Solar Power",
{
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ENABLED_DEFAULT: True,
},
"format_round",
),
(
"devices:local",
"Grid_P",
"Grid Power",
{
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ENABLED_DEFAULT: True,
},
"format_round",
),
(
"devices:local",
"HomeBat_P",
"Home Power from Battery",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local",
"HomeGrid_P",
"Home Power from Grid",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local",
"HomeOwn_P",
"Home Power from Own",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local",
"HomePv_P",
"Home Power from PV",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local",
"Home_P",
"Home Power",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local:ac",
"P",
"AC Power",
{
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ENABLED_DEFAULT: True,
},
"format_round",
),
(
"devices:local:pv1",
"P",
"DC1 Power",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local:pv2",
"P",
"DC2 Power",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local",
"PV2Bat_P",
"PV to Battery Power",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local",
"EM_State",
"Energy Manager State",
{ATTR_ICON: "mdi:state-machine"},
"format_em_manager_state",
),
(
"devices:local:battery",
"Cycles",
"Battery Cycles",
{ATTR_ICON: "mdi:recycle"},
"format_round",
),
(
"devices:local:battery",
"P",
"Battery Power",
{ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
"format_round",
),
(
"devices:local:battery",
"SoC",
"Battery SoC",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Autarky:Day",
"Autarky Day",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Autarky:Month",
"Autarky Month",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Autarky:Total",
"Autarky Total",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Autarky:Year",
"Autarky Year",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:OwnConsumptionRate:Day",
"Own Consumption Rate Day",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:OwnConsumptionRate:Month",
"Own Consumption Rate Month",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:OwnConsumptionRate:Total",
"Own Consumption Rate Total",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:OwnConsumptionRate:Year",
"Own Consumption Rate Year",
{ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"},
"format_round",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHome:Day",
"Home Consumption Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHome:Month",
"Home Consumption Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHome:Year",
"Home Consumption Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHome:Total",
"Home Consumption Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeBat:Day",
"Home Consumption from Battery Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeBat:Month",
"Home Consumption from Battery Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeBat:Year",
"Home Consumption from Battery Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeBat:Total",
"Home Consumption from Battery Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeGrid:Day",
"Home Consumption from Grid Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeGrid:Month",
"Home Consumption from Grid Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeGrid:Year",
"Home Consumption from Grid Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomeGrid:Total",
"Home Consumption from Grid Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomePv:Day",
"Home Consumption from PV Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomePv:Month",
"Home Consumption from PV Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomePv:Year",
"Home Consumption from PV Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyHomePv:Total",
"Home Consumption from PV Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv1:Day",
"Energy PV1 Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv1:Month",
"Energy PV1 Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv1:Year",
"Energy PV1 Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv1:Total",
"Energy PV1 Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv2:Day",
"Energy PV2 Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv2:Month",
"Energy PV2 Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv2:Year",
"Energy PV2 Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:EnergyPv2:Total",
"Energy PV2 Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Yield:Day",
"Energy Yield Day",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
ATTR_ENABLED_DEFAULT: True,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Yield:Month",
"Energy Yield Month",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Yield:Year",
"Energy Yield Year",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
(
"scb:statistic:EnergyFlow",
"Statistic:Yield:Total",
"Energy Yield Total",
{
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
"format_energy",
),
]
# Defines all entities for settings.
#
# Each entry is defined with a tuple of these values:
#  - module id (str)
#  - process data id (str)
#  - entity name suffix (str)
#  - sensor properties (dict)
#  - value formatter (str)
SENSOR_SETTINGS_DATA = [
    (
        "devices:local",
        # NOTE(review): "Comsumption" is the id exactly as spelled by the
        # device API — do not "fix" the typo or the lookup breaks.
        "Battery:MinHomeComsumption",
        "Battery min Home Consumption",
        {ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER},
        "format_round",
    ),
    (
        "devices:local",
        "Battery:MinSoc",
        "Battery min Soc",
        {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:battery-negative"},
        "format_round",
    ),
    (
        "devices:local",
        "Battery:Strategy",
        "Battery Strategy",
        # no unit/icon: the strategy is a raw numeric mode value
        {},
        "format_round",
    ),
]
|
w1ll1am23/home-assistant
|
homeassistant/components/kostal_plenticore/const.py
|
Python
|
apache-2.0
| 14,377
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-24 22:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.2): re-declare CartItem foreign keys.

    Makes ``cart_id`` point at ``settings.AUTH_USER_MODEL`` and ``product``
    at ``restaurant.MenuPosition``, both with ``on_delete=CASCADE``
    (explicit ``on_delete`` became required in later Django versions).
    """

    dependencies = [
        ('cart', '0003_auto_20160324_1935'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cartitem',
            name='cart_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='cartitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurant.MenuPosition'),
        ),
    ]
|
dmecha/apof
|
apof/cart/migrations/0004_auto_20160324_2247.py
|
Python
|
gpl-3.0
| 793
|
import torch.nn as nn
class VAE(nn.Module):
    """Abstract base class for variational autoencoders.

    Subclasses are expected to override :meth:`encode` and :meth:`decode`;
    calling either on the base class raises
    ``Exception("Unsupported Operation")`` (after printing the same
    message, preserving the original behavior).

    Args:
        input_dims: dimensionality of the input space.
        code_dims: dimensionality of the latent code.
    """

    def __init__(self, input_dims, code_dims):
        super(VAE, self).__init__()
        self.name = "abstract_VAE"
        self.input_dims = input_dims
        self.code_dims = code_dims

    def encode(self, x):
        """Map input ``x`` to a latent code. Abstract — always raises."""
        # Fixed: the old ``return None`` after this call was unreachable
        # (handle_unsupported_op always raises) and implied a None return
        # that never happens.
        self.handle_unsupported_op()

    def decode(self, z):
        """Map latent code ``z`` back to input space. Abstract — always raises."""
        self.handle_unsupported_op()

    def handle_unsupported_op(self):
        # Print before raising so the failure is visible even if the
        # exception is swallowed upstream (kept from the original).
        print("Unsupported Operation")
        raise Exception("Unsupported Operation")
|
Jueast/DisentangleVAE
|
model/abstract_VAE.py
|
Python
|
mit
| 530
|
# -*- coding: latin-1 -*-
import spotipy_twisted
from spotipy_twisted import util
from twisted.internet import defer
from twisted.trial import unittest
import pprint
import sys
'''
Since these tests require authentication they are maintained
separately from the other tests.
These tests try to be benign and leave your collection and
playlists in a relatively stable state.
'''
class AuthTestSpotipy(unittest.TestCase):
    '''
    These tests require user authentication.

    They run against the module-level ``spotify`` client and ``username``
    that are injected by the ``__main__`` block at the bottom of this
    file, so the class cannot run standalone.
    '''

    playlist = "spotify:user:plamere:playlist:2oCEWyyAPbZp9xhVSxZavx"
    # Deliberately mixes URI, bare-id and HTTP-URL forms to exercise
    # the client's id normalisation.
    four_tracks = ["spotify:track:6RtPijgfPKROxEzTHNRiDp",
                "spotify:track:7IHOIqZUUInxjVkko181PB",
                "4VrWlk8IQxevMvERoX08iC",
                "http://open.spotify.com/track/3cySlItpiPiIAzU3NyHCJf"]
    two_tracks = ["spotify:track:6RtPijgfPKROxEzTHNRiDp",
                "spotify:track:7IHOIqZUUInxjVkko181PB"]
    other_tracks=["spotify:track:2wySlB6vMzCbQrRnNGOYKa",
                "spotify:track:29xKs5BAHlmlX1u4gzQAbJ",
                "spotify:track:1PB7gRWcvefzu7t3LJLUlf"]
    bad_id = 'BAD_ID'

    @defer.inlineCallbacks
    def test_track_bad_id(self):
        # A malformed id must raise SpotifyException.
        try:
            track = yield spotify.track(self.bad_id)
            self.assertTrue(False)
        except spotipy_twisted.SpotifyException:
            self.assertTrue(True)

    @defer.inlineCallbacks
    def test_basic_user_profile(self):
        user = yield spotify.user(username)
        self.assertTrue(user['id'] == username)

    @defer.inlineCallbacks
    def test_current_user(self):
        user = yield spotify.current_user()
        self.assertTrue(user['id'] == username)

    @defer.inlineCallbacks
    def test_me(self):
        user = yield spotify.me()
        self.assertTrue(user['id'] == username)

    @defer.inlineCallbacks
    def test_user_playlists(self):
        playlists = yield spotify.user_playlists(username, limit=5)
        self.assertTrue('items' in playlists)
        # known API issue currently causes this test to fail:
        # the API doesn't currently respect the limit parameter
        self.assertTrue(len(playlists['items']) == 5)

    @defer.inlineCallbacks
    def test_user_playlist_tracks(self):
        playlists = yield spotify.user_playlists(username, limit=5)
        self.assertTrue('items' in playlists)
        for playlist in playlists['items']:
            user = playlist['owner']['id']
            pid = playlist['id']
            results = yield spotify.user_playlist_tracks(user, pid)
            self.assertTrue(len(results['items']) > 0)

    # NOTE(review): dead copy/paste code. This method is never collected by
    # trial (no ``test_`` prefix) and its body references an undefined
    # ``playlists`` — it would raise NameError if ever called. Candidate
    # for deletion.
    def user_playlist_tracks(self, user, playlist_id = None, fields=None,
            limit=100, offset=0):
        # known API issue currently causes this test to fail:
        # the API doesn't currently respect the limit parameter
        self.assertTrue(len(playlists['items']) == 5)

    @defer.inlineCallbacks
    def test_current_user_saved_tracks(self):
        tracks = yield spotify.current_user_saved_tracks()
        self.assertTrue(len(tracks['items']) > 0)

    @defer.inlineCallbacks
    def test_current_user_save_and_unsave_tracks(self):
        # Save four tracks, verify the library grew by four, then delete
        # them again and verify the library is back to its original size.
        tracks = yield spotify.current_user_saved_tracks()
        total = tracks['total']
        yield spotify.current_user_saved_tracks_add(self.four_tracks)
        tracks = yield spotify.current_user_saved_tracks()
        new_total = tracks['total']
        self.assertTrue(new_total - total == len(self.four_tracks))
        tracks = yield spotify.current_user_saved_tracks_delete(self.four_tracks)
        tracks = yield spotify.current_user_saved_tracks()
        new_total = tracks['total']
        self.assertTrue(new_total == total)

    @defer.inlineCallbacks
    def test_new_releases(self):
        response = yield spotify.new_releases()
        self.assertTrue(len(response['albums']) > 0)

    @defer.inlineCallbacks
    def test_featured_releases(self):
        response = yield spotify.featured_playlists()
        self.assertTrue(len(response['playlists']) > 0)

    @defer.inlineCallbacks
    def get_or_create_spotify_playlist(self, username, playlist_name):
        # Helper (not a test): return the id of an existing playlist with
        # this name, paging through all pages, or create it if missing.
        playlists = yield spotify.user_playlists(username)
        while playlists:
            for item in playlists['items']:
                if item['name'] == playlist_name:
                    defer.returnValue(item['id'])
            playlists = yield spotify.next(playlists)
        playlist = yield spotify.user_playlist_create(username, playlist_name)
        playlist_id = playlist['uri']
        defer.returnValue(playlist_id)

    @defer.inlineCallbacks
    def test_user_playlist_ops(self):
        # create empty playlist
        playlist_id = yield self.get_or_create_spotify_playlist(username,
                'spotipy-testing-playlist-1')
        # remove all tracks from it
        yield spotify.user_playlist_replace_tracks(username, playlist_id,[])
        playlist = yield spotify.user_playlist(username, playlist_id)
        self.assertTrue(playlist['tracks']['total'] == 0)
        self.assertTrue(len(playlist['tracks']['items']) == 0)
        # add tracks to it
        yield spotify.user_playlist_add_tracks(username, playlist_id, self.four_tracks)
        playlist = yield spotify.user_playlist(username, playlist_id)
        self.assertTrue(playlist['tracks']['total'] == 4)
        self.assertTrue(len(playlist['tracks']['items']) == 4)
        # remove two tracks from it
        yield spotify.user_playlist_remove_all_occurrences_of_tracks (username,
                playlist_id, self.two_tracks)
        playlist = yield spotify.user_playlist(username, playlist_id)
        self.assertTrue(playlist['tracks']['total'] == 2)
        self.assertTrue(len(playlist['tracks']['items']) == 2)
        # replace with 3 other tracks
        yield spotify.user_playlist_replace_tracks(username,
                playlist_id, self.other_tracks)
        playlist = yield spotify.user_playlist(username, playlist_id)
        self.assertTrue(playlist['tracks']['total'] == 3)
        self.assertTrue(len(playlist['tracks']['items']) == 3)
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # First CLI argument is the Spotify username; strip it so trial
        # does not try to interpret it as a test module.
        username = sys.argv[1]
        del sys.argv[1]
        scope = 'playlist-modify-public '
        scope += 'user-library-read '
        scope += 'user-library-modify '
        scope += 'user-read-private'
        token = util.prompt_for_user_token(username, scope)
        # Module-level client used by AuthTestSpotipy above.
        spotify = spotipy_twisted.Spotify(auth=token)
        spotify.trace = False
        # NOTE(review): redundant re-import — ``sys`` is already imported
        # at the top of the file.
        import sys
        from twisted.scripts import trial
        sys.argv.extend([__name__])
        trial.run()
    else:
        # py2-style print statement: this module targets Python 2.
        print "Usage: %s username" % (sys.argv[0],)
|
jimcortez/spotipy_twisted
|
tests/authtests.py
|
Python
|
mit
| 6,738
|
#!/usr/bin/env python
__all__ = ['iqiyi_download']
from ..common import *
from uuid import uuid4
from random import random,randint
import json
from math import floor
from zlib import decompress
import hashlib
'''
Changelog:
-> http://www.iqiyi.com/common/flashplayer/20150710/MainPlayer_5_2_25_c3_3_5_1.swf
-> http://www.iqiyi.com/common/flashplayer/20150703/MainPlayer_5_2_24_1_c3_3_3.swf
SingletonClass.ekam
-> http://www.iqiyi.com/common/flashplayer/20150618/MainPlayer_5_2_24_1_c3_3_2.swf
In this version Z7elzzup.cexe,just use node.js to run this code(with some modification) and get innerkey.
-> http://www.iqiyi.com/common/flashplayer/20150612/MainPlayer_5_2_23_1_c3_2_6_5.swf
In this version do not directly use enc key
gen enc key (so called sc ) in DMEmagelzzup.mix(tvid) -> (tm->getTimer(),src='hsalf',sc)
encrypy alogrithm is md5(DMEmagelzzup.mix.genInnerKey +tm+tvid)
how to gen genInnerKey ,can see first 3 lin in mix function in this file
'''
'''
com.qiyi.player.core.model.def.DefinitonEnum
bid meaning for quality
0 none
1 standard
2 high
3 super
4 suprt-high
5 fullhd
10 4k
96 topspeed
'''
def mix(tvid):
    """Produce the ``(tm, sc, src)`` signing triple for a VMS request.

    ``tm`` is a random pseudo-timestamp in [2000, 4000], ``src`` is the
    fixed token ``'hsalf'`` ('flash' reversed), and ``sc`` is the md5 hex
    digest of ``salt + tm + tvid`` where ``salt`` is the constant inner
    key extracted from the player SWF.
    """
    salt = '341c0055ad1d4e798c2b784d9dbed29f'
    tm = str(randint(2000, 4000))
    sc = hashlib.new('md5', bytes(salt + tm + tvid, 'utf-8')).hexdigest()
    return tm, sc, 'hsalf'
def getVRSXORCode(arg1, arg2):
    """XOR ``arg1`` with a key chosen by ``arg2`` modulo 3.

    Residue 1 -> key 121, residue 2 -> key 72, residue 0 -> key 103.
    """
    keys = {1: 121, 2: 72}
    return arg1 ^ keys.get(arg2 % 3, 103)
def getVrsEncodeCode(vlink):
    """Decode an obfuscated '-'-separated hex link into a plain string.

    Segments are consumed in reverse order; the segment at reverse
    position ``i`` is XOR-decoded with key 103/121/72 according to
    ``i`` mod 3 (the same key schedule as ``getVRSXORCode``, inlined
    here so the function is self-contained).
    """
    xor_keys = (103, 121, 72)  # indexed by position mod 3
    decoded = [
        chr(int(segment, 16) ^ xor_keys[pos % 3])
        for pos, segment in enumerate(reversed(vlink.split("-")))
    ]
    return "".join(decoded)
def getVMS(tvid,vid,uid):
    """Fetch and parse the VMS stream-metadata JSON for one video.

    ``tvid``/``vid`` identify the video; ``uid`` is a per-session random
    hex id. Returns the decoded JSON response as a dict. The request is
    signed twice: ``enc`` (from mix()) and ``authkey`` (md5 of tm+tvid).
    """
    # tm  -> pseudo flash runtime, feeds the md5 signatures
    # um  -> vip flag: 1 vip, 0 normal
    # authkey -> for password protected video, replace '' with your password
    # puid user.passportid may empty?
    # TODO: support password protected video
    # NOTE(review): ``src`` returned by mix() is unused here.
    tm,sc,src = mix(tvid)
    vmsreq='http://cache.video.qiyi.com/vms?key=fvip&src=1702633101b340d8917a69cf8a4b8c7' +\
       "&tvId="+tvid+"&vid="+vid+"&vinfo=1&tm="+tm+\
       "&enc="+sc+\
       "&qyid="+uid+"&tn="+str(random()) +"&um=0" +\
       "&authkey="+hashlib.new('md5',bytes(''+str(tm)+tvid,'utf-8')).hexdigest()
    return json.loads(get_content(vmsreq))
def getDispathKey(rid):
    """Compute the CDN dispatch key for resource id ``rid``.

    Fetches the server time, quantizes it to a 10-minute window, and
    returns md5(window + magic + rid) — which is why generated segment
    URLs expire after roughly 10 minutes (see note in iqiyi_download).
    """
    tp=")(*&^flash@#$%a"  # magic constant from the player SWF
    time=json.loads(get_content("http://data.video.qiyi.com/t?tn="+str(random())))["t"]
    t=str(int(floor(int(time)/(10*60.0))))
    return hashlib.new("md5",bytes(t+tp+rid,"utf-8")).hexdigest()
def iqiyi_download(url, output_dir = '.', merge = True, info_only = False):
    """Download (or, with ``info_only``, just probe) an iQIYI video page.

    Scrapes tvid/videoid from the page, queries VMS for stream metadata,
    picks the best non-VIP quality (bid <= 10), resolves each segment URL
    via the dispatch-key scheme, and passes the flv segment list to
    ``download_urls``.
    """
    gen_uid=uuid4().hex
    html = get_html(url)
    # ids come from the player's data attributes, or from the URL query
    tvid = r1(r'data-player-tvid="([^"]+)"', html) or r1(r'tvid=([^&]+)', url)
    videoid = r1(r'data-player-videoid="([^"]+)"', html) or r1(r'vid=([^&]+)', url)
    assert tvid
    assert videoid
    info = getVMS(tvid, videoid, gen_uid)
    # "A000000" is the API's success code
    assert info["code"] == "A000000"
    title = info["data"]["vi"]["vn"]
    # data.vp = json.data.vp
    # data.vi = json.data.vi
    # data.f4v = json.data.f4v
    # if movieIsMember data.vp = json.data.np
    # for highest qualities
    # for http://www.iqiyi.com/v_19rrmmz5yw.html not vp -> np
    try:
        if info["data"]['vp']["tkl"]=='' :
            raise ValueError
    except:
        log.e("[Error] Do not support for iQIYI VIP video.")
        exit(-1)
    bid=0
    # Pick the highest-quality stream with bid <= 10 (see the
    # DefinitonEnum table in the module docstring; bid 96 is "topspeed").
    for i in info["data"]["vp"]["tkl"][0]["vs"]:
        if int(i["bid"])<=10 and int(i["bid"])>=bid:
            bid=int(i["bid"])
            video_links=i["fs"] #now in i["flvs"] not in i["fs"]
            if not i["fs"][0]["l"].startswith("/"):
                tmp = getVrsEncodeCode(i["fs"][0]["l"])
                if tmp.endswith('mp4'):
                    video_links = i["flvs"]
    urls=[]
    size=0
    for i in video_links:
        vlink=i["l"]
        if not vlink.startswith("/"):
            # vlink is encoded; decode it first
            vlink=getVrsEncodeCode(vlink)
        key=getDispathKey(vlink.split("/")[-1].split(".")[0])
        size+=i["b"]
        baseurl=info["data"]["vp"]["du"].split("/")
        baseurl.insert(-1,key)
        url="/".join(baseurl)+vlink+'?su='+gen_uid+'&qyid='+uuid4().hex+'&client=&z=&bt=&ct=&tn='+str(randint(10000,20000))
        urls.append(json.loads(get_content(url))["l"])
    # download should be complete in 10 minutes
    # because the url is generated before start downloading
    # and the key may be expired after 10 minutes
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls(urls, title, 'flv', size, output_dir = output_dir, merge = merge)
site_info = "iQIYI.com"
download = iqiyi_download
download_playlist = playlist_not_supported('iqiyi')
|
flwh/you-get
|
src/you_get/extractors/iqiyi.py
|
Python
|
mit
| 4,916
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.core.mail import send_mail
from .models import Subscriber
class SubscribeForm(forms.ModelForm):
    """Newsletter sign-up form exposing only ``Subscriber.email``.

    The email input is rendered with Bootstrap's ``form-control`` class
    and a subscription-prompt placeholder.
    """

    class Meta:
        model = Subscriber
        fields = ('email',)
        widgets = {
            'email': forms.EmailInput(attrs={'class': 'form-control', 'placeholder':'Enter email to subscribe'})
        }
class MyRegistrationForm(UserCreationForm):
    """User sign-up form that additionally requires an email address.

    On a committed save the new user is persisted and a welcome email is
    sent to the supplied address.
    """

    email = forms.EmailField(required=True)

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def save(self, commit=True):
        """Attach the cleaned email to the user; on commit, save and email.

        With ``commit=False`` the caller is responsible for saving, and no
        email is sent (unchanged behavior).
        """
        user = super(MyRegistrationForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        if commit:
            user.save()
            self.send_registration_confirm_email()
        return user

    def send_registration_confirm_email(self):
        """Send the registration welcome email.

        SECURITY FIX: the previous revision echoed the user's plaintext
        password in this email. Mail is stored and often relayed
        unencrypted, and passwords may be reused elsewhere, so the
        password line has been removed from the message body.
        """
        username = self.cleaned_data['username']
        email = self.cleaned_data['email']
        send_mail(
            subject="Thank you for joining the conversation on American tax policy",
            message = """Welcome!

Thank you for registering with ospc.org. This is the best way to stay up to date on
the latest news from the Open Source Policy Center. We also invite you to try
the TaxBrain webapp.

Username: {username}
""".format(username=username),
            from_email = "Open Source Policy Center <mailing@ospc.org>",
            recipient_list = [email]
        )
class LoginForm(AuthenticationForm):
    """Thin wrapper around Django's ``AuthenticationForm``.

    NOTE(review): ``AuthenticationForm`` is a plain ``Form``, not a
    ``ModelForm``, so this inner ``Meta`` is presumably ignored by
    Django — confirm before relying on ``fields`` here.
    """

    class Meta:
        fields = ('username', 'password')
|
zrisher/webapp-public
|
webapp/apps/register/forms.py
|
Python
|
mit
| 1,753
|
from __future__ import division
from sympy import (Abs, Catalan, cos, Derivative, E, EulerGamma, exp,
factorial, factorial2, Function, GoldenRatio, I, Integer, Integral,
Interval, Lambda, Limit, Matrix, nan, O, oo, pi, Pow, Rational, Float, Rel,
S, sin, SparseMatrix, sqrt, summation, Sum, Symbol, symbols, Wild,
WildFunction, zeta, zoo, Dummy, Dict, Tuple, FiniteSet, factor,
subfactorial, true, false, Equivalent, Xor, Complement, SymmetricDifference,
AccumBounds, UnevaluatedExpr, Eq, Ne, Quaternion)
from sympy.core import Expr, Mul
from sympy.physics.units import second, joule
from sympy.polys import Poly, rootof, RootSum, groebner, ring, field, ZZ, QQ, lex, grlex
from sympy.geometry import Point, Circle
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.printing import sstr, sstrrepr, StrPrinter
from sympy.core.trace import Tr
from sympy import MatrixSymbol
x, y, z, w, t = symbols('x,y,z,w,t')
d = Dummy('d')
def test_printmethod():
    # A class can override how the str printer renders it by defining a
    # ``_sympystr`` hook that receives the active printer instance.
    class R(Abs):
        def _sympystr(self, printer):
            return "foo(%s)" % printer._print(self.args[0])
    assert sstr(R(x)) == "foo(x)"

    # The hook may also ignore the printer argument entirely.
    class R(Abs):
        def _sympystr(self, printer):
            return "foo"
    assert sstr(R(x)) == "foo"
def test_Abs():
assert str(Abs(x)) == "Abs(x)"
assert str(Abs(Rational(1, 6))) == "1/6"
assert str(Abs(Rational(-1, 6))) == "1/6"
def test_Add():
assert str(x + y) == "x + y"
assert str(x + 1) == "x + 1"
assert str(x + x**2) == "x**2 + x"
assert str(5 + x + y + x*y + x**2 + y**2) == "x**2 + x*y + x + y**2 + y + 5"
assert str(1 + x + x**2/2 + x**3/3) == "x**3/3 + x**2/2 + x + 1"
assert str(2*x - 7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2"
assert str(x - y) == "x - y"
assert str(2 - x) == "-x + 2"
assert str(x - 2) == "x - 2"
assert str(x - y - z - w) == "-w + x - y - z"
assert str(x - z*y**2*z*w) == "-w*y**2*z**2 + x"
assert str(x - 1*y*x*y) == "-x*y**2 + x"
assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)"
def test_Catalan():
assert str(Catalan) == "Catalan"
def test_ComplexInfinity():
assert str(zoo) == "zoo"
def test_Derivative():
assert str(Derivative(x, y)) == "Derivative(x, y)"
assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)"
assert str(Derivative(
x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)"
def test_dict():
assert str({1: 1 + x}) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}"
def test_Dict():
assert str(Dict({1: 1 + x})) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str(Dict({1: x**2, 2: y*x})) in (
"{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}"
def test_Dummy():
assert str(d) == "_d"
assert str(d + x) == "_d + x"
def test_EulerGamma():
assert str(EulerGamma) == "EulerGamma"
def test_Exp():
assert str(E) == "E"
def test_factorial():
n = Symbol('n', integer=True)
assert str(factorial(-2)) == "zoo"
assert str(factorial(0)) == "1"
assert str(factorial(7)) == "5040"
assert str(factorial(n)) == "factorial(n)"
assert str(factorial(2*n)) == "factorial(2*n)"
assert str(factorial(factorial(n))) == 'factorial(factorial(n))'
assert str(factorial(factorial2(n))) == 'factorial(factorial2(n))'
assert str(factorial2(factorial(n))) == 'factorial2(factorial(n))'
assert str(factorial2(factorial2(n))) == 'factorial2(factorial2(n))'
assert str(subfactorial(3)) == "2"
assert str(subfactorial(n)) == "subfactorial(n)"
assert str(subfactorial(2*n)) == "subfactorial(2*n)"
def test_Function():
f = Function('f')
fx = f(x)
w = WildFunction('w')
assert str(f) == "f"
assert str(fx) == "f(x)"
assert str(w) == "w_"
def test_Geometry():
assert sstr(Point(0, 0)) == 'Point2D(0, 0)'
assert sstr(Circle(Point(0, 0), 3)) == 'Circle(Point2D(0, 0), 3)'
# TODO test other Geometry entities
def test_GoldenRatio():
assert str(GoldenRatio) == "GoldenRatio"
def test_ImaginaryUnit():
assert str(I) == "I"
def test_Infinity():
assert str(oo) == "oo"
assert str(oo*I) == "oo*I"
def test_Integer():
assert str(Integer(-1)) == "-1"
assert str(Integer(1)) == "1"
assert str(Integer(-3)) == "-3"
assert str(Integer(0)) == "0"
assert str(Integer(25)) == "25"
def test_Integral():
assert str(Integral(sin(x), y)) == "Integral(sin(x), y)"
assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))"
def test_Interval():
n = (S.NegativeInfinity, 1, 2, S.Infinity)
for i in range(len(n)):
for j in range(i + 1, len(n)):
for l in (True, False):
for r in (True, False):
ival = Interval(n[i], n[j], l, r)
assert S(str(ival)) == ival
def test_AccumBounds():
a = Symbol('a', real=True)
assert str(AccumBounds(0, a)) == "AccumBounds(0, a)"
assert str(AccumBounds(0, 1)) == "AccumBounds(0, 1)"
def test_Lambda():
assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)"
# issue 2908
assert str(Lambda((), 1)) == "Lambda((), 1)"
assert str(Lambda((), x)) == "Lambda((), x)"
def test_Limit():
assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)"
assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)"
assert str(
Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')"
def test_list():
assert str([x]) == sstr([x]) == "[x]"
assert str([x**2, x*y + 1]) == sstr([x**2, x*y + 1]) == "[x**2, x*y + 1]"
assert str([x**2, [y + x]]) == sstr([x**2, [y + x]]) == "[x**2, [x + y]]"
def test_Matrix_str():
M = Matrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
M = Matrix([[1]])
assert str(M) == sstr(M) == "Matrix([[1]])"
M = Matrix([[1, 2]])
assert str(M) == sstr(M) == "Matrix([[1, 2]])"
M = Matrix()
assert str(M) == sstr(M) == "Matrix(0, 0, [])"
M = Matrix(0, 1, lambda i, j: 0)
assert str(M) == sstr(M) == "Matrix(0, 1, [])"
def test_Mul():
assert str(x/y) == "x/y"
assert str(y/x) == "y/x"
assert str(x/y/z) == "x/(y*z)"
assert str((x + 1)/(y + 2)) == "(x + 1)/(y + 2)"
assert str(2*x/3) == '2*x/3'
assert str(-2*x/3) == '-2*x/3'
assert str(-1.0*x) == '-1.0*x'
assert str(1.0*x) == '1.0*x'
# For issue 14160
assert str(Mul(-2, x, Pow(Mul(y,y,evaluate=False), -1, evaluate=False),
evaluate=False)) == '-2*x/(y*y)'
class CustomClass1(Expr):
is_commutative = True
class CustomClass2(Expr):
is_commutative = True
cc1 = CustomClass1()
cc2 = CustomClass2()
assert str(Rational(2)*cc1) == '2*CustomClass1()'
assert str(cc1*Rational(2)) == '2*CustomClass1()'
assert str(cc1*Float("1.5")) == '1.5*CustomClass1()'
assert str(cc2*Rational(2)) == '2*CustomClass2()'
assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()'
assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()'
def test_NaN():
assert str(nan) == "nan"
def test_NegativeInfinity():
assert str(-oo) == "-oo"
def test_Order():
assert str(O(x)) == "O(x)"
assert str(O(x**2)) == "O(x**2)"
assert str(O(x*y)) == "O(x*y, x, y)"
assert str(O(x, x)) == "O(x)"
assert str(O(x, (x, 0))) == "O(x)"
assert str(O(x, (x, oo))) == "O(x, (x, oo))"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, (x, oo), (y, oo))) == "O(x, (x, oo), (y, oo))"
def test_Permutation_Cycle():
from sympy.combinatorics import Permutation, Cycle
# general principle: economically, canonically show all moved elements
# and the size of the permutation.
for p, s in [
(Cycle(),
'()'),
(Cycle(2),
'(2)'),
(Cycle(2, 1),
'(1 2)'),
(Cycle(1, 2)(5)(6, 7)(10),
'(1 2)(6 7)(10)'),
(Cycle(3, 4)(1, 2)(3, 4),
'(1 2)(4)'),
]:
assert str(p) == s
Permutation.print_cyclic = False
for p, s in [
(Permutation([]),
'Permutation([])'),
(Permutation([], size=1),
'Permutation([0])'),
(Permutation([], size=2),
'Permutation([0, 1])'),
(Permutation([], size=10),
'Permutation([], size=10)'),
(Permutation([1, 0, 2]),
'Permutation([1, 0, 2])'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation([1, 0], size=6)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation([1, 0], size=10)'),
]:
assert str(p) == s
Permutation.print_cyclic = True
for p, s in [
(Permutation([]),
'()'),
(Permutation([], size=1),
'(0)'),
(Permutation([], size=2),
'(1)'),
(Permutation([], size=10),
'(9)'),
(Permutation([1, 0, 2]),
'(2)(0 1)'),
(Permutation([1, 0, 2, 3, 4, 5]),
'(5)(0 1)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'(9)(0 1)'),
(Permutation([0, 1, 3, 2, 4, 5], size=10),
'(9)(2 3)'),
]:
assert str(p) == s
def test_Pi():
assert str(pi) == "pi"
def test_Poly():
assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')"
assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')"
assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')"
assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')"
assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')"
assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')"
assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')"
assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')"
assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')"
assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')"
assert str(Poly(2*x + x**5, x)) == "Poly(x**5 + 2*x, x, domain='ZZ')"
assert str(Poly(3**(2*x), 3**x)) == "Poly((3**x)**2, 3**x, domain='ZZ')"
assert str(Poly((x**2)**x)) == "Poly(((x**2)**x), (x**2)**x, domain='ZZ')"
assert str(Poly((x + y)**3, (x + y), expand=False)
) == "Poly((x + y)**3, x + y, domain='ZZ')"
assert str(Poly((x - 1)**2, (x - 1), expand=False)
) == "Poly((x - 1)**2, x - 1, domain='ZZ')"
assert str(
Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')"
assert str(
Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')"
assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='EX')"
assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='EX')"
assert str(Poly(-x*y*z + x*y - 1, x, y, z)
) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')"
assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \
"Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')"
assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)"
assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def test_PolyRing():
assert str(ring("x", ZZ, lex)[0]) == "Polynomial ring in x over ZZ with lex order"
assert str(ring("x,y", QQ, grlex)[0]) == "Polynomial ring in x, y over QQ with grlex order"
assert str(ring("x,y,z", ZZ["t"], lex)[0]) == "Polynomial ring in x, y, z over ZZ[t] with lex order"
def test_FracField():
assert str(field("x", ZZ, lex)[0]) == "Rational function field in x over ZZ with lex order"
assert str(field("x,y", QQ, grlex)[0]) == "Rational function field in x, y over QQ with grlex order"
assert str(field("x,y,z", ZZ["t"], lex)[0]) == "Rational function field in x, y, z over ZZ[t] with lex order"
def test_PolyElement():
Ruv, u,v = ring("u,v", ZZ)
Rxyz, x,y,z = ring("x,y,z", Ruv)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x**2) == "x**2"
assert str(x**(-2)) == "x**(-2)"
assert str(x**QQ(1, 2)) == "x**(1/2)"
assert str((u**2 + 3*u*v + 1)*x**2*y + u + 1) == "(u**2 + 3*u*v + 1)*x**2*y + u + 1"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1"
assert str((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == "-(u**2 - 3*u*v + 1)*x**2*y - (u + 1)*x - 1"
assert str(-(v**2 + v + 1)*x + 3*u*v + 1) == "-(v**2 + v + 1)*x + 3*u*v + 1"
assert str(-(v**2 + v + 1)*x - 3*u*v + 1) == "-(v**2 + v + 1)*x - 3*u*v + 1"
def test_FracElement():
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x/3) == "x/3"
assert str(x/z) == "x/z"
assert str(x*y/z) == "x*y/z"
assert str(x/(z*t)) == "x/(z*t)"
assert str(x*y/(z*t)) == "x*y/(z*t)"
assert str((x - 1)/y) == "(x - 1)/y"
assert str((x + 1)/y) == "(x + 1)/y"
assert str((-x - 1)/y) == "(-x - 1)/y"
assert str((x + 1)/(y*z)) == "(x + 1)/(y*z)"
assert str(-y/(x + 1)) == "-y/(x + 1)"
assert str(y*z/(x + 1)) == "y*z/(x + 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - u*v*t - 1)"
def test_Pow():
assert str(x**-1) == "1/x"
assert str(x**-2) == "x**(-2)"
assert str(x**2) == "x**2"
assert str((x + y)**-1) == "1/(x + y)"
assert str((x + y)**-2) == "(x + y)**(-2)"
assert str((x + y)**2) == "(x + y)**2"
assert str((x + y)**(1 + x)) == "(x + y)**(x + 1)"
assert str(x**Rational(1, 3)) == "x**(1/3)"
assert str(1/x**Rational(1, 3)) == "x**(-1/3)"
assert str(sqrt(sqrt(x))) == "x**(1/4)"
# not the same as x**-1
assert str(x**-1.0) == 'x**(-1.0)'
# see issue #2860
assert str(Pow(S(2), -1.0, evaluate=False)) == '2**(-1.0)'
def test_sqrt():
    assert str(sqrt(x)) == "sqrt(x)"
    assert str(sqrt(x**2)) == "sqrt(x**2)"
    assert str(1/sqrt(x)) == "1/sqrt(x)"
    assert str(1/sqrt(x**2)) == "1/sqrt(x**2)"
    assert str(y/sqrt(x)) == "y/sqrt(x)"
    # ``1/2`` is true division here (``from __future__ import division``
    # at the top of this file), so x**(1/2) is a *float* power and prints
    # as 0.5, not as sqrt.
    assert str(x**(1/2)) == "x**0.5"
    assert str(1/x**(1/2)) == "x**(-0.5)"
def test_Rational():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n7 = Rational(3)
n8 = Rational(-3)
assert str(n1*n2) == "1/12"
assert str(n1*n2) == "1/12"
assert str(n3) == "1/2"
assert str(n1*n3) == "1/8"
assert str(n1 + n3) == "3/4"
assert str(n1 + n2) == "7/12"
assert str(n1 + n4) == "-1/4"
assert str(n4*n4) == "1/4"
assert str(n4 + n2) == "-1/6"
assert str(n4 + n5) == "-1/2"
assert str(n4*n5) == "0"
assert str(n3 + n4) == "0"
assert str(n1**n7) == "1/64"
assert str(n2**n7) == "1/27"
assert str(n2**n8) == "27"
assert str(n7**n8) == "1/27"
assert str(Rational("-25")) == "-25"
assert str(Rational("1.25")) == "5/4"
assert str(Rational("-2.6e-2")) == "-13/500"
assert str(S("25/7")) == "25/7"
assert str(S("-123/569")) == "-123/569"
assert str(S("0.1[23]", rational=1)) == "61/495"
assert str(S("5.1[666]", rational=1)) == "31/6"
assert str(S("-5.1[666]", rational=1)) == "-31/6"
assert str(S("0.[9]", rational=1)) == "1"
assert str(S("-0.[9]", rational=1)) == "-1"
assert str(sqrt(Rational(1, 4))) == "1/2"
assert str(sqrt(Rational(1, 36))) == "1/6"
assert str((123**25) ** Rational(1, 25)) == "123"
assert str((123**25 + 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "122"
assert str(sqrt(Rational(81, 36))**3) == "27/8"
assert str(1/sqrt(Rational(81, 36))**3) == "8/27"
assert str(sqrt(-4)) == str(2*I)
assert str(2**Rational(1, 10**10)) == "2**(1/10000000000)"
assert sstr(Rational(2, 3), sympy_integers=True) == "S(2)/3"
x = Symbol("x")
assert sstr(x**Rational(2, 3), sympy_integers=True) == "x**(S(2)/3)"
assert sstr(Eq(x, Rational(2, 3)), sympy_integers=True) == "Eq(x, S(2)/3)"
assert sstr(Limit(x, x, Rational(7, 2)), sympy_integers=True) == \
"Limit(x, x, S(7)/2)"
def test_Float():
    """Float printing honors dps, evalf precision, rounding and infinities."""
    # NOTE dps is the whole number of decimal digits
    assert str(Float('1.23', dps=1 + 2)) == '1.23'
    assert str(Float('1.23456789', dps=1 + 8)) == '1.23456789'
    assert str(
        Float('1.234567890123456789', dps=1 + 18)) == '1.234567890123456789'
    assert str(pi.evalf(1 + 2)) == '3.14'
    assert str(pi.evalf(1 + 14)) == '3.14159265358979'
    assert str(pi.evalf(1 + 64)) == ('3.141592653589793238462643383279'
                                     '5028841971693993751058209749445923')
    # Negative rounding digits and huge magnitudes use scientific notation.
    assert str(pi.round(-1)) == '0.'
    assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88'
    assert str(Float(S.Infinity)) == 'inf'
    assert str(Float(S.NegativeInfinity)) == '-inf'
def test_Relational():
    """Relational objects print as operators or Eq/Ne/Assignment calls."""
    assert str(Rel(x, y, "<")) == "x < y"
    assert str(Rel(x + y, y, "==")) == "Eq(x + y, y)"
    assert str(Rel(x, y, "!=")) == "Ne(x, y)"
    assert str(Rel(x, y, ':=')) == "Assignment(x, y)"
    assert str(Eq(x, 1) | Eq(x, 2)) == "Eq(x, 1) | Eq(x, 2)"
    assert str(Ne(x, 1) & Ne(x, 2)) == "Ne(x, 1) & Ne(x, 2)"
def test_CRootOf():
    """CRootOf prints with its defining polynomial and root index."""
    assert str(rootof(x**5 + 2*x - 1, 0)) == "CRootOf(x**5 + 2*x - 1, 0)"
def test_RootSum():
    """RootSum omits an identity Lambda but prints a non-trivial one."""
    f = x**5 + 2*x - 1
    assert str(
        RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)"
    assert str(RootSum(f, Lambda(
        z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(z, z**2))"
def test_GroebnerBasis():
    """GroebnerBasis printing includes generators, domain and term order."""
    assert str(groebner(
        [], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')"
    F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
    assert str(groebner(F, order='grlex')) == \
        "GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')"
    assert str(groebner(F, order='lex')) == \
        "GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')"
def test_set():
    """Builtin set/frozenset print sorted with brace / frozenset({...}) forms."""
    assert sstr(set()) == 'set()'
    assert sstr(frozenset()) == 'frozenset()'
    assert sstr(set([1])) == '{1}'
    assert sstr(frozenset([1])) == 'frozenset({1})'
    assert sstr(set([1, 2, 3])) == '{1, 2, 3}'
    assert sstr(frozenset([1, 2, 3])) == 'frozenset({1, 2, 3})'
    assert sstr(
        set([1, x, x**2, x**3, x**4])) == '{1, x, x**2, x**3, x**4}'
    assert sstr(
        frozenset([1, x, x**2, x**3, x**4])) == 'frozenset({1, x, x**2, x**3, x**4})'
def test_SparseMatrix():
    """SparseMatrix prints via str one-line and via sstr multi-line."""
    M = SparseMatrix([[x**+1, 1], [y, x + y]])
    assert str(M) == "Matrix([[x, 1], [y, x + y]])"
    assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
def test_Sum():
    """Unevaluated Sum prints its summand followed by each (var, lo, hi) limit."""
    assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))"
    assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
        "Sum(x*y**2, (x, -2, 2), (y, -5, 5))"
def test_Symbol():
    """Symbols print as their bare names."""
    assert str(y) == "y"
    assert str(x) == "x"
    e = x
    assert str(e) == "x"
def test_tuple():
    """Tuples (including nested and 1-element) print identically via str and sstr."""
    assert str((x,)) == sstr((x,)) == "(x,)"
    assert str((x + y, 1 + x)) == sstr((x + y, 1 + x)) == "(x + y, x + 1)"
    assert str((x + y, (
        1 + x, x**2))) == sstr((x + y, (1 + x, x**2))) == "(x + y, (x + 1, x**2))"
def test_Quaternion_str_printer():
    """Quaternions print component-wise with i/j/k basis suffixes,
    parenthesizing compound coefficients."""
    q = Quaternion(x, y, z, t)
    assert str(q) == "x + y*i + z*j + t*k"
    q = Quaternion(x,y,z,x*t)
    assert str(q) == "x + y*i + z*j + t*x*k"
    q = Quaternion(x,y,z,x+t)
    assert str(q) == "x + y*i + z*j + (t + x)*k"
def test_Quantity_str():
    """Units print abbreviated with abbrev=True, full name otherwise."""
    assert sstr(second, abbrev=True) == "s"
    assert sstr(joule, abbrev=True) == "J"
    assert str(second) == "second"
    assert str(joule) == "joule"
def test_wild_str():
    """Wild symbols print with a trailing underscore in compound expressions."""
    # Check expressions containing Wild not causing infinite recursion
    w = Wild('x')
    assert str(w + 1) == 'x_ + 1'
    assert str(exp(2**w) + 5) == 'exp(2**x_) + 5'
    assert str(3*w + 1) == '3*x_ + 1'
    assert str(1/w + 1) == '1 + 1/x_'
    assert str(w**2 + 1) == 'x_**2 + 1'
    assert str(1/(1 - w)) == '1/(-x_ + 1)'
def test_zeta():
    """zeta prints as a plain function call."""
    assert str(zeta(3)) == "zeta(3)"
def test_issue_3101():
    """Regression (issue 3101): printing the same expression twice is stable."""
    expr = x - y
    first_render = str(expr)
    second_render = str(expr)
    assert first_render == second_render
def test_issue_3103():
    """Regression (issue 3103): -2*sqrt(x) - y/sqrt(x)/2 must print in the
    modern sqrt form, not any historical broken rendering.

    Fix: the last excluded string ended in ``*w`` — an evident typo for
    ``*y`` (the expression contains y, not w), which made that membership
    check vacuously true.
    """
    e = -2*sqrt(x) - y/sqrt(x)/2
    assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y",
                          "-2*x**1/2(-1/2)*x**(-1/2)*y",
                          "-2*x**1/2-1/2*x**-1/2*y"]
    assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))"
def test_issue_4021():
    """Regression (issue 4021): Integral inside an Add prints correctly."""
    e = Integral(x, x) + 1
    assert str(e) == 'Integral(x, x) + 1'
def test_sstrrepr():
    """sstrrepr quotes plain strings (repr-style) where sstr does not."""
    assert sstr('abc') == 'abc'
    assert sstrrepr('abc') == "'abc'"
    e = ['a', 'b', 'c', x]
    assert sstr(e) == "[a, b, c, x]"
    assert sstrrepr(e) == "['a', 'b', 'c', x]"
def test_infinity():
    """Imaginary infinity prints as oo*I."""
    assert sstr(oo*I) == "oo*I"
def test_full_prec():
    """The full_prec setting controls Float digit display: True prints all
    digits, False prints the short form, "auto" shortens inside products."""
    assert sstr(S("0.3"), full_prec=True) == "0.300000000000000"
    assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000"
    assert sstr(S("0.3"), full_prec=False) == "0.3"
    # Inside a Mul the factor order is not guaranteed, so accept both.
    assert sstr(S("0.3")*x, full_prec=True) in [
        "0.300000000000000*x",
        "x*0.300000000000000"
    ]
    assert sstr(S("0.3")*x, full_prec="auto") in [
        "0.3*x",
        "x*0.3"
    ]
    assert sstr(S("0.3")*x, full_prec=False) in [
        "0.3*x",
        "x*0.3"
    ]
def test_noncommutative():
    """Noncommutative symbols keep operand order and print explicit
    negative/fractional powers instead of rewriting as fractions."""
    A, B, C = symbols('A,B,C', commutative=False)
    assert sstr(A*B*C**-1) == "A*B*C**(-1)"
    assert sstr(C**-1*A*B) == "C**(-1)*A*B"
    assert sstr(A*C**-1*B) == "A*C**(-1)*B"
    assert sstr(sqrt(A)) == "sqrt(A)"
    assert sstr(1/sqrt(A)) == "A**(-1/2)"
def test_empty_printer():
    """StrPrinter.emptyPrinter falls back to str() for any object."""
    str_printer = StrPrinter()
    assert str_printer.emptyPrinter("foo") == "foo"
    assert str_printer.emptyPrinter(x*y) == "x*y"
    assert str_printer.emptyPrinter(32) == "32"
def test_settings():
    """Unknown printer settings raise TypeError."""
    raises(TypeError, lambda: sstr(S(4), method="garbage"))
def test_RandomDomain():
    """Random-variable domains print as 'Domain:' plus their conditions."""
    from sympy.stats import Normal, Die, Exponential, pspace, where
    X = Normal('x1', 0, 1)
    assert str(where(X > 0)) == "Domain: (0 < x1) & (x1 < oo)"
    D = Die('d1', 6)
    assert str(where(D > 4)) == "Domain: Eq(d1, 5) | Eq(d1, 6)"
    A = Exponential('a', 1)
    B = Exponential('b', 1)
    assert str(pspace(Tuple(A, B)).domain) == "Domain: (0 <= a) & (0 <= b) & (a < oo) & (b < oo)"
def test_FiniteSet():
    """Large FiniteSets elide the middle with an ellipsis; small ones print fully."""
    assert str(FiniteSet(*range(1, 51))) == '{1, 2, 3, ..., 48, 49, 50}'
    assert str(FiniteSet(*range(1, 6))) == '{1, 2, 3, 4, 5}'
def test_PrettyPoly():
    """Polys domain elements print the same as the equivalent sympy expression."""
    from sympy.polys.domains import QQ
    F = QQ.frac_field(x, y)
    R = QQ[x, y]
    assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y))
    assert sstr(R.convert(x + y)) == sstr(x + y)
def test_categories():
    """Category-theory objects print as constructor-style reprs."""
    from sympy.categories import (Object, NamedMorphism,
                                  IdentityMorphism, Category)
    A = Object("A")
    B = Object("B")
    f = NamedMorphism(A, B, "f")
    id_A = IdentityMorphism(A)
    K = Category("K")
    assert str(A) == 'Object("A")'
    assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")'
    assert str(id_A) == 'IdentityMorphism(Object("A"))'
    assert str(K) == 'Category("K")'
def test_Tr():
    """Tr (trace) of a noncommutative product prints unevaluated."""
    A, B = symbols('A B', commutative=False)
    t = Tr(A*B)
    assert str(t) == 'Tr(A*B)'
def test_issue_6387():
    """Regression (issue 6387): factoring with float coefficients keeps floats."""
    assert str(factor(-3.0*z + 3)) == '-3.0*(1.0*z - 1.0)'
def test_MatMul_MatAdd():
    """A scalar times a MatAdd parenthesizes the sum."""
    from sympy import MatrixSymbol
    assert str(2*(MatrixSymbol("X", 2, 2) + MatrixSymbol("Y", 2, 2))) == \
        "2*(X + Y)"
def test_MatrixSlice():
    """Matrix slices print with Python slice notation per axis."""
    from sympy.matrices.expressions import MatrixSymbol
    assert str(MatrixSymbol('X', 10, 10)[:5, 1:9:2]) == 'X[:5, 1:9:2]'
    assert str(MatrixSymbol('X', 10, 10)[5, :5:2]) == 'X[5, :5:2]'
def test_true_false():
    """sympy's boolean singletons print exactly like Python's True/False."""
    assert str(true) == repr(true) == sstr(true) == "True"
    assert str(false) == repr(false) == sstr(false) == "False"
def test_Equivalent():
    """Equivalent prints with canonically (alphabetically) ordered arguments."""
    assert str(Equivalent(y, x)) == "Equivalent(x, y)"
def test_Xor():
    """Unevaluated Xor prints with canonically ordered arguments."""
    assert str(Xor(y, x, evaluate=False)) == "Xor(x, y)"
def test_Complement():
    """Set complement prints with the backslash operator."""
    assert str(Complement(S.Reals, S.Naturals)) == 'Reals \\ Naturals'
def test_SymmetricDifference():
    """Unevaluated SymmetricDifference prints as a constructor call."""
    assert str(SymmetricDifference(Interval(2, 3), Interval(3, 4),evaluate=False)) == \
        'SymmetricDifference(Interval(2, 3), Interval(3, 4))'
def test_UnevaluatedExpr():
    """UnevaluatedExpr keeps its argument undistributed in a product."""
    a, b = symbols("a b")
    expr1 = 2*UnevaluatedExpr(a+b)
    assert str(expr1) == "2*(a + b)"
def test_MatrixElement_printing():
    """Matrix elements print as indexed access, including after substitution."""
    # test cases for issue #11821
    A = MatrixSymbol("A", 1, 3)
    B = MatrixSymbol("B", 1, 3)
    C = MatrixSymbol("C", 1, 3)
    assert(str(A[0, 0]) == "A[0, 0]")
    assert(str(3 * A[0, 0]) == "3*A[0, 0]")
    F = C[0, 0].subs(C, A - B)
    assert str(F) == "(-B + A)[0, 0]"
def test_MatrixSymbol_printing():
    """MatAdd/MatMul combinations of MatrixSymbols print with expected
    sign handling and parenthesization."""
    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)
    assert str(A - A*B - B) == "-B - A*B + A"
    assert str(A*B - (A+B)) == "-(A + B) + A*B"
|
wxgeo/geophar
|
wxgeometrie/sympy/printing/tests/test_str.py
|
Python
|
gpl-2.0
| 25,525
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for generating authentication token to access Google Account."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import os
import urllib
import urllib2
import warnings
from adspygoogle.common.Errors import AuthTokenError
from adspygoogle.common.Errors import CaptchaError
_DEPRECATION_WARNING = 'ClientLogin is deprecated. Please use OAuth 2.0'
warnings.filterwarnings('always', _DEPRECATION_WARNING, DeprecationWarning)
class AuthToken(object):
  """Fetches authentication token.

  Responsible for generating the authentication token to access Google Account,
  https://www.google.com/accounts/NewAccount. The token is fetched via the
  ClientLogin API, http://code.google.com/apis/accounts/.
  """

  def __init__(self, email, password, service, lib_sig, proxy, login_token=None,
               login_captcha=None):
    """Inits AuthToken and immediately performs the ClientLogin round-trip.

    Args:
      email: str Login email of the Google Account.
      password: str Login password of the Google Account.
      service: str Name of the Google service for which to authorize access.
      lib_sig: str Client library signature.
      proxy: str HTTP proxy to use.
      login_token: str Token from a previous CaptchaRequired response.
      login_captcha: str User's answer to the CAPTCHA challenge.

    Raises:
      AuthTokenError: if the ClientLogin request fails.
      CaptchaError: if Google demands a CAPTCHA solution.
    """
    self.__email = email
    self.__password = password
    self.__account_type = 'GOOGLE'
    self.__service = service
    self.__source = 'Google-%s' % lib_sig
    self.__proxy = proxy
    self.__sid = ''
    self.__lsid = ''
    self.__auth = ''
    self.__login_token = login_token
    self.__login_captcha = login_captcha
    self.__Login()

  def __Login(self):
    """Fetch Auth token and SID, LSID cookies from Google Account auth."""
    warnings.warn(_DEPRECATION_WARNING, DeprecationWarning, stacklevel=5)
    if self.__proxy: os.environ['http_proxy'] = self.__proxy

    url = 'https://www.google.com/accounts/ClientLogin'
    data = [('Email', self.__email),
            ('Passwd', self.__password),
            ('accountType', self.__account_type),
            ('service', self.__service),
            ('source', self.__source)]
    # Only resend the CAPTCHA pair when both halves are present.
    if self.__login_token and self.__login_captcha:
      data.append(('logintoken', self.__login_token))
      data.append(('logincaptcha', self.__login_captcha))

    try:
      fh = urllib2.urlopen(url, urllib.urlencode(data))
      data = self.__ParseResponse(fh)
      try:
        if 'SID' in data or 'LSID' in data or 'Auth' in data:
          self.__sid = data.get('SID', '')
          self.__lsid = data.get('LSID', '')
          self.__auth = data.get('Auth', '')
        elif 'Error' in data and data['Error'] != 'CaptchaRequired':
          msg = data['Error'].strip()
          if 'Info' in data:
            msg = msg + ' Additional Info: ' + data['Info'].strip()
          raise AuthTokenError(msg)
        elif 'CaptchaToken' in data:
          raise CaptchaError(data['CaptchaToken'].strip(),
                             'http://www.google.com/accounts/'
                             + data['CaptchaUrl'].strip())
        else:
          raise AuthTokenError('Unexpected response: ' + str(data))
      finally:
        fh.close()
    # 'as' form: valid on Python 2.6+ and 3.x (the old 'except IOError, e'
    # comma syntax is a SyntaxError on Python 3).
    except IOError as e:
      raise AuthTokenError(e)

  def __ParseResponse(self, fh):
    """Processes the ClientLogin response into a dict.

    Lines without a '=' separator (e.g. a blank trailing line) are skipped;
    the previous implementation raised ValueError on them.

    Args:
      fh: file The file object representing the ClientLogin response.

    Returns:
      dict Dictionary containing the response in key-value format.
    """
    ret = {}
    for line in fh:
      # Split on the first '=' only; values may themselves contain '='.
      key, sep, value = line.partition('=')
      if not sep:
        continue
      ret[key] = value.strip()
    return ret

  def GetSidToken(self):
    """Return SID cookie.

    Returns:
      str SID cookie.
    """
    return self.__sid

  def GetLsidToken(self):
    """Return LSID cookie.

    Returns:
      str LSID cookie.
    """
    return self.__lsid

  def GetAuthToken(self):
    """Return Auth authentication token.

    Returns:
      str Auth authentication token.
    """
    return self.__auth
|
donspaulding/adspygoogle
|
adspygoogle/common/AuthToken.py
|
Python
|
apache-2.0
| 4,527
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the 'comment' column to rainman_controller."""
        # Adding field 'Controller.comment'
        db.add_column('rainman_controller', 'comment',
                      # Non-null CharField with an empty-string default so
                      # existing rows migrate cleanly.
                      self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the 'comment' column again."""
        # Deleting field 'Controller.comment'
        db.delete_column('rainman_controller', 'comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rainman.controller': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'Controller'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'max_on': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'controllers'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.day': {
'Meta': {'object_name': 'Day'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'rainman.dayrange': {
'Meta': {'object_name': 'DayRange'},
'days': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'ranges'", 'symmetrical': 'False', 'to': "orm['rainman.Day']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'rainman.daytime': {
'Meta': {'unique_together': "(('day', 'descr'),)", 'object_name': 'DayTime'},
'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'times'", 'to': "orm['rainman.Day']"}),
'descr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rainman.environmenteffect': {
'Meta': {'object_name': 'EnvironmentEffect'},
'factor': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environment_effects'", 'to': "orm['rainman.ParamGroup']"}),
'sun': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'wind': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'rainman.feed': {
'Meta': {'object_name': 'Feed'},
'db_max_flow_wait': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300', 'db_column': "'max_flow_wait'"}),
'flow': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'rainman.group': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'Group'},
'days': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups_y'", 'blank': 'True', 'to': "orm['rainman.DayRange']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['rainman.Site']"}),
'valves': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['rainman.Valve']"}),
'xdays': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups_n'", 'blank': 'True', 'to': "orm['rainman.DayRange']"})
},
'rainman.groupadjust': {
'Meta': {'unique_together': "(('group', 'start'),)", 'object_name': 'GroupAdjust'},
'factor': ('django.db.models.fields.FloatField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adjusters'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'rainman.groupoverride': {
'Meta': {'unique_together': "(('group', 'name'), ('group', 'start'))", 'object_name': 'GroupOverride'},
'allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overrides'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'off_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'on_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'rainman.history': {
'Meta': {'unique_together': "(('site', 'time'),)", 'object_name': 'History'},
'feed': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rain': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history'", 'to': "orm['rainman.Site']"}),
'sun': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'wind': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'rainman.level': {
'Meta': {'unique_together': "(('valve', 'time'),)", 'object_name': 'Level'},
'flow': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'forced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'levels'", 'to': "orm['rainman.Valve']"})
},
'rainman.log': {
'Meta': {'object_name': 'Log'},
'controller': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'logs'", 'null': 'True', 'to': "orm['rainman.Controller']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['rainman.Site']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 29, 0, 0)', 'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'logs'", 'null': 'True', 'to': "orm['rainman.Valve']"})
},
'rainman.paramgroup': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'ParamGroup'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'factor': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'rain': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'param_groups'", 'to': "orm['rainman.Site']"})
},
'rainman.rainmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'RainMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rain_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.schedule': {
'Meta': {'unique_together': "(('valve', 'start'),)", 'object_name': 'Schedule'},
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'forced': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seen': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schedules'", 'to': "orm['rainman.Valve']"})
},
'rainman.site': {
'Meta': {'object_name': 'Site'},
'db_rain_delay': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300', 'db_column': "'rain_delay'"}),
'db_rate': ('django.db.models.fields.FloatField', [], {'default': '0.00011574074074074075', 'db_column': "'rate'"}),
'host': ('django.db.models.fields.CharField', [], {'default': "'localhost'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50005'}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
'rainman.sunmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'SunMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sun_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.tempmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'TempMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'temp_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.userforsite': {
'Meta': {'object_name': 'UserForSite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': "orm['rainman.Site']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'valves': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': "orm['rainman.Valve']"})
},
'rainman.valve': {
'Meta': {'unique_together': "(('controller', 'name'),)", 'object_name': 'Valve'},
'area': ('django.db.models.fields.FloatField', [], {}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'controller': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.Controller']"}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.Feed']"}),
'flow': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'max_level': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'param_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.ParamGroup']"}),
'priority': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runoff': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'shade': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'start_level': ('django.db.models.fields.FloatField', [], {'default': '8'}),
'stop_level': ('django.db.models.fields.FloatField', [], {'default': '3'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 29, 0, 0)', 'db_index': 'True'}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'verbose': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'rainman.valveoverride': {
'Meta': {'unique_together': "(('valve', 'name'), ('valve', 'start'))", 'object_name': 'ValveOverride'},
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'off_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'on_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overrides'", 'to': "orm['rainman.Valve']"})
},
'rainman.windmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'WindMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wind_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
}
}
complete_apps = ['rainman']
|
smurfix/HomEvenT
|
irrigation/rainman/migrations/0041_auto__add_field_controller_comment.py
|
Python
|
gpl-3.0
| 20,952
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cyrusbus import Bus
class Limiter(object):
    """Publishes and subscribes to 'limiter.miss' events over an internal bus."""

    def __init__(self, limiter_miss_timeout_ms=None):
        # Default the miss timeout to 500 ms when the caller supplies None.
        self.bus = Bus()
        self.limiter_miss_timeout_ms = (
            500 if limiter_miss_timeout_ms is None else limiter_miss_timeout_ms
        )

    def handle_callbacks(self, callback):
        """Adapt *callback* to the bus signature by dropping the bus argument."""
        def adapter(bus, *args, **kw):
            callback(*args, **kw)
        return adapter

    def subscribe_to_lock_miss(self, callback):
        """Invoke *callback* (sans bus argument) on every 'limiter.miss' event."""
        self.bus.subscribe('limiter.miss', self.handle_callbacks(callback))

    def publish_lock_miss(self, url):
        """Announce on the bus that *url* missed the limiter."""
        self.bus.publish('limiter.miss', url)
|
heynemann/octopus
|
octopus/limiter/__init__.py
|
Python
|
mit
| 682
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
from django.utils.encoding import force_str
from rest_framework import fields, generics, serializers
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from . import utils
from .viewmixins import Admin2Mixin
API_VERSION = '0.1'
class Admin2APISerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked model serializer whose 'url' field points at the
    admin2-namespaced API detail view for the instance's model."""
    # Template for deriving the DRF view name; subclasses/instances may
    # override it to target a differently named admin instance.
    _default_view_name = 'admin2:%(app_label)s_%(model_name)s_api_detail'

    # Expose the primary key and the object's string form as read-only fields.
    pk = fields.ReadOnlyField()

    __unicode__ = fields.ReadOnlyField(source='__str__')

    def get_extra_kwargs(self):
        """Inject the computed detail-view name for the hyperlinked 'url' field."""
        extra_kwargs = super(Admin2APISerializer, self).get_extra_kwargs()
        extra_kwargs.update({
            'url': {'view_name': self._get_default_view_name(self.Meta.model)}
        })
        return extra_kwargs

    def _get_default_view_name(self, model):
        """
        Return the view name to use if 'view_name' is not specified in 'Meta'
        """
        model_meta = model._meta
        format_kwargs = {
            'app_label': model_meta.app_label,
            'model_name': model_meta.object_name.lower()
        }
        return self._default_view_name % format_kwargs
class Admin2APIMixin(Admin2Mixin):
    """Mixin wiring admin2 model admins into DRF generic API views."""

    model = None
    raise_exception = True

    def get_serializer_class(self):
        # An explicitly configured serializer wins; otherwise synthesize one
        # bound to this view's model and admin instance name.
        if self.serializer_class is not None:
            return super(Admin2APIMixin, self).get_serializer_class()

        model_class = self.get_model()
        admin_name = self.model_admin.admin.name

        class ModelAPISerilizer(Admin2APISerializer):
            # The admin instance name is only known here, so rebuild the
            # default view-name pattern with it.
            _default_view_name = admin_name + ':%(app_label)s_%(model_name)s_api_detail'

            class Meta:
                model = model_class

        return ModelAPISerilizer
class IndexAPIView(Admin2APIMixin, APIView):
    """Root API endpoint listing every registered app and its model endpoints."""

    apps = None
    registry = None
    app_verbose_names = None
    app_verbose_name = None

    def get_model_data(self, model):
        """Return the list-endpoint URL and display names for one model."""
        model_admin = self.registry[model]
        options = utils.model_options(model)
        url_name = '%s:%s_%s_api_list' % (
            model_admin.admin.name,
            options.app_label,
            options.object_name.lower(),
        )
        list_url = reverse(
            url_name,
            request=self.request,
            format=self.kwargs.get('format'))
        return {
            'url': list_url,
            'verbose_name': force_str(options.verbose_name),
            'verbose_name_plural': force_str(options.verbose_name_plural),
        }

    def get_app_data(self, app_label, models):
        """Return the serialized entry for one app and its models."""
        return {
            'app_label': app_label,
            'models': [self.get_model_data(model) for model in models],
            'app_verbose_name': force_str(self.app_verbose_names.get(app_label)),
        }

    def get(self, request):
        app_data = [
            self.get_app_data(app_label, registry.keys())
            for app_label, registry in self.apps.items()
        ]
        return Response({
            'version': API_VERSION,
            'apps': app_data,
        })
class ListCreateAPIView(Admin2APIMixin, generics.ListCreateAPIView):
    """List (GET) / create (POST) API endpoint for an admin2-registered model."""
    pass
class RetrieveUpdateDestroyAPIView(Admin2APIMixin, generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET) / update (PUT/PATCH) / delete (DELETE) endpoint for one object."""
    pass
|
andrewsmedina/django-admin2
|
djadmin2/apiviews.py
|
Python
|
bsd-3-clause
| 3,823
|
# combine_all_summaries
# this fuses the summary files from pre23_hathi,
# post22_hathi, and chicago, to create a file
# where there is only one entry for each
# char, auth, date tuple
import csv
from collections import Counter
# Shared accumulator for the merge script below: maps a
# (chargender, authgender, date) triple to a Counter of column totals.
counts = dict()

# Numeric columns summed per (char, auth, date) triple.
columns = ['characters', 'speaking', 'agent', 'mod', 'patient', 'poss', 'total']

# Column order of the fused output summary file.
fieldnames = ['chargender', 'authgender', 'date', 'characters', 'speaking', 'agent', 'mod', 'patient', 'poss', 'total']

def add2counts(filepath, counts):
    """Fold the summary CSV at *filepath* into *counts* and return it.

    Rows that share a (chargender, authgender, date) triple are summed
    column-wise into a single Counter, so the result holds exactly one
    entry per triple.
    """
    with open(filepath, encoding='utf-8') as source:
        for row in csv.DictReader(source):
            key = (row['chargender'], row['authgender'], row['date'])
            tally = counts.setdefault(key, Counter())
            for col in columns:
                tally[col] += int(row[col])
    return counts
# Fuse the post-1922 and pre-1923 HathiTrust summary files into the shared
# accumulator, then write one output row per (char, auth, date) triple.
add2counts('post22hathi/corrected_post22_summary.csv', counts)
add2counts('pre23hathi/corrected_pre23_hathi_summary.csv', counts)
with open('corrected_hathi_summaries.csv', mode = 'w', encoding = 'utf-8') as f:
    writer = csv.DictWriter(f, fieldnames = fieldnames)
    writer.writeheader()
    for triplet, colcounts in counts.items():
        r = dict()
        # Unpack the dict key back into its three identifying columns.
        r['chargender'], r['authgender'], r['date'] = triplet
        for col in columns:
            r[col] = colcounts[col]
        writer.writerow(r)
|
tedunderwood/character
|
oldcode/combine_hathi_summaries.py
|
Python
|
mit
| 1,368
|
from corpustools.gui.psgui import *
def test_psgui(qtbot, specified_test_corpus, settings):
    # Smoke test: the phonological-search dialog must construct without
    # raising when given a loaded corpus.
    # NOTE(review): the meaning of the final positional flag (True) is
    # assumed from the PhonoSearchDialog signature -- confirm against psgui.
    dialog = PhonoSearchDialog(None, settings, specified_test_corpus, True)
    qtbot.addWidget(dialog)
|
PhonologicalCorpusTools/CorpusTools
|
tests/test_gui_psgui.py
|
Python
|
bsd-3-clause
| 199
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import qcore.inspection as core_inspection
from . import debug
from . import futures
from . import _debug
__traceback_hide__ = True
_debug_options = _debug.options
class BatchingError(Exception):
pass
class BatchCancelledError(BatchingError):
pass
class BatchBase(futures.FutureBase):
    """Abstract base class describing a batch of operations."""

    def __init__(self):
        futures.FutureBase.__init__(self) # Cython doesn't support super(...)
        # Pending BatchItemBase instances, in the order they were enqueued.
        self.items = []

    def is_flushed(self):
        # Flushing computes the underlying future, so "computed" == "flushed".
        return self.is_computed()

    def is_cancelled(self):
        # Cancellation is modelled as the future computing with an error set.
        return self.is_computed() and self.error() is not None

    def is_empty(self):
        return len(self.items) == 0

    def get_priority(self):
        """Returns batch flush priority.

        The higher priority, the earlier batch is flushed.
        By default it returns ``(0, len(self.items))`` tuple;
        0 here is base batch priority (zero by default).

        :return: batch flush priority.
        """
        return 0, len(self.items)

    def flush(self):
        """Flushes the batch.

        Almost the same as ``self.value()``, but:

        * this method doesn't throw an error even if underlying batch
          flush actually completed with an error
        * on the other hand, subsequent flush throws an error.

        So this method is intended to be called by schedulers:

        * They must flush each batch just once
        * They don't care (and moreover, shouldn't know) about actual
          flush errors. These errors will be anyway re-thrown later -
          on attempt to access values of underlying batch items.
        """
        if self.is_computed():
            raise BatchingError('Batch is already flushed or cancelled.')
        if _debug_options.DUMP_FLUSH_BATCH:
            debug.write('@async: -> batch flush:')
            self.dump(4)
            if _debug_options.DUMP_STACK:
                debug.dump_stack()
        try:
            self.error() # Makes future to compute w/o raising an error
        finally:
            if _debug_options.DUMP_FLUSH_BATCH:
                debug.write('@async: <- batch flushed: %s' % debug.str(self))

    def cancel(self, error=None):
        """Cancels the batch.

        It's a public ``_cancel()`` wrapper enforcing
        additional safety properties.
        """
        if self.is_computed():
            return # Cancel must never raise an error
        if error is None:
            error = BatchCancelledError()
        self.set_error(error)

    def _compute(self):
        # Swap in a fresh active batch first so items enqueued during the
        # flush land on the new batch rather than this (now computing) one.
        self._try_switch_active_batch()
        try:
            self._flush()
            self.set_value(None)
        except BaseException as error:
            if not self.is_computed():
                self.set_error(error)
            raise # Must re-throw to properly implement FutureBase API

    def _computed(self):
        # The purpose of this overridden method is to ensure that
        # all items are computed before on_computed event is raised.
        self._try_switch_active_batch()
        error = self.error()
        cancelled = error is not None
        if cancelled:
            self._cancel()
        for item in self.items:
            if not item.is_computed():
                # We must ensure all batch items are computed
                item.set_error(
                    error if cancelled
                    else AssertionError("Value of this item wasn't set on batch flush."))
        futures.FutureBase._computed(self) # Cython doesn't support super(...)

    def _flush(self):
        """A protected method that must be override to implement batch flush.

        Normally it should simply forward the call to appropriate service
        (cache, DB, etc.), that must execute the batch (i.e. ensure each
        batch item will be able to acquire its value on subsequent attempt)
        and set its current batch to the newly created one.
        """
        raise NotImplementedError()

    def _cancel(self):
        """A protected method that must be override to implement batch cancellation.

        Normally it should simply forward the call to appropriate service
        (cache, DB, etc.), that must discard the current batch in this case.

        This method is optional to implement: _computed implementation
        anyway ensures that all items are computed as well, but
        you can add some additional logic here, if you want to.
        """
        pass

    def _try_switch_active_batch(self):
        """This protected method must be overridden to switch to a new active
        batch of this type.

        Must never throw an error.
        """
        raise NotImplementedError()

    def __str__(self):
        return '%s (%s, %i items)' % (
            core_inspection.get_full_name(type(self)),
            'cancelled' if self.is_cancelled() else
            'flushed' if self.is_flushed() else 'pending',
            len(self.items))

    def dump(self, indent=0):
        # Debug helper: pretty-print this batch and its items, indented.
        debug.write(debug.str(self), indent)
        debug.write('Priority: %s' % debug.repr(self.get_priority()), indent + 1)
        if self.items:
            debug.write('Items:', indent + 1)
            for item in self.items:
                item.dump(indent + 2)
        else:
            debug.write('No items.', indent + 1)
class BatchItemBase(futures.FutureBase):
    """Abstract base class for a single item inside a batch.

    Each item is a future whose value becomes available once the
    owning batch has been flushed.
    """

    def __init__(self, batch):
        super(BatchItemBase, self).__init__()
        assert not batch.is_flushed(), "can't add an item to the batch that is already flushed"
        # Record our position inside the batch, then register ourselves.
        self.batch = batch
        self.index = len(batch.items)
        batch.items.append(self)

    def _compute(self):
        """Make this item's value available by flushing the owning batch,
        if that hasn't happened yet.
        """
        owner = self.batch
        if not owner.is_flushed():
            owner.flush()
class DebugBatchItem(BatchItemBase):
    """Debug batch item used to sync async execution."""

    def __init__(self, batch_name='default', result=None):
        global _debug_batch_state
        # Join the thread-local active batch for this name, creating one
        # lazily on first use.
        active = _debug_batch_state.batches.setdefault(
            batch_name, DebugBatch(batch_name))
        super(DebugBatchItem, self).__init__(active)
        # Pre-seeded value handed out when the debug batch flushes.
        self._result = result
class DebugBatch(BatchBase):
    """Debug batch used to sync async execution."""

    def __init__(self, name='default', index=0):
        super(DebugBatch, self).__init__()
        self.name = name
        # Generation counter, bumped whenever a fresh active batch replaces
        # this one under the same name.
        self.index = index

    def _try_switch_active_batch(self):
        # Replace ourselves as the active batch for this name, but only if
        # we are still the registered one (must never raise, per the base
        # class contract).
        registry = _debug_batch_state.batches
        if registry.get(self.name, None) is self:
            registry[self.name] = DebugBatch(self.name, self.index + 1)

    def _flush(self):
        global _debug_batch_state
        if _debug_options.DUMP_SYNC:
            debug.write("@async.debug.sync: flushing batch %s (%i)" % (debug.repr(self.name), self.index))
        # Hand every item its pre-seeded debug result.
        for item in self.items:
            item.set_value(item._result)

    def _cancel(self):
        global _debug_batch_state
        if _debug_options.DUMP_SYNC:
            debug.write("@async.debug.sync: cancelling batch %s (%i)" % (debug.repr(self.name), self.index))
class LocalDebugBatchState(threading.local):
    """Thread-local registry of active debug batches, keyed by batch name."""

    def __init__(self):
        super(LocalDebugBatchState, self).__init__()
        # Each thread starts with its own empty name -> DebugBatch mapping.
        self.batches = {}
# Module-level, thread-local registry of the currently active debug batches.
_debug_batch_state = LocalDebugBatchState()
# NOTE(review): this explicit globals() assignment looks redundant with the
# line above; presumably it forces a module-level binding under Cython
# compilation -- confirm before removing.
globals()['_debug_batch_state'] = _debug_batch_state
def sync(tag='default'):
    # Enqueue a debug item on the 'sync-<tag>' batch; computing it forces a
    # flush, which synchronizes pending async work in tests.
    return DebugBatchItem('sync-' + tag)
|
manannayak/asynq
|
asynq/batching.py
|
Python
|
apache-2.0
| 8,188
|
# proxy module
from __future__ import absolute_import
from tvtk.pyface.ui.qt4.scene_editor import *
|
enthought/etsproxy
|
enthought/tvtk/pyface/ui/qt4/scene_editor.py
|
Python
|
bsd-3-clause
| 100
|
import threading
import time as mod_time
import uuid
from redis.exceptions import LockError, LockNotOwnedError
from redis.utils import dummy
class Lock(object):
    """
    A shared, distributed Lock. Using Redis for locking allows the Lock
    to be shared across processes and/or machines.

    It's left to the user to resolve deadlock issues and make sure
    multiple clients play nicely together.
    """

    # Server-side scripts are registered lazily and cached on the class, so
    # every instance of a given Lock (sub)class shares the same Script
    # objects (see register_scripts).
    lua_release = None
    lua_extend = None
    lua_reacquire = None

    # KEYS[1] - lock name
    # ARGV[1] - token
    # return 1 if the lock was released, otherwise 0
    LUA_RELEASE_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        redis.call('del', KEYS[1])
        return 1
    """

    # KEYS[1] - lock name
    # ARGV[1] - token
    # ARGV[2] - additional milliseconds
    # ARGV[3] - "0" if the additional time should be added to the lock's
    #           existing ttl or "1" if the existing ttl should be replaced
    # return 1 if the locks time was extended, otherwise 0
    LUA_EXTEND_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        local expiration = redis.call('pttl', KEYS[1])
        if not expiration then
            expiration = 0
        end
        if expiration < 0 then
            return 0
        end

        local newttl = ARGV[2]
        if ARGV[3] == "0" then
            newttl = ARGV[2] + expiration
        end
        redis.call('pexpire', KEYS[1], newttl)
        return 1
    """

    # KEYS[1] - lock name
    # ARGV[1] - token
    # ARGV[2] - milliseconds
    # return 1 if the locks time was reacquired, otherwise 0
    LUA_REACQUIRE_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        redis.call('pexpire', KEYS[1], ARGV[2])
        return 1
    """

    def __init__(self, redis, name, timeout=None, sleep=0.1,
                 blocking=True, blocking_timeout=None, thread_local=True):
        """
        Create a new Lock instance named ``name`` using the Redis client
        supplied by ``redis``.

        ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.
        ``timeout`` can be specified as a float or integer, both representing
        the number of seconds to wait.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.

        ``blocking`` indicates whether calling ``acquire`` should block until
        the lock has been acquired or to fail immediately, causing ``acquire``
        to return False and the lock not being acquired. Defaults to True.
        Note this value can be overridden by passing a ``blocking``
        argument to ``acquire``.

        ``blocking_timeout`` indicates the maximum amount of time in seconds to
        spend trying to acquire the lock. A value of ``None`` indicates
        continue trying forever. ``blocking_timeout`` can be specified as a
        float or integer, both representing the number of seconds to wait.

        ``thread_local`` indicates whether the lock token is placed in
        thread-local storage. By default, the token is placed in thread local
        storage so that a thread only sees its token, not a token set by
        another thread. Consider the following timeline:

            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
                     thread-1 sets the token to "abc"
            time: 1, thread-2 blocks trying to acquire `my-lock` using the
                     Lock instance.
            time: 5, thread-1 has not yet completed. redis expires the lock
                     key.
            time: 5, thread-2 acquired `my-lock` now that it's available.
                     thread-2 sets the token to "xyz"
            time: 6, thread-1 finishes its work and calls release(). if the
                     token is *not* stored in thread local storage, then
                     thread-1 would see the token value as "xyz" and would be
                     able to successfully release the thread-2's lock.

        In some use cases it's necessary to disable thread local storage. For
        example, if you have code where one thread acquires a lock and passes
        that lock instance to a worker thread to release later. If thread
        local storage isn't disabled in this case, the worker thread won't see
        the token set by the thread that acquired the lock. Our assumption
        is that these cases aren't common and as such default to using
        thread local storage.
        """
        self.redis = redis
        self.name = name
        self.timeout = timeout
        self.sleep = sleep
        self.blocking = blocking
        self.blocking_timeout = blocking_timeout
        self.thread_local = bool(thread_local)
        # Token storage: thread-local by default (see docstring timeline),
        # otherwise a plain dummy namespace shared by all threads.
        self.local = threading.local() if self.thread_local else dummy()
        self.local.token = None
        self.register_scripts()

    def register_scripts(self):
        # Register the Lua scripts once per class; subsequent Lock instances
        # reuse the cached Script objects.
        cls = self.__class__
        client = self.redis
        if cls.lua_release is None:
            cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
        if cls.lua_extend is None:
            cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
        if cls.lua_reacquire is None:
            cls.lua_reacquire = \
                client.register_script(cls.LUA_REACQUIRE_SCRIPT)

    def __enter__(self):
        # force blocking, as otherwise the user would have to check whether
        # the lock was actually acquired or not.
        if self.acquire(blocking=True):
            return self
        raise LockError("Unable to acquire lock within the time specified")

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def acquire(self, blocking=None, blocking_timeout=None, token=None):
        """
        Use Redis to hold a shared, distributed lock named ``name``.
        Returns True once the lock is acquired.

        If ``blocking`` is False, always return immediately. If the lock
        was acquired, return True, otherwise return False.

        ``blocking_timeout`` specifies the maximum number of seconds to
        wait trying to acquire the lock.

        ``token`` specifies the token value to be used. If provided, token
        must be a bytes object or a string that can be encoded to a bytes
        object with the default encoding. If a token isn't specified, a UUID
        will be generated.
        """
        sleep = self.sleep
        if token is None:
            # Generate a unique per-acquisition token; it is what proves
            # ownership to the release/extend/reacquire scripts.
            token = uuid.uuid1().hex.encode()
        else:
            encoder = self.redis.connection_pool.get_encoder()
            token = encoder.encode(token)
        if blocking is None:
            blocking = self.blocking
        if blocking_timeout is None:
            blocking_timeout = self.blocking_timeout
        stop_trying_at = None
        if blocking_timeout is not None:
            stop_trying_at = mod_time.time() + blocking_timeout
        while True:
            if self.do_acquire(token):
                self.local.token = token
                return True
            if not blocking:
                return False
            # Give up early if the next retry would overshoot the deadline.
            next_try_at = mod_time.time() + sleep
            if stop_trying_at is not None and next_try_at > stop_trying_at:
                return False
            mod_time.sleep(sleep)

    def do_acquire(self, token):
        if self.timeout:
            # convert to milliseconds
            timeout = int(self.timeout * 1000)
        else:
            timeout = None
        # SET with nx=True/px=timeout is a single atomic operation: the key
        # is set only if it does not already exist, with an optional TTL.
        if self.redis.set(self.name, token, nx=True, px=timeout):
            return True
        return False

    def locked(self):
        """
        Returns True if this key is locked by any process, otherwise False.
        """
        return self.redis.get(self.name) is not None

    def owned(self):
        """
        Returns True if this key is locked by this lock, otherwise False.
        """
        stored_token = self.redis.get(self.name)
        # need to always compare bytes to bytes
        # TODO: this can be simplified when the context manager is finished
        if stored_token and not isinstance(stored_token, bytes):
            encoder = self.redis.connection_pool.get_encoder()
            stored_token = encoder.encode(stored_token)
        return self.local.token is not None and \
            stored_token == self.local.token

    def release(self):
        "Releases the already acquired lock"
        expected_token = self.local.token
        if expected_token is None:
            raise LockError("Cannot release an unlocked lock")
        # Clear the local token first so a failed server-side release still
        # leaves this instance in the "unlocked" state.
        self.local.token = None
        self.do_release(expected_token)

    def do_release(self, expected_token):
        if not bool(self.lua_release(keys=[self.name],
                                     args=[expected_token],
                                     client=self.redis)):
            raise LockNotOwnedError("Cannot release a lock"
                                    " that's no longer owned")

    def extend(self, additional_time, replace_ttl=False):
        """
        Adds more time to an already acquired lock.

        ``additional_time`` can be specified as an integer or a float, both
        representing the number of seconds to add.

        ``replace_ttl`` if False (the default), add `additional_time` to
        the lock's existing ttl. If True, replace the lock's ttl with
        `additional_time`.
        """
        if self.local.token is None:
            raise LockError("Cannot extend an unlocked lock")
        if self.timeout is None:
            raise LockError("Cannot extend a lock with no timeout")
        return self.do_extend(additional_time, replace_ttl)

    def do_extend(self, additional_time, replace_ttl):
        additional_time = int(additional_time * 1000)
        if not bool(
            self.lua_extend(
                keys=[self.name],
                args=[
                    self.local.token,
                    additional_time,
                    replace_ttl and "1" or "0"
                ],
                client=self.redis,
            )
        ):
            raise LockNotOwnedError(
                "Cannot extend a lock that's" " no longer owned"
            )
        return True

    def reacquire(self):
        """
        Resets a TTL of an already acquired lock back to a timeout value.
        """
        if self.local.token is None:
            raise LockError("Cannot reacquire an unlocked lock")
        if self.timeout is None:
            raise LockError("Cannot reacquire a lock with no timeout")
        return self.do_reacquire()

    def do_reacquire(self):
        timeout = int(self.timeout * 1000)
        if not bool(self.lua_reacquire(keys=[self.name],
                                       args=[self.local.token, timeout],
                                       client=self.redis)):
            raise LockNotOwnedError("Cannot reacquire a lock that's"
                                    " no longer owned")
        return True
|
5977862/redis-py
|
redis/lock.py
|
Python
|
mit
| 11,349
|
#!/usr/bin/python2.6
# MarsRoverPictureCrawler (mrpc):
# A download bot for OPPORTUNITY and SPIRIT Mars Rover Pictures
#
# Copyright (C) 2011 by Maximilian Irro <maximilian.irro@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Please note that this license is for the software only. The picture material
# the software collects is owned by Courtesy NASA/JPL-Caltech and released by
# them under their JPL Image Use Policy.
#
# For more information see the files README and LICENSE. Copys of them should
# have reached you among with this program. If not, see the GitHub repository
# at http://github.com/mpgirro/MarsRoverPictureCrawler.git
import urllib2
from BeautifulSoup import *
from urlparse import urljoin
from os import system
import sys
from urllib2 import HTTPError, URLError
PIC_BASE = 'http://marsrover.nasa.gov/gallery/all/'
GALLERY = 'http://marsrover.nasa.gov/gallery/all/%s_%s%03d_text.html' # %( ROVER, CAMERA, SOL)
PATH_BASE = 'mars_rovers/'
ROVERS = [ 'opportunity', 'spirit']
CAMERAS = [ 'f', 'r', 'n', 'p', 'm' ]
NAME_CODE = {
'opportunity' : 'OPPORTUNITY',
'spirit' : 'SPIRIT',
'f' : 'Front Hazcam',
'r' : 'Rear Hazcam',
'n' : 'Navigation Camera',
'p' : 'Panoramic Camera',
'm' : 'Microscopic Imager'
}
def crawl(start, end, path=PATH_BASE):
    # crawls the NASA rover index from the sol 'start' to
    # the sol 'end'. the pictures will be downloaded to the
    # directory 'path'. default is PATH_BASE.
    #
    # BUGFIX: the original compared strings with 'is'/'is not', which tests
    # object identity, not equality -- it only worked by accident for
    # interned strings and crashed on an empty path ("" has no [-1]).
    # Use equality and endswith() instead.
    if path != PATH_BASE and not path.endswith('/'):
        path = "%s/" % (path)
    for sol in range(start, end + 1):
        for rover in ROVERS:
            for cam in CAMERAS:
                # gallery index page for this rover/camera/sol combination
                gal = GALLERY % (rover, cam, sol)
                pics = get_pictures(gal)
                load_pics(pics, path, rover, sol, cam)
def get_pictures(url):
    # scans a given url for links (a tag) and returns
    # a list of all hrefs leading to a .JPG file
    pics = []
    try:
        c = urllib2.urlopen( url )
    except HTTPError, e:
        # Missing gallery pages (e.g. sols with no images for this camera)
        # are expected; treat them as "no pictures" rather than failing.
        #print 'picture retrievel from %s failed because of %s' %(url,e)
        return pics
    soup = BeautifulSoup( c.read() )
    links = soup('a')
    for link in links:
        if 'href' in dict(link.attrs):
            # Only full-size pictures carry an uppercase .JPG extension.
            if link['href'][-4:] == '.JPG':
                href = link['href']
                pics.append(href)
    return pics
def load_pics( pics, savepath, rover, sol, cam):
# loads all pictures in pics to the directory given through
# savepath/rover/sol/cam/. not yet existing directorys will be
# created and download information printed to the standard
# outputstream. does not create directorys if pics is empty.
if pics is None or len(pics) is 0:
return
path = "%s%s/Sol %s/%s (%i img)" % (savepath, NAME_CODE[rover], sol, NAME_CODE[cam], len(pics) )
system("mkdir -p '%s'" % (path) )
c = 1
for pic in pics:
print 'loading %s (%i/%i) from %s at Sol %s to %s' % (NAME_CODE[cam], c, len(pics), NAME_CODE[rover], sol, path)
pic_url = "%s%s" % (PIC_BASE, pic)
load_pic( pic_url, path)
c += 1
def load_pic( pic, dir):
    # downloads and saves a picture to dir. the filename
    # of the picture is given through its url by calling
    # name(pic). does nothing if the url is somehow invalid.
    try:
        opener = urllib2.build_opener()
        page = opener.open(pic)
        picpage = page.read()
        filename = "%s/%s" % ( dir, name(pic))
        fout = open(filename, "wb")
        fout.write(picpage)
        fout.close()
    except URLError:
        # Network failures and bad URLs are skipped silently by design:
        # one missing picture should not abort the whole crawl.
        return
def name(url):
    """Return the final path component of *url* (the picture's filename).

    No validation is performed: the result may be empty (trailing slash)
    and is not checked for a file extension.
    """
    return url.split("/")[-1]
if __name__ == '__main__':
    # command line arguments are: start_sol end_sol download_directory(optional)
    # default path is the current_directory/mars_rovers/..
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    # BUGFIX: the directory argument is documented as optional, but the
    # original read sys.argv[3] unconditionally and crashed with IndexError
    # when it was omitted. Fall back to PATH_BASE instead.
    path = sys.argv[3] if len(sys.argv) > 3 else PATH_BASE
    crawl( start, end, path)
|
mpgirro/MarsRoverPictureCrawler
|
mrpc.py
|
Python
|
mit
| 5,158
|
import pytest
from configmanager import Config, Item, RequiredValueMissing
def test_validate_raises_required_value_missing():
    # A config with one required item must fail validation while that item
    # has no value, pass once a value is set, and fail again after a reset.
    config = Config({
        'a': Item(required=True),
        'b': Item(),
    })
    with pytest.raises(RequiredValueMissing):
        config.validate()
    config.a.set('value')
    config.validate()
    config.a.reset()
    with pytest.raises(RequiredValueMissing):
        config.validate()
|
jbasko/configmanager
|
tests/test_validation.py
|
Python
|
mit
| 429
|
#!/usr/bin/python
import math
filename = "gvrr.cc"
ss = "\
//\n\
// BAGEL - Brilliantly Advanced General Electronic Structure Library\n\
// Filename: " + filename + "\n\
// Copyright (C) 2012 Toru Shiozaki\n\
//\n\
// Author: Toru Shiozaki <shiozaki@northwestern.edu>\n\
// Maintainer: Shiozaki group\n\
//\n\
// This file is part of the BAGEL package.\n\
//\n\
// This program is free software: you can redistribute it and/or modify\n\
// it under the terms of the GNU General Public License as published by\n\
// the Free Software Foundation, either version 3 of the License, or\n\
// (at your option) any later version.\n\
//\n\
// This program is distributed in the hope that it will be useful,\n\
// but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
// GNU General Public License for more details.\n\
//\n\
// You should have received a copy of the GNU General Public License\n\
// along with this program. If not, see <http://www.gnu.org/licenses/>.\n\
//\n\
\n\
#include <src/integral/rys/gradbatch.h>\n\
#include <src/integral/rys/_gvrr_drv.h>\n\
#include <src/util/math/comb.h>\n\
#include <src/util/f77.h>\n\
\n\
using namespace std;\n\
using namespace bagel;\n\
\n\
static const Comb comb;\n\
\n\
\n\
void GradBatch::perform_VRR() {\n\
#ifndef LIBINT_INTERFACE\n\
const int a = basisinfo_[0]->angular_number();\n\
const int b = basisinfo_[1]->angular_number();\n\
const int c = basisinfo_[2]->angular_number();\n\
const int d = basisinfo_[3]->angular_number();\n\
const int acsize = (a+1)*(a+2)*(b+1)*(b+2)*(c+1)*(c+2)*(d+1)*(d+2)/16;\n\
\n\
const int isize = (amax_ + 1) * (cmax_ + 1);\n\
double* const workx = stack_->get(isize*rank_*3);\n\
double* const worky = workx + isize*rank_;\n\
double* const workz = worky + isize*rank_;\n\
\n\
const int a2 = a+2;\n\
const int b2 = b+2;\n\
const int c2 = c+2;\n\
const int d2 = d+2;\n\
\n\
double* const transx = stack_->get((amax_+1)*a2*b2);\n\
double* const transy = stack_->get((amax_+1)*a2*b2);\n\
double* const transz = stack_->get((amax_+1)*a2*b2);\n\
double* const trans2x = stack_->get((cmax_+1)*c2*d2);\n\
double* const trans2y = stack_->get((cmax_+1)*c2*d2);\n\
double* const trans2z = stack_->get((cmax_+1)*c2*d2);\n\
fill_n(transx, (amax_+1)*a2*b2, 0.0);\n\
fill_n(transy, (amax_+1)*a2*b2, 0.0);\n\
fill_n(transz, (amax_+1)*a2*b2, 0.0);\n\
fill_n(trans2x, (cmax_+1)*c2*d2, 0.0);\n\
fill_n(trans2y, (cmax_+1)*c2*d2, 0.0);\n\
fill_n(trans2z, (cmax_+1)*c2*d2, 0.0);\n\
// for usual integrals\n\
for (int ib = 0, k = 0; ib <= b+1; ++ib) {\n\
for (int ia = 0; ia <= a+1; ++ia, ++k) {\n\
if (ia == a+1 && ib == b+1) continue;\n\
for (int i = ia; i <= ia+ib; ++i) {\n\
transx[i + (amax_+1)*k] = comb(ib, ia+ib-i) * pow(AB_[0], ia+ib-i);\n\
transy[i + (amax_+1)*k] = comb(ib, ia+ib-i) * pow(AB_[1], ia+ib-i);\n\
transz[i + (amax_+1)*k] = comb(ib, ia+ib-i) * pow(AB_[2], ia+ib-i);\n\
} \n\
} \n\
}\n\
for (int id = 0, k = 0; id <= d+1; ++id) {\n\
for (int ic = 0; ic <= c+1; ++ic, ++k) {\n\
if (ic == c+1 && id == d+1) continue;\n\
for (int i = ic; i <= ic+id; ++i) {\n\
trans2x[i + (cmax_+1)*k] = comb(id, ic+id-i) * pow(CD_[0], ic+id-i);\n\
trans2y[i + (cmax_+1)*k] = comb(id, ic+id-i) * pow(CD_[1], ic+id-i);\n\
trans2z[i + (cmax_+1)*k] = comb(id, ic+id-i) * pow(CD_[2], ic+id-i);\n\
} \n\
} \n\
}\n\
double* const intermediate = stack_->get(b2*a2*(cmax_+1)*rank_);\n\
double* const final_x = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_y = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_z = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_xa = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_xb = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_xc = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_ya = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_yb = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_yc = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_za = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_zb = stack_->get(b2*a2*c2*d2*rank_);\n\
double* const final_zc = stack_->get(b2*a2*c2*d2*rank_);\n\
const array<bool,4> dummy{{basisinfo_[0]->dummy(), basisinfo_[1]->dummy(), basisinfo_[2]->dummy(), basisinfo_[3]->dummy()}};\n\
const int hashkey = (a << 24) + (b << 16) + (c << 8) + d;\n"
# Enumerate every angular-momentum quadruple (a, b, c, d) with a >= b and
# c >= d and emit one switch case per quadruple dispatching to the
# templated gvrr_driver<a,b,c,d,rank>.
for a in range(0, 8):
    for b in range(0, 8):
        if a < b: continue
        for c in range(0, 8):
            for d in range(0, 8):
                if c < d: continue
                # Number of Rys quadrature roots for this total angular
                # momentum; the -0.001 avoids rounding up exact halves.
                rank = int(math.ceil((a+b+c+d+2)*0.5-0.001))
                # Pack (a, b, c, d) into one integer, 8 bits per index,
                # mirroring the C++ hashkey computed in the preamble.
                off = 1 << 8
                key = d+off*(c+off*(b+off*a))
                if a == 0 and c == 0:
                    # First iteration only (b and d are forced to 0 by the
                    # a >= b, c >= d constraints): open the switch statement.
                    ss += "\
  switch (hashkey) {\n"
                # BUGFIX: the original guard read
                # "a == 7 or c == 7 or c == 7 or d == 7" -- 'c == 7' twice
                # and never 'b == 7'. Behaviour is unchanged (b == 7 implies
                # a == 7 and d == 7 implies c == 7 under the loop
                # constraints), but the intended condition is spelled out.
                if a == 7 or b == 7 or c == 7 or d == 7:
                    ss += "\
#ifdef COMPILE_J_ORB\n"
                ss += "\
  case " + str(key) + " :\n\
    for (int j = 0; j != screening_size_; ++j) {\n\
      int ii = screening_[j];\n\
      gvrr_driver<" + str(a) + "," + str(b) + "," + str(c) + "," + str(d) + "," + str(rank) + ">(data_+ii*acsize, roots_+ii*rank_, weights_+ii*rank_, coeff_[ii],\n\
                  basisinfo_[0]->position(), basisinfo_[1]->position(), basisinfo_[2]->position(), basisinfo_[3]->position(),\n\
                  P_+ii*3, Q_+ii*3, xp_[ii], xq_[ii], size_block_,\n\
                  exponents_.get()+ii*4, transx, transy, transz, trans2x, trans2y, trans2z, intermediate,\n\
                  final_x, final_y, final_z, final_xa, final_xb, final_xc, final_ya, final_yb, final_yc, final_za, final_zb, final_zc, workx, worky, workz, dummy);\n\
    } break;\n"
                if a == 7 or b == 7 or c == 7 or d == 7:
                    ss += "\
#endif\n"
# Close the switch, release every scratch buffer in reverse allocation
# order, and close the generated function.
ss += "\
  default :\n\
    assert(false); // hashkey not found\n\
  }\n\
  stack_->release(b2*a2*c2*d2*rank_, final_zc);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_zb);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_za);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_yc);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_yb);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_ya);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_xc);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_xb);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_xa);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_z);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_y);\n\
  stack_->release(b2*a2*c2*d2*rank_, final_x);\n\
\n\
  stack_->release(b2*a2*(cmax_+1)*rank_, intermediate);\n\
\n\
  stack_->release((cmax_+1)*c2*d2, trans2z);\n\
  stack_->release((cmax_+1)*c2*d2, trans2y);\n\
  stack_->release((cmax_+1)*c2*d2, trans2x);\n\
\n\
  stack_->release((amax_+1)*a2*b2, transz);\n\
  stack_->release((amax_+1)*a2*b2, transy);\n\
  stack_->release((amax_+1)*a2*b2, transx);\n\
  stack_->release(rank_*isize*3, workx);\n\
\n\
#endif\n\
}"
# BUGFIX: the original opened the output file and never closed it; use a
# context manager so the buffer is flushed and the handle released.
with open(filename, "w") as out:
    out.write(ss)
|
nubakery/bagel
|
src/integral/rys/vrr_gen/ggen.py
|
Python
|
gpl-3.0
| 6,913
|
#
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
#    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello.settings")
    # Point Django at this project's settings module unless the caller has
    # already exported DJANGO_SETTINGS_MODULE.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "agent.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
kinnevo/kic_alone
|
manage.py
|
Python
|
mit
| 876
|
import re
# Demo of re.split and extended slicing. NOTE: Python 2 syntax (print
# statement, list-returning zip) — this file does not run under Python 3.
line = 'asdf fff; ddd, fffc,baba, zz'
# Split on any of ';', ',' or whitespace, absorbing trailing whitespace.
print re.split(r'[;,\s]\s*', line)
arr = [1,2,3,4]
print arr[1::2]  # every second element starting at index 1
print arr[::1]   # full slice: shallow copy
print arr[1::]   # everything from index 1 onward
print arr[::3] # start::step
arr2 = [5,6,7,8,9]
print zip(arr,arr2)  # pairs truncated to the shorter list
# Capturing group: the separator characters themselves appear in the result.
print re.split(r'(;|,|\s)\s*', line)
# Non-capturing group (?:...): same split, separators dropped.
print re.split(r'(?:;|,|\s)\s*', line)
|
wonghoifung/learning-python
|
r2_1.py
|
Python
|
mit
| 311
|
# -*- coding: utf-8 -*-
"""Top-level package metadata for mockboto3."""
__author__ = """Sean Marlow"""
__email__ = 'sean.marlow@suse.com'
__version__ = '0.1.1'
|
smarlowucf/mockboto3
|
mockboto3/__init__.py
|
Python
|
mit
| 113
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from collections import deque
from mock import patch
import time
from uuid import uuid4
import cassandra
from cassandra.cluster import Cluster, NoHostAvailable
from cassandra.concurrent import execute_concurrent
from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy,
RetryPolicy, SimpleConvictionPolicy, HostDistance,
WhiteListRoundRobinPolicy)
from cassandra.query import SimpleStatement, TraceUnavailable
from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node
from tests.integration.util import assert_quiescent_pool_state
def setup_module():
    """Module-level fixture hook: bring up the shared single-DC test cluster
    before any test in this module runs."""
    use_singledc()
class ClusterTests(unittest.TestCase):
    """Integration tests for Cluster/Session lifecycle, schema-metadata
    refresh, query tracing, and connection-pool behavior, run against a live
    single-DC Cassandra cluster (see setup_module)."""

    def test_raise_error_on_control_connection_timeout(self):
        """
        Test for initial control connection timeout
        test_raise_error_on_control_connection_timeout tests that the driver times out after the set initial connection
        timeout. It first pauses node1, essentially making it unreachable. It then attempts to create a Cluster object
        via connecting to node1 with a timeout of 1 second, and ensures that a NoHostAvailable is raised, along with
        an OperationTimedOut for 1 second.
        @expected_errors NoHostAvailable When node1 is paused, and a connection attempt is made.
        @since 2.6.0
        @jira_ticket PYTHON-206
        @expected_result NoHostAvailable exception should be raised after 1 second.
        @test_category connection
        """
        get_node(1).pause()
        cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1)
        with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"):
            cluster.connect()
        # Un-pause the node so later tests see a healthy cluster.
        get_node(1).resume()

    def test_basic(self):
        """
        Test basic connection and usage
        """
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        # DDL/DML statements return None (no rows).
        result = session.execute(
            """
            CREATE KEYSPACE clustertests
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
            """)
        self.assertEqual(None, result)
        result = session.execute(
            """
            CREATE TABLE clustertests.cf0 (
                a text,
                b text,
                c text,
                PRIMARY KEY (a, b)
            )
            """)
        self.assertEqual(None, result)
        result = session.execute(
            """
            INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c')
            """)
        self.assertEqual(None, result)
        result = session.execute("SELECT * FROM clustertests.cf0")
        self.assertEqual([('a', 'b', 'c')], result)
        session.execute("DROP KEYSPACE clustertests")
        cluster.shutdown()

    def test_connect_on_keyspace(self):
        """
        Ensure clusters that connect on a keyspace, do
        """
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        result = session.execute(
            """
            INSERT INTO test3rf.test (k, v) VALUES (8889, 8889)
            """)
        self.assertEqual(None, result)
        result = session.execute("SELECT * FROM test3rf.test")
        self.assertEqual([(8889, 8889)], result)
        # test_connect_on_keyspace
        session2 = cluster.connect('test3rf')
        result2 = session2.execute("SELECT * FROM test")
        self.assertEqual(result, result2)
        cluster.shutdown()

    def test_set_keyspace_twice(self):
        """Issuing USE for the same keyspace twice must not error."""
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        session.execute("USE system")
        session.execute("USE system")
        cluster.shutdown()

    def test_default_connections(self):
        """
        Ensure errors are not thrown when using non-default policies
        """
        Cluster(
            load_balancing_policy=RoundRobinPolicy(),
            reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0),
            default_retry_policy=RetryPolicy(),
            conviction_policy_factory=SimpleConvictionPolicy,
            protocol_version=PROTOCOL_VERSION
        )

    def test_connect_to_already_shutdown_cluster(self):
        """
        Ensure you cannot connect to a cluster that's been shutdown
        """
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        cluster.shutdown()
        self.assertRaises(Exception, cluster.connect)

    def test_auth_provider_is_callable(self):
        """
        Ensure that auth_providers are always callable
        """
        self.assertRaises(TypeError, Cluster, auth_provider=1, protocol_version=1)
        c = Cluster(protocol_version=1)
        self.assertRaises(TypeError, setattr, c, 'auth_provider', 1)

    def test_v2_auth_provider(self):
        """
        Check for v2 auth_provider compliance
        """
        # Plain callables are only accepted for protocol v1; v2+ requires an
        # AuthProvider instance, so this must raise TypeError.
        bad_auth_provider = lambda x: {'username': 'foo', 'password': 'bar'}
        self.assertRaises(TypeError, Cluster, auth_provider=bad_auth_provider, protocol_version=2)
        c = Cluster(protocol_version=2)
        self.assertRaises(TypeError, setattr, c, 'auth_provider', bad_auth_provider)

    def test_conviction_policy_factory_is_callable(self):
        """
        Ensure that conviction_policy_factory are always callable
        """
        self.assertRaises(ValueError, Cluster, conviction_policy_factory=1)

    def test_connect_to_bad_hosts(self):
        """
        Ensure that a NoHostAvailable Exception is thrown
        when a cluster cannot connect to given hosts
        """
        cluster = Cluster(['127.1.2.9', '127.1.2.10'],
                          protocol_version=PROTOCOL_VERSION)
        self.assertRaises(NoHostAvailable, cluster.connect)

    def test_cluster_settings(self):
        """
        Test connection setting getters and setters
        """
        if PROTOCOL_VERSION >= 3:
            raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol")
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        # Each setting: read default, bump it by one, read it back.
        min_requests_per_connection = cluster.get_min_requests_per_connection(HostDistance.LOCAL)
        self.assertEqual(cassandra.cluster.DEFAULT_MIN_REQUESTS, min_requests_per_connection)
        cluster.set_min_requests_per_connection(HostDistance.LOCAL, min_requests_per_connection + 1)
        self.assertEqual(cluster.get_min_requests_per_connection(HostDistance.LOCAL), min_requests_per_connection + 1)
        max_requests_per_connection = cluster.get_max_requests_per_connection(HostDistance.LOCAL)
        self.assertEqual(cassandra.cluster.DEFAULT_MAX_REQUESTS, max_requests_per_connection)
        cluster.set_max_requests_per_connection(HostDistance.LOCAL, max_requests_per_connection + 1)
        self.assertEqual(cluster.get_max_requests_per_connection(HostDistance.LOCAL), max_requests_per_connection + 1)
        core_connections_per_host = cluster.get_core_connections_per_host(HostDistance.LOCAL)
        self.assertEqual(cassandra.cluster.DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, core_connections_per_host)
        cluster.set_core_connections_per_host(HostDistance.LOCAL, core_connections_per_host + 1)
        self.assertEqual(cluster.get_core_connections_per_host(HostDistance.LOCAL), core_connections_per_host + 1)
        max_connections_per_host = cluster.get_max_connections_per_host(HostDistance.LOCAL)
        self.assertEqual(cassandra.cluster.DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, max_connections_per_host)
        cluster.set_max_connections_per_host(HostDistance.LOCAL, max_connections_per_host + 1)
        self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1)

    def test_submit_schema_refresh(self):
        """
        Ensure new new schema is refreshed after submit_schema_refresh()
        """
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        cluster.connect()
        self.assertNotIn("newkeyspace", cluster.metadata.keyspaces)
        # Create the keyspace through a *second* cluster so the first one's
        # metadata is only updated by the explicit refresh below.
        other_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = other_cluster.connect()
        session.execute(
            """
            CREATE KEYSPACE newkeyspace
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
            """)
        future = cluster.submit_schema_refresh()
        future.result()
        self.assertIn("newkeyspace", cluster.metadata.keyspaces)
        session.execute("DROP KEYSPACE newkeyspace")
        cluster.shutdown()
        other_cluster.shutdown()

    def test_refresh_schema(self):
        """Full refresh replaces the keyspaces mapping (new object, equal content)."""
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        original_meta = cluster.metadata.keyspaces
        # full schema refresh, with wait
        cluster.refresh_schema_metadata()
        self.assertIsNot(original_meta, cluster.metadata.keyspaces)
        self.assertEqual(original_meta, cluster.metadata.keyspaces)
        cluster.shutdown()

    def test_refresh_schema_keyspace(self):
        """Keyspace-scoped refresh replaces only that keyspace's metadata object."""
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        original_meta = cluster.metadata.keyspaces
        original_system_meta = original_meta['system']
        # only refresh one keyspace
        cluster.refresh_keyspace_metadata('system')
        current_meta = cluster.metadata.keyspaces
        # The outer mapping must be untouched; only 'system' is rebuilt.
        self.assertIs(original_meta, current_meta)
        current_system_meta = current_meta['system']
        self.assertIsNot(original_system_meta, current_system_meta)
        self.assertEqual(original_system_meta.as_cql_query(), current_system_meta.as_cql_query())
        cluster.shutdown()

    def test_refresh_schema_table(self):
        """Table-scoped refresh replaces only that table's metadata object."""
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        original_meta = cluster.metadata.keyspaces
        original_system_meta = original_meta['system']
        original_system_schema_meta = original_system_meta.tables['schema_columnfamilies']
        # only refresh one table
        cluster.refresh_table_metadata('system', 'schema_columnfamilies')
        current_meta = cluster.metadata.keyspaces
        current_system_meta = current_meta['system']
        current_system_schema_meta = current_system_meta.tables['schema_columnfamilies']
        # Keyspace mapping and keyspace object are untouched; table is rebuilt.
        self.assertIs(original_meta, current_meta)
        self.assertIs(original_system_meta, current_system_meta)
        self.assertIsNot(original_system_schema_meta, current_system_schema_meta)
        self.assertEqual(original_system_schema_meta.as_cql_query(), current_system_schema_meta.as_cql_query())
        cluster.shutdown()

    def test_refresh_schema_type(self):
        """UDT-scoped refresh replaces only that user type's metadata object."""
        if get_server_versions()[0] < (2, 1, 0):
            raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1')
        if PROTOCOL_VERSION < 3:
            raise unittest.SkipTest('UDTs are not specified in change events for protocol v2')
            # We may want to refresh types on keyspace change events in that case(?)
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        keyspace_name = 'test1rf'
        type_name = self._testMethodName
        session.execute('CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' % (keyspace_name, type_name))
        original_meta = cluster.metadata.keyspaces
        original_test1rf_meta = original_meta[keyspace_name]
        original_type_meta = original_test1rf_meta.user_types[type_name]
        # only refresh one type
        cluster.refresh_user_type_metadata('test1rf', type_name)
        current_meta = cluster.metadata.keyspaces
        current_test1rf_meta = current_meta[keyspace_name]
        current_type_meta = current_test1rf_meta.user_types[type_name]
        self.assertIs(original_meta, current_meta)
        self.assertIs(original_test1rf_meta, current_test1rf_meta)
        self.assertIsNot(original_type_meta, current_type_meta)
        self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query())
        # NOTE(review): only the session is shut down here, not the cluster —
        # presumably an oversight; confirm against the upstream test suite.
        session.shutdown()

    def test_refresh_schema_no_wait(self):
        """Exercise max_schema_agreement_wait at both cluster and call level,
        under a deliberately induced schema disagreement."""
        contact_points = ['127.0.0.1']
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10,
                          contact_points=contact_points, load_balancing_policy=WhiteListRoundRobinPolicy(contact_points))
        session = cluster.connect()
        schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0]
        # create a schema disagreement
        session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (uuid4(),))
        try:
            agreement_timeout = 1
            # cluster agreement wait exceeded
            c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout)
            start_time = time.time()
            s = c.connect()
            end_time = time.time()
            self.assertGreaterEqual(end_time - start_time, agreement_timeout)
            self.assertTrue(c.metadata.keyspaces)
            # cluster agreement wait used for refresh
            original_meta = c.metadata.keyspaces
            start_time = time.time()
            self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata)
            end_time = time.time()
            self.assertGreaterEqual(end_time - start_time, agreement_timeout)
            self.assertIs(original_meta, c.metadata.keyspaces)
            # refresh wait overrides cluster value
            original_meta = c.metadata.keyspaces
            start_time = time.time()
            c.refresh_schema_metadata(max_schema_agreement_wait=0)
            end_time = time.time()
            self.assertLess(end_time - start_time, agreement_timeout)
            self.assertIsNot(original_meta, c.metadata.keyspaces)
            self.assertEqual(original_meta, c.metadata.keyspaces)
            c.shutdown()
            refresh_threshold = 0.5
            # cluster agreement bypass
            c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0)
            start_time = time.time()
            s = c.connect()
            end_time = time.time()
            self.assertLess(end_time - start_time, refresh_threshold)
            self.assertTrue(c.metadata.keyspaces)
            # cluster agreement wait used for refresh
            original_meta = c.metadata.keyspaces
            start_time = time.time()
            c.refresh_schema_metadata()
            end_time = time.time()
            self.assertLess(end_time - start_time, refresh_threshold)
            self.assertIsNot(original_meta, c.metadata.keyspaces)
            self.assertEqual(original_meta, c.metadata.keyspaces)
            # refresh wait overrides cluster value
            original_meta = c.metadata.keyspaces
            start_time = time.time()
            self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata,
                                    max_schema_agreement_wait=agreement_timeout)
            end_time = time.time()
            self.assertGreaterEqual(end_time - start_time, agreement_timeout)
            self.assertIs(original_meta, c.metadata.keyspaces)
            c.shutdown()
        finally:
            # Always restore the real schema_version so later tests agree.
            session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,))
            cluster.shutdown()

    def test_trace(self):
        """
        Ensure trace can be requested for async and non-async queries
        """
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        # trace=True is only valid with a Statement object, not a raw string.
        self.assertRaises(TypeError, session.execute, "SELECT * FROM system.local", trace=True)

        def check_trace(trace):
            self.assertIsNot(None, trace.request_type)
            self.assertIsNot(None, trace.duration)
            self.assertIsNot(None, trace.started_at)
            self.assertIsNot(None, trace.coordinator)
            self.assertIsNot(None, trace.events)
        query = "SELECT * FROM system.local"
        statement = SimpleStatement(query)
        session.execute(statement, trace=True)
        check_trace(statement.trace)
        query = "SELECT * FROM system.local"
        statement = SimpleStatement(query)
        session.execute(statement)
        self.assertEqual(None, statement.trace)
        statement2 = SimpleStatement(query)
        future = session.execute_async(statement2, trace=True)
        future.result()
        check_trace(future.get_query_trace())
        statement2 = SimpleStatement(query)
        future = session.execute_async(statement2)
        future.result()
        self.assertEqual(None, future.get_query_trace())
        prepared = session.prepare("SELECT * FROM system.local")
        future = session.execute_async(prepared, parameters=(), trace=True)
        future.result()
        check_trace(future.get_query_trace())
        cluster.shutdown()

    def test_trace_timeout(self):
        """A non-positive trace-fetch timeout must raise TraceUnavailable."""
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        query = "SELECT * FROM system.local"
        statement = SimpleStatement(query)
        future = session.execute_async(statement, trace=True)
        future.result()
        self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0)
        cluster.shutdown()

    def test_string_coverage(self):
        """
        Ensure str(future) returns without error
        """
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()
        query = "SELECT * FROM system.local"
        statement = SimpleStatement(query)
        future = session.execute_async(statement)
        self.assertIn(query, str(future))
        future.result()
        self.assertIn(query, str(future))
        self.assertIn('result', str(future))
        cluster.shutdown()

    def test_idle_heartbeat(self):
        """Verify heartbeats keep idle connections alive and that pools
        quiesce once the heartbeat thread is stopped."""
        interval = 1
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval)
        if PROTOCOL_VERSION < 3:
            cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
        session = cluster.connect()
        # This test relies on impl details of connection req id management to see if heartbeats
        # are being sent. May need update if impl is changed
        connection_request_ids = {}
        for h in cluster.get_connection_holders():
            for c in h.get_connections():
                # make sure none are idle (should have startup messages)
                self.assertFalse(c.is_idle)
                with c.lock:
                    connection_request_ids[id(c)] = deque(c.request_ids)  # copy of request ids
        # let two heatbeat intervals pass (first one had startup messages in it)
        time.sleep(2 * interval + interval/10.)
        connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()]
        # make sure requests were sent on all connections
        for c in connections:
            expected_ids = connection_request_ids[id(c)]
            # One heartbeat request consumed and returned one request id.
            expected_ids.rotate(-1)
            with c.lock:
                self.assertListEqual(list(c.request_ids), list(expected_ids))
        # assert idle status
        self.assertTrue(all(c.is_idle for c in connections))
        # send messages on all connections
        statements_and_params = [("SELECT release_version FROM system.local", ())] * len(cluster.metadata.all_hosts())
        results = execute_concurrent(session, statements_and_params)
        for success, result in results:
            self.assertTrue(success)
        # assert not idle status
        self.assertFalse(any(c.is_idle if not c.is_control_connection else False for c in connections))
        # holders include session pools and cc
        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1)  # hosts pools, 1 for cc
        # include additional sessions
        session2 = cluster.connect()
        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) + 1)  # 2 sessions' hosts pools, 1 for cc
        cluster._idle_heartbeat.stop()
        cluster._idle_heartbeat.join()
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()

    @patch('cassandra.cluster.Cluster.idle_heartbeat_interval', new=0.1)
    def test_idle_heartbeat_disabled(self):
        """idle_heartbeat_interval=0 must disable heartbeats entirely."""
        self.assertTrue(Cluster.idle_heartbeat_interval)
        # heartbeat disabled with '0'
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0)
        self.assertEqual(cluster.idle_heartbeat_interval, 0)
        session = cluster.connect()
        # let two heatbeat intervals pass (first one had startup messages in it)
        time.sleep(2 * Cluster.idle_heartbeat_interval)
        connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()]
        # assert not idle status (should never get reset because there is not heartbeat)
        self.assertFalse(any(c.is_idle for c in connections))
        cluster.shutdown()

    def test_pool_management(self):
        """Drive a representative mix of operations and assert pools quiesce."""
        # Ensure that in_flight and request_ids quiesce after cluster operations
        cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0)  # no idle heartbeat here, pool management is tested in test_idle_heartbeat
        session = cluster.connect()
        session2 = cluster.connect()
        # prepare
        p = session.prepare("SELECT * FROM system.local WHERE key=?")
        self.assertTrue(session.execute(p, ('local',)))
        # simple
        self.assertTrue(session.execute("SELECT * FROM system.local WHERE key='local'"))
        # set keyspace
        session.set_keyspace('system')
        session.set_keyspace('system_traces')
        # use keyspace
        session.execute('USE system')
        session.execute('USE system_traces')
        # refresh schema
        cluster.refresh_schema_metadata()
        cluster.refresh_schema_metadata(max_schema_agreement_wait=0)
        # submit schema refresh
        future = cluster.submit_schema_refresh()
        future.result()
        assert_quiescent_pool_state(self, cluster)
        cluster.shutdown()
|
kracekumar/python-driver
|
tests/integration/standard/test_cluster.py
|
Python
|
apache-2.0
| 23,432
|
'''
Check the performance counters from SQL Server
See http://blogs.msdn.com/b/psssql/archive/2013/09/23/interpreting-the-counter-values-from-sys-dm-os-performance-counters.aspx
for information on how to report the metrics available in the sys.dm_os_performance_counters table
'''
# stdlib
import traceback
# 3rd party
import adodbapi
# project
from checks import AgentCheck
ALL_INSTANCES = 'ALL'
VALID_METRIC_TYPES = ('gauge', 'rate', 'histogram')
# Constant for SQLServer cntr_type
PERF_LARGE_RAW_BASE = 1073939712
PERF_RAW_LARGE_FRACTION = 537003264
PERF_AVERAGE_BULK = 1073874176
PERF_COUNTER_BULK_COUNT = 272696576
PERF_COUNTER_LARGE_RAWCOUNT = 65792
# Queries
COUNTER_TYPE_QUERY = '''select distinct cntr_type
from sys.dm_os_performance_counters
where counter_name = ?;'''
BASE_NAME_QUERY = '''select distinct counter_name
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?
or counter_name=?) and cntr_type=%s;''' % PERF_LARGE_RAW_BASE
INSTANCES_QUERY = '''select instance_name
from sys.dm_os_performance_counters
where counter_name=? and instance_name!='_Total';'''
VALUE_AND_BASE_QUERY = '''select cntr_value
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?)
and instance_name=?
order by cntr_type;'''
class SQLConnectionError(Exception):
    """Raised when a SQL Server instance cannot be reached or logged into."""
class SQLServer(AgentCheck):
    """Datadog Agent check that reports SQL Server performance counters read
    from sys.dm_os_performance_counters over an ADO (adodbapi) connection.

    NOTE: this module uses Python 2 syntax (``except Exception, e``)."""

    SOURCE_TYPE_NAME = 'sql server'
    SERVICE_CHECK_NAME = 'sqlserver.can_connect'
    # FIXME: 6.x, set default to 5s (like every check)
    DEFAULT_COMMAND_TIMEOUT = 30
    # (datadog metric name, SQL Server counter_name, instance_name filter);
    # the trailing comment on each row records the counter's cntr_type.
    METRICS = [
        ('sqlserver.buffer.cache_hit_ratio', 'Buffer cache hit ratio', ''), # RAW_LARGE_FRACTION
        ('sqlserver.buffer.page_life_expectancy', 'Page life expectancy', ''), # LARGE_RAWCOUNT
        ('sqlserver.stats.batch_requests', 'Batch Requests/sec', ''), # BULK_COUNT
        ('sqlserver.stats.sql_compilations', 'SQL Compilations/sec', ''), # BULK_COUNT
        ('sqlserver.stats.sql_recompilations', 'SQL Re-Compilations/sec', ''), # BULK_COUNT
        ('sqlserver.stats.connections', 'User Connections', ''), # LARGE_RAWCOUNT
        ('sqlserver.stats.lock_waits', 'Lock Waits/sec', '_Total'), # BULK_COUNT
        ('sqlserver.access.page_splits', 'Page Splits/sec', ''), # BULK_COUNT
        ('sqlserver.stats.procs_blocked', 'Processes blocked', ''), # LARGE_RAWCOUNT
        ('sqlserver.buffer.checkpoint_pages', 'Checkpoint pages/sec', '') # BULK_COUNT
    ]

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Cache connections
        self.connections = {}
        self.failed_connections = {}
        self.instances_metrics = {}
        # Pre-process the list of metrics to collect
        custom_metrics = init_config.get('custom_metrics', [])
        for instance in instances:
            try:
                self._make_metric_list_to_collect(instance, custom_metrics)
            except SQLConnectionError:
                # Unreachable at startup: skip; get_cursor() re-raises the
                # cached failure on each subsequent check run.
                self.log.exception("Skipping SQL Server instance")
                continue

    def _make_metric_list_to_collect(self, instance, custom_metrics):
        """
        Store the list of metrics to collect by instance_key.
        Will also create and cache cursors to query the db.
        """
        metrics_to_collect = []
        for name, counter_name, instance_name in self.METRICS:
            try:
                sql_type, base_name = self.get_sql_type(instance, counter_name)
                metrics_to_collect.append(self.typed_metric(name,
                                                            counter_name,
                                                            base_name,
                                                            None,
                                                            sql_type,
                                                            instance_name,
                                                            None))
            except SQLConnectionError:
                # Connection problems abort the whole instance setup.
                raise
            except Exception:
                self.log.warning("Can't load the metric %s, ignoring", name, exc_info=True)
                continue
        # Load any custom metrics from conf.d/sqlserver.yaml
        for row in custom_metrics:
            user_type = row.get('type')
            if user_type is not None and user_type not in VALID_METRIC_TYPES:
                self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
            sql_type = None
            try:
                if user_type is None:
                    # No explicit type: derive it from the counter's cntr_type.
                    sql_type, base_name = self.get_sql_type(instance, row['counter_name'])
            except Exception:
                self.log.warning("Can't load the metric %s, ignoring", row['name'], exc_info=True)
                continue
            metrics_to_collect.append(self.typed_metric(row['name'],
                                                        row['counter_name'],
                                                        base_name,
                                                        user_type,
                                                        sql_type,
                                                        row.get('instance_name', ''),
                                                        row.get('tag_by', None)))
        instance_key = self._conn_key(instance)
        self.instances_metrics[instance_key] = metrics_to_collect

    def typed_metric(self, dd_name, sql_name, base_name, user_type, sql_type, instance_name, tag_by):
        '''
        Create the appropriate SqlServerMetric object, each implementing its method to
        fetch the metrics properly.
        If a `type` was specified in the config, it is used to report the value
        directly fetched from SQLServer. Otherwise, it is decided based on the
        sql_type, according to microsoft's documentation.
        '''
        # cntr_type -> (reporting function, fetcher class)
        metric_type_mapping = {
            PERF_COUNTER_BULK_COUNT: (self.rate, SqlSimpleMetric),
            PERF_COUNTER_LARGE_RAWCOUNT: (self.gauge, SqlSimpleMetric),
            PERF_LARGE_RAW_BASE: (self.gauge, SqlSimpleMetric),
            PERF_RAW_LARGE_FRACTION: (self.gauge, SqlFractionMetric),
            PERF_AVERAGE_BULK: (self.gauge, SqlIncrFractionMetric)
        }
        if user_type is not None:
            # user type overrides any other value
            metric_type = getattr(self, user_type)
            cls = SqlSimpleMetric
        else:
            metric_type, cls = metric_type_mapping[sql_type]
        return cls(dd_name, sql_name, base_name,
                   metric_type, instance_name, tag_by, self.log)

    def _get_access_info(self, instance):
        ''' Convenience method to extract info from instance
        '''
        host = instance.get('host', '127.0.0.1,1433')
        username = instance.get('username')
        password = instance.get('password')
        database = instance.get('database', 'master')
        return host, username, password, database

    def _conn_key(self, instance):
        ''' Return a key to use for the connection cache
        '''
        host, username, password, database = self._get_access_info(instance)
        return '%s:%s:%s:%s' % (host, username, password, database)

    def _conn_string(self, instance):
        ''' Return a connection string to use with adodbapi
        '''
        host, username, password, database = self._get_access_info(instance)
        conn_str = 'Provider=SQLOLEDB;Data Source=%s;Initial Catalog=%s;' \
            % (host, database)
        if username:
            conn_str += 'User ID=%s;' % (username)
        if password:
            conn_str += 'Password=%s;' % (password)
        if not username and not password:
            # No credentials: fall back to Windows integrated authentication.
            conn_str += 'Integrated Security=SSPI;'
        return conn_str

    def get_cursor(self, instance, cache_failure=False):
        '''
        Return a cursor to execute query against the db
        Cursor are cached in the self.connections dict
        '''
        conn_key = self._conn_key(instance)
        host = instance.get('host')
        database = instance.get('database')
        service_check_tags = [
            'host:%s' % host,
            'db:%s' % database
        ]
        if conn_key in self.failed_connections:
            # Fail fast with the cached error instead of re-timing-out.
            raise self.failed_connections[conn_key]
        if conn_key not in self.connections:
            try:
                conn = adodbapi.connect(
                    self._conn_string(instance),
                    timeout=int(instance.get('command_timeout',
                                             self.DEFAULT_COMMAND_TIMEOUT))
                )
                self.connections[conn_key] = conn
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
            except Exception:
                cx = "%s - %s" % (host, database)
                message = "Unable to connect to SQL Server for instance %s." % cx
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                                   tags=service_check_tags, message=message)
                password = instance.get('password')
                tracebk = traceback.format_exc()
                if password is not None:
                    # Scrub the password out of the logged traceback.
                    tracebk = tracebk.replace(password, "*" * 6)
                # Avoid multiple connection timeouts (too slow):
                # save the exception, re-raise it when needed
                cxn_failure_exp = SQLConnectionError("%s \n %s" % (message, tracebk))
                if cache_failure:
                    self.failed_connections[conn_key] = cxn_failure_exp
                raise cxn_failure_exp
        conn = self.connections[conn_key]
        cursor = conn.cursor()
        return cursor

    def get_sql_type(self, instance, counter_name):
        '''
        Return the type of the performance counter so that we can report it to
        Datadog correctly
        If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
        PERF_AVERAGE_BULK), the name of the base counter will also be returned
        '''
        cursor = self.get_cursor(instance, cache_failure=True)
        cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
        (sql_type,) = cursor.fetchone()
        if sql_type == PERF_LARGE_RAW_BASE:
            self.log.warning("Metric %s is of type Base and shouldn't be reported this way",
                             counter_name)
        base_name = None
        if sql_type in [PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION]:
            # This is an ugly hack. For certains type of metric (PERF_RAW_LARGE_FRACTION
            # and PERF_AVERAGE_BULK), we need two metrics: the metrics specified and
            # a base metrics to get the ratio. There is no unique schema so we generate
            # the possible candidates and we look at which ones exist in the db.
            candidates = (counter_name + " base",
                          counter_name.replace("(ms)", "base"),
                          counter_name.replace("Avg ", "") + " base"
                          )
            try:
                cursor.execute(BASE_NAME_QUERY, candidates)
                base_name = cursor.fetchone().counter_name.strip()
                self.log.debug("Got base metric: %s for metric: %s", base_name, counter_name)
            except Exception, e:
                self.log.warning("Could not get counter_name of base for metric: %s", e)
        self.close_cursor(cursor)
        return sql_type, base_name

    def check(self, instance):
        """
        Fetch the metrics from the sys.dm_os_performance_counters table
        """
        cursor = self.get_cursor(instance)
        custom_tags = instance.get('tags', [])
        instance_key = self._conn_key(instance)
        metrics_to_collect = self.instances_metrics[instance_key]
        for metric in metrics_to_collect:
            try:
                metric.fetch_metric(cursor, custom_tags)
            except Exception, e:
                # One broken counter must not abort the rest of the run.
                self.log.warning("Could not fetch metric %s: %s" % (metric.datadog_name, e))
        self.close_cursor(cursor)

    def close_cursor(self, cursor):
        """
        We close the cursor explicitly b/c we had proven memory leaks
        We handle any exception from closing, although according to the doc:
        "in adodbapi, it is NOT an error to re-close a closed cursor"
        """
        try:
            cursor.close()
        except Exception as e:
            self.log.warning("Could not close adodbapi cursor\n{0}".format(e))
class SqlServerMetric(object):
    '''General class for common methods, should never be instantiated directly
    '''
    def __init__(self, datadog_name, sql_name, base_name,
                 report_function, instance, tag_by, logger):
        # Name under which the value is submitted to Datadog.
        self.datadog_name = datadog_name
        # counter_name as it appears in sys.dm_os_performance_counters.
        self.sql_name = sql_name
        # Matching "base" counter name for fraction metrics (may be None).
        self.base_name = base_name
        # Bound AgentCheck reporting method (gauge / rate / histogram).
        self.report_function = report_function
        # Instance-name filter, or ALL_INSTANCES to fan out per instance.
        self.instance = instance
        # Tag key used when reporting one point per instance.
        self.tag_by = tag_by
        # Lazily-resolved instance list (used by fraction metrics).
        self.instances = None
        # Previous (value, base) pairs, used by incremental fraction metrics.
        self.past_values = {}
        self.log = logger

    def fetch_metric(self, cursor, tags):
        '''Fetch this metric via `cursor` and report it; subclasses implement.

        BUG FIX: the abstract hook was originally declared as `fetch_metrics`
        (plural), while every subclass and the check's caller use
        `fetch_metric`, so the NotImplementedError guard could never fire.
        It is now declared under the name callers actually invoke.
        '''
        raise NotImplementedError

    # Backward-compatible alias for the original (misspelled) hook name.
    fetch_metrics = fetch_metric
class SqlSimpleMetric(SqlServerMetric):
    '''Metric reported directly from a single cntr_value (BULK_COUNT /
    LARGE_RAWCOUNT counters, or user-typed custom metrics).'''

    def fetch_metric(self, cursor, tags):
        # Base query; an instance_name predicate is appended below.
        query_base = '''
                    select instance_name, cntr_value
                    from sys.dm_os_performance_counters
                    where counter_name = ?
                    '''
        if self.instance == ALL_INSTANCES:
            # One point per instance, excluding the aggregate _Total row.
            query = query_base + "and instance_name!= '_Total'"
            query_content = (self.sql_name,)
        else:
            query = query_base + "and instance_name=?"
            query_content = (self.sql_name, self.instance)
        cursor.execute(query, query_content)
        rows = cursor.fetchall()
        for instance_name, cntr_value in rows:
            metric_tags = tags
            if self.instance == ALL_INSTANCES:
                # Tag each point with the instance it came from.
                metric_tags = metric_tags + ['%s:%s' % (self.tag_by, instance_name.strip())]
            self.report_function(self.datadog_name, cntr_value,
                                 tags=metric_tags)
class SqlFractionMetric(SqlServerMetric):
    '''Metric computed as value/base from a matched pair of counters
    (RAW_LARGE_FRACTION counters).'''

    def set_instances(self, cursor):
        # Resolve and cache the concrete instance list once.
        if self.instance == ALL_INSTANCES:
            cursor.execute(INSTANCES_QUERY, (self.sql_name,))
            self.instances = [row.instance_name for row in cursor.fetchall()]
        else:
            self.instances = [self.instance]

    def fetch_metric(self, cursor, tags):
        '''
        Because we need to query the metrics by matching pairs, we can't query
        all of them together without having to perform some matching based on
        the name afterwards so instead we query instance by instance.
        We cache the list of instance so that we don't have to look it up every time
        '''
        if self.instances is None:
            self.set_instances(cursor)
        for instance in self.instances:
            cursor.execute(VALUE_AND_BASE_QUERY, (self.sql_name, self.base_name, instance))
            rows = cursor.fetchall()
            if len(rows) != 2:
                # A fraction needs exactly one value row and one base row.
                self.log.warning("Missing counter to compute fraction for "
                                 "metric %s instance %s, skipping", self.sql_name, instance)
                continue
            # adodbapi result rows support 2-D indexing (rows[row, column]);
            # ordering by cntr_type places the value row before its base row.
            value = rows[0, "cntr_value"]
            base = rows[1, "cntr_value"]
            metric_tags = tags
            if self.instance == ALL_INSTANCES:
                metric_tags = metric_tags + ['%s:%s' % (self.tag_by, instance.strip())]
            self.report_fraction(value, base, metric_tags)

    def report_fraction(self, value, base, metric_tags):
        # Guard against a zero base rather than reporting inf/NaN.
        try:
            result = value / float(base)
            self.report_function(self.datadog_name, result, tags=metric_tags)
        except ZeroDivisionError:
            self.log.debug("Base value is 0, won't report metric %s for tags %s",
                           self.datadog_name, metric_tags)
class SqlIncrFractionMetric(SqlFractionMetric):
    """Fraction of the deltas between two consecutive samples."""

    def report_fraction(self, value, base, metric_tags):
        # Key the history on the tag set so distinct instances don't mix.
        key = "key:" + "".join(metric_tags)
        previous = self.past_values.get(key)
        if previous is not None:
            old_value, old_base = previous
            diff_base = base - old_base
            if diff_base == 0:
                # No base movement since last sample: nothing to report.
                self.log.debug("Base value is 0, won't report metric %s for tags %s",
                               self.datadog_name, metric_tags)
            else:
                result = (value - old_value) / float(diff_base)
                self.report_function(self.datadog_name, result, tags=metric_tags)
        # Always remember the latest sample, even on the first call.
        self.past_values[key] = (value, base)
|
oneandoneis2/dd-agent
|
checks.d/sqlserver.py
|
Python
|
bsd-3-clause
| 17,085
|
#-*- coding:utf-8 -*-
class BasicTemplate(object):
    """Render a stand-alone python module that embeds files as string data.

    The generated module is assembled from three sections: imports, a
    ``data`` dict with one chunked assignment per file, and accessor
    functions (``get_data``/``get_decoded``/``list_files``).
    """

    def __init__(self):
        self._part_files = []
        self._part_imports = []
        self._part_func = []

    def part_files(self, files):
        """Queue the ``data`` dict plus one assignment per (name, data) item."""
        self._part_files.append("data = {}")
        for name, data in files.items():
            self._part_files.append(self._prepare_file_var(name, data))

    def _prepare_file_var(self, name, data):
        # Emit the payload as 70-char string literals glued together by
        # implicit string concatenation across escaped line breaks.
        chunks = ["data['{!s}'] = \"\" ".format(name)]
        for offset in range(0, len(data), 70):
            chunks.append(' "{!s}" '.format(data[offset:offset + 70]))
        return "\\\n".join(chunks)

    def part_imports(self):
        self._part_imports.append("import base64")

    def part_functions(self):
        self._part_func = [
            "def get_data(name):\n return data[name]",
            "def get_decoded(name):\n return base64.b64decode(data[name])",
            "def list_files():\n return list(data.keys())"
        ]

    def render(self, files):
        """Return the complete generated module source as a single string."""
        self.part_imports()
        self.part_functions()
        self.part_files(files)
        header = "# generated with file2py\ntemplate = '{!s}'".format(
            type(self).__name__)
        sections = [header,
                    "\n".join(self._part_imports),
                    "\n\n".join(self._part_files),
                    "\n\n\n".join(self._part_func)]
        return "\n\n\n".join(sections)
# Below are example templates for qt toolkit
class QtTemplate(BasicTemplate):
    """Qt-flavoured template: imports try PySide first, then fall back to PyQt4."""
    def part_imports(self):
        super(QtTemplate, self).part_imports()
        self._part_imports.append("""
try:
    from PySide.QtCore import QByteArray
    from PySide.QtGui import QIcon, QPixmap, QImage
except:
    from PyQt4.QtCore import QByteArray
    from PyQt4.QtGui import QIcon, QPixmap, QImage
""")
    def part_functions(self):
        # Extend the basic accessors with Qt object factories; note the
        # implicit string-literal concatenation across list entries.
        super(QtTemplate, self).part_functions()
        self._part_func += [
            "def getAsQByteArray(n):\n"
            " return QByteArray.fromBase64(data[n])",
            "def getAsQPixmap(n):\n"
            " return QPixmap.fromImage(getAsQImage(n))",
            "def getAsQIcon(n):\n return QIcon(getAsQPixmap(n))",
            "def getAsQImage(n):\n"
            " return QImage.fromData(getAsQByteArray(n))"
        ]
class PySideTemplate(QtTemplate):
    """PySide-only variant: keeps QtTemplate's helpers, replaces the imports."""
    def part_imports(self):
        # Deliberately bypass QtTemplate.part_imports (its PySide/PyQt4
        # try/except fallback) and emit unconditional PySide imports.
        BasicTemplate.part_imports(self)
        self._part_imports.append("""
from PySide.QtCore import QByteArray
from PySide.QtGui import QIcon, QPixmap, QImage
""")
class PyQtTemplate(QtTemplate):
    """PyQt4-only variant: keeps QtTemplate's helpers, replaces the imports.

    Inherits QtTemplate (not BasicTemplate) so the rendered module also gets
    the getAsQByteArray/getAsQPixmap/getAsQIcon/getAsQImage helpers, mirroring
    PySideTemplate; only the import section differs.
    """
    def part_imports(self):
        # Deliberately bypass QtTemplate.part_imports (its PySide/PyQt4
        # try/except fallback) and emit unconditional PyQt4 imports.
        BasicTemplate.part_imports(self)
        self._part_imports.append("""
from PyQt4.QtCore import QByteArray
from PyQt4.QtGui import QIcon, QPixmap, QImage
""")
# Registry of template classes keyed by normalized name: lowercase with any
# 'template' suffix removed (so 'QtTemplate' and 'qt' resolve identically).
lookup_map = {'basic': BasicTemplate,
              'qt': QtTemplate,
              'pyside': PySideTemplate,
              'pyqt': PyQtTemplate}
def templateByName(name):
    """Return a new template instance for *name*.

    Matching is case-insensitive and ignores whitespace and a 'template'
    substring, e.g. 'QtTemplate' resolves to 'qt'. Raises when unknown.
    """
    normalized = name.strip().lower().replace('template', '')
    if normalized in lookup_map:
        return lookup_map[normalized]()
    raise Exception('Given template does not exist.')
|
kAlmAcetA/file2py
|
file2py/templates.py
|
Python
|
mit
| 3,156
|
from symbol.builder import FasterRcnn as Detector
from symbol.builder import ResNetV1bFPN as Backbone
from symbol.builder import add_anchor_to_arg
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRpnHead as RpnHead
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
    """Build the Faster R-CNN (ResNet-152 v1b + FPN, 2x schedule) config.

    Returns the parameter namespaces, network symbols, data transforms and
    metrics consumed by the simpledet train/test drivers. Most "classes"
    below are plain namespaces, never instantiated.
    """
    class General:
        log_frequency = 10
        name = __name__.rsplit("/")[-1].rsplit(".")[-1]
        # 2 images per GPU for training, 1 for inference.
        batch_image = 2 if is_train else 1
        fp16 = False
        loader_worker = 8
    class KvstoreParam:
        kvstore = "nccl"
        batch_image = General.batch_image
        gpus = [0, 1, 2, 3, 4, 5, 6, 7]
        fp16 = General.fp16
    class NormalizeParam:
        # Frozen batch norm: running statistics are fixed during fine-tuning.
        normalizer = normalizer_factory(type="fixbn")
    class BackboneParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        depth = 152
    class NeckParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
    class RpnParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        batch_image = General.batch_image
        nnvm_proposal = True
        # False: anchor targets are produced on the data-loading side (see
        # PyramidAnchorTarget2D appended to `transform` below).
        nnvm_rpn_target = False
        class anchor_generate:
            # One scale per FPN level; stride matches the 5 pyramid levels.
            scale = (8,)
            ratio = (0.5, 1.0, 2.0)
            stride = (4, 8, 16, 32, 64)
            max_side = 1400
        class anchor_assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0
            image_anchor = 256
            pos_fraction = 0.5
        class head:
            conv_channel = 256
            mean = (0, 0, 0, 0)
            std = (1, 1, 1, 1)
        class proposal:
            pre_nms_top_n = 2000 if is_train else 1000
            post_nms_top_n = 2000 if is_train else 1000
            nms_thr = 0.7
            min_bbox_side = 0
        class subsample_proposal:
            proposal_wo_gt = False
            image_roi = 512
            fg_fraction = 0.25
            fg_thr = 0.5
            bg_thr_hi = 0.5
            bg_thr_lo = 0.0
        class bbox_target:
            # 80 COCO classes + background.
            num_reg_class = 81
            class_agnostic = False
            weight = (1.0, 1.0, 1.0, 1.0)
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)
    class BboxParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        num_class = 1 + 80
        image_roi = 512
        batch_image = General.batch_image
        class regress_target:
            class_agnostic = False
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)
    class RoiParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        out_size = 7
        stride = (4, 8, 16, 32)
        roi_canonical_scale = 224
        roi_canonical_level = 4
    class DatasetParam:
        if is_train:
            image_set = ("coco_train2017", )
        else:
            image_set = ("coco_val2017", )
    # Assemble the detector from its parts.
    backbone = Backbone(BackboneParam)
    neck = Neck(NeckParam)
    rpn_head = RpnHead(RpnParam)
    roi_extractor = RoiExtractor(RoiParam)
    bbox_head = BboxHead(BboxParam)
    detector = Detector()
    if is_train:
        train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
        rpn_test_sym = None
        test_sym = None
    else:
        train_sym = None
        rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head)
        test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
    class ModelParam:
        train_symbol = train_sym
        test_symbol = test_sym
        rpn_test_symbol = rpn_test_sym
        from_scratch = False
        random = True
        memonger = False
        memonger_until = "stage3_unit21_plus"
        class pretrain:
            prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
            epoch = 0
            fixed_param = ["conv0", "stage1", "gamma", "beta"]
        def process_weight(sym, arg, aux):
            # Pre-bake one anchor array per FPN stride into the arg dict.
            for stride in RpnParam.anchor_generate.stride:
                add_anchor_to_arg(
                    sym, arg, aux, RpnParam.anchor_generate.max_side,
                    stride, RpnParam.anchor_generate.scale,
                    RpnParam.anchor_generate.ratio)
    class OptimizeParam:
        class optimizer:
            type = "sgd"
            # Linear-scaling rule: base lr 0.01 per 8 images, scaled by the
            # actual total batch (gpus * images per gpu).
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
            momentum = 0.9
            wd = 0.0001
            clip_gradient = None
        class schedule:
            begin_epoch = 0
            end_epoch = 12
            # Iteration counts are expressed for a reference batch of 16 and
            # rescaled to the configured batch size.
            lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
                       160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
        class warmup:
            type = "gradual"
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
            iter = 500
    class TestParam:
        min_det_score = 0.05
        max_det_per_image = 100
        process_roidb = lambda x: x
        process_output = lambda x, y: x
        class model:
            prefix = "experiments/{}/checkpoint".format(General.name)
            epoch = OptimizeParam.schedule.end_epoch
        class nms:
            type = "nms"
            thr = 0.5
        class coco:
            annotation = "data/coco/annotations/instances_minival2014.json"
    # data processing
    class NormParam:
        # ImageNet mean/std scaled from [0,1] to [0,255] pixel range.
        mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
        std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
    # data processing
    class ResizeParam:
        short = 800
        long = 1333
    class PadParam:
        short = 800
        long = 1333
        max_num_gt = 100
    class AnchorTarget2DParam:
        def __init__(self):
            self.generate = self._generate()
        class _generate:
            def __init__(self):
                self.stride = (4, 8, 16, 32, 64)
                self.short = (200, 100, 50, 25, 13)
                self.long = (334, 167, 84, 42, 21)
            # NOTE(review): `(8)` is the int 8, not a 1-tuple — compare
            # RpnParam.anchor_generate.scale = (8,) above. Confirm that
            # PyramidAnchorTarget2D accepts a scalar here.
            scales = (8)
            aspects = (0.5, 1.0, 2.0)
        class assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0
        class sample:
            image_anchor = 256
            pos_fraction = 0.5
    class RenameParam:
        mapping = dict(image="data")
    from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
        ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
        RenameRecord, Norm2DImage
    from models.FPN.input import PyramidAnchorTarget2D
    if is_train:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            Resize2DImageBbox(ResizeParam),
            Flip2DImageBbox(),
            Pad2DImageBbox(PadParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data"]
        label_name = ["gt_bbox", "im_info"]
        if not RpnParam.nnvm_rpn_target:
            # Anchor targets computed in the loader rather than in the graph.
            transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
            label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
    else:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            Resize2DImageBbox(ResizeParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data", "im_info", "im_id", "rec_id"]
        label_name = []
    import core.detection_metric as metric
    rpn_acc_metric = metric.AccWithIgnore(
        "RpnAcc",
        ["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    rpn_l1_metric = metric.L1(
        "RpnL1",
        ["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    # for bbox, the label is generated in network so it is an output
    box_acc_metric = metric.AccWithIgnore(
        "RcnnAcc",
        ["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    box_l1_metric = metric.L1(
        "RcnnL1",
        ["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]
    return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
        ModelParam, OptimizeParam, TestParam, \
        transform, data_name, label_name, metric_list
|
TuSimple/simpledet
|
config/resnet_v1b/faster_r152v1b_fpn_2x.py
|
Python
|
apache-2.0
| 8,677
|
import wx
import logging
import matplotlib.cm
import numpy as np
import properties
# Application-wide Properties singleton shared by the CPA panels.
p = properties.Properties.getInstance()
# Pixel width of the '[' / ']' clip-slider buttons; s_off re-centers a slider
# so its middle (rather than its left edge) sits on the value it marks.
slider_width = 30
s_off = slider_width/2
class ColorBarPanel(wx.Panel):
    '''
    A HORIZONTAL color bar and value axis drawn on a panel.
    '''
    def __init__(self, parent, map, local_extents=[0.,1.], global_extents=None,
                 ticks=5, **kwargs):
        '''
        map -- a colormap name from matplotlib.cm
        local_extents -- local min and max values of the measurement
        global_extents -- min and max values of the measurement
        ticks -- # of ticks to display values for on the bar
                 1 or 0 will draw no ticks
        labelformat -- a valid format string for the values displayed
                       on the value axis
        '''
        # NOTE(review): local_extents has a mutable default list; it is not
        # mutated in place below, but all default callers share one instance.
        wx.Panel.__init__(self, parent, **kwargs)
        self.ticks = ticks
        self.labelformat = '%.3f'
        # The '[' and ']' buttons act as the draggable clipping sliders.
        self.low_slider = wx.Button(self, -1, '[', pos=(0,-1), size=(slider_width,-1))
        self.high_slider = wx.Button(self, -1, ']', pos=(self.Size[0],-1), size=(slider_width,-1))
        self.ClearNotifyWindows()
        self.SetMap(map)
        # interval: current clip range; local/global extents: data ranges.
        self.interval = list(local_extents)
        self.local_extents = local_extents
        self.global_extents = list(local_extents)
        self.clipmode = 'rescale'
        self.low_slider.SetToolTipString('')
        self.low_slider.GetToolTip().Enable(True)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.low_slider.Bind(wx.EVT_LEFT_DOWN, self.OnClipSliderLeftDown)
        self.low_slider.Bind(wx.EVT_MOTION, self.OnClipSliderMotion)
        self.high_slider.Bind(wx.EVT_LEFT_DOWN, self.OnClipSliderLeftDown)
        self.high_slider.Bind(wx.EVT_MOTION, self.OnClipSliderMotion)
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
        self.Bind(wx.EVT_SIZE, self.OnResize)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
    def OnLeftDown(self, evt):
        '''Jump the nearest slider to the clicked x position.'''
        # Get the slider closest to the click point.
        if abs(self.low_slider.GetPositionTuple()[0] - evt.GetX()) < abs(self.high_slider.GetPositionTuple()[0] - evt.GetX()):
            self.cur_slider = self.low_slider
        else:
            self.cur_slider = self.high_slider
        self.cur_slider.SetPosition((evt.GetX() - s_off, -1))
        self.xo = 0
        self.UpdateInterval()
    def OnMotion(self, evt):
        '''Drag the slider selected in OnLeftDown along with the mouse.'''
        if not evt.Dragging() or not evt.LeftIsDown():
            return
        self.cur_slider.SetPosition((evt.GetX() - s_off, -1))
        self.UpdateInterval()
    def OnClipSliderLeftDown(self, evt):
        # Remember which slider button was grabbed and the grab offset.
        self.cur_slider = evt.EventObject
        self.xo = evt.GetX()
    def OnClipSliderMotion(self, evt):
        '''Drag a slider button itself (event coords are button-relative).'''
        slider = evt.EventObject
        if not evt.Dragging() or not evt.LeftIsDown():
            return
        slider.SetPosition((slider.GetPositionTuple()[0] + evt.GetX() - self.xo - s_off, -1))
        self.xo = 0
        self.UpdateInterval()
    def ClearNotifyWindows(self):
        # Windows notified (via SetClipInterval) whenever the clip changes.
        self.notify_windows = []
    def AddNotifyWindow(self, win):
        self.notify_windows += [win]
    def ResetInterval(self):
        ''' Sets clip interval to the extents of the colorbar. '''
        self.interval = list(self.global_extents)
        self.low_slider.SetPosition((0-s_off,-1))
        self.high_slider.SetPosition((self.Size[0]-s_off,-1))
        for win in self.notify_windows:
            win.SetClipInterval(self.GetLocalInterval(), self.local_extents, self.clipmode)
        self.Refresh()
    def UpdateInterval(self):
        ''' Calculates the interval values w.r.t. the current extents
        and clipping slider positions. '''
        range = self.global_extents[1]-self.global_extents[0]
        w = float(self.Size[0])
        if range > 0 and w > 0:
            # Map slider pixel positions back onto the global value range.
            self.interval[0] = self.global_extents[0] + ((self.low_slider.GetPositionTuple()[0] + s_off) / w * range)
            self.interval[1] = self.global_extents[0] + ((self.high_slider.GetPositionTuple()[0] + s_off) / w * range)
            self.low_slider.SetToolTipString(str(self.global_extents[0] + ((self.low_slider.GetPositionTuple()[0] + s_off) / w * range)))
            self.high_slider.SetToolTipString(str(self.global_extents[0] + ((self.high_slider.GetPositionTuple()[0] + s_off) / w * range)))
        else:
            self.interval = list(self.local_extents)
        self.UpdateLabelFormat()
        for win in self.notify_windows:
            win.SetClipInterval(self.GetLocalInterval(), self.local_extents, self.clipmode)
        self.Refresh()
    # TODO: To be added. Not sure how to treat intervals that are outside
    #       the current extents, do we resize the extents? This could get
    #       ugly and confusing.
    ## def SetInterval(self, interval):
    ##     ''' '''
    ##     self.interval = interval
    ##     self.low_slider.SetPosition((0-s_off,-1))
    ##     self.high_slider.SetPosition((self.Size[0]-s_off,-1))
    ##     for win in self.notify_windows:
    ##         win.SetClipInterval(self.GetInterval(), self.clipmode)
    ##     self.Refresh()
    def GetGlobalInterval(self):
        ''' Returns the interval clipped on the value axis. '''
        return self.interval
    def GetLocalInterval(self):
        ''' Returns the interval clipped on the local color bar.
        If either part is outside the local_extents, the extent is returned.
        '''
        return (max(self.interval[0], self.local_extents[0]),
                min(self.interval[1], self.local_extents[1]))
    def GetGlobalExtents(self):
        return self.global_extents
    def GetLocalExtents(self):
        return self.local_extents
    def GetClipMode(self):
        return self.clipmode
    def SetMap(self, map):
        ''' Sets the colormap that is displayed.
        map should be the string name of a colormap from matplotlib.cm'''
        self.cm = matplotlib.cm.get_cmap(map)
        self.Refresh()
    def SetLocalExtents(self, local_extents):
        #''' Sets the value axis min and max. Accepts a 2-tuple.'''
        # Grow the global extents if the new local range exceeds them.
        self.local_extents = local_extents
        if self.local_extents[0] < self.global_extents[0]:
            self.global_extents[0] = self.local_extents[0]
        if self.local_extents[1] > self.global_extents[1]:
            self.global_extents[1] = self.local_extents[1]
        self.UpdateInterval()
    def SetGlobalExtents(self, global_extents):
        self.global_extents = list(global_extents)
        self.UpdateInterval()
    def SetTicks(self, ticks):
        ''' Sets the number of tick marks displayed by the ColorBarPanel.
        1 or 0 will draw no ticks'''
        self.ticks = ticks
        self.Refresh()
    def UpdateLabelFormat(self):
        ''' Selects a number format based on the step value between ticks '''
        range = self.global_extents[1] - self.global_extents[0]
        step = range / self.ticks
        # Switch to scientific notation for very small tick steps.
        if 0 < step < 0.001:
            self.labelformat = '%.3e'
        else:
            self.labelformat = '%.3f'
    def OnToggleClipMode(self, evt):
        # Toggle between 'clip' and 'rescale' bracketing and notify listeners.
        if self.clipmode == 'clip':
            self.clipmode = 'rescale'
        else:
            self.clipmode = 'clip'
        for win in self.notify_windows:
            win.SetClipInterval(self.GetLocalInterval(), self.local_extents, self.clipmode)
        self.Refresh()
    def OnRightDown(self, evt):
        '''Show the colorbar context menu (reset / bracketing / gating).'''
        popupMenu = wx.Menu()
        popupMenu.SetTitle('Colorbar')
        reset = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Reset sliders'))
        self.Bind(wx.EVT_MENU, lambda(evt):self.ResetInterval(), reset)
        if self.clipmode == 'clip':
            bracket_mode = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Value bracketing: RESCALE'))
        else:
            bracket_mode = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Value bracketing: CLIP'))
        self.Bind(wx.EVT_MENU, self.OnToggleClipMode, bracket_mode)
        aggmethod = self.Parent.aggregationMethodsChoice.GetStringSelection().lower()
        src_table = self.Parent.sourceChoice.GetStringSelection()
        # Only offer gating for simple aggregations with a non-trivial interval.
        if (aggmethod in ['mean', 'median', 'min', 'max']
            and self.interval != self.global_extents):
            popupMenu.AppendSeparator()
            saveitem = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Create gate from interval'))
            self.Bind(wx.EVT_MENU, self.on_create_gate_from_interval, saveitem)
        self.PopupMenu(popupMenu, (evt.GetX(), evt.GetY()))
    def on_create_gate_from_interval(self, evt):
        self.create_gate_from_interval()
    def create_gate_from_interval(self):
        '''Prompt for a gate name and store the current interval as a gate.'''
        table = self.Parent.sourceChoice.GetStringSelection()
        colname = self.Parent.measurementsChoice.GetStringSelection()
        from guiutils import GateDialog
        dlg = GateDialog(self)
        if dlg.ShowModal() == wx.ID_OK:
            from sqltools import Gate, Gate1D
            p.gates[dlg.Value] = Gate([Gate1D((table, colname), self.interval)])
        dlg.Destroy()
    def OnResize(self, evt):
        # Keep the sliders at the same data values when the panel resizes.
        range = self.global_extents[1] - self.global_extents[0]
        if range == 0:
            self.low_slider.SetPosition((0,-1))
            # NOTE(review): uses Size[1] (height) here while every other
            # slider computation uses Size[0] (width) — confirm intended.
            self.high_slider.SetPosition((self.Size[1],-1))
        else:
            self.low_slider.SetPosition((self.Size[0] * (self.interval[0] - self.global_extents[0]) / range - s_off, -1))
            self.high_slider.SetPosition((self.Size[0] * (self.interval[1] - self.global_extents[0]) / range - s_off, -1))
        self.UpdateLabelFormat()
    def OnPaint(self, evt):
        '''Draw the color bar (local range) and the global value axis.'''
        w_global, h = self.Size
        if 0 in self.Size:
            return
        low_slider_pos = self.low_slider.GetPositionTuple()[0] + s_off
        high_slider_pos = self.high_slider.GetPositionTuple()[0] + s_off
        global_scale = self.global_extents[1] - self.global_extents[0] # value scale of the global data
        if global_scale == 0:
            local_x0 = 0
            local_x1 = w_global
            w_local = w_global
        else:
            local_x0 = (self.local_extents[0] - self.global_extents[0]) / global_scale * w_global # x pos (pixels) to start drawing the local color bar
            local_x1 = (self.local_extents[1] - self.global_extents[0]) / global_scale * w_global # x pos (pixels) to stop drawing the local color bar
            w_local = local_x1 - local_x0 # pixel width of the local color bar
        # w0/w1: widths (px) clipped off the low/high ends of the local bar.
        w0 = max(low_slider_pos, local_x0) - local_x0
        w1 = local_x1 - min(high_slider_pos, local_x1)
        # create array of values to be used for the color bar
        if self.clipmode=='rescale':
            a1 = np.zeros(w0)
            a2 = np.arange(abs(min(high_slider_pos, local_x1) - max(low_slider_pos, local_x0)), dtype=float) / (min(high_slider_pos, local_x1) - max(low_slider_pos, local_x0))
            a3 = np.ones(w1)
            a = np.hstack([a1,a2,a3])
        elif self.clipmode=='clip':
            a = np.arange(w_local, dtype=float) / w_local
            a[:w0] = 0.
            if w1>=1:
                a[-w1:] = 1.
        # draw the color bar
        dc = wx.PaintDC(self)
        dc.Clear()
        dc.BeginDrawing()
        dc.SetPen(wx.Pen((0,0,0)))
        dc.DrawLine(0, (h-14)/2, local_x0, (h-14)/2)
        for x, v in enumerate(a):
            # One 1-px colored line per colormap sample.
            color = np.array(self.cm(v)) * 255
            dc.SetPen(wx.Pen(color))
            dc.DrawLine(x+local_x0, 0, x+local_x0, h-14)
        dc.SetPen(wx.Pen((0,0,0)))
        dc.DrawLine(local_x1, (h-14)/2, w_global, (h-14)/2)
        # draw value axis
        if self.ticks <= 1:
            return
        font = dc.GetFont()
        font.SetPixelSize((6,12))
        dc.SetFont(font)
        for t in xrange(self.ticks):
            xpos = t * w_global/(self.ticks-1.)
            val = t * (self.global_extents[1]-self.global_extents[0]) / (self.ticks-1) + self.global_extents[0]
            dc.DrawLine(xpos,6,xpos,h-14)
            # Shift labels left progressively so the last one stays on-panel.
            textpos = xpos - xpos/w_global * dc.GetFullTextExtent(self.labelformat%(val), font)[0]
            dc.DrawText(self.labelformat%(val), textpos, h-13)
        dc.EndDrawing()
|
afraser/CellProfiler-Analyst
|
cpa/colorbarpanel.py
|
Python
|
gpl-2.0
| 12,477
|
import re
import datetime as dt
import pytz
import lxml.html
from billy.scrape.events import EventScraper, Event
from openstates.utils import LXMLMixin
# NY Assembly page listing upcoming public hearings.
url = "http://assembly.state.ny.us/leg/?sh=hear"
class NYEventScraper(EventScraper, LXMLMixin):
    # All hearing times on the page are US/Eastern.
    _tz = pytz.timezone('US/Eastern')
    jurisdiction = 'ny'
    def lower_parse_page(self, url, session):
        """Parse the Assembly public-hearing listing into saved Events.

        Each hearing is one <table class="pubhrgtbl">; rows mix header
        cells (date, committee title) with key/value <td> pairs that are
        collected into `metainf`.
        """
        page = self.lxmlize(url)
        tables = page.xpath("//table[@class='pubhrgtbl']")
        date = None
        ctty = None
        chamber = 'other'
        for table in tables:
            metainf = {}
            rows = table.xpath(".//tr")
            for row in rows:
                tds = row.xpath("./*")
                if len(tds) < 2:
                    continue
                key, value = tds
                if key.tag == 'th' and key.get("class") == 'hrgdate':
                    date = key.text_content()
                    date = re.sub(r"\s+", " ", date)
                    date = re.sub(".*POSTPONED NEW DATE", "", date).strip()
                # Due to the html structure this shouldn't be an elif
                # It needs to fire twice in the same loop iteration
                if value.tag == 'th' and value.get("class") == 'commtitle':
                    ctty = value.text_content()
                    chamber = 'other'
                    if "senate" in ctty.lower():
                        chamber = 'upper'
                    if "house" in ctty.lower():
                        chamber = 'lower'
                    if "joint" in ctty.lower():
                        chamber = 'joint'
                    # NOTE(review): `coms` is only bound when a committee
                    # header row exists; a table without one reuses the
                    # previous table's value or raises NameError below.
                    coms = value.xpath('.//div[contains(@class,"comm-txt")]/text()')
                elif key.tag == 'td':
                    key = key.text_content().strip()
                    value = value.text_content().strip()
                    # u'\x96' is presumably a mis-decoded en dash — confirm.
                    value = value.replace(u'\x96', '-')
                    value = re.sub(r"\s+", " ", value)
                    metainf[key] = value
            time = metainf['Time:']
            # Normalize AM/PM markers and month abbreviations so strptime
            # below can parse the assembled date string.
            repl = {
                "A.M.": "AM",
                "P.M.": "PM",
            }
            drepl = {
                "Sept": "Sep"
            }
            for r in repl:
                time = time.replace(r, repl[r])
            for r in drepl:
                date = date.replace(r, drepl[r])
            # Drop the end time of a "10:00 AM - 12:00 PM" style range.
            time = re.sub("-.*", "", time)
            time = time.strip()
            # The page omits the year; assume the current one.
            year = dt.datetime.now().year
            date = "%s %s %s" % (
                date,
                year,
                time
            )
            if "tbd" in date.lower():
                continue
            date = date.replace(' PLEASE NOTE NEW TIME', '')
            # Check if the event has been postponed.
            postponed = 'POSTPONED' in date
            if postponed:
                date = date.replace(' POSTPONED', '')
            date_formats = ["%B %d %Y %I:%M %p", "%b. %d %Y %I:%M %p"]
            datetime = None
            for fmt in date_formats:
                try:
                    datetime = dt.datetime.strptime(date, fmt)
                except ValueError:
                    pass
            # If the datetime can't be parsed, bail.
            # NOTE(review): this `return` aborts the whole page, skipping
            # any remaining tables; `continue` may have been intended.
            if datetime is None:
                return
            title_key = set(metainf) & set([
                'Public Hearing:', 'Summit:', 'Roundtable:',
                'Public Roundtable:', 'Public Meeting:', 'Public Forum:',
                'Meeting:'])
            assert len(title_key) == 1, "Couldn't determine event title."
            title_key = list(title_key).pop()
            title = metainf[title_key]
            title = re.sub(
                r"\*\*Click here to view public hearing notice\*\*",
                "",
                title
            )
            # If event was postponed, add a warning to the title.
            if postponed:
                title = 'POSTPONED: %s' % title
            event = Event(session, datetime, 'committee:meeting',
                          title,
                          location=metainf['Place:'],
                          contact=metainf['Contact:'])
            if 'Media Contact:' in metainf:
                event.update(media_contact=metainf['Media Contact:'])
            event.add_source(url)
            for com in coms:
                event.add_participant('host',
                                      com.strip(),
                                      'committee',
                                      chamber=self.classify_committee(com))
            self.save_event(event)
    def scrape(self, chamber, session):
        # Only the lower (Assembly) listing is scraped; the upper scraper
        # is disabled (see the commented-out implementation below).
        self.scrape_lower(chamber, session)
        #self.scrape_upper(chamber, session)
    def scrape_lower(self, chamber, session):
        # The hearing page is chamber-agnostic, so it is fetched once
        # under the 'other' chamber pass.
        if chamber == 'other':
            self.lower_parse_page(url, session)
    def classify_committee(self, name):
        """Map a committee name to 'upper'/'lower'/'joint'/'other'."""
        chamber = 'other'
        if "senate" in name.lower():
            chamber = 'upper'
        if "assembly" in name.lower():
            chamber = 'lower'
        if "joint" in name.lower():
            chamber = 'joint'
        return chamber
    # Disabled upstream; kept verbatim for reference.
    """
    def scrape_upper(self, chamber, session):
        if chamber != 'upper':
            return
        url = (r'http://open.nysenate.gov/legislation/2.0/search.json?'
               r'term=otype:meeting&pageSize=1000&pageIdx=%d')
        page_index = 1
        while True:
            resp = self.get(url % page_index)
            if not resp.json():
                break
            if not resp.json()['response']['results']:
                break
            for obj in resp.json()['response']['results']:
                event = self.upper_scrape_event(chamber, session, obj)
                if event:
                    self.save_event(event)
            page_index += 1
    def upper_scrape_event(self, chamber, session, obj):
        meeting = obj['data']['meeting']
        date = int(meeting['meetingDateTime'])
        date = dt.datetime.fromtimestamp(date / 1000)
        if str(date.year) not in session:
            return
        description = 'Committee Meeting: ' + meeting['committeeName']
        event = Event(session, date, 'committee:meeting',
                      description=description,
                      location=meeting['location'] or 'No location given.')
        event.add_source(obj['url'])
        event.add_participant('chair', meeting['committeeChair'],
                              'legislator', chamber='upper')
        event.add_participant('host', meeting['committeeName'],
                              'committee', chamber='upper')
        rgx = r'([a-z]+)(\d+)'
        for bill in meeting['bills']:
            raw_id = bill['senateBillNo']
            bill_id = ' '.join(re.search(rgx, raw_id, re.I).groups())
            event.add_related_bill(
                bill_id, type='bill',
                description=bill['summary'] or 'No description given.')
        return event
    """
|
cliftonmcintosh/openstates
|
openstates/ny/events.py
|
Python
|
gpl-3.0
| 6,983
|
import re
import time
from hashlib import sha1
import sickbeard
from sickbeard import logger
from sickbeard.exceptions import ex
from sickbeard.clients import http_error_code
from lib.bencode import bencode, bdecode
from lib import requests
class GenericClient(object):
def __init__(self, name, host=None, username=None, password=None):
self.name = name
self.username = sickbeard.TORRENT_USERNAME if username is None else username
self.password = sickbeard.TORRENT_PASSWORD if password is None else password
self.host = sickbeard.TORRENT_HOST if host is None else host
self.url = None
self.response = None
self.auth = None
self.last_time = time.time()
self.session = requests.session()
self.session.auth = (self.username, self.password)
def _request(self, method='get', params={}, data=None, files=None):
if time.time() > self.last_time + 1800 or not self.auth:
self.last_time = time.time()
self._get_auth()
logger.log(self.name + u': Requested a ' + method.upper() + ' connection to url '+ self.url + ' with Params= ' + str(params) + ' Data=' + str(data if data else 'None')[0:99] + ('...' if len(data if data else 'None') > 200 else ''), logger.DEBUG)
if not self.auth:
logger.log(self.name + u': Authentication Failed' , logger.ERROR)
return False
try:
self.response = self.session.__getattribute__(method)(self.url, params=params, data=data, files=files, timeout=10, verify=False)
except requests.exceptions.ConnectionError, e:
logger.log(self.name + u': Unable to connect ' + ex(e), logger.ERROR)
return False
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
logger.log(self.name + u': Invalid Host', logger.ERROR)
return False
except requests.exceptions.HTTPError, e:
logger.log(self.name + u': Invalid HTTP Request ' + ex(e), logger.ERROR)
return False
except requests.exceptions.Timeout, e:
logger.log(self.name + u': Connection Timeout ' + ex(e), logger.ERROR)
return False
except Exception, e:
logger.log(self.name + u': Unknown exception raised when send torrent to ' + self.name + ': ' + ex(e), logger.ERROR)
return False
if self.response.status_code == 401:
logger.log(self.name + u': Invalid Username or Password, check your config', logger.ERROR)
return False
if self.response.status_code in http_error_code.keys():
logger.log(self.name + u': ' + http_error_code[self.response.status_code], logger.DEBUG)
return False
logger.log(self.name + u': Response to '+ method.upper() + ' request is ' + self.response.text, logger.DEBUG)
return True
def _get_auth(self):
"""
This should be overridden and should return the auth_id needed for the client
"""
return None
def _add_torrent_uri(self, result):
"""
This should be overridden should return the True/False from the client
when a torrent is added via url (magnet or .torrent link)
"""
return False
def _add_torrent_file(self, result):
"""
This should be overridden should return the True/False from the client
when a torrent is added via result.content (only .torrent file)
"""
return False
def _set_torrent_label(self, result):
"""
This should be overridden should return the True/False from the client
when a torrent is set with label
"""
return True
def _set_torrent_ratio(self, result):
"""
This should be overridden should return the True/False from the client
when a torrent is set with ratio
"""
return True
def _set_torrent_priority(self, result):
"""
This should be overriden should return the True/False from the client
when a torrent is set with result.priority (-1 = low, 0 = normal, 1 = high)
"""
return True
def _set_torrent_path(self, torrent_path):
"""
This should be overridden should return the True/False from the client
when a torrent is set with path
"""
return True
def _set_torrent_pause(self, result):
"""
This should be overridden should return the True/False from the client
when a torrent is set with pause
"""
return True
def _get_torrent_hash(self, result):
if result.url.startswith('magnet'):
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0]
else:
info = bdecode(result.content)["info"]
torrent_hash = sha1(bencode(info)).hexdigest()
return torrent_hash
def sendTORRENT(self, result):
r_code = False
logger.log(u'Calling ' + self.name + ' Client', logger.DEBUG)
if not self._get_auth():
logger.log(self.name + u': Authentication Failed' , logger.ERROR)
return r_code
try:
result.hash = self._get_torrent_hash(result)
if result.url.startswith('magnet'):
r_code = self._add_torrent_uri(result)
else:
r_code = self._add_torrent_file(result)
if not r_code:
return False
if not self._set_torrent_pause(result):
logger.log(self.name + u': Unable to set the pause for Torrent', logger.ERROR)
if not self._set_torrent_label(result):
logger.log(self.name + u': Unable to set the label for Torrent', logger.ERROR)
if not self._set_torrent_ratio(result):
logger.log(self.name + u': Unable to set the ratio for Torrent', logger.ERROR)
if not self._set_torrent_path(result):
logger.log(self.name + u': Unable to set the path for Torrent', logger.ERROR)
if result.priority != 0 and not self._set_torrent_priority(result):
logger.log(self.name + u': Unable to set priority for Torrent', logger.ERROR)
except Exception, e:
logger.log(self.name + u': Failed Sending Torrent ', logger.ERROR)
logger.log(self.name + u': Exception raised when sending torrent: ' + ex(e), logger.DEBUG)
return r_code
return r_code
    def testAuthentication(self):
        """Check connectivity and credentials against the client.

        Returns a (success, message) tuple suitable for display in the
        configuration UI.
        """
        try:
            # verify=False: client web UIs commonly use self-signed certs.
            self.response = self.session.get(self.url, timeout=20, verify=False)
        except requests.exceptions.ConnectionError, e:
            return False, 'Error: ' + self.name + ' Connection Error'
        except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
            return False,'Error: Invalid ' + self.name + ' host'
        if self.response.status_code == 401:
            return False, 'Error: Invalid ' + self.name + ' Username or Password, check your config!'
        try:
            self._get_auth()
            if self.response.status_code == 200 and self.auth:
                return True, 'Success: Connected and Authenticated'
            else:
                return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
        except Exception:
            return False, 'Error: Unable to connect to '+ self.name
|
schumi2004/NOT_UPDATED_Sick-Beard-Dutch
|
sickbeard/clients/generic.py
|
Python
|
gpl-3.0
| 8,152
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import Sequence
from snapcraft.internal.os_release import OsRelease
from ._platform import _is_deb_based
from snapcraft.internal import errors
from typing import List
class RepoError(errors.SnapcraftError):
    """Base class for all repository-related snapcraft errors."""

    pass
class NoNativeBackendError(RepoError):
    """Raised when a native build is attempted on an unsupported distro."""

    fmt = "Native builds aren't supported on {distro}."

    def __init__(self):
        # Fall back to a generic label when the distro name is unavailable.
        try:
            distro_name = OsRelease().name()
        except errors.OsReleaseNameError:
            distro_name = "this system"
        super().__init__(distro=distro_name)
class CacheUpdateFailedError(RepoError):
    """Raised when the package cache could not be refreshed."""

    fmt = (
        "Failed to update the package cache: "
        "Some files could not be downloaded:{errors}"
        "Check that the sources on your host are configured correctly."
    )

    def __init__(self, errors: str) -> None:
        # Present one failed download per line; use a single space when
        # there is nothing to list so the message reads naturally.
        formatted = "\n\n{}\n\n".format(errors.replace(", ", "\n")) if errors else " "
        super().__init__(errors=formatted)
class BuildPackageNotFoundError(RepoError):
    """Raised when a package listed in 'build-packages' cannot be found."""

    fmt = "Could not find a required package in 'build-packages': {package}"

    def __init__(self, package):
        super().__init__(package=package)
class BuildPackagesNotInstalledError(RepoError):
    """Raised when one or more requested build packages failed to install."""

    fmt = "Could not install all requested build packages: {packages}"

    def __init__(self, *, packages: List[str]) -> None:
        super().__init__(packages=" ".join(packages))
class PackageFetchError(RepoError):
    """Raised when downloading a package from the archive fails."""

    fmt = "Package fetch error: {message}"

    def __init__(self, message: str) -> None:
        super().__init__(message=message)
class PackageBrokenError(RepoError):
    """Raised when a package has unmet dependencies."""

    fmt = "The package {package} has unmet dependencies: {deps}"

    def __init__(self, package: str, deps: List[str]) -> None:
        super().__init__(package=package, deps=" ".join(deps))
class PackageNotFoundError(RepoError):
    """Raised when a requested package does not exist in the archive."""

    @property
    def message(self):
        """Build the user-facing message, with a multiarch hint when apt-style
        'name:arch' packages are used on deb-based distros."""
        message = "The package {!r} was not found.".format(self.package_name)
        # If the package was multiarch, try to help.
        distro = OsRelease().id()
        if _is_deb_based(distro) and ":" in self.package_name:
            # maxsplit=1: the architecture is everything after the first
            # colon.  (The previous maxsplit=2 raised ValueError on names
            # containing more than two colon-separated parts.)
            (name, arch) = self.package_name.split(":", 1)
            if arch:
                message += (
                    "\nYou may need to add support for this architecture with "
                    "'dpkg --add-architecture {}'.".format(arch)
                )
        return message

    def __init__(self, package_name):
        self.package_name = package_name

    def __str__(self):
        return self.message
class UnpackError(RepoError):
    """Raised when unpacking a downloaded package fails."""

    fmt = "Error while provisioning {package!r}"

    def __init__(self, package):
        super().__init__(package=package)
class SnapUnavailableError(RepoError):
    """Raised when a snap does not exist or is not on the desired channel."""

    fmt = (
        "Failed to install or refresh a snap: {snap_name!r} does not exist "
        "or is not available on the desired channel {snap_channel!r}. "
        "Use `snap info {snap_name}` to get a list of channels the "
        "snap is available on."
    )

    def __init__(self, *, snap_name: str, snap_channel: str) -> None:
        super().__init__(snap_name=snap_name, snap_channel=snap_channel)
class SnapFindError(RepoError):
    """Raised when a required snap is not installed on the host."""

    fmt = (
        "Could not find the snap {snap_name!r} installed on this host.\n"
        "Install the snap and try again."
    )

    def __init__(self, *, snap_name):
        super().__init__(snap_name=snap_name)
class SnapInstallError(RepoError):
    """Raised when installing a snap from a channel fails."""

    fmt = "Error while installing snap {snap_name!r} from channel {snap_channel!r}"

    def __init__(self, *, snap_name, snap_channel):
        super().__init__(snap_name=snap_name, snap_channel=snap_channel)
class SnapDownloadError(RepoError):
    """Raised when downloading a snap from a channel fails."""

    fmt = "Error while downloading snap {snap_name!r} from channel {snap_channel!r}"

    def __init__(self, *, snap_name, snap_channel):
        super().__init__(snap_name=snap_name, snap_channel=snap_channel)
class SnapGetAssertionError(RepoError):
    """Raised when a snap assertion cannot be retrieved."""

    fmt = (
        "Error while retrieving assertion with parameters "
        "{assertion_params!r}\n"
        "Verify the assertion exists and try again."
    )

    def __init__(self, *, assertion_params: Sequence[str]) -> None:
        super().__init__(assertion_params=assertion_params)
class SnapRefreshError(RepoError):
    """Raised when refreshing a snap to a channel fails."""

    fmt = "Error while refreshing snap {snap_name!r} to channel {snap_channel!r}"

    def __init__(self, *, snap_name, snap_channel):
        super().__init__(snap_name=snap_name, snap_channel=snap_channel)
class SnapdConnectionError(RepoError):
    """Raised when the snapd socket cannot be reached."""

    fmt = (
        "Failed to get information for snap {snap_name!r}: "
        "could not connect to {url!r}."
    )

    def __init__(self, snap_name: str, url: str) -> None:
        super().__init__(snap_name=snap_name, url=url)
|
mvo5/snapcraft
|
snapcraft/internal/repo/errors.py
|
Python
|
gpl-3.0
| 5,436
|
# -*- coding: utf-8 -*-
import sys
import os
import regex
import codecs
from cleo import Command, InputArgument, InputOption
from src.models.user_model import UserModel
class CreateUserCommand(Command):
    """Cleo command that creates a user record in the users table."""

    name = 'user:add'
    description = 'Adds a user to the users table'

    arguments = [
        {
            'name': 'username',
            'description': 'Username',
            'required': True
        },
        {
            'name': 'email',
            'description': 'Email',
            'required': True
        },
        {
            'name': 'password',
            'description': 'Password',
            'required': True
        },
        {
            'name': 'project',
            'description': 'Project',
            'required': True
        },
        {
            'name': 'requests_limit',
            'description': 'Requests limit per month',
            'required': True
        },
        {
            'name': 'role',
            'description': 'User role (admin or user)',
            'required': True
        }
    ]

    def __init__(self):
        super(CreateUserCommand, self).__init__()

    def execute(self, i, o):
        """
        Executes the command.

        :type i: cleo.inputs.input.Input
        :type o: cleo.outputs.output.Output
        """
        # Read parameters
        user = UserModel()
        user.username = i.get_argument('username')
        # Fix: the required 'email' argument was declared above but never
        # stored on the record (assumes UserModel persists an 'email'
        # field - confirm against the model definition).
        user.email = i.get_argument('email')
        user.password = i.get_argument('password')
        user.project = i.get_argument('project')
        user.requests_limit = i.get_argument('requests_limit')
        user.requests_made = 0
        user.status = 'active'
        user.role = i.get_argument('role')
        user.save()
class LoginUserCommand(Command):
    """Cleo command that looks a user up by username and password."""

    name = 'user:login'
    description = 'Adds a user to the users table'

    arguments = [
        {
            'name': 'username',
            'description': 'Username',
            'required': True
        },
        {
            'name': 'password',
            'description': 'Password',
            'required': True
        }
    ]

    def __init__(self):
        super(LoginUserCommand, self).__init__()

    def execute(self, i, o):
        """
        Executes the command.

        :type i: cleo.inputs.input.Input
        :type o: cleo.outputs.output.Output
        """
        # Read parameters
        username = i.get_argument('username')
        password = i.get_argument('password')
        # NOTE(review): 'UserActiveRecord' is not defined or imported in this
        # file - this line raises NameError at runtime.  Presumably it should
        # use UserModel; confirm and fix against the model layer.
        UserActiveRecord.getByUsernameAndPassword(username, password)
|
clarinsi/reldi-api
|
tools/usertools/create_user.py
|
Python
|
gpl-3.0
| 2,544
|
# -*- coding: utf-8 -*-
__author__ = 'Robert Ancell <bob27@users.sourceforge.net>'
__license__ = 'GNU General Public License Version 2'
__copyright__ = 'Copyright 2005-2006 Robert Ancell'
class CECPProtocol:
    """CECP (xboard) protocol en/decoder.

    Feed raw engine output into registerIncomingData(); decoded events are
    delivered via the on*() callbacks, which subclasses override.
    """
    # Data being accumulated to be parsed (incoming bytes until a newline)
    __buffer = ''
    NEWLINE = '\n'
    # Known prefixes various engines use when announcing their move
    MOVE_PREFIXS = ['My move is: ', 'my move is ', 'move ']
    INVALID_MOVE_PREFIX = 'Illegal move: '
    RESIGN_PREFIX = 'tellics resign'
    DRAW_PREFIX = '1/2-1/2'

    def __init__(self):
        """Start the engine in xboard (CECP) mode."""
        # Go to simple interface mode
        self.onOutgoingData('xboard\n')

    # Methods to extend

    def onOutgoingData(self, data):
        """Called when there is data to send to the CECP engine.

        'data' is the data to give to the AI (string).
        """
        print 'OUT: ' + repr(data)

    def onUnknownLine(self, line):
        """Called when an unknown line is received from the CECP AI.

        'line' is the line that has not been decoded (string). There is
        no newline on the end of the string.
        """
        print 'Unknown CECP line: ' + line

    def onMove(self, move):
        """Called when the AI makes a move.

        'move' is the move the AI has decided to make (string).
        """
        print 'CECP move: ' + move

    def onIllegalMove(self, move):
        """Called when the AI rejects a move.

        'move' is the move the AI rejected (string).
        """
        print 'CECP illegal move: ' + move

    def onResign(self):
        """Called when the AI resigns"""
        print 'CECP AI resigns'

    def logText(self, text, style):
        # Override to capture a protocol transcript.
        print 'LOG: %s' % text

    # Public methods

    def sendSetSearchDepth(self, searchDepth):
        """Set the search depth for the AI.

        'searchDepth' is the number of moves to look ahead (integer).
        """
        # This is the CECP specified method
        self.onOutgoingData('sd %i\n' % int(searchDepth))
        # GNUchess uses this instead
        self.onOutgoingData('depth %i\n' % int(searchDepth))

    def sendSetPondering(self, aiPonders):
        """Enable/disable AI pondering.

        'aiPonders' is a flag to show if the AI thinks during opponent moves (True) or not (False).
        """
        if aiPonders:
            self.onOutgoingData('hard\n')
        else:
            self.onOutgoingData('easy\n')

    def sendMove(self, move):
        """Move for the current player.

        'move' is the move the current player has made (string).
        """
        self.onOutgoingData(move + '\n')

    def sendWait(self):
        """Stop the AI from automatically moving"""
        self.onOutgoingData('force\n')

    def sendUndo(self):
        """Undo the last move"""
        self.onOutgoingData('undo\n')

    def sendMovePrompt(self):
        """Get the AI to move for the current player"""
        self.onOutgoingData('go\n')

    def sendConventionalClock(self, moveCount, base, increment):
        """Configure the engine's conventional clock.

        'moveCount' ???
        'base' ??? (seconds)
        'increment' ??? (seconds)
        """
        self.onOutgoingData('level %d %d:%02d %d:%02d\n' % (moveCount, base / 60, base % 60, increment / 60, increment % 60))

    def sendQuit(self):
        """Quit the engine"""
        # Send 'quit' starting with a newline in case there are some characters already sent
        self.onOutgoingData('\nquit\n')

    def registerIncomingData(self, data):
        """Feed raw engine output; complete lines are parsed immediately."""
        self.__buffer += data
        self.__parseData()

    # Private methods

    def __parseData(self):
        # Consume complete lines from the buffer; keep any partial line.
        while True:
            index = self.__buffer.find(self.NEWLINE)
            if index < 0:
                return
            line = self.__buffer[:index]
            self.__buffer = self.__buffer[index+1:]
            self.__parseLine(line)

    def __parseLine(self, line):
        # Dispatch a single decoded line to the appropriate callback.
        for prefix in self.MOVE_PREFIXS:
            if line.startswith(prefix):
                move = line[len(prefix):]
                self.logText(line + '\n', 'move')
                self.onMove(move.strip())
                return
        if line.startswith(self.INVALID_MOVE_PREFIX):
            self.onIllegalMove(line[len(self.INVALID_MOVE_PREFIX):])
        elif line.startswith(self.RESIGN_PREFIX):
            self.onResign()
        elif line.startswith(self.DRAW_PREFIX):
            print 'AI calls a draw'
        else:
            self.onUnknownLine(line)
            self.logText(line + '\n', 'input')
class Connection(CECPProtocol):
    """High-level game connection to a CECP engine."""

    def __init__(self):
        """Initialise the underlying CECP protocol."""
        # Start protocol
        CECPProtocol.__init__(self)

    # Methods to extend

    def logText(self, text, style):
        """FIXME: define style
        """
        pass

    def onMove(self, move):
        """Called when the AI makes a move.

        'move' is the move the AI made (string).
        """
        print 'AI moves: ' + move

    # Public methods

    def start(self):
        # No-op; the protocol is started in __init__.
        pass

    def startGame(self):
        # No-op placeholder for game setup.
        pass

    def configure(self, options = []):
        """Send each option's raw value to the engine verbatim."""
        for option in options:
            self.onOutgoingData(option.value + '\n')

    def requestMove(self, whiteTime, blackTime, ownTime):
        """Request the AI moves for the current player"""
        # Set the clock
        if ownTime > 0:
            self.sendConventionalClock(0, ownTime / 1000, 0)
        # Prompt the AI to move
        self.sendMovePrompt()

    def undoMove(self):
        """Undo the last move made by this AI"""
        self.sendWait()
        self.sendUndo()

    def reportMove(self, move, isSelf):
        """Report the move the current player has made.

        'move' is the move to report (string).
        'isSelf' is a flag to say if the move is the move this AI made (True).
        """
        # Don't report the move we made
        if isSelf:
            return
        # Stop the AI from automatically moving
        self.sendWait()
        # Report the move
        self.sendMove(move)

    # Private methods

    def onUnknownLine(self, line):
        """Called by CECPProtocol"""
        pass#print 'Unknown CECP line: ' + line

    def onIllegalMove(self, move):
        """Called by CECPProtocol"""
        print 'CECP illegal move: ' + move
|
guillaumebel/nibbles-clutter
|
glchess/src/lib/cecp.py
|
Python
|
gpl-2.0
| 6,721
|
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
COUNTER = 'c'
TIMING = 'ms'
GAUGE = 'g'
SET = 's'
class StatsD(DatagramProtocol):
    """Minimal Twisted UDP client for the statsd wire protocol."""

    def __init__(self, host=None, port=None):
        self.host = host
        self.port = port
        # Metrics are dropped until the transport is connected.
        self.usable = False

    def startProtocol(self):
        self.usable = True
        self.transport.connect(self.host, self.port)

    def stopProtocol(self):
        self.usable = False

    def datagramReceived(self, data, (host, port)):
        # statsd is fire-and-forget; incoming datagrams are ignored.
        pass

    def connectionRefused(self):
        # Ignore ICMP port-unreachable; metrics are best-effort.
        pass

    def sendMetric(self, metric, value, _type, at=None):
        """Emit 'metric:value|type' (optionally '|@rate'), if connected."""
        if self.usable:
            packet = "%s:%d|%s" % (metric, value, _type)
            if at:
                packet += "|@%s" % (at, )
            self.transport.write(packet)
client = StatsD(None, None)
send = client.sendMetric


def setup(host, port):
    """Point the module-level statsd client at host:port and start it."""
    client.host = host
    client.port = port
    # Bind to loopback only when targeting localhost; otherwise any iface.
    if host == '127.0.0.1':
        iface = host
    else:
        iface = ''
    reactor.listenUDP(0, client, interface=iface)
|
ojab/bnw
|
bnw/core/statsd.py
|
Python
|
bsd-2-clause
| 1,024
|
"""SCons.Tool.sunf90
Tool-specific initialization for sunf90, the Sun Studio F90 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf90.py 4043 2009/02/23 09:06:45 scons"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']
def generate(env):
    """Add Builders and construction variables for sun f90 compiler to an
    Environment."""
    add_all_to_env(env)

    detected = env.Detect(compilers)
    fcomp = detected if detected else 'f90'
    env['FORTRAN'] = fcomp
    env['F90'] = fcomp

    # Shared-object builds delegate to the same compiler with PIC enabled.
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF90'] = '$F90'
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
def exists(env):
    """Return a truthy value when one of the Sun f90 compilers is found."""
    found = env.Detect(compilers)
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
BenLand100/rat-pac
|
python/SCons/Tool/sunf90.py
|
Python
|
bsd-3-clause
| 2,172
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (http://tiny.be). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from datetime import datetime, timedelta
import time
from osv import osv
from osv import fields
from tools.translate import _
class make_hours_to_work(osv.osv_memory):
    """OpenERP wizard that fills an employee's calendar with planned
    working hours between two dates, optionally restricted to selected
    weekdays."""
    _name = 'make.hours.to.work'
    _description = "Make Hours To Work"
    _columns = {# Start date
                'start_date':fields.date('Start Date', required=True),
                # End date
                'end_date':fields.date('End Date', required=True),
                # Background color
                'background_color':fields.selection([('None', ''),
                                              ('Blue', 'Blue'),
                                              ('LightBlue', 'Light Blue'),
                                              ('Red', 'Red'),
                                              ('Green', 'Green'),
                                              ('LightGreen', 'Light Green'),
                                              ('Yellow', 'Yellow'),
                                              ('Orange', 'Orange'),
                                              ('DarkOrange', 'Dark Orange'),
                                              ('Maroon', 'Maroon'),
                                              ('Aqua', 'Aqua'),
                                              ('Fuchsia', 'Fuchsia'),
                                              ('LightGrey','LightGrey')],
                                              string="background color", required=True),
                # Reason
                'name':fields.char('Reason', size=64),
                # Hours
                'hours':fields.float('Hours', required=True),
                # Monday
                'monday':fields.boolean('Monday'),
                # Tuesday
                'tuesday':fields.boolean('Tuesday'),
                # Wednesday
                'wednesday':fields.boolean('Wednesday'),
                # Thursday
                'thursday':fields.boolean('Thursday'),
                # Friday
                'friday':fields.boolean('Friday'),
                # Saturday
                'saturday':fields.boolean('Saturday'),
                # Sunday
                'sunday':fields.boolean('Sunday'),
                }
    _defaults = {
        'background_color': lambda *a:'None',
    }
    #
    ### Function
    #
    def generate_hours_to_work(self, cr, uid, ids, context=None):
        """Create/overwrite the 'hours to work' entries for the employee in
        context between start_date and end_date (same year only)."""
        res={}
        employee_id = context.get('active_id')
        for wiz in self.browse(cr,uid,ids,context):
            start_date = wiz.start_date
            start_year = int(str(start_date[0:4]))
            end_date = wiz.end_date
            end_year = int(str(end_date[0:4]))
            background_color = wiz.background_color
            name = wiz.name
            hours = wiz.hours
            monday = wiz.monday
            tuesday = wiz.tuesday
            wednesday = wiz.wednesday
            thursday = wiz.thursday
            friday = wiz.friday
            saturday = wiz.saturday
            sunday = wiz.sunday
            if start_year <> end_year:
                raise osv.except_osv('Holidays Generation Error', 'Start and End Year are differents')
            if end_date < start_date:
                raise osv.except_osv('Holidays Generation Error', 'End Date < Start Date')
            fec_ini = start_date
            fec_ini = datetime.strptime(fec_ini,'%Y-%m-%d')
            fec_fin = end_date
            fec_fin = datetime.strptime(fec_fin,'%Y-%m-%d')
            while fec_ini <= fec_fin:
                # When any weekday is checked, write only on those days;
                # otherwise write on every day in the range.
                if monday == True or tuesday == True or wednesday == True or thursday == True or friday == True or saturday == True or sunday == True:
                    dia = self._calculate_day(cr, uid, fec_ini)
                    if monday == True and dia == 'Lunes':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                    if tuesday == True and dia == 'Martes':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                    if wednesday == True and dia == 'Miercoles':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                    if thursday == True and dia == 'Jueves':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                    if friday == True and dia == 'Viernes':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                    if saturday == True and dia == 'Sabado':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                    if sunday == True and dia == 'Domingo':
                        self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                else:
                    self._write_hours_to_work(cr, uid, employee_id, fec_ini, name, hours, background_color)
                # Add 1 day to the date
                fec_ini = fec_ini + timedelta(days=1)
                fec_ini.strftime('%Y-%m-%d')
        return {'type': 'ir.actions.act_window_close'}
    def _calculate_day(self, cr, uid, my_date):
        """Return the Spanish weekday name ('Lunes'..'Domingo') for my_date,
        computed with a Zeller-style congruence."""
        actual_date = str(my_date)
        actual_date = actual_date[0:10]
        from datetime import date
        dia=['Sabado','Domingo','Lunes','Martes','Miercoles','Jueves','Viernes'];
        arrayfNac=actual_date.split('-');
        dfNac=date(int(arrayfNac[0]),int(arrayfNac[1]),int(arrayfNac[2]));
        val1=dfNac.month;
        val2=dfNac.year;
        # January/February are treated as months 13/14 of the previous year.
        if(dfNac.month == 1):
            val1=13;
            val2=val2-1;
        if(dfNac.month == 2):
            val1=14;
            val2=val2-1;
        val3 = ((val1+1)*3)/5;
        val4 = val2/4;
        val5 = val2/100;
        val6 = val2/400;
        val7 = dfNac.day+(val1*2)+val3+val2+val4-val5+val6+2;
        val8 = val7/7;
        val0 = val7-(val8*7);
        return (dia[val0])
    def _write_hours_to_work(self, cr, uid, employee_id, my_date, name, hours, background_color):
        """Create (or update, when hours > 0) the estimated-calendar entry
        for employee_id on my_date, creating the yearly calendar first if
        it does not exist yet."""
        hr_employee_calendar_obj = self.pool.get('hr.employee.calendar')
        actual_date = str(my_date)
        actual_date = actual_date[0:10]
        my_date_year = actual_date[0:4]
        # Check that the calendar exists for the employee.
        hr_employee_calendar_ids = hr_employee_calendar_obj.search(cr, uid,[('employee_id','=', employee_id),
                                                                            ('year', '=', my_date_year)])
        if not hr_employee_calendar_ids:
            line_vals = {'employee_id' : employee_id,
                         'year': my_date_year,
                         'name': 'Calendar ' + str(my_date_year),
                         }
            hr_employee_calendar_id = hr_employee_calendar_obj.create(cr, uid, line_vals)
        else:
            hr_employee_calendar_id = hr_employee_calendar_ids[0]
        estimated_calendar_resources_obj = self.pool.get('estimated.calendar.resources')
        estimated_calendar_resources_ids = estimated_calendar_resources_obj.search(cr, uid,[('hr_employee_calendar_id','=', hr_employee_calendar_id),
                                                                                            ('date', '=', actual_date)])
        if not estimated_calendar_resources_ids:
            line_vals = {'hr_employee_calendar_id' : hr_employee_calendar_id,
                         'name': name,
                         'date': my_date,
                         'hours': hours,
                         'background_color': background_color,
                         }
            estimated_calendar_resources_id = estimated_calendar_resources_obj.create(cr, uid, line_vals)
        else:
            estimated_calendar_resources = estimated_calendar_resources_obj.browse(cr,uid,estimated_calendar_resources_ids[0])
            if estimated_calendar_resources.hours > 0:
                estimated_calendar_resources_obj.write(cr,uid,estimated_calendar_resources_ids,{'name': name,
                                                                                                'hours': hours,
                                                                                                'background_color': background_color,})
        return True
make_hours_to_work()
|
avanzosc/avanzosc6.1
|
avanzosc_calendar/wizard/make_hours_to_work.py
|
Python
|
agpl-3.0
| 9,776
|
__author__ = 'manbug'
|
YangTe1/Forum
|
utils/__init__.py
|
Python
|
gpl-2.0
| 22
|
import pytest
from django.test import override_settings
from django.utils import timezone
from pretix import (
CreateOrderHotelRoom,
CreateOrderInput,
CreateOrderTicket,
CreateOrderTicketAnswer,
InvoiceInformation,
create_hotel_positions,
create_order,
)
from pretix.exceptions import PretixError
@pytest.fixture
def invoice_information():
    """Return an InvoiceInformation populated with placeholder values."""
    return InvoiceInformation(
        is_business=False,
        company="ABC",
        name="ABC",
        street="ABC",
        zipcode="ABC",
        city="ABC",
        country="ABC",
        vat_id="ABC",
        fiscal_code="ABC",
    )
@override_settings(PRETIX_API="https://pretix/api/")
@pytest.mark.django_db
def test_creates_order(conference, hotel_room, requests_mock, invoice_information):
    """A valid order (ticket + hotel room) returns the pretix payment URL."""
    hotel_room.conference = conference
    hotel_room.save()
    # Mock the three pretix endpoints create_order talks to.
    requests_mock.post(
        f"https://pretix/api/organizers/events/orders/",
        json={"payments": [{"payment_url": "http://example.com"}], "code": 123},
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/questions",
        json={
            "results": [
                {"id": "1", "type": "S"},
                {"id": "2", "type": "C", "options": [{"id": 1, "identifier": "abc"}]},
            ]
        },
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/items",
        json={"results": [{"id": "123", "admission": True}]},
    )
    order_data = CreateOrderInput(
        email="my@email.com",
        locale="en",
        payment_provider="stripe",
        invoice_information=invoice_information,
        hotel_rooms=[
            CreateOrderHotelRoom(
                room_id=str(hotel_room.id),
                checkin=timezone.datetime(2020, 1, 1).date(),
                checkout=timezone.datetime(2020, 1, 3).date(),
            )
        ],
        tickets=[
            CreateOrderTicket(
                ticket_id="123",
                attendee_name="Example",
                attendee_email="Example",
                variation=None,
                voucher=None,
                answers=[
                    CreateOrderTicketAnswer(question_id="1", value="ABC"),
                    CreateOrderTicketAnswer(question_id="2", value="1"),
                ],
            )
        ],
    )
    result = create_order(conference, order_data)
    assert result.payment_url == "http://example.com"
@override_settings(PRETIX_API="https://pretix/api/")
@pytest.mark.django_db
def test_raises_when_response_is_400(conference, requests_mock, invoice_information):
    """A 400 from the pretix orders endpoint surfaces as PretixError."""
    requests_mock.post(
        f"https://pretix/api/organizers/events/orders/", status_code=400, json={}
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/questions", json={"results": []}
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/items",
        json={"results": [{"id": "123", "admission": False}]},
    )
    order_data = CreateOrderInput(
        email="my@email.com",
        locale="en",
        payment_provider="stripe",
        invoice_information=invoice_information,
        hotel_rooms=[],
        tickets=[
            CreateOrderTicket(
                ticket_id="123",
                attendee_name="Example",
                attendee_email="Example",
                variation=None,
                answers=None,
                voucher=None,
            )
        ],
    )
    with pytest.raises(PretixError):
        create_order(conference, order_data)
@override_settings(PRETIX_API="https://pretix/api/")
@pytest.mark.django_db
def test_raises_value_error_if_answer_value_is_wrong(
    conference, requests_mock, invoice_information
):
    """An answer referencing a non-existent choice option raises ValueError."""
    requests_mock.post(
        f"https://pretix/api/organizers/events/orders/",
        json={"payments": [{"payment_url": "http://example.com"}], "code": 123},
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/questions",
        json={
            "results": [
                {"id": "1", "type": "S"},
                {"id": "2", "type": "C", "options": [{"id": 1, "identifier": "abc"}]},
            ]
        },
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/items",
        json={"results": [{"id": "123", "admission": True}]},
    )
    order_data = CreateOrderInput(
        email="my@email.com",
        locale="en",
        payment_provider="stripe",
        invoice_information=invoice_information,
        hotel_rooms=[],
        tickets=[
            CreateOrderTicket(
                ticket_id="123",
                attendee_name="Example",
                attendee_email="Example",
                variation=None,
                voucher=None,
                answers=[
                    CreateOrderTicketAnswer(question_id="1", value="ABC"),
                    # 100 doesn't exist as id in the questions
                    CreateOrderTicketAnswer(question_id="2", value="100"),
                ],
            )
        ],
    )
    with pytest.raises(ValueError):
        create_order(conference, order_data)
@pytest.mark.django_db
def test_create_hotel_positions(requests_mock, hotel_room_factory, invoice_information):
    """Hotel rooms expand into pretix positions priced per night with
    room-type/checkin/checkout answers."""
    room = hotel_room_factory(
        conference__pretix_hotel_ticket_id=1,
        conference__pretix_hotel_room_type_question_id=2,
        conference__pretix_hotel_checkin_question_id=3,
        conference__pretix_hotel_checkout_question_id=4,
        price=100,
    )
    rooms = [
        CreateOrderHotelRoom(
            room_id=str(room.id),
            checkin=timezone.datetime(2020, 1, 1).date(),
            checkout=timezone.datetime(2020, 1, 3).date(),
        )
    ]
    positions = create_hotel_positions(rooms, "en", room.conference)
    # Two nights at 100 -> "200.00".
    assert positions == [
        {
            "item": 1,
            "price": "200.00",
            "answers": [
                {
                    "question": 2,
                    "answer": room.name.localize("en"),
                    "options": [],
                    "option_identifier": [],
                },
                {
                    "question": 3,
                    "answer": "2020-01-01",
                    "options": [],
                    "option_identifier": [],
                },
                {
                    "question": 4,
                    "answer": "2020-01-03",
                    "options": [],
                    "option_identifier": [],
                },
            ],
        }
    ]
@override_settings(PRETIX_API="https://pretix/api/")
@pytest.mark.django_db
def test_not_required_and_empty_answer_is_skipped(
    conference, requests_mock, invoice_information
):
    """Empty answers to non-required questions are omitted from the payload."""
    orders_mock = requests_mock.post(
        f"https://pretix/api/organizers/events/orders/",
        json={"payments": [{"payment_url": "http://example.com"}], "code": 123},
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/questions",
        json={
            "results": [
                {"id": "1", "type": "S", "required": False},
                {
                    "id": "2",
                    "type": "C",
                    "required": True,
                    "options": [{"id": 1, "identifier": "abc"}],
                },
            ]
        },
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/items",
        json={"results": [{"id": "123", "admission": True}]},
    )
    order_data = CreateOrderInput(
        email="my@email.com",
        locale="en",
        payment_provider="stripe",
        invoice_information=invoice_information,
        hotel_rooms=[],
        tickets=[
            CreateOrderTicket(
                ticket_id="123",
                attendee_name="Example",
                attendee_email="Example",
                variation=None,
                voucher=None,
                answers=[
                    CreateOrderTicketAnswer(question_id="1", value=""),
                    CreateOrderTicketAnswer(question_id="2", value="1"),
                ],
            )
        ],
    )
    result = create_order(conference, order_data)
    assert result.payment_url == "http://example.com"
    # Inspect the JSON that was actually POSTed to pretix.
    body = orders_mock.request_history[0].json()
    answers = body["positions"][0]["answers"]
    assert len(answers) == 1
    assert answers == [
        {
            "question": "2",
            "answer": "1",
            "options": [1],
            "option_identifier": [],
            "option_identifiers": ["abc"],
        }
    ]
@override_settings(PRETIX_API="https://pretix/api/")
@pytest.mark.django_db
def test_create_order_with_positions_with_voucher_and_one_without(
    conference, requests_mock, invoice_information
):
    """Only positions with a voucher carry the 'voucher' key in the payload."""
    orders_mock = requests_mock.post(
        f"https://pretix/api/organizers/events/orders/",
        json={"payments": [{"payment_url": "http://example.com"}], "code": 123},
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/questions",
        json={
            "results": [
                {"id": "1", "type": "S"},
                {"id": "2", "type": "C", "options": [{"id": 1, "identifier": "abc"}]},
            ]
        },
    )
    requests_mock.get(
        f"https://pretix/api/organizers/events/items",
        json={"results": [{"id": "123", "admission": True}]},
    )
    order_data = CreateOrderInput(
        email="my@email.com",
        locale="en",
        payment_provider="stripe",
        invoice_information=invoice_information,
        hotel_rooms=[],
        tickets=[
            CreateOrderTicket(
                ticket_id="123",
                attendee_name="Example",
                attendee_email="Example",
                variation=None,
                voucher=None,
                answers=[
                    CreateOrderTicketAnswer(question_id="1", value="ABC"),
                    CreateOrderTicketAnswer(question_id="2", value="1"),
                ],
            ),
            CreateOrderTicket(
                ticket_id="123",
                attendee_name="Example",
                attendee_email="Example",
                variation=None,
                voucher="friendly-human-being",
                answers=[
                    CreateOrderTicketAnswer(question_id="1", value="ABC"),
                    CreateOrderTicketAnswer(question_id="2", value="1"),
                ],
            ),
        ],
    )
    result = create_order(conference, order_data)
    assert result.payment_url == "http://example.com"
    body = orders_mock.request_history[0].json()
    assert "voucher" not in body["positions"][0]
    assert body["positions"][1]["voucher"] == "friendly-human-being"
|
patrick91/pycon
|
backend/pretix/tests/test_create_order.py
|
Python
|
mit
| 10,686
|
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=None):
    """Convert the special characters '&', '<', '>', '"' and "'" to their
    HTML entity equivalents.

    RFC 1866 specifies that these characters be represented in HTML as
    &lt;, &gt; and &amp; respectively.  The replacement strings in this
    file had been garbled (each character was being "replaced" with
    itself); the proper entities are restored here.  ``replace`` may be a
    callable ``replace(text, old, new)``; it defaults to ``str.replace``
    (the Python 2-only ``string.replace`` module function is gone in
    Python 3).
    """
    if replace is None:
        def replace(s, old, new):
            return s.replace(old, new)
    text = replace(text, '&', '&amp;')   # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side wrapper that renders an FCKeditor instance.

    Emits the editor iframe plus the hidden fields the editor JS
    expects for browsers known to support it, and falls back to a
    plain <textarea> everywhere else.
    """

    def __init__(self, instanceName):
        # Base name for all generated form-field ids/names.
        self.InstanceName = instanceName
        self.BasePath = '/fckeditor/'
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = ''
        self.Config = {}

    def Create(self):
        """Return the complete HTML markup for this editor instance."""
        return self.CreateHtml()

    def CreateHtml(self):
        """Build the editor markup (or the <textarea> fallback)."""
        HtmlValue = escape(self.Value)
        Html = "<div>"
        if self.IsCompatible():
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if self.ToolbarSet is not None:
                Link += "&ToolBar=%s" % self.ToolbarSet
            # Hidden field holding the (escaped) editor content; the
            # editor JS keeps it in sync so normal form posts work.
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )
            # Hidden field carrying the per-instance configuration.
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )
            # The editor iframe.  BUGFIX: the id template was
            # "%s\__Frame" (stray backslash, two underscores); the
            # editor JS looks up "<InstanceName>___Frame".
            Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            # BUGFIX: these checks used find("%%"), which looks for a
            # literal "%%" and never matches "100%"; a bare "%" marks
            # a percentage size that must not get a "px" suffix.
            if self.Width.find("%") < 0:
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if self.Height.find("%") < 0:
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        Html += "</div>"
        return Html

    def IsCompatible(self):
        """Best-effort User-Agent sniffing for editor support."""
        # .get() replaces the Python-2-only has_key() check.
        sAgent = os.environ.get("HTTP_USER_AGENT", "")
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i+5:i+5+3])
            return iVersion >= 5.5
        elif sAgent.find("Gecko/") >= 0:
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i+6:i+6+8])
            return iVersion >= 20030210
        elif sAgent.find("Opera/") >= 0:
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i+6:i+6+4])
            return iVersion >= 9.5
        elif sAgent.find("AppleWebKit/") >= 0:
            m = re.search(r'AppleWebKit\/(\d+)', sAgent, re.IGNORECASE)
            # BUGFIX: m.group(1) is a string; comparing it to the int
            # 522 was always True on Python 2 and a TypeError on
            # Python 3.  Also guard against the regex not matching.
            return m is not None and int(m.group(1)) >= 522
        else:
            return False

    def GetConfigFieldString(self):
        """Serialize self.Config as an escaped query-string."""
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if not bFirst:
                sParams += "&"
            else:
                bFirst = False
            if sValue:
                k = escape(sKey)
                v = escape(sValue)
                # The literal strings "true"/"false" are passed through
                # unescaped so the editor parses them as booleans.
                if sValue == "true":
                    sParams += "%s=true" % k
                elif sValue == "false":
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
|
walterfan/pims4php
|
util/fckeditor/fckeditor.py
|
Python
|
apache-2.0
| 4,236
|
class IAccount():
    """Interface describing account/authentication operations.

    Concrete views are expected to override each hook; the defaults do
    nothing and return None.
    """

    def options(self):
        """Show the account options page."""

    def signin(self):
        """Authenticate an existing user."""

    def signup(self):
        """Register a new user."""

    def logout(self):
        """End the current user's session."""
|
saraivaufc/askMathPlus
|
askmath/views/authentication/iaccount.py
|
Python
|
gpl-2.0
| 162
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
)
class MalemotionIE(InfoExtractor):
    """Extractor for malemotion.com video pages."""

    _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
    _TEST = {
        'url': 'http://malemotion.com/video/bien-dur.10ew',
        'file': '10ew.mp4',
        'md5': 'b3cc49f953b107e4a363cdff07d100ce',
        'info_dict': {
            "title": "Bien dur",
            "age_limit": 18,
        },
        'skip': 'This video has been deleted.'
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group("id")
        page = self._download_webpage(url, video_id)
        self.report_extraction(video_id)

        # The direct MP4 URL is percent-encoded in the page source.
        video_url = compat_urllib_parse.unquote(self._search_regex(
            r'<source type="video/mp4" src="(.+?)"', page, 'video URL'))
        title = self._html_search_regex(
            r'<title>(.*?)</title', page, 'title')
        thumbnail = self._search_regex(
            r'<video .+?poster="(.+?)"', page, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'formats': [{
                'url': video_url,
                'ext': 'mp4',
                'format_id': 'mp4',
                'preference': 1,
            }],
            'uploader': None,
            'upload_date': None,
            'title': title,
            'thumbnail': thumbnail,
            'description': None,
            'age_limit': 18,
        }
|
MiLk/youtube-dl
|
youtube_dl/extractor/malemotion.py
|
Python
|
unlicense
| 1,665
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.serializers.json
import postgres.fields
import decimal
class Migration(migrations.Migration):
    """Add JSON action_history/context fields and date_of_birth to User."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='action_history',
            # NOTE(review): default={} is a shared mutable default; Django
            # normally expects a callable (dict) here — kept as generated.
            field=postgres.fields.JSONField(default={}, encode_kwargs={'cls': django.core.serializers.json.DjangoJSONEncoder}, decode_kwargs={'parse_float': decimal.Decimal}),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='context',
            # JSON round-trips floats through Decimal for precision.
            field=postgres.fields.JSONField(default={}, encode_kwargs={'cls': django.core.serializers.json.DjangoJSONEncoder}, decode_kwargs={'parse_float': decimal.Decimal}),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='date_of_birth',
            # Nullable: existing rows get NULL instead of a fake date.
            field=models.DateField(null=True, verbose_name='date of birth of user'),
            preserve_default=True,
        ),
    ]
|
hotsyk/conferencio
|
conferencio/users/migrations/0002_auto_20150301_0526.py
|
Python
|
bsd-3-clause
| 1,162
|
'''
Created on 2013-3-8
@author: corleone
'''
from scrapy.cmdline import execute
from scrapy.settings import CrawlerSettings
if __name__ == '__main__':
    # Output file for the crawled items (one JSON object per line).
    file_name = u"permutation3.json"
    # Truncate/create the output file so each run starts from scratch.
    with open(file_name, 'w') as f:
        pass
    # Run the "lottery" spider with the project's settings module,
    # exporting items in JSON-lines format to file_name.
    execute(argv=["scrapy", "crawl", "lottery", "-o%s" % file_name, "-tjsonlines" ], settings=CrawlerSettings(__import__('scrapy.lottery.permutation3.lecai.settings', {}, {}, [''])))
    # execute(argv=["scrapy", "shell", "http://www.lecai.com/lottery/draw/list/3?lottery_type=3&ds=2000-01-01&de=2013-03-09" ], settings=CrawlerSettings(__import__('scrapy.lottery.permutation3_lecai.settings', {}, {}, [''])))
|
535521469/crawler_sth
|
scrapy/lottery/permutation3/lecai/start.py
|
Python
|
bsd-3-clause
| 658
|
#!/usr/bin/env python
from setuptools import setup
# NOTE(review): name/description/author below still describe the upstream
# "molmass" project while packages installs "sequencing_utilities" —
# presumably copied from another setup.py; verify before publishing.
setup(name="molmass",
      version="2015.01.29",
      description="Calculate the molecular mass (average, monoisotopic, and nominal), the elemental composition, and the mass distribution spectrum of a molecule given by its chemical formula, relative element weights, or sequence.",
      author="Christoph Gohlke",
      author_email="cgohlke@uci.edu",
      url="https://github.com/dmccloskey/molmass",
      packages=["sequencing_utilities"],
      # Console-script entry points kept disabled below as found.
      #entry_points={"console_scripts":
      #              ["makegff = sequencing_utilities.makegff:main",
      #               "sam2bam = sequencing_utilities.sam2bam:main",
      #               "mapped_percentage = sequencing_utilities.mapped_percentage:main"]},
      #classifiers=[
      #'Development Status :: 5 - Production/Stable',
      #'Environment :: Console',
      #'Intended Audience :: Science/Research',
      #'Operating System :: OS Independent',
      #'Programming Language :: Python :: 3.4',
      #'Programming Language :: Cython',
      #'Programming Language :: Python :: Implementation :: CPython',
      #'Topic :: Scientific/Engineering',
      #'Topic :: Scientific/Engineering :: Bio-Informatics'
      #],
      platforms="GNU/Linux, Mac OS X >= 10.7, Microsoft Windows >= 7",
      )
|
dmccloskey/molmass
|
setup.py
|
Python
|
mit
| 1,274
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dingoprojectorg.settings")
    # Imported after the settings variable is set so Django picks it up.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
phongvcao/dingoprojectorg
|
manage.py
|
Python
|
gpl-3.0
| 258
|
"""Tests prosegur setup."""
from unittest.mock import MagicMock, patch
from pytest import mark
from homeassistant.components.prosegur import DOMAIN
from tests.common import MockConfigEntry
@mark.parametrize(
    "error",
    [
        ConnectionRefusedError,
        ConnectionError,
    ],
)
async def test_setup_entry_fail_retrieve(hass, error):
    """Test that setting up the Prosegur entry fails when login raises."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            "username": "test-username",
            "password": "test-password",
            "country": "PT",
            "contract": "xpto",
        },
    )
    config_entry.add_to_hass(hass)
    # Make the Prosegur login raise the parametrized connection error.
    with patch(
        "pyprosegur.auth.Auth.login",
        side_effect=error,
    ):
        # Setup must report failure when authentication cannot complete.
        assert not await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
async def test_unload_entry(hass, aioclient_mock):
    """Test that a loaded Prosegur entry can be unloaded cleanly."""
    # Stub the Prosegur login endpoint so setup can authenticate.
    aioclient_mock.post(
        "https://smart.prosegur.com/smart-server/ws/access/login",
        json={"data": {"token": "123456789"}},
    )
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            "username": "test-username",
            "password": "test-password",
            "country": "PT",
            "contract": "xpto",
        },
    )
    config_entry.add_to_hass(hass)
    # Fake installation object returned by the config-flow retrieval.
    install = MagicMock()
    install.contract = "123"
    with patch(
        "homeassistant.components.prosegur.config_flow.Installation.retrieve",
        return_value=install,
    ):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    assert await hass.config_entries.async_unload(config_entry.entry_id)
|
Danielhiversen/home-assistant
|
tests/components/prosegur/test_init.py
|
Python
|
apache-2.0
| 1,781
|
# -*- encoding: utf-8 -*-
"""
Membership test: check whether an element is present in a tuple.
"""
inventory = ("a",
             "b",
             "c",
             "d")
# "in" performs a linear membership scan over the tuple (Python 2 script).
if "c" in inventory:
    print "You will live to fight another day."
|
h4/fuit-webdev
|
examples/lesson2/2.3/1.9.5.py
|
Python
|
mit
| 227
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Multipass.ui'
#
# Created: Tue Feb 04 23:58:25 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API v1: QString exists, use its UTF-8 converter.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2: strings are already unicode; pass through.
    def _fromUtf8(s):
        return s
try:
    # Older PyQt4: translate() takes an explicit encoding argument.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4: translate() takes no encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Multipass(object):
    """Generated UI class for the Multipass main window.

    WARNING: generated from Multipass.ui by pyuic4 — manual edits will
    be lost when the file is regenerated.
    """

    def setupUi(self, Multipass):
        """Create and lay out all widgets on the *Multipass* window."""
        Multipass.setObjectName(_fromUtf8("Multipass"))
        Multipass.resize(465, 281)
        self.widget = QtGui.QWidget(Multipass)
        self.widget.setObjectName(_fromUtf8("widget"))
        # --- output-encoding radio buttons (Ascii85 checked by default) ---
        self.ascii85 = QtGui.QRadioButton(self.widget)
        self.ascii85.setGeometry(QtCore.QRect(230, 160, 82, 17))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.ascii85.setFont(font)
        self.ascii85.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.ascii85.setChecked(True)
        self.ascii85.setObjectName(_fromUtf8("ascii85"))
        self.base64 = QtGui.QRadioButton(self.widget)
        self.base64.setGeometry(QtCore.QRect(350, 160, 82, 17))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.base64.setFont(font)
        self.base64.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.base64.setObjectName(_fromUtf8("base64"))
        # --- input line edits: username, domain, master password ---
        self.username = QtGui.QLineEdit(self.widget)
        self.username.setGeometry(QtCore.QRect(20, 40, 151, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.username.setFont(font)
        self.username.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.username.setObjectName(_fromUtf8("username"))
        self.domain = QtGui.QLineEdit(self.widget)
        self.domain.setGeometry(QtCore.QRect(210, 40, 221, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.domain.setFont(font)
        self.domain.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.domain.setObjectName(_fromUtf8("domain"))
        self.master_password = QtGui.QLineEdit(self.widget)
        self.master_password.setGeometry(QtCore.QRect(20, 110, 411, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.master_password.setFont(font)
        self.master_password.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        # Hide typed characters and disable text prediction for the secret.
        self.master_password.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText)
        self.master_password.setEchoMode(QtGui.QLineEdit.Password)
        self.master_password.setObjectName(_fromUtf8("master_password"))
        # --- password length slider (8..40 characters) ---
        self.selected_size = QtGui.QSlider(self.widget)
        self.selected_size.setGeometry(QtCore.QRect(90, 220, 341, 20))
        self.selected_size.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.selected_size.setMinimum(8)
        self.selected_size.setMaximum(40)
        self.selected_size.setOrientation(QtCore.Qt.Horizontal)
        self.selected_size.setObjectName(_fromUtf8("selected_size"))
        # --- static labels ---
        self.username_label = QtGui.QLabel(self.widget)
        self.username_label.setGeometry(QtCore.QRect(20, 16, 151, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.username_label.setFont(font)
        self.username_label.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.username_label.setObjectName(_fromUtf8("username_label"))
        self.domain_label = QtGui.QLabel(self.widget)
        self.domain_label.setGeometry(QtCore.QRect(210, 16, 161, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.domain_label.setFont(font)
        self.domain_label.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.domain_label.setObjectName(_fromUtf8("domain_label"))
        self.masterPass_label = QtGui.QLabel(self.widget)
        self.masterPass_label.setGeometry(QtCore.QRect(20, 86, 351, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.masterPass_label.setFont(font)
        self.masterPass_label.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.masterPass_label.setObjectName(_fromUtf8("masterPass_label"))
        self.generatedPass_label = QtGui.QLabel(self.widget)
        self.generatedPass_label.setGeometry(QtCore.QRect(20, 160, 181, 16))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.generatedPass_label.setFont(font)
        self.generatedPass_label.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.generatedPass_label.setObjectName(_fromUtf8("generatedPass_label"))
        self.length_label = QtGui.QLabel(self.widget)
        self.length_label.setGeometry(QtCore.QRect(20, 220, 71, 20))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.length_label.setFont(font)
        self.length_label.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.length_label.setObjectName(_fromUtf8("length_label"))
        # --- read-only output area for the generated password ---
        self.generated_password = QtGui.QTextBrowser(self.widget)
        self.generated_password.setGeometry(QtCore.QRect(20, 180, 411, 31))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Consolas"))
        font.setPointSize(12)
        self.generated_password.setFont(font)
        self.generated_password.setObjectName(_fromUtf8("generated_password"))
        Multipass.setCentralWidget(self.widget)
        self.menubar = QtGui.QMenuBar(Multipass)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 465, 21))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        Multipass.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(Multipass)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        Multipass.setStatusBar(self.statusbar)
        self.retranslateUi(Multipass)
        # Any input change triggers Multipass.generatePassword (old-style
        # signal/slot connections, as emitted by pyuic4).
        QtCore.QObject.connect(self.username, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), Multipass.generatePassword)
        QtCore.QObject.connect(self.domain, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), Multipass.generatePassword)
        QtCore.QObject.connect(self.master_password, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), Multipass.generatePassword)
        QtCore.QObject.connect(self.ascii85, QtCore.SIGNAL(_fromUtf8("clicked()")), Multipass.generatePassword)
        QtCore.QObject.connect(self.base64, QtCore.SIGNAL(_fromUtf8("clicked()")), Multipass.generatePassword)
        QtCore.QObject.connect(self.selected_size, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), Multipass.generatePassword)
        QtCore.QMetaObject.connectSlotsByName(Multipass)

    def retranslateUi(self, Multipass):
        """Set all user-visible strings (translation hook)."""
        Multipass.setWindowTitle(_translate("Multipass", "MainWindow", None))
        self.ascii85.setText(_translate("Multipass", "Ascii85", None))
        self.base64.setText(_translate("Multipass", "Base64", None))
        self.username_label.setText(_translate("Multipass", "Username:", None))
        self.domain_label.setText(_translate("Multipass", "Domain:", None))
        self.masterPass_label.setText(_translate("Multipass", "Master Password:", None))
        self.generatedPass_label.setText(_translate("Multipass", "Generated Password:", None))
        self.length_label.setText(_translate("Multipass", "Length:", None))
if __name__ == "__main__":
    import sys
    # Allow running this generated module directly for a quick preview.
    app = QtGui.QApplication(sys.argv)
    Multipass = QtGui.QMainWindow()
    ui = Ui_Multipass()
    ui.setupUi(Multipass)
    Multipass.show()
    sys.exit(app.exec_())
|
marclr/passwordGenerator
|
Multipass.py
|
Python
|
mit
| 8,642
|
import cPickle, time, uuid, sys, random
import transaction
from ZODB.FileStorage import FileStorage
from ZODB.DB import DB
from BTrees.IOBTree import IOBTree
from BTrees.IIBTree import IIBTree
from BTrees.OOBTree import OOBTree
import testlib
# Open (or create) the ZODB FileStorage and fetch the persistent root.
storage = FileStorage('Data.fs')
db = DB(storage)
connection = db.open()
root = connection.root()
# Store a fresh graph-database object from the test helpers.
root['graphdb']=testlib.gdb()
g=root['graphdb']
# Node/edge containers keyed by integer id (IOBTree: int -> value).
g.nodes=IOBTree()
g.edges=IOBTree()
# 100k nodes, each a [id, attribute-dict] pair.
for i in range(1,100001):
    node = [i,{}]
    g.nodes[i]=node
# 1M edges, each [id, random-src-node, random-dst-node, attr-btree].
for j in range(1,1000001):
    edge = [j,random.randint(1,100000),random.randint(1,100000),OOBTree({'foo':'bar'})]
    g.edges[j]=edge
# Persist everything in one transaction, then shut down cleanly.
transaction.commit()
connection.close()
db.close()
|
jhb/zodbtime
|
testdataoobt.py
|
Python
|
gpl-2.0
| 681
|
from .models import User, Role
from app.knowledgebase import Channel
from app.helpers import get_user_by_telegram_id
from app import db
from random import randint
from datetime import datetime, timedelta
from flask.ext.security import login_user, logout_user
class AccountManager(object):
    """Channel-subscription operations keyed by Telegram user id.

    NOTE: this is a Python 2 module (print statements below).
    """

    @staticmethod
    def get_user_by_telegram_id(telegram_user_id):
        '''
        Helper method that checks the database for a user with a
        particular telegram_user_id and returns it if it exists,
        or returns None if not
        '''
        return db.session.query(User).\
            filter(User.telegram_user_id == telegram_user_id).first()

    @staticmethod
    def get_subscribed_channels(telegram_user_id):
        '''
        Checks if a user with this telegram_user_id exists in the database
        and returns their channels, or None if no such user exists.
        '''
        user = db.session.query(User).filter(
            User.telegram_user_id == telegram_user_id).first()
        if user is not None:
            return user.channels
        else:
            return None

    @staticmethod
    def add_channel(telegram_user_id, channel_name):
        '''
        Checks if a user with this telegram user id exists in the database
        and if a channel with this name exists
        adds the channel to the user
        Returns True if the operation was done successfully
        False if not
        '''
        user = db.session.query(User).filter(
            User.telegram_user_id == telegram_user_id).first()
        print "searching for " + str(channel_name)
        channel = db.session.query(Channel).filter(
            Channel.name == channel_name).first()
        # TODO: Remove Edits
        # and (channel is not None):
        if (user is not None and channel is not None):
            # NOTE(review): no duplicate check — a user can subscribe to
            # the same channel twice; verify that is intended.
            user.channels.append(channel)
            db.session.add(user)
            db.session.commit()
            return True
        else:
            if (user is None):
                print 'User none'
            if (channel is None):
                print 'Channel is none'
            return False

    @staticmethod
    def delete_channel(telegram_user_id, channel_name):
        '''
        Checks if a user with this telegram user id exists in the database
        and if a channel with this name exists
        delete it from the user.
        Returns True if the operation was done successfully
        False if not
        '''
        user = db.session.query(User).filter(
            User.telegram_user_id == telegram_user_id).first()
        print "searching for " + str(channel_name)
        # TODO: Remove Edits
        if (user is not None):
            length_before = len(user.channels)
            # removes any channels with "channel_name"
            user.channels = [
                channel for channel in user.channels if channel.name != channel_name]
            db.session.add(user)
            db.session.commit()
            length_after = len(user.channels)
            # True only if at least one subscription was actually removed.
            if (length_before > length_after):
                return True
            else:
                return False
        else:
            print 'User none'
            return False
class TelegramAccountManager(object):
@staticmethod
def create_account_if_does_not_exist(telegram_user_id, name=""):
'''
Check if an account is present in the database
If it is, return True and don't do anything else
If it's not, return False and create the account
'''
if db.session.query(User.id).\
filter_by(telegram_user_id=telegram_user_id).\
scalar() is None:
# user does not exist
new_user = User(telegram_user_id=telegram_user_id, user_type=0)
# Add a name to the user
new_user.name = name
# Add to database
db.session.add(new_user)
# Commit changes
db.session.commit()
# Return false to indicate we created an account
return False
else:
return True
@staticmethod
def get_points(telegram_user_id):
'''
Simple get of the user's current points.
If no user by this id - return none
'''
user = get_user_by_telegram_id(telegram_user_id)
if user is not None:
return user.points
else:
return None
@staticmethod
def award_points(telegram_user_id, points):
'''
Adds points to user if user exists, otherwise returns false
'''
user = get_user_by_telegram_id(telegram_user_id)
if user is not None:
user.points = user.points + points
db.session.add(user)
db.session.commit()
return (True, user.points)
else:
return (False, user.points)
@staticmethod
def generate_and_store_otp(telegram_user_id):
'''
For the given user - stores a randomly generated OTP and expiry
'''
user = get_user_by_telegram_id(telegram_user_id)
if user is not None:
otp = randint(100000, 999999)
user.current_otp = otp
user.otp_expiry = datetime.now() + timedelta(seconds=60)
db.session.add(user)
db.session.commit()
return otp
else:
return None
@staticmethod
def merge_accounts_through_otp(current_user, otp):
'''
Looks through database for a user with the input otp.
If it sees a user with an unexpired OTP - merges the two users into one.
'''
telegram_user_with_otp = db.session.query(User).filter(User.current_otp == otp).first()
if telegram_user_with_otp is not None:
if telegram_user_with_otp.otp_expiry > datetime.now():
# If we have a valid user with the OTP and it's unexpired,
# merge the two accounts
success = TelegramAccountManager.merge_accounts(current_user, telegram_user_with_otp)
print "Merge success"
return telegram_user_with_otp
else:
print "Merge failure - expired OTP"
return None
else:
print "Merge failure - no user with OTP"
return None
@staticmethod
def merge_accounts(web_user, telegram_user):
'''
Given a web-user and a telegram user - merges the important information from the
telegram user into the web user and then deletes the web user
'''
with db.session.no_autoflush:
temp_dict = {}
temp_dict['name'] = web_user.name
# Add gamification points together
temp_dict['points'] = telegram_user.points + web_user.points
# Merge security info
temp_dict['email'] = web_user.email
temp_dict['password'] = web_user.password
temp_dict['active'] = web_user.active
temp_dict['confirmed_at'] = web_user.confirmed_at
temp_dict['roles'] = web_user.roles
# Delete the web account
db.session.delete(web_user)
db.session.commit()
# The web user will have a name - the telegram user will not
telegram_user.name = temp_dict['name']
# Add gamification points together
telegram_user.points = temp_dict['points']
# Merge security info
telegram_user.email = temp_dict['email']
telegram_user.password = temp_dict['password']
telegram_user.active = temp_dict['active']
telegram_user.confirmed_at = temp_dict['confirmed_at']
telegram_user.roles = temp_dict['roles']
# Relationship merging as needed
# Add the changed telegram acct
db.session.add(telegram_user)
db.session.commit()
# Return success
return True
|
yewsiang/botmother
|
app/accounts/__init__.py
|
Python
|
agpl-3.0
| 7,974
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import request, session
from marshmallow_enum import EnumField
from webargs import fields, validate
from werkzeug.exceptions import Forbidden
from indico.modules.events.papers.controllers.base import RHPaperBase
from indico.modules.events.papers.models.comments import PaperReviewComment
from indico.modules.events.papers.models.reviews import (PaperAction, PaperCommentVisibility, PaperReview,
PaperReviewType, PaperTypeProxy)
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.modules.events.papers.operations import (create_comment, create_paper_revision, create_review,
delete_comment, judge_paper, reset_paper_state, update_comment,
update_review)
from indico.modules.events.papers.schemas import PaperSchema
from indico.modules.events.papers.util import is_type_reviewing_possible
from indico.util.i18n import _
from indico.util.marshmallow import max_words, not_empty
from indico.web.args import parser, use_kwargs
class RHPaperDetails(RHPaperBase):
    """Return the paper serialized for the current user."""

    def _process(self):
        schema = PaperSchema(context={'user': session.user})
        return schema.jsonify(self.paper)
class RHResetPaperState(RHPaperBase):
    """Reset a judged paper back to the 'submitted' state."""

    def _check_paper_protection(self):
        # Nothing to reset while the paper is still merely submitted.
        if self.paper.state == PaperRevisionState.submitted:
            return False
        # Managers and judges can always reset.
        user = session.user
        return self.paper.event.can_manage(user) or self.paper.can_judge(user)

    def _process(self):
        already_submitted = self.paper.state == PaperRevisionState.submitted
        if not already_submitted:
            reset_paper_state(self.paper)
        return '', 204
class RHCreatePaperComment(RHPaperBase):
    """Add a comment to a paper."""

    def _check_paper_protection(self):
        return self.paper.can_comment(session.user)

    @use_kwargs({
        'comment': fields.String(validate=not_empty),
        'visibility': EnumField(PaperCommentVisibility, missing=None)
    })
    def _process(self, comment, visibility):
        create_comment(self.paper, comment, visibility, session.user)
        return '', 204
class RHCommentActions(RHPaperBase):
    """Edit (PATCH) or delete (DELETE) an existing review comment."""

    normalize_url_spec = {
        'locators': {
            lambda self: self.comment
        }
    }

    def _check_access(self):
        RHPaperBase._check_access(self)
        # Only users allowed to edit this specific comment may act on it.
        if not self.comment.can_edit(session.user):
            raise Forbidden

    def _process_args(self):
        RHPaperBase._process_args(self)
        # 404 for unknown or soft-deleted comments.
        self.comment = (PaperReviewComment.query
                        .filter(PaperReviewComment.id == request.view_args['comment_id'],
                                ~PaperReviewComment.is_deleted)
                        .first_or_404())

    def _process_DELETE(self):
        delete_comment(self.comment)
        return '', 204

    @use_kwargs({
        'comment': fields.String(validate=not_empty),
        'visibility': EnumField(PaperCommentVisibility)
    }, partial=True)
    def _process_PATCH(self, comment=None, visibility=None):
        # partial=True: only the fields present in the request change.
        update_comment(self.comment, comment, visibility)
        return '', 204
class RHJudgePaper(RHPaperBase):
    """Record the judge's final decision on a paper."""

    def _check_paper_protection(self):
        return self.paper.can_judge(session.user, check_state=True)

    @use_kwargs({
        'action': EnumField(PaperAction, required=True),
        'comment': fields.String()
    })
    def _process(self, action, comment):
        judge_paper(self.paper, action, comment, judge=session.user)
        return '', 204
class RHSubmitNewRevision(RHPaperBase):
    """Upload a new revision of a paper (files only)."""

    # A paper may not exist yet (first submission), and submission is
    # allowed even while the event is locked.
    PAPER_REQUIRED = False
    ALLOW_LOCKED = True

    def _check_paper_protection(self):
        # CFP managers bypass the per-contribution checks below.
        if not self.event.cfp.is_manager(session.user):
            if not RHPaperBase._check_paper_protection(self):
                return False
            if not self.contribution.is_user_associated(session.user, check_abstract=True):
                return False
        # A new revision is allowed only for first submissions or after
        # the previous revision was sent back "to be corrected".
        paper = self.contribution.paper
        return paper is None or paper.state == PaperRevisionState.to_be_corrected

    @use_kwargs({
        'files': fields.List(fields.Field(), location='files', required=True)
    })
    def _process(self, files):
        create_paper_revision(self.paper, session.user, files)
        return '', 204
def _parse_review_args(event, review_type):
    """Parse review form data from the request.

    Builds a webargs schema dynamically from the event's review
    questions for *review_type* and returns two dicts:
    ``(questions_data, review_data)`` where question answers are keyed
    ``question_<id>``.  (Python 2 module: uses dict.iteritems.)
    """
    args_schema = {
        'proposed_action': EnumField(PaperAction, required=True),
        'comment': fields.String(missing='')
    }
    for question in event.cfp.get_questions_for_review_type(review_type):
        attrs = {}
        if question.is_required:
            attrs['required'] = True
        else:
            attrs['missing'] = None
        # Map each question type to the matching marshmallow field.
        if question.field_type == 'rating':
            field_cls = fields.Integer
        elif question.field_type == 'text':
            validators = []
            if question.field_data['max_length']:
                validators.append(validate.Length(max=question.field_data['max_length']))
            if question.field_data['max_words']:
                validators.append(max_words(question.field_data['max_words']))
            attrs['validate'] = validators
            field_cls = fields.String
        elif question.field_type == 'bool':
            field_cls = fields.Bool
        else:
            raise Exception('Invalid question field type: {}'.format(question.field_type))
        args_schema['question_{}'.format(question.id)] = field_cls(**attrs)
    data = parser.parse(args_schema)
    # Split the parsed payload into question answers vs. review fields.
    questions_data = {k: v for k, v in data.iteritems() if k.startswith('question_')}
    review_data = {k: v for k, v in data.iteritems() if not k.startswith('question_')}
    return questions_data, review_data
class RHCreateReview(RHPaperBase):
    """Submit a new review of a given type for a paper."""

    normalize_url_spec = {
        'locators': {
            lambda self: self.paper,
            lambda self: self.type
        }
    }

    def _check_access(self):
        RHPaperBase._check_access(self)
        if not is_type_reviewing_possible(self.event.cfp, self.type.instance):
            raise Forbidden(_('Reviewing is currently not possible'))

    def _check_paper_protection(self):
        # One review per user and review group on the latest revision.
        if self.paper.last_revision.get_reviews(user=session.user, group=self.type.instance):
            return False
        return self.paper.can_review(session.user, check_state=True)

    def _process_args(self):
        RHPaperBase._process_args(self)
        # Wrap the enum member so it can act as a URL locator.
        self.type = PaperTypeProxy(PaperReviewType[request.view_args['review_type']])

    def _process(self):
        questions_data, review_data = _parse_review_args(self.event, self.type)
        create_review(self.paper, self.type, session.user, review_data, questions_data)
        return '', 204
class RHUpdateReview(RHPaperBase):
    """Update an existing review of a paper."""

    normalize_url_spec = {
        'locators': {
            lambda self: self.review
        }
    }

    def _check_paper_protection(self):
        return self.review.can_edit(session.user, check_state=True)

    def _check_access(self):
        RHPaperBase._check_access(self)
        if not is_type_reviewing_possible(self.event.cfp, self.review.type):
            raise Forbidden(_('Reviewing is currently not possible'))

    def _process_args(self):
        RHPaperBase._process_args(self)
        self.review = (PaperReview.query
                       .filter(PaperReview.id == request.view_args['review_id'])
                       .first_or_404())

    def _process(self):
        questions_data, review_data = _parse_review_args(self.event, self.review.type)
        update_review(self.review, review_data, questions_data)
        return '', 204
|
mic4ael/indico
|
indico/modules/events/papers/controllers/api.py
|
Python
|
mit
| 7,841
|
#! /usr/bin/python
"""
Query the Home Assistant API for available entities.
Output is printed to stdout.
"""
import sys
import getpass
import argparse
try:
    # Python 2: urlopen lives in urllib2.
    from urllib2 import urlopen
    PYTHON = 2
except ImportError:
    # Python 3: urlopen moved to urllib.request.
    from urllib.request import urlopen
    PYTHON = 3
import json
def main(password, askpass, attrs, address, port):
    """Fetch Home Assistant API JSON page and post process.

    Prints a table of entity_ids plus the requested attribute columns.
    """
    # Ask for password interactively when requested.
    if askpass:
        password = getpass.getpass('Home Assistant API Password: ')
    # Fetch API result
    url = mk_url(address, port, password)
    response = urlopen(url).read()
    if PYTHON == 3:
        # urlopen returns bytes on Python 3; decode before json.loads.
        response = response.decode('utf-8')
    data = json.loads(response)
    # Parse data: one aligned list per column ('entity_id' + each attr).
    output = {'entity_id': []}
    output.update([(attr, []) for attr in attrs])
    for item in data:
        output['entity_id'].append(item['entity_id'])
        for attr in attrs:
            # Missing attributes become '' so the columns stay aligned.
            output[attr].append(item['attributes'].get(attr, ''))
    # Output data
    print_table(output, ['entity_id'] + attrs)
def print_table(data, columns):
    """Print *data* (dict of column -> list of values) as an aligned table.

    Column order follows *columns*; each column is padded to the width
    of its widest cell (or its header) plus four spaces.
    """
    # Width of each column: longest value or the header name itself.
    widths = {}
    for name, values in data.items():
        widths[name] = max([len(str(v)) for v in values] + [len(name)])
    out = sys.stdout
    # Header row with upper-cased column names.
    for name in columns:
        out.write(name.upper() + ' ' * (widths[name] - len(name) + 4))
    out.write('\n')
    # One output line per row of the first column.
    for row in range(len(data[columns[0]])):
        for name in columns:
            cell = str(data[name][row])
            out.write(cell + ' ' * (widths[name] - len(cell) + 4))
        out.write("\n")
def mk_url(address, port, password):
    """Build the /api/states URL, appending api_password when one is given.

    *address* may or may not already carry an ``http://`` prefix; *port*
    is a string. The password (if any) is placed in the query string.
    """
    base = address if address.startswith('http://') else 'http://' + address
    url = base + ':' + port + '/api/states?'
    if password is not None:
        url += 'api_password=' + password
    return url
if __name__ == "__main__":
    # NOTE: removed an unused `all_options` dict that duplicated the argparse
    # defaults below but was never read.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('attrs', metavar='ATTRIBUTE', type=str, nargs='*',
                        help='an attribute to read from the state')
    parser.add_argument('--password', dest='password', default=None,
                        type=str, help='API password for the BM server')
    parser.add_argument('--ask-password', dest='askpass', default=False,
                        action='store_const', const=True,
                        help='prompt for BM API password')
    parser.add_argument('--addr', dest='address',
                        default='localhost', type=str,
                        help='address of the BM server')
    parser.add_argument('--port', dest='port', default='8123',
                        type=str, help='port that BM is hosting on')
    args = parser.parse_args()
    main(args.password, args.askpass, args.attrs, args.address, args.port)
|
bdfoster/blumate
|
script/get_entities.py
|
Python
|
mit
| 3,135
|
import multiprocessing as mp
import numpy as np
import PIL
# represents an environment that runs in a separate process
# it can be controlled via its message queue
class LearnerProcess(mp.Process):
    """Daemon process that owns one environment instance.

    The parent controls it by pushing string commands ("new_episode",
    "update", "generate_frame", "stop") onto ``queue`` and synchronises
    with it through ``is_ready_barrier``.  States and rewards are
    exchanged through ``shared_memory``, indexed by this learner's
    ``index``.
    """

    def __init__(self, index, is_ready_barrier, queue, environment, shared_memory):
        super(LearnerProcess, self).__init__()
        # signals that this process has finished some computation (e.g. apply action)
        self.is_ready_barrier = is_ready_barrier
        self.queue = queue
        self.environment = environment
        self.shared_memory = shared_memory
        self.index = index
        self.name = "Learner Process {}".format(index)
        # daemon=True: the child dies with the parent instead of lingering
        self.daemon = True

    def run(self):
        """Message loop: block on the queue and execute one command at a time."""
        # create the "numpy view" on the shared memory
        # (done here so it happens in the child process, after the fork)
        self.shared_memory.setup_np_wrappers()
        self.environment.start()
        # just here for recording movies, so ignore it in other actions
        frame_counter = 0
        # main message loop
        while True:
            msg = self.queue.get()
            if msg == "new_episode":
                self.environment.new_episode()
                self.environment.step()
                # copy the current state to the shared memory
                self.shared_memory.states[self.index] = self.environment.get_current_state()
                self.is_ready_barrier.wait()
            elif msg == "update":
                self.apply_next_action()
                # get current state and reward and copy both to shared memory
                new_state = self.environment.get_current_state()
                reward = self.environment.progress_delta * 10.0
                self.shared_memory.rewards[self.index] = reward
                self.shared_memory.states[self.index] = new_state
                # signal that this process is done updating
                self.is_ready_barrier.wait()
            elif msg == "generate_frame":
                # same as "update", but additionally dumps every 4th frame
                # to disk (image is flipped because the emulator's
                # framebuffer is bottom-up)
                self.apply_next_action()
                if frame_counter % 4 == 0:
                    frame = self.environment.n64.get_frame()
                    frame = frame.transpose(PIL.Image.FLIP_TOP_BOTTOM)
                    frame.save("frames/learner_{}_{:05d}.png".format(self.index, frame_counter))
                new_state = self.environment.get_current_state()
                reward = self.environment.progress_delta * 10.0
                self.shared_memory.rewards[self.index] = reward
                self.shared_memory.states[self.index] = new_state
                frame_counter += 1
                self.is_ready_barrier.wait()
            elif msg == "stop":
                break

    def apply_next_action(self, frame_skip=1):
        """Apply the action whose index the parent wrote into shared memory.

        :param frame_skip: passed straight through to the environment
            (presumably how many frames to repeat the action for — TODO confirm).
        """
        action_index = self.shared_memory.action_indices[self.index]
        self.environment.apply_action(action_index, frame_skip)
|
ferdkuh/rlkart
|
src/myrl/learner_process.py
|
Python
|
gpl-3.0
| 2,344
|
from draw_maze import parse_grid
from moves import move
from moves import LEFT, RIGHT, UP, DOWN
import pytest
# 7x7 fixture level: outer walls (#), the player (*) in the centre,
# surrounded by one crate (o) on each side.
LEVEL = """#######
#.....#
#..o..#
#.o*o.#
#..o..#
#.....#
#######"""
@pytest.mark.parametrize('direction, pos, tile', [
    (LEFT, (3, 2), '*'),
    (LEFT, (3, 1), 'o'),
    (RIGHT, (3, 4), '*'),
    (RIGHT, (3, 5), 'o'),
    (UP, (2, 3), '*'),
    (UP, (1, 3), 'o'),
    (DOWN, (4, 3), '*'),
    (DOWN, (5, 3), 'o'),
    (DOWN, (3, 3), ' '),
])
def test_move_crate(direction, pos, tile):
    """Move a crate and check a given tile"""
    # After one push the player ('*') occupies the crate's old cell, the
    # crate ('o') sits one cell further on, and the origin cell is empty.
    maze = parse_grid(LEVEL)
    move(maze, direction)
    assert maze[pos[0]][pos[1]] == tile
def test_push_crate_to_wall():
    """A crate blocked by a wall must not move, and neither does the player."""
    maze = parse_grid("*o#")
    move(maze, RIGHT)
    assert maze[0] == ['*', 'o', '#']
def test_push_crate_to_crate():
    """A crate backed by another crate must not move."""
    maze = parse_grid("*oo")
    move(maze, RIGHT)
    assert maze == [['*', 'o', 'o']]
def test_push_crate_to_exit():
    """Pushing a crate onto the exit tile ('x') is expected to raise."""
    maze = parse_grid("*ox")
    with pytest.raises(NotImplementedError):
        move(maze, RIGHT)
@pytest.fixture
def bf_crate():
    """A single crate that can be pushed back and forth"""
    # Two rows so the player can walk around the crate; crate starts at [0][2].
    maze = parse_grid(""".*o..\n.....""")
    return maze
def test_move_left_right(bf_crate):
    """Push the crate right, walk around it, push it back: it ends where it began."""
    for d in [DOWN, RIGHT, RIGHT, UP, LEFT, DOWN, LEFT, LEFT, UP, RIGHT]:
        move(bf_crate, d)
    assert bf_crate[0][2] == 'o'
def test_move_right_left(bf_crate):
    """Push the crate one step right, then push it back to its start cell."""
    for d in [RIGHT, DOWN, RIGHT, RIGHT, UP, LEFT]:
        move(bf_crate, d)
    assert bf_crate[0][2] == 'o'
def test_move_lrrl(bf_crate):
    """A longer back-and-forth push sequence still returns the crate home."""
    for d in [DOWN, RIGHT, RIGHT, UP, LEFT, DOWN, LEFT, LEFT, UP,
              RIGHT, RIGHT, DOWN, RIGHT, RIGHT, UP, LEFT]:
        move(bf_crate, d)
    assert bf_crate[0][2] == 'o'
# Minimal room with the player at (2, 1) and one crate to the right.
SMALL_MAZE = """
#####
#...#
#*o.#
#...#
#####"""

# Move sequences that should all leave the player at (2, 3); the last one
# pushes the crate ahead of the player and is expected to fail.
PATHS = [
    (UP, RIGHT, RIGHT, DOWN),
    (UP, RIGHT, DOWN, RIGHT),
    (DOWN, RIGHT, UP, RIGHT),
    # FIX: `pytest.mark.xfail(args)` inside a parametrize list was
    # deprecated and removed in pytest 4.0; `pytest.param` with `marks=`
    # is the supported spelling.
    pytest.param((RIGHT, RIGHT), marks=pytest.mark.xfail),
]
@pytest.mark.parametrize('path', PATHS)
def test_paths(path):
    """Different paths to the same spot"""
    # Every path should leave the player ('*') at row 2, column 3.
    maze = parse_grid(SMALL_MAZE)
    for direction in path:
        move(maze, direction)
    assert maze[2][3] == '*'
|
krother/maze_run
|
09_test_data/test_crate.py
|
Python
|
mit
| 2,136
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Simple IR baselines.
We plan to implement the following variants:
Given an input message, either:
(i) find the most similar message in the (training) dataset and output the
response from that exchange; or
(ii) find the most similar response to the input directly.
(iii) if label_candidates are provided, simply ranks them according to their
similarity to the input message.
Currently only (iii) is used.
Additionally, TFIDF is either used (requires building a dictionary) or not,
depending on whether you train on the train set first, or not.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
import math
from collections.abc import Sequence
import heapq
import json
import parlai.utils.torch as torch_utils
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from parlai.utils.io import PathManager
class MaxPriorityQueue(Sequence):
    """
    Fixed-size priority queue keeping the max_size largest items.

    Internally a min-heap of ``(priority, item)`` pairs: the smallest
    retained priority sits at the heap root, so a new high-priority item
    can displace it in O(log n).  Indexing returns items sorted by
    ascending priority.
    """

    def __init__(self, max_size):
        """
        Initialize priority queue.

        :param max_size: maximum capacity of priority queue.
        """
        self.capacity = max_size
        self.lst = []

    def add(self, item, priority):
        """
        Add element to the queue, with a separate priority if desired.

        When the queue is full, the new element is kept only if it beats
        the lowest-priority element currently stored.

        :param item: item to add to queue.
        :param priority: priority to use for item.
        """
        entry = (priority, item)
        if len(self.lst) >= self.capacity:
            # Full: displace the current minimum only if we beat it.
            if priority > self.lst[0][0]:
                heapq.heapreplace(self.lst, entry)
        else:
            heapq.heappush(self.lst, entry)

    def __getitem__(self, key):
        """
        Get item at specified index (ascending priority order).

        :param key: integer index into priority queue.
        :returns: item stored at the specified index.
        """
        return sorted(self.lst)[key][1]

    def __len__(self):
        """
        Return length of priority queue.
        """
        return len(self.lst)
# Common English function words and punctuation tokens ignored by the IR
# matcher.  FIX: the original literal listed "''" twice; the duplicate is
# removed (a set collapses it anyway, but the source was misleading).
stopwords = {
    'i',
    'a',
    'an',
    'are',
    'about',
    'as',
    'at',
    'be',
    'by',
    'for',
    'from',
    'how',
    'in',
    'is',
    'it',
    'of',
    'on',
    'or',
    'that',
    'the',
    'this',
    'to',
    'was',
    'what',
    'when',
    'where',
    '--',
    '?',
    '.',
    "''",
    "``",
    ',',
    'do',
    'see',
    'want',
    'people',
    'and',
    "n't",
    "me",
    'too',
    'own',
    'their',
    '*',
    "'s",
    'not',
    'than',
    'other',
    'you',
    'your',
    'know',
    'just',
    'but',
    'does',
    'really',
    'have',
    'into',
    'more',
    'also',
    'has',
    'any',
    'why',
    'will',
}
def score_match(query_rep, text, length_penalty, dictionary=None):
    """
    Calculate the score match between the query representation and the text.

    Each distinct token of *text* found in ``query_rep['words']`` adds that
    token's weight once; the sum is then length-normalised.

    :param query_rep:
        base query representation to match text against (dict with a
        'words' token->weight map and a 'norm' float).
    :param text:
        string to compare against query_rep for matching tokens
    :param length_penalty:
        scores are divided by the norm taken to this power
    :param dictionary:
        dictionary used to tokenize text.  NOTE: despite the ``None``
        default, ``tokenize`` is called unconditionally for non-empty
        text, so a dictionary is effectively required.
    :returns:
        float score of match
    """
    if text == "":
        return 0
    # FIX: tokenize directly — the original wrapped this in a redundant
    # identity list comprehension.
    words = dictionary.tokenize(text.lower())
    score = 0
    rw = query_rep['words']
    # FIX: a set expresses "seen once" more clearly than a dict of Trues.
    seen = set()
    for w in words:
        if w in rw and w not in seen:
            score += rw[w]
            seen.add(w)
    # Normalise by sqrt(#matched tokens) scaled by the query's own norm,
    # softened by length_penalty; skip when the norm would inflate the score.
    norm = math.sqrt(len(seen))
    norm = math.pow(norm * query_rep['norm'], length_penalty)
    if norm > 1:
        score /= norm
    return score
def rank_candidates(query_rep, cands, length_penalty, dictionary):
    """
    Rank candidates given representation of query.

    :param query_rep:
        base query representation to match text against.
    :param cands:
        strings to compare against query_rep for matching tokens
    :param length_penalty:
        scores are divided by the norm taken to this power
    :param dictionary:
        dictionary to use to tokenize text
    :returns:
        up to 100 candidate strings in descending score order
    """
    # FIX: removed the dead `if True: ... else: ...` guard and the
    # unreachable manual-sort branch it hid.  The bounded max-heap keeps
    # only the 100 best-scoring candidates.
    mpq = MaxPriorityQueue(100)
    for cand in cands:
        mpq.add(cand, score_match(query_rep, cand, length_penalty, dictionary))
    # MaxPriorityQueue iterates in ascending priority; reverse for best-first.
    return list(reversed(mpq))
class IrBaselineAgent(Agent):
    """
    Information Retrieval baseline.

    Ranks the provided label candidates (or a fixed candidate file) by
    token overlap with the recent dialogue history, optionally weighted
    by inverse token frequency learned while observing the train set.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Add command line args specific to this agent.
        """
        parser = parser.add_argument_group('IrBaseline Arguments')
        parser.add_argument(
            '-lp',
            '--length_penalty',
            type=float,
            default=0.5,
            help='length penalty for responses',
        )
        parser.add_argument(
            '-hsz',
            '--history_size',
            type=int,
            default=1,
            help='number of utterances from the dialogue history to take use '
            'as the query',
        )
        parser.add_argument(
            '--label_candidates_file',
            type=str,
            default=None,
            help='file of candidate responses to choose from',
        )
        cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)
        return parser

    @classmethod
    def dictionary_class(cls):
        """Return the dictionary (tokenizer) class this agent uses."""
        return DictionaryAgent

    def __init__(self, opt, shared=None):
        """
        Initialize agent.
        """
        super().__init__(opt)
        self.id = 'IRBaselineAgent'
        self.length_penalty = float(opt['length_penalty'])
        self.dictionary = DictionaryAgent(opt)
        self.opt = opt
        self.history = []
        self.episodeDone = True
        if opt.get('label_candidates_file'):
            # FIX: the original opened this file without ever closing it;
            # a context manager releases the handle as soon as the
            # candidate list is read.
            with open(opt.get('label_candidates_file')) as f:
                self.label_candidates = f.read().split('\n')

    def reset(self):
        """
        Reset agent properties.
        """
        self.observation = None
        self.history = []
        self.episodeDone = True

    def observe(self, obs):
        """
        Store and remember incoming observation message dict.
        """
        self.observation = obs
        # Let the dictionary see the text too so it can accumulate frequencies.
        self.dictionary.observe(obs)
        if self.episodeDone:
            self.history = []
        if 'text' in obs:
            self.history.append(obs.get('text', ''))
        self.episodeDone = obs.get('episode_done', False)
        return obs

    def act(self):
        """
        Generate a response to the previously seen observation(s).
        """
        if self.opt.get('datatype', '').startswith('train'):
            self.dictionary.act()
        obs = self.observation
        reply = {}
        reply['id'] = self.getID()
        # Rank candidates
        cands = None
        if obs.get('label_candidates', False) and len(obs['label_candidates']) > 0:
            cands = obs['label_candidates']
        if hasattr(self, 'label_candidates'):
            # override label candidates with candidate file if set
            cands = self.label_candidates
        if cands:
            # Query is the last `history_size` utterances joined together.
            hist_sz = self.opt.get('history_size', 1)
            left_idx = max(0, len(self.history) - hist_sz)
            text = ' '.join(self.history[left_idx : len(self.history)])
            rep = self.build_query_representation(text)
            reply['text_candidates'] = rank_candidates(
                rep, cands, self.length_penalty, self.dictionary
            )
            reply['text'] = reply['text_candidates'][0]
        return reply

    def save(self, path=None):
        """
        Save dictionary tokenizer if available.
        """
        path = self.opt.get('model_file', None) if path is None else path
        if path:
            self.dictionary.save(path + '.dict')
            data = {}
            data['opt'] = self.opt
            torch_utils.atomic_save(data, path)
            with PathManager.open(path + '.opt', 'w') as handle:
                json.dump(self.opt, handle)

    def build_query_representation(self, query):
        """
        Build representation of query, e.g. words or n-grams.

        :param query: string to represent.

        :returns: dictionary containing 'words' dictionary (token => frequency)
                  and 'norm' float (square root of the number of tokens)
        """
        rep = {}
        rep['words'] = {}
        words = [w for w in self.dictionary.tokenize(query.lower())]
        rw = rep['words']
        used = {}
        for w in words:
            assert len(self.dictionary.freq) > 0
            # Rarer tokens get higher weight (inverse log frequency).
            rw[w] = 1.0 / (1.0 + math.log(1.0 + self.dictionary.freq[w]))
            used[w] = True
        rep['norm'] = math.sqrt(len(words))
        return rep
|
facebookresearch/ParlAI
|
parlai/agents/ir_baseline/ir_baseline.py
|
Python
|
mit
| 9,673
|
# Copyright 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test feature controller in scripts."""
__metaclass__ = type
from lp.services.features import (
get_relevant_feature_controller,
install_feature_controller,
)
from lp.services.features.flags import (
FeatureController,
NullFeatureController,
)
from lp.services.scripts.base import LaunchpadScript
from lp.testing import TestCase
from lp.testing.fakemethod import FakeMethod
from lp.testing.layers import DatabaseFunctionalLayer
class FakeScript(LaunchpadScript):
    """A dummy script that only records which feature controller is active."""

    # Sentinel default so tests can tell whether main() ever ran.
    observed_feature_controller = object()

    def __init__(self, name):
        super(FakeScript, self).__init__(name=name, test_args=[])

    def main(self):
        # Capture whichever controller run() installed for this script.
        self.observed_feature_controller = get_relevant_feature_controller()

    # Shortcut some underpinnings of LaunchpadScript.run that we can't
    # afford to have happen in tests.
    _init_zca = FakeMethod()
    _init_db = FakeMethod()
    record_activity = FakeMethod()
class TestScriptFeatureController(TestCase):
    """run() should install a FeatureController and restore the previous one."""

    layer = DatabaseFunctionalLayer

    def setUp(self):
        super(TestScriptFeatureController, self).setUp()
        # Remember the ambient controller so tearDown can restore it even
        # if a test leaves a different one installed.
        self.original_controller = get_relevant_feature_controller()

    def tearDown(self):
        install_feature_controller(self.original_controller)
        super(TestScriptFeatureController, self).tearDown()

    def test_script_installs_script_feature_controller(self):
        # Running a script must install a real FeatureController distinct
        # from whatever was active before the run.
        script = FakeScript(name="bongo")
        script.run()
        self.assertNotEqual(
            self.original_controller, script.observed_feature_controller)
        self.assertNotEqual(None, script.observed_feature_controller)
        self.assertIsInstance(
            script.observed_feature_controller, FeatureController)

    def test_script_restores_feature_controller(self):
        # After the run, the controller that was installed beforehand must
        # be active again.
        previous_controller = NullFeatureController()
        install_feature_controller(previous_controller)
        FakeScript(name="mongo").run()
        self.assertEqual(
            previous_controller, get_relevant_feature_controller())
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/services/scripts/tests/test_feature_controller.py
|
Python
|
agpl-3.0
| 2,226
|
#!/usr/bin/python
import os
import optparse
import test_lib
import test_util
import test_state
#import zstackwoodpecker.test_lib as test_lib
#import zstackwoodpecker.test_util as test_util
#import zstackwoodpecker.test_state as test_state
# Markers emitted by robot test-case logs; only lines starting with these
# are replayed.
ACTION = 'Robot Action:'
ACTION_RESULT = 'Robot Action Result:'
# Resource-kind labels used as keys when parsing action/result lines.
VM = 'VM'
VOLUME = 'Volume'
SG = 'SG'
VIP = 'VIP'
EIP = 'EIP'
NIC = 'Nic'
ROOT_VOLUME = 'Root Volume'
IMAGE = 'Image'
RV_IMAGE = 'RootVolume Image'
DV_IMAGE = 'DataVolume Image'
RULE = 'Rule'
SP = 'SP'
PF = 'PF'
# Shorthand for the catalogue of replayable action names.
TA = test_state.TestAction
class ObjMap(object):
    """Registry mapping UUIDs from the robot log to the live objects created on replay."""

    def __init__(self):
        # Per-kind maps are initialised but lookups go through all_map only.
        self.obj_map = {
            VM: {},
            VOLUME: {},
            SG: {},
            VIP: {},
            EIP: {},
            NIC: {},
            ROOT_VOLUME: {},
            IMAGE: {},
            RV_IMAGE: {},
            DV_IMAGE: {},
            SP: {},
            PF: {},
            RULE: {}}
        self.all_map = {}

    def add_map(self, uuid, obj):
        """Register *obj* under the uuid recorded in the robot log."""
        self.all_map[uuid] = obj

    def get_map(self, uuid):
        """Return the object registered for *uuid*, or None if unknown."""
        # FIX: dict.has_key() is deprecated (removed in Python 3); `in`
        # works in both Python 2 and 3.
        if uuid in self.all_map:
            return self.all_map[uuid]
class ActionParser(object):
    """Parse one robot-log line into an action or action-result record.

    An action line looks like ``Robot Action: <name>; on VM: <uuid>; ...``
    and a result line like ``Robot Action Result: <name>; new VM: <uuid>``.
    Parsed values are stored (lower-cased) in ``self.action`` or
    ``self.action_result`` keyed by the module-level resource labels.
    """

    def __init__(self, action_line):
        self.action_line = action_line
        # Parsed fields of an ACTION line; None until seen.
        self.action = {ACTION: None,
                       VM: None,
                       VOLUME: None,
                       SG: None,
                       VIP: None,
                       EIP: None,
                       NIC: None,
                       ROOT_VOLUME: None,
                       IMAGE: None,
                       RV_IMAGE: None,
                       DV_IMAGE: None,
                       SP: None,
                       PF: None,
                       RULE: None}
        # Parsed fields of an ACTION_RESULT line.
        self.action_result = {ACTION_RESULT: None,
                              VM: None,
                              VOLUME: None,
                              SG: None,
                              VIP: None,
                              EIP: None,
                              NIC: None,
                              ROOT_VOLUME: None,
                              IMAGE: None,
                              PF: None,
                              RV_IMAGE: None,
                              DV_IMAGE: None,
                              SP: None,
                              RULE: None}
        # UUID of the object a result line reports as newly created, if any.
        self.new_obj = None
        self.parse()

    def parse(self):
        """Split the line on ';' and match each field against the known keys."""
        fields = self.action_line.split(';')
        for field in fields:
            field = field.strip().lower()
            if self.action_line.startswith(ACTION):
                # Action fields use the 'on <KIND>:' prefix (the action
                # name itself uses the bare ACTION marker).
                for key in self.action.keys():
                    if key == ACTION or key == ACTION_RESULT:
                        new_key = key
                    else:
                        new_key = 'on %s:' % key
                    new_key = new_key.lower()
                    if field.startswith(new_key):
                        self.action[key] = field.split(new_key)[1].strip()
                        break
                else:
                    raise test_util.TestError('Does not recognize field in action: %s' % field)
            elif self.action_line.startswith(ACTION_RESULT):
                # Result fields use 'new <KIND>:' (a created object, also
                # remembered in self.new_obj) or 'on <KIND>:'.
                # NOTE(review): new_key2 is only assigned in the else
                # branch but lowered unconditionally below — this would
                # NameError if the ACTION_RESULT key were iterated first;
                # presumably dict ordering has hidden this so far. Confirm.
                for key in self.action_result.keys():
                    if key == ACTION or key == ACTION_RESULT:
                        new_key = key
                    else:
                        new_key = 'new %s:' % key
                        new_key2 = 'on %s:' % key
                    new_key = new_key.lower()
                    new_key2 = new_key2.lower()
                    if field.startswith(new_key):
                        self.action_result[key] = field.split(new_key)[1].strip()
                        if key != ACTION_RESULT:
                            self.new_obj = self.action_result[key]
                        break
                    if field.startswith(new_key2):
                        self.action_result[key] = field.split(new_key2)[1].strip()
                        break
                else:
                    raise test_util.TestError('Does not recognize field in action result: %s' % field)

    def get_action(self):
        """Return the parsed action-field dict."""
        return self.action

    def get_action_result(self):
        """Return the parsed action-result-field dict."""
        return self.action_result

    def get_new_obj(self):
        """Return the uuid of the newly created object from a result line, if any."""
        return self.new_obj
class Robot(object):
    '''
    Replays a recorded robot test run action by action.

    robot action file could be get by:
    grep 'Robot Action' robot_test_case_log_file > robot_action_file

    Each ACTION line is executed against a fresh environment via test_lib;
    each ACTION_RESULT line binds the uuid it reports to the object just
    created, so later actions can reference it through ObjMap.
    '''

    def __init__(self, options):
        self.robot_action_file = options.robot_action_file
        if not os.path.exists(self.robot_action_file):
            raise test_util.TestError('Robot Action File: %s does not exit' \
                    % self.robot_action_file)
        # Pre-filter the log down to the 'Robot Action' lines we replay.
        # NOTE(review): fixed temp path — concurrent replays would clobber
        # each other; confirm single-instance usage.
        new_robot_action_file = '/tmp/zstack_robot_action_for_replay.log'
        os.system("grep 'Robot Action' %s > %s" % (self.robot_action_file, new_robot_action_file))
        self.robot_action_file = new_robot_action_file
        self.test_obj_dict = test_state.TestStateDict()
        self.obj_map = ObjMap()
        # Object created by the most recent action, waiting for its
        # ACTION_RESULT line to assign it a uuid.
        self.latest_obj = None
        self.robot_test_obj = test_util.Robot_Test_Object()
        #self.parse_action_log()

    def run(self):
        """Replay every recognised action line in order."""
        #TODO: judge action file size
        # NOTE(review): file handle is never closed explicitly; relies on
        # refcounting.
        action_lines = open(self.robot_action_file, 'r').readlines()
        line_num = 0
        for action_line in action_lines:
            line_num += 1
            action_line = action_line.strip()
            # 'idel' (sic) marks no-op wait actions in the log; skip them.
            if 'idel' in action_line:
                continue
            if action_line.startswith(ACTION):
                action_obj = ActionParser(action_line)
                action = action_obj.get_action()[ACTION]
                print "action: %s in line: %d" % (action, line_num)
                if action == TA.create_vm:
                    vm = test_lib.lib_create_vm()
                    self.latest_obj = vm
                    self.test_obj_dict.add_vm(vm)
                elif action == TA.stop_vm:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    vm_obj.stop()
                elif action == TA.start_vm:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    vm_obj.start()
                elif action == TA.reboot_vm:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    vm_obj.reboot()
                elif action == TA.destroy_vm:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    vm_obj.destroy()
                    self.test_obj_dict.rm_vm(vm_obj)
                elif action == TA.create_volume:
                    volume = test_lib.lib_create_volume_from_offering()
                    self.latest_obj = volume
                    self.test_obj_dict.add_volume(volume)
                elif action == TA.create_image_from_volume:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    vm_root_vol_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
                    new_image = test_lib.lib_create_template_from_volume(vm_root_vol_uuid)
                    self.test_obj_dict.add_image(new_image)
                    self.latest_obj = new_image
                elif action == TA.create_data_vol_template_from_volume:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    volume_obj = self.obj_map.get_map(robot_volume_uuid)
                    # No data volume recorded: fall back to the VM's root volume.
                    if not volume_obj:
                        volume = test_lib.lib_get_root_volume(vm_obj.get_vm())
                    else:
                        volume = volume_obj.get_volume()
                    new_data_vol_temp = test_lib.lib_create_data_vol_template_from_volume(vm_obj, volume)
                    self.test_obj_dict.add_image(new_data_vol_temp)
                    self.latest_obj = new_data_vol_temp
                elif action == TA.delete_image:
                    robot_uuid = action_obj.get_action()[IMAGE]
                    obj = self.obj_map.get_map(robot_uuid)
                    obj.delete()
                    self.test_obj_dict.rm_image(obj)
                elif action == TA.attach_volume:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    vm_obj = self.obj_map.get_map(robot_vm_uuid)
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    volume_obj = self.obj_map.get_map(robot_volume_uuid)
                    volume_obj.attach(vm_obj)
                elif action == TA.detach_volume:
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    volume_obj = self.obj_map.get_map(robot_volume_uuid)
                    volume_obj.detach()
                elif action == TA.delete_volume:
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    volume_obj = self.obj_map.get_map(robot_volume_uuid)
                    volume_obj.delete()
                    self.test_obj_dict.rm_volume(volume_obj)
                elif action == TA.create_data_volume_from_image:
                    robot_image_uuid = action_obj.get_action()[IMAGE]
                    image_obj = self.obj_map.get_map(robot_image_uuid)
                    volume_obj = test_lib.lib_create_data_volume_from_image(image_obj)
                    self.latest_obj = volume_obj
                    self.test_obj_dict.add_volume(volume_obj)
                elif action == TA.create_volume_snapshot:
                    robot_vm_uuid = action_obj.get_action()[VM]
                    if robot_vm_uuid:
                        #root volume
                        vm_obj = self.obj_map.get_map(robot_vm_uuid)
                        vm_root_vol_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
                        target_volume_snapshots = self.test_obj_dict.get_volume_snapshot(vm_root_vol_uuid)
                    else:
                        robot_volume_uuid = action_obj.get_action()[VOLUME]
                        volume_obj = self.obj_map.get_map(robot_volume_uuid)
                        target_volume_snapshots = self.test_obj_dict.get_volume_snapshot(volume_obj.get_volume().uuid)
                    new_snapshot = test_lib.lib_create_volume_snapshot_from_volume(target_volume_snapshots, self.robot_test_obj, self.test_obj_dict)
                    self.latest_obj = new_snapshot
                elif action == TA.create_volume_from_snapshot:
                    robot_snapshot_uuid = action_obj.get_action()[SP]
                    snapshot_obj = self.obj_map.get_map(robot_snapshot_uuid)
                    new_volume_obj = snapshot_obj.create_data_volume()
                    self.test_obj_dict.add_volume(new_volume_obj)
                    self.latest_obj = new_volume_obj
                elif action == TA.use_volume_snapshot:
                    robot_snapshot_uuid = action_obj.get_action()[SP]
                    snapshot_obj = self.obj_map.get_map(robot_snapshot_uuid)
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    target_volume_snapshots = self.test_obj_dict.get_volume_snapshot(robot_volume_uuid)
                    target_volume_snapshots.use_snapshot(snapshot_obj)
                elif action == TA.create_image_from_snapshot:
                    robot_snapshot_uuid = action_obj.get_action()[SP]
                    snapshot_obj = self.obj_map.get_map(robot_snapshot_uuid)
                    new_image_obj = test_lib.lib_create_image_from_snapshot(snapshot_obj)
                    self.test_obj_dict.add_image(new_image_obj)
                    self.latest_obj = new_image_obj
                elif action == TA.delete_volume_snapshot:
                    robot_snapshot_uuid = action_obj.get_action()[SP]
                    snapshot_obj = self.obj_map.get_map(robot_snapshot_uuid)
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    target_volume_snapshots = self.test_obj_dict.get_volume_snapshot(robot_volume_uuid)
                    target_volume_snapshots.delete_snapshot(snapshot_obj)
                elif action == TA.backup_volume_snapshot:
                    robot_snapshot_uuid = action_obj.get_action()[SP]
                    snapshot_obj = self.obj_map.get_map(robot_snapshot_uuid)
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    target_volume_snapshots = self.test_obj_dict.get_volume_snapshot(robot_volume_uuid)
                    target_volume_snapshots.backup_snapshot(snapshot_obj)
                elif action == TA.delete_backup_volume_snapshot:
                    robot_snapshot_uuid = action_obj.get_action()[SP]
                    snapshot_obj = self.obj_map.get_map(robot_snapshot_uuid)
                    robot_volume_uuid = action_obj.get_action()[VOLUME]
                    target_volume_snapshots = self.test_obj_dict.get_volume_snapshot(robot_volume_uuid)
                    target_volume_snapshots.delete_backuped_snapshot(snapshot_obj)
                else:
                    print "skip action: %s in line: %d" % (action, line_num)
            elif action_line.startswith(ACTION_RESULT):
                # Bind the uuid reported by the result line to the object
                # created by the preceding action.
                action = action_line.split(';')[0].split(':')[1]
                action_obj = ActionParser(action_line)
                self.obj_map.add_map(action_obj.get_new_obj(), self.latest_obj)
                self.latest_obj = None
def main():
    """Command-line entry point: parse options and replay the action log."""
    option_parser = optparse.OptionParser()
    option_parser.add_option(
        "-f",
        dest="robot_action_file",
        action='store',
        help="Robot Action File Log")
    (options, args) = option_parser.parse_args()
    Robot(options).run()


if __name__ == '__main__':
    main()
|
SoftwareKing/zstack-woodpecker
|
zstackwoodpecker/zstackwoodpecker/robot_replay.py
|
Python
|
apache-2.0
| 14,001
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shuup.core.models import (
ProductCrossSell, ProductCrossSellType, StockBehavior
)
from shuup.testing.factories import (
create_product, get_default_shop, get_default_supplier
)
from shuup.xtheme.plugins.products import ProductCrossSellsPlugin
from shuup_tests.front.fixtures import get_jinja_context
@pytest.mark.django_db
def test_cross_sell_plugin_renders():
    """
    Test that the plugin renders a product
    """
    shop = get_default_shop()
    supplier = get_default_supplier()
    product = create_product("test-sku", shop=shop, supplier=supplier, stock_behavior=StockBehavior.UNSTOCKED)
    computed = create_product("test-computed-sku", shop=shop, supplier=supplier, stock_behavior=StockBehavior.UNSTOCKED)
    # FIX: renamed the local `type` -> `cross_sell_type` so it no longer
    # shadows the `type` builtin.
    cross_sell_type = ProductCrossSellType.COMPUTED
    ProductCrossSell.objects.create(product1=product, product2=computed, type=cross_sell_type)
    assert ProductCrossSell.objects.filter(product1=product, type=cross_sell_type).count() == 1
    # The rendered plugin output should mention the cross-sold product.
    context = get_jinja_context(product=product)
    rendered = ProductCrossSellsPlugin({"type": cross_sell_type}).render(context)
    assert computed.sku in rendered
def test_cross_sell_plugin_accepts_initial_config_as_string_or_enum():
    # The plugin should coerce a string type name to the matching enum
    # member, and accept an enum member unchanged.
    plugin = ProductCrossSellsPlugin({"type": "computed"})
    assert plugin.config["type"] == ProductCrossSellType.COMPUTED
    plugin = ProductCrossSellsPlugin({"type": ProductCrossSellType.RECOMMENDED})
    assert plugin.config["type"] == ProductCrossSellType.RECOMMENDED
|
suutari/shoop
|
shuup_tests/xtheme/test_cross_sell_plugin.py
|
Python
|
agpl-3.0
| 1,722
|
#
# Astronomy Constants (in cgs)
# @author: Mubdi Rahman
# Last Updated: January 12, 2011
#
# Speed of light [cm/s]
c = 2.99792458E10
# Planck constant [erg s]
h = 6.6260755E-27
# Gravitational constant [cm^3 g^-1 s^-2]
G = 6.67259E-8
# Electron charge [esu]
ec = 4.8032068E-10
# Electron mass [g]
me = 9.1093897E-28
# Proton mass [g]
mp = 1.6726231E-24
# Neutron mass [g]
mn = 1.6749286E-24
# Hydrogen atom mass [g]
mh = 1.6733E-24
# Boltzmann constant [erg/K]
kb = 1.380658E-16
# Stefan-Boltzmann constant [erg cm^-2 s^-1 K^-4]
sb = 5.67051E-5
# Astronomical unit [cm]
au = 1.496E13
# Parsec [cm]
pc = 3.086E18
# Light year [cm]
ly = 9.463E17
# Solar mass [g]
msun = 1.99E33
# Solar radius [cm]
rsun = 6.96E10
# Solar luminosity [erg/s]
lsun = 3.9E33
# Solar (effective) temperature [K]
tsun = 5780.0
# Earth radius [cm]
rearth = 6.378E8
# Earth mass [g]
mearth = 5.976E27
# Electron volt [erg]
ev = 1.6021772E-12

### Metric unit conversions (to cgs)

# Kilometre [cm]
km = 100000.0
# Metre [cm]
m = 100.0
# Micron/micrometre [cm]
mum = 1.0E-4
# Kilogram [g]
kg = 1.0E3
# Angstrom [cm]  (typo "Anstrom" fixed)
ang = 1.0E-8

### Time units

# Year [s]
yr = 3.1556926E7
# Day [s]
day = 86400.0

### Flux units

# Jansky [erg s^-1 cm^-2 Hz^-1]
Jy = 1E-23
|
mubdi/MubdiScripts
|
mubdiscripts/astroconsts.py
|
Python
|
bsd-3-clause
| 1,142
|
from sympy.core import (S, pi, oo, symbols, Function, Rational, Integer, Tuple,
Derivative, Eq, Ne, Le, Lt, Gt, Ge)
from sympy.integrals import Integral
from sympy.concrete import Sum
from sympy.functions import (exp, sin, cos, fresnelc, fresnels, conjugate, Max,
Min, gamma, polygamma, loggamma, erf, erfi, erfc,
erf2, expint, erfinv, erfcinv, Ei, Si, Ci, li,
Shi, Chi, uppergamma, beta, subfactorial, erf2inv,
factorial, factorial2, catalan, RisingFactorial,
FallingFactorial, harmonic, atan2, sec, acsc,
hermite, laguerre, assoc_laguerre, jacobi,
gegenbauer, chebyshevt, chebyshevu, legendre,
assoc_legendre, Li, LambertW)
from sympy import mathematica_code as mcode
# Shared symbols and an undefined function used throughout the tests below.
x, y, z, w = symbols('x,y,z,w')
f = Function('f')
def test_Integer():
    # Integers print as plain Mathematica numerals.
    assert mcode(Integer(67)) == "67"
    assert mcode(Integer(-1)) == "-1"
def test_Rational():
    # Rationals are auto-normalised (18/9 -> 2, sign moves to the
    # numerator) and print with '/'.
    assert mcode(Rational(3, 7)) == "3/7"
    assert mcode(Rational(18, 9)) == "2"
    assert mcode(Rational(3, -7)) == "-3/7"
    assert mcode(Rational(-3, -7)) == "3/7"
    assert mcode(x + Rational(3, 7)) == "x + 3/7"
    assert mcode(Rational(3, 7)*x) == "(3/7)*x"
def test_Relational():
    # Relational operators map to Mathematica's infix comparison syntax.
    assert mcode(Eq(x, y)) == "x == y"
    assert mcode(Ne(x, y)) == "x != y"
    assert mcode(Le(x, y)) == "x <= y"
    assert mcode(Lt(x, y)) == "x < y"
    assert mcode(Gt(x, y)) == "x > y"
    assert mcode(Ge(x, y)) == "x >= y"
def test_Function():
    """Function calls print with square brackets and Mathematica names."""
    assert mcode(f(x, y, z)) == "f[x, y, z]"
    assert mcode(sin(x) ** cos(x)) == "Sin[x]^Cos[x]"
    assert mcode(sec(x) * acsc(x)) == "ArcCsc[x]*Sec[x]"
    assert mcode(atan2(x, y)) == "ArcTan[x, y]"
    assert mcode(conjugate(x)) == "Conjugate[x]"
    assert mcode(Max(x, y, z)*Min(y, z)) == "Max[x, y, z]*Min[y, z]"
    # Special functions: Fresnel, gamma family, error functions,
    # exponential/trigonometric integrals, combinatorics.
    assert mcode(fresnelc(x)) == "FresnelC[x]"
    assert mcode(fresnels(x)) == "FresnelS[x]"
    assert mcode(gamma(x)) == "Gamma[x]"
    assert mcode(uppergamma(x, y)) == "Gamma[x, y]"
    assert mcode(polygamma(x, y)) == "PolyGamma[x, y]"
    assert mcode(loggamma(x)) == "LogGamma[x]"
    assert mcode(erf(x)) == "Erf[x]"
    assert mcode(erfc(x)) == "Erfc[x]"
    assert mcode(erfi(x)) == "Erfi[x]"
    assert mcode(erf2(x, y)) == "Erf[x, y]"
    assert mcode(expint(x, y)) == "ExpIntegralE[x, y]"
    assert mcode(erfcinv(x)) == "InverseErfc[x]"
    assert mcode(erfinv(x)) == "InverseErf[x]"
    assert mcode(erf2inv(x, y)) == "InverseErf[x, y]"
    assert mcode(Ei(x)) == "ExpIntegralEi[x]"
    assert mcode(Ci(x)) == "CosIntegral[x]"
    assert mcode(li(x)) == "LogIntegral[x]"
    assert mcode(Si(x)) == "SinIntegral[x]"
    assert mcode(Shi(x)) == "SinhIntegral[x]"
    assert mcode(Chi(x)) == "CoshIntegral[x]"
    assert mcode(beta(x, y)) == "Beta[x, y]"
    assert mcode(factorial(x)) == "Factorial[x]"
    assert mcode(factorial2(x)) == "Factorial2[x]"
    assert mcode(subfactorial(x)) == "Subfactorial[x]"
    assert mcode(FallingFactorial(x, y)) == "FactorialPower[x, y]"
    assert mcode(RisingFactorial(x, y)) == "Pochhammer[x, y]"
    assert mcode(catalan(x)) == "CatalanNumber[x]"
    assert mcode(harmonic(x)) == "HarmonicNumber[x]"
    assert mcode(harmonic(x, y)) == "HarmonicNumber[x, y]"
    # Li has no direct builtin: printed as the offset log integral.
    assert mcode(Li(x)) == "LogIntegral[x] - LogIntegral[2]"
    assert mcode(LambertW(x)) == "ProductLog[x]"
    assert mcode(LambertW(x, -1)) == "ProductLog[-1, x]"
    assert mcode(LambertW(x, y)) == "ProductLog[y, x]"
def test_special_polynomials():
    # Orthogonal polynomials map to their Mathematica equivalents.
    assert mcode(hermite(x, y)) == "HermiteH[x, y]"
    assert mcode(laguerre(x, y)) == "LaguerreL[x, y]"
    assert mcode(assoc_laguerre(x, y, z)) == "LaguerreL[x, y, z]"
    assert mcode(jacobi(x, y, z, w)) == "JacobiP[x, y, z, w]"
    assert mcode(gegenbauer(x, y, z)) == "GegenbauerC[x, y, z]"
    assert mcode(chebyshevt(x, y)) == "ChebyshevT[x, y]"
    assert mcode(chebyshevu(x, y)) == "ChebyshevU[x, y]"
    assert mcode(legendre(x, y)) == "LegendreP[x, y]"
    assert mcode(assoc_legendre(x, y, z)) == "LegendreP[x, y, z]"
def test_Pow():
    # Powers use '^'; nested/fractional exponents get parentheses.
    assert mcode(x**3) == "x^3"
    assert mcode(x**(y**3)) == "x^(y^3)"
    assert mcode(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \
        "(3.5*f[x])^(-x + y^x)/(x^2 + y)"
    assert mcode(x**-1.0) == 'x^(-1.0)'
    assert mcode(x**Rational(2, 3)) == 'x^(2/3)'
def test_Mul():
    # Non-commutative factors print with Mathematica's '**'
    # (NonCommutativeMultiply); commutative factors keep '*'.
    A, B, C, D = symbols('A B C D', commutative=False)
    assert mcode(x*y*z) == "x*y*z"
    assert mcode(x*y*A) == "x*y*A"
    assert mcode(x*y*A*B) == "x*y*A**B"
    assert mcode(x*y*A*B*C) == "x*y*A**B**C"
    assert mcode(x*A*B*(C + D)*A*y) == "x*y*A**B**(C + D)**A"
def test_constants():
    """Singleton constants map onto Mathematica's built-in symbols."""
    builtin = (
        (S.Zero, "0"),
        (S.One, "1"),
        (S.NegativeOne, "-1"),
        (S.Half, "1/2"),
        (S.ImaginaryUnit, "I"),
        (oo, "Infinity"),
        (S.NegativeInfinity, "-Infinity"),
        (S.ComplexInfinity, "ComplexInfinity"),
        (S.NaN, "Indeterminate"),
        (S.Exp1, "E"),
        (pi, "Pi"),
        (S.GoldenRatio, "GoldenRatio"),
        (S.EulerGamma, "EulerGamma"),
        (S.Catalan, "Catalan"),
    )
    for const, mathematica in builtin:
        assert mcode(const) == mathematica
    # TribonacciConstant has no Mathematica built-in, so it prints as its
    # radical closed form.
    tribonacci = ("(1/3 + (1/3)*(19 - 3*33^(1/2))^(1/3) + "
                  "(1/3)*(3*33^(1/2) + 19)^(1/3))")
    assert mcode(S.TribonacciConstant) == tribonacci
    assert mcode(2*S.TribonacciConstant) == "2*" + tribonacci
def test_containers():
    """Python lists/tuples (and sympy Tuple) print as Mathematica lists."""
    nested = [1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]
    assert mcode(nested) == "{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}"
    assert mcode((1, 2, (3, 4))) == "{1, 2, {3, 4}}"
    # Single-element containers keep the braces.
    assert mcode([1]) == "{1}"
    assert mcode((1,)) == "{1}"
    # sympy's own Tuple behaves like a plain tuple.
    assert mcode(Tuple(1, 2, 3)) == "{1, 2, 3}"
def test_matrices():
    """Dense matrices print as nested ``{...}`` lists; sparse matrices as
    ``SparseArray`` rules plus the shape.

    Mutable and immutable variants of the same matrix must print identically.
    """
    from sympy.matrices import MutableDenseMatrix, MutableSparseMatrix, \
        ImmutableDenseMatrix, ImmutableSparseMatrix
    # One 4x4 bidiagonal matrix, wrapped in all four matrix flavours.
    A = MutableDenseMatrix(
        [[1, -1, 0, 0],
         [0, 1, -1, 0],
         [0, 0, 1, -1],
         [0, 0, 0, 1]]
    )
    B = MutableSparseMatrix(A)
    C = ImmutableDenseMatrix(A)
    D = ImmutableSparseMatrix(A)

    # Dense output: rows as nested lists.
    assert mcode(C) == mcode(A) == \
        "{{1, -1, 0, 0}, " \
        "{0, 1, -1, 0}, " \
        "{0, 0, 1, -1}, " \
        "{0, 0, 0, 1}}"

    # Sparse output: only the nonzero entries as {row, col} -> value rules
    # (1-based indices), followed by the overall shape.
    assert mcode(D) == mcode(B) == \
        "SparseArray[{" \
        "{1, 1} -> 1, {1, 2} -> -1, {2, 2} -> 1, {2, 3} -> -1, " \
        "{3, 3} -> 1, {3, 4} -> -1, {4, 4} -> 1" \
        "}, {4, 4}]"

    # Trivial cases of matrices: empty dense matrices collapse to rows of
    # empty lists, while SparseArray always records the declared shape.
    assert mcode(MutableDenseMatrix(0, 0, [])) == '{}'
    assert mcode(MutableSparseMatrix(0, 0, [])) == 'SparseArray[{}, {0, 0}]'
    assert mcode(MutableDenseMatrix(0, 3, [])) == '{}'
    assert mcode(MutableSparseMatrix(0, 3, [])) == 'SparseArray[{}, {0, 3}]'
    assert mcode(MutableDenseMatrix(3, 0, [])) == '{{}, {}, {}}'
    assert mcode(MutableSparseMatrix(3, 0, [])) == 'SparseArray[{}, {3, 0}]'
def test_NDArray():
    """N-dimensional arrays print like matrices: dense variants as nested
    ``{...}`` lists, sparse variants as ``SparseArray`` rules plus the shape.

    Mutable and immutable versions of the same array must print identically.
    """
    from sympy.tensor.array import (
        MutableDenseNDimArray, ImmutableDenseNDimArray,
        MutableSparseNDimArray, ImmutableSparseNDimArray)

    # A 2x3x4 dense array of the consecutive integers 1..24.
    example = MutableDenseNDimArray(
        [[[1, 2, 3, 4],
          [5, 6, 7, 8],
          [9, 10, 11, 12]],
         [[13, 14, 15, 16],
          [17, 18, 19, 20],
          [21, 22, 23, 24]]]
    )

    assert mcode(example) == \
        "{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, " \
        "{{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}"

    example = ImmutableDenseNDimArray(example)

    assert mcode(example) == \
        "{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, " \
        "{{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}"

    # Sparse output enumerates {i, j, k} -> value rules (1-based indices),
    # then the declared shape.
    example = MutableSparseNDimArray(example)

    assert mcode(example) == \
        "SparseArray[{" \
        "{1, 1, 1} -> 1, {1, 1, 2} -> 2, {1, 1, 3} -> 3, " \
        "{1, 1, 4} -> 4, {1, 2, 1} -> 5, {1, 2, 2} -> 6, " \
        "{1, 2, 3} -> 7, {1, 2, 4} -> 8, {1, 3, 1} -> 9, " \
        "{1, 3, 2} -> 10, {1, 3, 3} -> 11, {1, 3, 4} -> 12, " \
        "{2, 1, 1} -> 13, {2, 1, 2} -> 14, {2, 1, 3} -> 15, " \
        "{2, 1, 4} -> 16, {2, 2, 1} -> 17, {2, 2, 2} -> 18, " \
        "{2, 2, 3} -> 19, {2, 2, 4} -> 20, {2, 3, 1} -> 21, " \
        "{2, 3, 2} -> 22, {2, 3, 3} -> 23, {2, 3, 4} -> 24" \
        "}, {2, 3, 4}]"

    example = ImmutableSparseNDimArray(example)

    assert mcode(example) == \
        "SparseArray[{" \
        "{1, 1, 1} -> 1, {1, 1, 2} -> 2, {1, 1, 3} -> 3, " \
        "{1, 1, 4} -> 4, {1, 2, 1} -> 5, {1, 2, 2} -> 6, " \
        "{1, 2, 3} -> 7, {1, 2, 4} -> 8, {1, 3, 1} -> 9, " \
        "{1, 3, 2} -> 10, {1, 3, 3} -> 11, {1, 3, 4} -> 12, " \
        "{2, 1, 1} -> 13, {2, 1, 2} -> 14, {2, 1, 3} -> 15, " \
        "{2, 1, 4} -> 16, {2, 2, 1} -> 17, {2, 2, 2} -> 18, " \
        "{2, 2, 3} -> 19, {2, 2, 4} -> 20, {2, 3, 1} -> 21, " \
        "{2, 3, 2} -> 22, {2, 3, 3} -> 23, {2, 3, 4} -> 24" \
        "}, {2, 3, 4}]"
def test_Integral():
    """Unevaluated Integrals wrap ``Integrate[...]`` in ``Hold[...]``."""
    single = Integral(sin(sin(x)), x)
    assert mcode(single) == "Hold[Integrate[Sin[Sin[x]], x]]"
    # Multiple integration variables become {var, lo, hi} iterator specs.
    double = Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))
    expected = ("Hold[Integrate[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, "
                "{y, -Infinity, Infinity}]]")
    assert mcode(double) == expected
def test_Derivative():
    """Derivatives print as ``Hold[D[...]]``; repeated variables collapse to
    ``{var, order}`` specs."""
    cases = (
        (Derivative(sin(x), x), "Hold[D[Sin[x], x]]"),
        (Derivative(x, x), "Hold[D[x, x]]"),
        (Derivative(sin(x)*y**4, x, 2), "Hold[D[y^4*Sin[x], {x, 2}]]"),
        (Derivative(sin(x)*y**4, x, y, x), "Hold[D[y^4*Sin[x], x, y, x]]"),
        (Derivative(sin(x)*y**4, x, y, 3, x),
         "Hold[D[y^4*Sin[x], x, {y, 3}, x]]"),
    )
    for deriv, mathematica in cases:
        assert mcode(deriv) == mathematica
def test_Sum():
    """Sums print as ``Hold[Sum[...]]`` with {var, lo, hi} iterator specs."""
    assert mcode(Sum(sin(x), (x, 0, 10))) == "Hold[Sum[Sin[x], {x, 0, 10}]]"
    gaussian = Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))
    expected = ("Hold[Sum[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, "
                "{y, -Infinity, Infinity}]]")
    assert mcode(gaussian) == expected
def test_comment():
    """Comments use Mathematica's ``(* ... *)`` delimiters."""
    from sympy.printing.mathematica import MCodePrinter
    printer = MCodePrinter()
    assert printer._get_comment("Hello World") == "(* Hello World *)"
def test_userfuncs():
    """``user_functions`` overrides the printed name and must not mutate the
    caller-supplied mapping."""
    some_function = symbols("some_function", cls=Function)

    # Printing twice with the same dict guards against mcode mutating it:
    # the second call must still see the original mapping.
    mapping = {"some_function": "SomeFunction"}
    assert mcode(some_function(z), user_functions=mapping) == 'SomeFunction[z]'
    assert mcode(some_function(z), user_functions=mapping) == 'SomeFunction[z]'

    # The value may also be a list of (predicate, name) pairs.
    mapping = {"some_function": [(lambda arg: True, "SomeOtherFunction")]}
    assert mcode(some_function(z), user_functions=mapping) == \
        'SomeOtherFunction[z]'
|
kaushik94/sympy
|
sympy/printing/tests/test_mathematica.py
|
Python
|
bsd-3-clause
| 10,933
|
#!/usr/bin/env python3
import sys
from osmium.replication import server
if __name__ == '__main__':
    # Exit codes: 0 = new data available, 1 = up to date,
    # 253 = server state unreadable, 254 = bad usage.
    if len(sys.argv) != 3:
        print("Usage: python check_server_for_updates.py <server url> <sequence id>")
        sys.exit(254)

    known_seq = int(sys.argv[2])
    repl_server = server.ReplicationServer(sys.argv[1])
    state = repl_server.get_state_info()

    if state is None:
        print("ERROR: Cannot get state from URL %s." % (sys.argv[1], ))
        sys.exit(253)

    if state.sequence > known_seq:
        print("New data available (%i => %i)." % (known_seq, state.sequence))
        sys.exit(0)

    print("Database up to date.")
    sys.exit(1)
|
openstreetmap/Nominatim
|
utils/check_server_for_updates.py
|
Python
|
gpl-2.0
| 629
|
# third party
import pyperf
# relative
from .bench_constructor import create_bench_sept_constructor
from .bench_deserialization import create_bench_sept_deserialize
from .bench_serialization import create_bench_sept_serialize
def run_sept_suite(runner: pyperf.Runner, rows, cols, lower_bound, upper_bound):
    """Register every SEPT benchmark (deserialize, constructor, serialize)
    on the given pyperf runner, all with the same tensor dimensions and
    value bounds. Registration order is preserved.
    """
    registrars = (
        create_bench_sept_deserialize,
        create_bench_sept_constructor,
        create_bench_sept_serialize,
    )
    for register in registrars:
        register(runner, rows, cols, lower_bound, upper_bound)
|
OpenMined/PySyft
|
benchmarks/syft_benchmarks/septs/suite.py
|
Python
|
apache-2.0
| 548
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2014 Elico Corp (<http://www.elico-corp.com>)
# Alex Duan <alex.duan@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from test_base_data import TransactionCaseBaseData
class TestLandedCosts(TransactionCaseBaseData):
    '''Tests for landed costs

    Demo data:

    1 stock picking with landed costs:
        * distributed by volume
        * distributed by value
        * distributed by quantity

    2 stock moves with landed costs:
        * distributed by volume
        * distributed by value
        * distributed by quantity
    '''
    def setUp(self):
        # prepare the stock moves for testing.
        # NOTE(review): relies on self.move_obj and self.product_id being
        # provided by TransactionCaseBaseData — confirm. super().setUp() is
        # not called here, which is unusual for TransactionCase subclasses.
        self.move_id1 = self.move_obj.create({
            'name': 'Move1',
            'product_id': self.product_id
        })

    def StockMove_LandingCostField(self):
        # NOTE(review): the name does not start with "test_", so the unittest
        # runner will never collect this method — it looks like a placeholder.
        # Environment:
        #     A stock move of a stock picking with landed costs
        # Action:
        #     compute the landing cost
        # Output:
        #     the right landing cost
        pass
|
udayinfy/openerp-7.0
|
stock_landed_costs/tests/test_landed_costs.py
|
Python
|
agpl-3.0
| 1,946
|
#!/usr/bin/env python
"""Program entry point"""
from __future__ import print_function
import argparse
import sys
import wx
from runescape_price_watch import metadata
from runescape_price_watch.gui import MainFrame
def main(argv):
    """Program entry point.

    :param argv: command-line arguments
    :type argv: :class:`list`
    """
    # Build the "Author: name <email>" lines for the --help epilog.
    author_lines = [
        'Author: {0} <{1}>'.format(name, email)
        for name, email in zip(metadata.authors, metadata.emails)
    ]

    epilog = '''
{project} {version}

{authors}
URL: <{url}>
'''.format(
        project=metadata.project,
        version=metadata.version,
        authors='\n'.join(author_lines),
        url=metadata.url)

    parser = argparse.ArgumentParser(
        prog=argv[0],
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=metadata.description,
        epilog=epilog)
    parser.add_argument(
        '-V', '--version',
        action='version',
        version='{0} {1}'.format(metadata.project, metadata.version))
    parser.parse_args(args=argv[1:])

    # Create the wx app without redirecting stdout/stderr, show the main
    # window, and block in the GUI event loop until it closes.
    app = wx.App(redirect=False)
    frame = MainFrame(parent=None)
    frame.Show()
    app.MainLoop()
    return 0
def entry_point():
    """Zero-argument entry point for use with setuptools/distribute."""
    # sys.exit raises SystemExit with main()'s return code, same as the
    # explicit raise did.
    sys.exit(main(sys.argv))


if __name__ == '__main__':
    entry_point()
|
ryanmoyer/runescape-price-watch
|
runescape_price_watch/main.py
|
Python
|
gpl-3.0
| 1,469
|
# Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from matplotlib import pyplot as plt
import GPflow
import VFF
# Interactive plotting; close any figures left over from a previous run.
plt.ion()
# import matplotlib2tikz
plt.close('all')

# Toy 1-D data set: 20 inputs in [0, 1] (the second half squeezed into
# [0, 0.5]), with a two-frequency sinusoid target plus Gaussian noise.
X = np.vstack([np.random.rand(10, 1), np.random.rand(10, 1)*0.5])
Y = np.sin(3*X) + 0.4*np.cos(9*X) + 0.1 * np.random.randn(*X.shape)

# Whether to optimize each approximate model's parameters. If False, reuse
# the optimal hyperparameters from the full GP model fitted below.
optimize = False
def plot(m, ax=None):
    """Plot the model's posterior mean +/- 2 standard deviations over a fixed
    test grid, plus the training points, on ``ax`` (a new figure if None).
    """
    if ax is None:
        _, ax = plt.subplots(1, 1)
    grid = np.linspace(-0.2, 1.2, 100)[:, None]
    mean, variance = m.predict_y(grid)
    band = 2 * np.sqrt(variance)
    # Reuse the mean line's colour for both envelope curves.
    line, = ax.plot(grid, mean, lw=1.5)
    colour = line.get_color()
    ax.plot(grid, mean + band, color=colour)
    ax.plot(grid, mean - band, color=colour)
    ax.plot(m.X.value, m.Y.value, 'kx', mew=1.5)
# build a full model to get hypers.
K = GPflow.kernels.Matern12
m_full = GPflow.gpr.GPR(X, Y, kern=K(1))
m_full.optimize()

# 2x3 grid of shared-axis panels: three RFF models, two VFF models, and the
# full GP for reference.
f, axes = plt.subplots(2, 3, sharex=True, sharey=True)
axes = axes.flatten()
ax_count = 0

# Random Fourier Features models with increasing numbers of basis functions,
# copying the full model's kernel/likelihood hyperparameters.
for M in [20, 100, 500]:
    m = VFF.SSGP(X, Y, kern=K(1), num_basis=M)
    m.omega.fixed = True
    m.kern.set_parameter_dict(m_full.kern.get_parameter_dict())
    m.likelihood.set_parameter_dict(m_full.likelihood.get_parameter_dict())
    if optimize:
        m.optimize()
    plot(m, axes[ax_count])
    axes[ax_count].set_title('RFF (%i)' % M)
    ax_count += 1

# Variational Fourier Features models on the interval [a, b] = [-1, 2],
# again reusing the full model's hyperparameters.
for M in [20, 100]:
    m = VFF.gpr.GPR_1d(X, Y, np.arange(M), a=-1, b=2, kern=K(1))
    m.kern.set_parameter_dict(m_full.kern.get_parameter_dict())
    m.likelihood.set_parameter_dict(m_full.likelihood.get_parameter_dict())
    if optimize:
        m.optimize()
    plot(m, axes[ax_count])
    axes[ax_count].set_title('VFF (%i)' % M)
    ax_count += 1

# Last panel: the exact GP posterior, and the shared axis limits.
plot(m_full, axes[ax_count])
axes[ax_count].set_title('Full')
axes[ax_count].set_xlim(-0.1, 1.2)
axes[ax_count].set_ylim(-0.7, 1.7)
|
jameshensman/VFF
|
experiments/simple_regression/figure.py
|
Python
|
apache-2.0
| 2,416
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand - XBMC Plugin
# XBMC Tools
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand/
# Modificado por super_berny: inclusion de infoLabels
#------------------------------------------------------------
import urllib, urllib2
import xbmc
import xbmcgui
import xbmcplugin
import sys
import os
from servers import servertools
from core import config
from core import logger
# This allows running in emulated mode (outside Kodi/XBMC, where no plugin
# handle is passed on the command line).
try:
    pluginhandle = int( sys.argv[ 1 ] )
except:
    pluginhandle = ""

# When True, every item added to the directory is logged.
DEBUG = True
def addnewfolderextra(item, totalItems=0):
    """Add a folder (directory) entry for ``item`` to the XBMC listing,
    attaching the context-menu commands requested via ``item.context``.

    ``totalItems`` hints the final list size to XBMC so it can show progress.
    Returns the boolean result of ``xbmcplugin.addDirectoryItem``.
    """
    if item.fulltitle == "":
        item.fulltitle = item.title

    contextCommands = []
    ok = False

    # item.context may arrive URL-encoded and/or "|"-separated.
    try:
        item.context = urllib.unquote_plus(item.context)
    except:
        item.context = ""
    if "|" in item.context:
        item.context = item.context.split("|")

    if DEBUG:
        logger.info('[xbmctools.py] addnewfolderextra')
        logger.info(item.tostring())

    listitem = xbmcgui.ListItem( item.title, iconImage="DefaultFolder.png", thumbnailImage=item.thumbnail )
    listitem.setInfo( "video", { "Title" : item.title, "Plot" : item.plot, "Studio" : item.channel.capitalize() } )
    # Extra infoLabels on the ListItem (contributed by super_berny).
    set_infoLabels(listitem, item.plot)

    if item.fanart != "":
        listitem.setProperty('fanart_image', item.fanart)
        xbmcplugin.setPluginFanart(pluginhandle, item.fanart)

    # Simple encode to avoid problems with unicode titles.
    try:
        item.title = item.title.encode("utf-8")  # Only applies to unicode strings; the rest stay as they are.
    except:
        pass

    itemurl = '%s?%s' % ( sys.argv[ 0 ] , item.tourl())

    if item.show != "":  # Context option to add the whole series to the library
        addSerieCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(action="addlist2Library").tourl())
        contextCommands.append(("Añadir Serie a Biblioteca", addSerieCommand))

    # FIX: the original condition was
    #   if "1" in item.context and accion != "por_teclado":
    # but "accion" is not defined in this function, so any item whose context
    # contained "1" raised NameError. The undefined clause has been dropped.
    if "1" in item.context:
        DeleteCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="buscador", action="borrar_busqueda").tourl())
        contextCommands.append((config.get_localized_string( 30300 ), DeleteCommand))

    if "4" in item.context:  # Subtitle search
        searchSubtitleCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="subtitletools", action="searchSubtitle").tourl())
        contextCommands.append(("XBMC Subtitle", searchSubtitleCommand))

    if "5" in item.context:  # Trailer search
        trailerCommand = "XBMC.Container.Update(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="trailertools", action="buscartrailer").tourl())
        contextCommands.append((config.get_localized_string(30162), trailerCommand))

    if "6" in item.context:  # Watch the channel live on justin.tv
        justinCommand = "XBMC.PlayMedia(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="playVideo").tourl())
        contextCommands.append((config.get_localized_string(30410), justinCommand))

    if "8" in item.context:  # Add the channel to justin.tv favourites
        justinCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="addToFavorites").tourl())
        contextCommands.append((config.get_localized_string(30406), justinCommand))

    if "9" in item.context:  # Remove the channel from justin.tv favourites
        justinCommand = "XBMC.Container.Update(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="removeFromFavorites").tourl())
        contextCommands.append((config.get_localized_string(30407), justinCommand))

    logger.info("[xbmctools.py] addnewfolderextra itemurl="+itemurl)

    if config.get_platform() == "boxee":
        # Boxee cannot take context menus or totalItems hints.
        ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
    else:
        if len(contextCommands) > 0:
            listitem.addContextMenuItems ( contextCommands, replaceItems=False)

        if totalItems == 0:
            ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
        else:
            ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True, totalItems=totalItems)
    return ok
def addnewvideo(item, IsPlayable='false', totalItems = 0):
    """Add a playable video entry for ``item`` to the XBMC listing,
    attaching the context-menu commands requested via ``item.context``.

    ``IsPlayable='true'`` marks the item so xbmcplugin.setResolvedUrl() can
    be used later. Returns the boolean result of addDirectoryItem.
    """
    contextCommands = []
    ok = False

    # item.context may arrive URL-encoded and/or "|"-separated.
    try:
        item.context = urllib.unquote_plus(item.context)
    except:
        item.context = ""
    if "|" in item.context:
        item.context = item.context.split("|")

    if DEBUG:
        logger.info('[xbmctools.py] addnewvideo')
        logger.info(item.tostring())

    # Use the per-server icon shipped with the plugin when available.
    icon_image = os.path.join( config.get_runtime_path() , "resources" , "images" , "servers" , item.server+".png" )
    if not os.path.exists(icon_image):
        icon_image = "DefaultVideo.png"

    # FIX: icon_image was computed (with its DefaultVideo.png fallback) but
    # never used — the ListItem always got the hard-coded default icon.
    listitem = xbmcgui.ListItem( item.title, iconImage=icon_image, thumbnailImage=item.thumbnail )
    listitem.setInfo( "video", { "Title" : item.title, "FileName" : item.title, "Plot" : item.plot, "Duration" : item.duration, "Studio" : item.channel.capitalize(), "Genre" : item.category } )
    # Extra infoLabels on the ListItem (contributed by super_berny).
    set_infoLabels(listitem, item.plot)

    if item.fanart != "":
        listitem.setProperty('fanart_image', item.fanart)
        xbmcplugin.setPluginFanart(pluginhandle, item.fanart)

    if IsPlayable == 'true':  # Enables use of xbmcplugin.setResolvedUrl()
        listitem.setProperty('IsPlayable', 'true')

    if "1" in item.context:  # Save the channel to favourites (megalive)
        addItemCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(action="saveChannelFavorites").tourl())
        contextCommands.append((config.get_localized_string(30301), addItemCommand))

    if "2" in item.context:  # Delete and/or rename a favourite channel
        addItemCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(action="deleteSavedChannel").tourl())
        contextCommands.append((config.get_localized_string(30302), addItemCommand))
        addItemCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(action="renameChannelTitle").tourl())
        contextCommands.append((config.get_localized_string(30303), addItemCommand))

    if "6" in item.context:  # Watch the channel live on justin.tv
        justinCommand = "XBMC.PlayMedia(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="playVideo").tourl())
        contextCommands.append((config.get_localized_string(30410), justinCommand))

    if "7" in item.context:  # List archived videos on justin.tv
        justinCommand = "XBMC.Container.Update(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="listarchives").tourl())
        contextCommands.append((config.get_localized_string(30409), justinCommand))

    if "8" in item.context:  # Add the channel to justin.tv favourites
        justinCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="addToFavorites").tourl())
        contextCommands.append((config.get_localized_string(30406), justinCommand))

    if "9" in item.context:  # Remove the channel from justin.tv favourites
        justinCommand = "XBMC.Container.Update(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="justintv", action="removeFromFavorites").tourl())
        contextCommands.append((config.get_localized_string(30407), justinCommand))

    if len (contextCommands) > 0:
        listitem.addContextMenuItems ( contextCommands, replaceItems=False)

    # Simple encode to avoid problems with unicode titles/plots.
    try:
        item.title = item.title.encode("utf-8")  # Only applies to unicode strings; the rest stay as they are.
        item.plot = item.plot.encode("utf-8")
    except:
        pass

    itemurl = '%s?%s' % ( sys.argv[ 0 ] , item.tourl())
    if totalItems == 0:
        ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False)
    else:
        ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False, totalItems=totalItems)
    return ok
# FIXME: ¿Por qué no pasar el item en lugar de todos los parámetros?
def play_video(item,desdefavoritos=False,desdedescargados=False,desderrordescargas=False,strmfile=False):
from servers import servertools
import sys
import xbmcgui,xbmc
logger.info("[xbmctools.py] play_video")
logger.info(item.tostring())
try:
item.server = item.server.lower()
except:
item.server = ""
if item.server=="":
item.server="directo"
try:
from core import descargas
download_enable=True
except:
download_enable=False
view = False
# Abre el diálogo de selección
opciones = []
default_action = config.get_setting("default_action")
logger.info("default_action="+default_action)
# Si no es el modo normal, no muestra el diálogo porque cuelga XBMC
muestra_dialogo = (config.get_setting("player_mode")=="0" and not strmfile)
# Extrae las URL de los vídeos, y si no puedes verlo te dice el motivo
video_urls,puedes,motivo = servertools.resolve_video_urls_for_playing(item.server,item.url,item.password,muestra_dialogo)
# Si puedes ver el vídeo, presenta las opciones
if puedes:
for video_url in video_urls:
opciones.append(config.get_localized_string(30151) + " " + video_url[0])
if item.server=="local":
opciones.append(config.get_localized_string(30164))
else:
if download_enable:
opcion = config.get_localized_string(30153)
opciones.append(opcion) # "Descargar"
if item.channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
else:
opciones.append(config.get_localized_string(30155)) # "Añadir a favoritos"
if not strmfile:
opciones.append(config.get_localized_string(30161)) # "Añadir a Biblioteca"
if download_enable:
if item.channel!="descargas":
opciones.append(config.get_localized_string(30157)) # "Añadir a lista de descargas"
else:
if item.category=="errores":
opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
opciones.append(config.get_localized_string(30160)) # "Pasar de nuevo a lista de descargas"
else:
opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"
if config.get_setting("jdownloader_enabled")=="true":
opciones.append(config.get_localized_string(30158)) # "Enviar a JDownloader"
if config.get_setting("pyload_enabled")=="true":
opciones.append(config.get_localized_string(30158).replace("jDownloader","pyLoad")) # "Enviar a pyLoad"
if default_action=="3":
seleccion = len(opciones)-1
# Busqueda de trailers en youtube
if not item.channel in ["Trailer","ecarteleratrailers"]:
opciones.append(config.get_localized_string(30162)) # "Buscar Trailer"
# Si no puedes ver el vídeo te informa
else:
import xbmcgui
if item.server!="":
advertencia = xbmcgui.Dialog()
if "<br/>" in motivo:
resultado = advertencia.ok( "Non è possibile guardare il video perché...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],item.url)
else:
resultado = advertencia.ok( "Non è possibile guardare il video perché...",motivo,item.url)
else:
resultado = advertencia.ok( "Non è possibile guardare il video perché...","Il server che lo ospita non è","ancora supportato da streamondemand",item.url)
if item.channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
if item.channel=="descargas":
if item.category=="errores":
opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
else:
opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"
if len(opciones)==0:
return
# Si la accion por defecto es "Preguntar", pregunta
if default_action=="0": # and server!="torrent":
import xbmcgui
dia = xbmcgui.Dialog()
seleccion = dia.select(config.get_localized_string(30163), opciones) # "Elige una opción"
#dia.close()
'''
elif default_action=="0" and server=="torrent":
advertencia = xbmcgui.Dialog()
logger.info("video_urls[0]="+str(video_urls[0][1]))
if puedes and ('"status":"COMPLETED"' in video_urls[0][1] or '"percent_done":100' in video_urls[0][1]):
listo = "y está listo para ver"
else:
listo = "y se está descargando"
resultado = advertencia.ok( "Torrent" , "El torrent ha sido añadido a la lista" , listo )
seleccion=-1
'''
elif default_action=="1":
seleccion = 0
elif default_action=="2":
seleccion = len(video_urls)-1
elif default_action=="3":
seleccion = seleccion
else:
seleccion=0
logger.info("seleccion=%d" % seleccion)
logger.info("seleccion=%s" % opciones[seleccion])
# No ha elegido nada, lo más probable porque haya dado al ESC
if seleccion==-1:
#Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm
listitem = xbmcgui.ListItem( item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail)
import sys
xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),False,listitem) # JUR Added
#if config.get_setting("subtitulo") == "true":
# config.set_setting("subtitulo", "false")
return
if opciones[seleccion]==config.get_localized_string(30158): # "Enviar a JDownloader"
#d = {"web": url}urllib.urlencode(d)
from core import scrapertools
if item.subtitle!="":
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail + " " + item.subtitle)
else:
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail)
return
if opciones[seleccion]==config.get_localized_string(30158).replace("jDownloader","pyLoad"): # "Enviar a pyLoad"
logger.info("Enviando a pyload...")
if item.show!="":
package_name = item.show
else:
package_name = "streamondemand"
from core import pyload_client
pyload_client.download(url=item.url,package_name=package_name)
return
elif opciones[seleccion]==config.get_localized_string(30164): # Borrar archivo en descargas
# En "extra" está el nombre del fichero en favoritos
import os
os.remove( item.url )
xbmc.executebuiltin( "Container.Refresh" )
return
# Ha elegido uno de los vídeos
elif seleccion < len(video_urls):
mediaurl = video_urls[seleccion][1]
if len(video_urls[seleccion])>3:
wait_time = video_urls[seleccion][2]
item.subtitle = video_urls[seleccion][3]
elif len(video_urls[seleccion])>2:
wait_time = video_urls[seleccion][2]
else:
wait_time = 0
view = True
# Descargar
elif opciones[seleccion]==config.get_localized_string(30153): # "Descargar"
download_title = item.fulltitle
if item.hasContentDetails=="true":
download_title = item.contentTitle
import xbmc
# El vídeo de más calidad es el último
mediaurl = video_urls[len(video_urls)-1][1]
from core import downloadtools
keyboard = xbmc.Keyboard(download_title)
keyboard.doModal()
if (keyboard.isConfirmed()):
download_title = keyboard.getText()
devuelve = downloadtools.downloadbest(video_urls,download_title)
if devuelve==0:
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok("plugin" , "Scaricato con successo")
elif devuelve==-1:
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok("plugin" , "Download interrotto")
else:
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok("plugin" , "Errore nel download")
return
elif opciones[seleccion]==config.get_localized_string(30154): #"Quitar de favoritos"
from core import favoritos
# En "extra" está el nombre del fichero en favoritos
favoritos.deletebookmark(urllib.unquote_plus( item.extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , item.title , config.get_localized_string(30105)) # 'Se ha quitado de favoritos'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30159): #"Borrar descarga definitivamente"
from core import descargas
descargas.delete_error_bookmark(urllib.unquote_plus( item.extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Se ha quitado de la lista'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30160): #"Pasar de nuevo a lista de descargas":
from core import descargas
descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( item.extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30107)) # 'Ha pasado de nuevo a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30155): #"Añadir a favoritos":
from core import favoritos
from core import downloadtools
download_title = item.fulltitle
download_thumbnail = item.thumbnail
download_plot = item.plot
if item.hasContentDetails=="true":
download_title = item.contentTitle
download_thumbnail = item.contentThumbnail
download_plot = item.contentPlot
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title)+" ["+item.channel+"]")
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
favoritos.savebookmark(titulo=download_title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=item.title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , item.title , config.get_localized_string(30108)) # 'se ha añadido a favoritos'
return
elif opciones[seleccion]==config.get_localized_string(30156): #"Quitar de lista de descargas":
from core import descargas
# La categoría es el nombre del fichero en la lista de descargas
descargas.deletebookmark((urllib.unquote_plus( item.extra )))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Se ha quitado de lista de descargas'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30157): #"Añadir a lista de descargas":
from core import descargas
from core import downloadtools
download_title = item.fulltitle
download_thumbnail = item.thumbnail
download_plot = item.plot
if item.hasContentDetails=="true":
download_title = item.contentTitle
download_thumbnail = item.contentThumbnail
download_plot = item.contentPlot
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title))
keyboard.doModal()
if keyboard.isConfirmed():
download_title = keyboard.getText()
descargas.savebookmark(titulo=download_title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=download_title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , download_title , config.get_localized_string(30109)) # 'se ha añadido a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30161): #"Añadir a Biblioteca": # Library
from platformcode import library
titulo = item.fulltitle
if item.fulltitle=="":
titulo = item.title
library.savelibrary(titulo,item.url,item.thumbnail,item.server,item.plot,canal=item.channel,category=item.category,Serie=item.show)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , titulo , config.get_localized_string(30135)) # 'se ha añadido a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30162): #"Buscar Trailer":
config.set_setting("subtitulo", "false")
import sys
xbmc.executebuiltin("Container.Update(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="trailertools", action="buscartrailer").tourl()))
return
# Si no hay mediaurl es porque el vídeo no está :)
logger.info("[xbmctools.py] mediaurl="+mediaurl)
if mediaurl=="":
logger.info("b1")
if server == "unknown":
alertUnsopportedServer()
else:
alertnodisponibleserver(item.server)
return
# Si hay un tiempo de espera (como en megaupload), lo impone ahora
if wait_time>0:
logger.info("b2")
continuar = handle_wait(wait_time,server,"Caricamento vídeo...")
if not continuar:
return
# Obtención datos de la Biblioteca (solo strms que estén en la biblioteca)
import xbmcgui
if strmfile:
logger.info("b3")
xlistitem = getLibraryInfo(mediaurl)
else:
logger.info("b4")
play_title = item.fulltitle
play_thumbnail = item.thumbnail
play_plot = item.plot
if item.hasContentDetails=="true":
play_title = item.contentTitle
play_thumbnail = item.contentThumbnail
play_plot = item.contentPlot
try:
xlistitem = xbmcgui.ListItem( play_title, iconImage="DefaultVideo.png", thumbnailImage=play_thumbnail, path=mediaurl)
logger.info("b4.1")
except:
xlistitem = xbmcgui.ListItem( play_title, iconImage="DefaultVideo.png", thumbnailImage=play_thumbnail)
logger.info("b4.2")
xlistitem.setInfo( "video", { "Title": play_title, "Plot" : play_plot , "Studio" : item.channel , "Genre" : item.category } )
#set_infoLabels(listitem,plot) # Modificacion introducida por super_berny para añadir infoLabels al ListItem
# Lanza el reproductor
# Lanza el reproductor
if strmfile: # and item.server != "torrent": #Si es un fichero strm no hace falta el play
logger.info("b6")
import sys
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem)
if item.subtitle != "":
xbmc.sleep(2000)
xbmc.Player().setSubtitles(item.subtitle)
else:
logger.info("b7")
logger.info("player_mode="+config.get_setting("player_mode"))
logger.info("mediaurl="+mediaurl)
if config.get_setting("player_mode")=="3" or "megacrypter.com" in mediaurl:
logger.info("b11")
import download_and_play
download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") )
return
elif config.get_setting("player_mode")=="0" or (config.get_setting("player_mode")=="3" and mediaurl.startswith("rtmp")):
logger.info("b8")
# Añadimos el listitem a una lista de reproducción (playlist)
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( mediaurl, xlistitem )
# Reproduce
playersettings = config.get_setting('player_type')
logger.info("[xbmctools.py] playersettings="+playersettings)
if config.get_system_platform()=="xbox":
player_type = xbmc.PLAYER_CORE_AUTO
if playersettings == "0":
player_type = xbmc.PLAYER_CORE_AUTO
logger.info("[xbmctools.py] PLAYER_CORE_AUTO")
elif playersettings == "1":
player_type = xbmc.PLAYER_CORE_MPLAYER
logger.info("[xbmctools.py] PLAYER_CORE_MPLAYER")
elif playersettings == "2":
player_type = xbmc.PLAYER_CORE_DVDPLAYER
logger.info("[xbmctools.py] PLAYER_CORE_DVDPLAYER")
xbmcPlayer = xbmc.Player( player_type )
else:
xbmcPlayer = xbmc.Player()
xbmcPlayer.play(playlist)
if item.channel=="cuevana" and item.subtitle!="":
logger.info("subtitulo="+subtitle)
if item.subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
logger.info("[xbmctools.py] Con subtitulos")
setSubtitles()
elif config.get_setting("player_mode")=="1":
logger.info("b9")
logger.info("mediaurl :"+ mediaurl)
logger.info("Tras setResolvedUrl")
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl))
elif config.get_setting("player_mode")=="2":
logger.info("b10")
xbmc.executebuiltin( "PlayMedia("+mediaurl+")" )
if item.subtitle!="" and view:
logger.info("b11")
logger.info("Subtítulos externos: "+item.subtitle)
xbmc.Player().setSubtitles(item.subtitle)
def handle_wait(time_to_wait,title,text):
    """Show a cancellable countdown dialog for *time_to_wait* seconds.

    Used e.g. for hosters that impose a wait before serving the video.
    Returns True when the wait completed, False when the user cancelled.
    """
    logger.info ("[xbmctools.py] handle_wait(time_to_wait=%d)" % time_to_wait)
    import xbmc,xbmcgui
    espera = xbmcgui.DialogProgress()
    ret = espera.create(' '+title)
    secs = 0
    cancelled = False
    while secs < time_to_wait:
        secs = secs + 1
        # Derive the percentage from elapsed seconds so the bar reaches
        # exactly 100% on the final tick.  The old per-step integer
        # increment (int(100 / time_to_wait)) stalled below 100% whenever
        # time_to_wait did not divide 100 evenly.
        percent = int(100 * secs / float(time_to_wait))
        secs_left = str((time_to_wait - secs))
        remaining_display = ' Attendi '+secs_left+' secondi per il video...'
        espera.update(percent,' '+text,remaining_display)
        xbmc.sleep(1000)
        if espera.iscanceled():
            cancelled = True
            break
    if cancelled:
        logger.info ('Attesa eliminata')
        return False
    else:
        logger.info ('Attesa conclusa')
        return True
def getLibraryInfo (mediaurl):
    '''Get information from the Library if it exists (strm files) or from the
    current listitem parameters, and return a populated xbmcgui.ListItem
    pointing at *mediaurl*.
    '''
    if DEBUG:
        logger.info('[xbmctools.py] playlist OBTENCIÓN DE DATOS DE BIBLIOTECA')
    # Basic information read from the focused listitem.
    label = xbmc.getInfoLabel( 'listitem.label' )
    label2 = xbmc.getInfoLabel( 'listitem.label2' )
    iconImage = xbmc.getInfoImage( 'listitem.icon' )
    thumbnailImage = xbmc.getInfoImage( 'listitem.Thumb' ) #xbmc.getInfoLabel( 'listitem.thumbnailImage' )
    if DEBUG:
        logger.info ("[xbmctools.py]getMediaInfo: label = " + label)
        logger.info ("[xbmctools.py]getMediaInfo: label2 = " + label2)
        logger.info ("[xbmctools.py]getMediaInfo: iconImage = " + iconImage)
        logger.info ("[xbmctools.py]getMediaInfo: thumbnailImage = " + thumbnailImage)
    # ListItem creation.
    listitem = xbmcgui.ListItem(label, label2, iconImage, thumbnailImage, mediaurl)
    # Additional information: (info label, type) pairs, where 's'=string,
    # 'i'=int and 'f'=float drive the cast applied below.
    lista = [
        ('listitem.genre', 's'), #(Comedy)
        ('listitem.year', 'i'), #(2009)
        ('listitem.episode', 'i'), #(4)
        ('listitem.season', 'i'), #(1)
        ('listitem.top250', 'i'), #(192)
        ('listitem.tracknumber', 'i'), #(3)
        ('listitem.rating', 'f'), #(6.4) - range is 0..10
        # ('listitem.watched', 'd'), # depreciated. use playcount instead
        ('listitem.playcount', 'i'), #(2) - number of times this item has been played
        # ('listitem.overlay', 'i'), #(2) - range is 0..8. See GUIListItem.h for values
        ('listitem.overlay', 's'), #JUR - listitem returns a string, but addinfo expects an int. See translation below
        ('listitem.cast', 's'), # (Michal C. Hall) - List concatenated into a string
        ('listitem.castandrole', 's'), #(Michael C. Hall|Dexter) - List concatenated into a string
        ('listitem.director', 's'), #(Dagur Kari)
        ('listitem.mpaa', 's'), #(PG-13)
        ('listitem.plot', 's'), #(Long Description)
        ('listitem.plotoutline', 's'), #(Short Description)
        ('listitem.title', 's'), #(Big Fan)
        ('listitem.duration', 's'), #(3)
        ('listitem.studio', 's'), #(Warner Bros.)
        ('listitem.tagline', 's'), #(An awesome movie) - short description of movie
        ('listitem.writer', 's'), #(Robert D. Siegel)
        ('listitem.tvshowtitle', 's'), #(Heroes)
        ('listitem.premiered', 's'), #(2005-03-04)
        ('listitem.status', 's'), #(Continuing) - status of a TVshow
        ('listitem.code', 's'), #(tt0110293) - IMDb code
        ('listitem.aired', 's'), #(2008-12-07)
        ('listitem.credits', 's'), #(Andy Kaufman) - writing credits
        ('listitem.lastplayed', 's'), #(%Y-%m-%d %h
        ('listitem.album', 's'), #(The Joshua Tree)
        ('listitem.votes', 's'), #(12345 votes)
        ('listitem.trailer', 's'), #(/home/user/trailer.avi)
        ]
    # Collect every available info label into a dictionary for setInfo(),
    # skipping empty values and casting to the declared type.
    infodict = dict()
    for label,tipo in lista:
        key = label.split('.',1)[1]
        value = xbmc.getInfoLabel( label )
        if value != "":
            if DEBUG:
                logger.info ("[xbmctools.py]getMediaInfo: "+key+" = " + value) #infoimage=infolabel
            if tipo == 's':
                infodict[key]=value
            elif tipo == 'i':
                infodict[key]=int(value)
            elif tipo == 'f':
                infodict[key]=float(value)
    # Translate the overlay value from string to the int codes expected by
    # setInfo (see GUIListItem.h overlay constants).
    if infodict.has_key('overlay'):
        value = infodict['overlay'].lower()
        if value.find('rar') > -1:
            infodict['overlay'] = 1
        elif value.find('zip')> -1:
            infodict['overlay'] = 2
        elif value.find('trained')> -1:
            infodict['overlay'] = 3
        elif value.find('hastrainer')> -1:
            infodict['overlay'] = 4
        elif value.find('locked')> -1:
            infodict['overlay'] = 5
        elif value.find('unwatched')> -1:
            infodict['overlay'] = 6
        elif value.find('watched')> -1:
            infodict['overlay'] = 7
        elif value.find('hd')> -1:
            infodict['overlay'] = 8
        else:
            # Unknown overlay text: drop the key rather than pass a bad type.
            infodict.pop('overlay')
    if len (infodict) > 0:
        listitem.setInfo( "video", infodict )
    return listitem
def alertnodisponible():
    """Tell the user that no videos could be located on the channel page."""
    # 30055: 'Video not available' / 30056: 'No videos found on the channel page'
    dialog = xbmcgui.Dialog()
    dialog.ok(config.get_localized_string(30055), config.get_localized_string(30056))
def alertnodisponibleserver(server):
    """Tell the user the video is no longer hosted on *server*."""
    # 30055: 'Video not available' / 30057: 'The video is no longer on %s'
    # 30058: 'Try another server or another channel'
    dialog = xbmcgui.Dialog()
    heading = config.get_localized_string(30055)
    line1 = config.get_localized_string(30057) % server
    line2 = config.get_localized_string(30058)
    dialog.ok(heading, line1, line2)
def alertUnsopportedServer():
    """Warn that the hosting server is unknown or not yet supported."""
    # 30065: 'Server not supported or unknown' / 30058: 'Try another server or channel'
    dialog = xbmcgui.Dialog()
    dialog.ok(config.get_localized_string(30065), config.get_localized_string(30058))
def alerterrorpagina():
    """Report that the remote web site returned an error."""
    # 30059: 'Error on the web site' / 30060: 'Cannot access due to a site error'
    dialog = xbmcgui.Dialog()
    dialog.ok(config.get_localized_string(30059), config.get_localized_string(30060))
def alertanomegauploadlow(server):
    """Warn that the chosen quality is unavailable or the video was removed.

    *server* is accepted for interface compatibility but not used.
    """
    # 30055: 'Video not available' / 30061: 'quality unavailable or video deleted'
    # 30062: 'Try playing at another quality'
    dialog = xbmcgui.Dialog()
    dialog.ok(config.get_localized_string(30055),
              config.get_localized_string(30061),
              config.get_localized_string(30062))
# AÑADIDO POR JUR. SOPORTE DE FICHEROS STRM
def playstrm(params,url,category):
    '''Play a video referenced from a library .strm file.

    Title and plot are read from the focused listitem; server/serie/subtitle
    come from the *params* dict (serie and subtitle are optional).
    '''
    logger.info("[xbmctools.py] playstrm url="+url)
    title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" )
    # Default the thumbnail to "" — unquote_plus(None) would raise when the
    # caller did not provide one.
    thumbnail = urllib.unquote_plus( params.get("thumbnail","") )
    plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )
    server = params["server"]
    # dict.get with a default replaces the old has_key/if-else dance.
    serie = params.get("Serie","")
    subtitle = params.get("subtitle","")
    from core.item import Item
    from platformcode.subtitletools import saveSubtitleName
    item = Item(title=title,show=serie)
    saveSubtitleName(item)
    play_video("Biblioteca streamondemand",server,url,category,title,thumbnail,plot,strmfile=True,Serie=serie,subtitle=subtitle)
def renderItems(itemlist, item, isPlayable='false'):
    """Render *itemlist* into the XBMC/Kodi directory listing and close it.

    Fills in missing fulltitle/fanart per entry, adds folders and videos,
    then sets content, category, sort method and (optionally) a forced view.
    Removed a no-op self-assignment (item.category = item.category) and the
    deprecated `<>` operator from the original.
    """
    viewmode = "list"
    if itemlist is not None:
        # NOTE(review): the loop variable deliberately shadows the *item*
        # parameter; after the loop the directory category comes from the
        # LAST entry (or from the parameter when itemlist is empty/None).
        for item in itemlist:
            logger.info("item="+item.tostring())
            if item.fulltitle=="":
                item.fulltitle=item.title
            if item.fanart=="":
                # Fall back to the per-channel fanart, then the add-on default.
                channel_fanart = os.path.join( config.get_runtime_path(), 'resources', 'images', 'fanart', item.channel+'.jpg')
                if os.path.exists(channel_fanart):
                    item.fanart = channel_fanart
                else:
                    item.fanart = os.path.join(config.get_runtime_path(),"fanart.jpg")
            if item.folder:
                addnewfolderextra(item, totalItems = len(itemlist))
            else:
                if config.get_setting("player_mode")=="1": # SetResolvedUrl always requires "isPlayable = true"
                    isPlayable = "true"
                addnewvideo( item, IsPlayable=isPlayable, totalItems = len(itemlist))
            if item.viewmode!="list":
                viewmode = item.viewmode
    # Close the directory.
    xbmcplugin.setContent(pluginhandle,"Movies")
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=item.category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    # Library view modes:
    #   MediaInfo3 - 503
    # File view modes:
    #   WideIconView - 505
    #   ThumbnailView - 500
    if config.get_setting("forceview")=="true":
        if viewmode=="list":
            xbmc.executebuiltin("Container.SetViewMode(50)")
        elif viewmode=="movie_with_plot":
            xbmc.executebuiltin("Container.SetViewMode(503)")
        elif viewmode=="movie":
            xbmc.executebuiltin("Container.SetViewMode(500)")
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def wait2second():
    """Poll (up to ~20 s) until xbmc reports that video playback has started."""
    logger.info("[xbmctools.py] wait2second")
    import time
    attempts = 0
    while not xbmc.Player().isPlayingVideo():
        # Log message intentionally kept byte-identical to the original
        # (it mentions setSubtitles, the sibling routine this mirrors).
        logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
        time.sleep(2)
        attempts += 1
        if attempts > 10:
            break
def setSubtitles():
    """Wait for playback to start, then attach the downloaded subtitle file."""
    logger.info("[xbmctools.py] setSubtitles")
    import time
    attempts = 0
    # Poll every 2 seconds, giving up after ~20 seconds.
    while not xbmc.Player().isPlayingVideo():
        logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
        time.sleep(2)
        attempts += 1
        if attempts > 10:
            break
    subtitlefile = os.path.join( config.get_data_path(), 'subtitulo.srt' )
    logger.info("[xbmctools.py] setting subtitle file %s" % subtitlefile)
    xbmc.Player().setSubtitles(subtitlefile)
def trailer(item):
    """Launch the trailertools channel to search a trailer for *item*.

    Runs as a background plugin invocation; the subtitle flag is cleared
    first so the trailer plays without leftover subtitles.
    """
    logger.info("[xbmctools.py] trailer")
    config.set_setting("subtitulo", "false")
    import sys
    # FIX: the original format string had nine %s placeholders but only
    # eight arguments, so this call always raised TypeError.  item.server now
    # fills the trailing "server" slot (plot stays empty, as before).
    xbmc.executebuiltin("XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( item.category ) , urllib.quote_plus( item.fulltitle ) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( "" ) , urllib.quote_plus( item.server ) ))
    return
def alert_no_puedes_ver_video(server,url,motivo):
    """Explain to the user why a video cannot be played.

    When *server* is known, *motivo* (optionally split on "<br/>") is shown;
    otherwise a generic "server not supported" message is displayed.
    FIX: the dialog used to be created only inside the known-server branch,
    so the unsupported-server branch raised NameError on *advertencia*.
    """
    import xbmcgui
    advertencia = xbmcgui.Dialog()
    if server!="":
        if "<br/>" in motivo:
            resultado = advertencia.ok( "Non è possibile visualizzare questo video perché...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],url)
        else:
            resultado = advertencia.ok( "Non è possibile visualizzare questo video perché...",motivo,url)
    else:
        resultado = advertencia.ok( "Non è possibile visualizzare questo video perché...","Il server che lo ospita non è","ancora supportato da StreamOnDemand",url)
def set_infoLabels(listitem,plot):
    """Apply extra infoLabels to *listitem* when smuggled through *plot*.

    *plot* may carry a Python-literal payload of the form
    "{'infoLabels': {...}}" whose inner dict is passed straight to
    ListItem.setInfo("video", ...).  Any other *plot* value is ignored.
    Parsing is best-effort: a malformed payload is silently dropped rather
    than aborting playback.
    """
    # Contribution by super_berny to pass infoLabels through the Item plot field.
    if plot.startswith("{'infoLabels'"):
        try:
            import ast
            # literal_eval only evaluates Python literals, so arbitrary code
            # inside *plot* cannot execute.
            infodict = ast.literal_eval(plot)['infoLabels']
            listitem.setInfo( "video", infodict)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows SystemExit/KeyboardInterrupt.
            pass
|
dentaku65/plugin.video.sod
|
platformcode/xbmctools.py
|
Python
|
gpl-3.0
| 40,049
|
import config
import twitter
import time
import random
class colors:
    # ANSI terminal escape sequences used to colour console output.
    # When config.print_in_color is falsy every attribute collapses to the
    # empty string, so the bot prints plain text (e.g. on terminals without
    # ANSI support).
    HEADER = '\033[95m' if config.print_in_color else ""
    OKBLUE = '\033[94m' if config.print_in_color else ""
    OKGREEN = '\033[92m' if config.print_in_color else ""
    WARNING = '\033[93m' if config.print_in_color else ""
    FAIL = '\033[91m' if config.print_in_color else ""
    ENDC = '\033[0m' if config.print_in_color else ""   # reset code
    BOLD = '\033[1m' if config.print_in_color else ""
    UNDERLINE = '\033[4m' if config.print_in_color else ""
print(colors.HEADER + "Remember you can change the settings in config.py!")
# All variables related to the Twitter API
twitter_api = twitter.Api(consumer_key=config.twitter_credentials["consumer_key"],
                          consumer_secret=config.twitter_credentials["consumer_secret"],
                          access_token_key=config.twitter_credentials["access_token"],
                          access_token_secret=config.twitter_credentials["access_secret"])
screen_name = twitter_api.VerifyCredentials().screen_name
# Retrieve the full friends (followed users) list once at startup, so the bot
# can later unfollow a random friend whenever the 2000-follow cap is reached.
friends = []
# FIX: the original compared the counts with "is not", which tests object
# identity and only happens to work for small ints in CPython; "!=" is the
# correct value comparison.
while len(friends) != twitter_api.GetUser(screen_name=screen_name).friends_count:
    try:
        f = twitter_api.GetFriends(screen_name=screen_name)
        friends = [x.screen_name for x in f]
        print(colors.OKGREEN + "Friends retrieved successfully!")
        break
    except Exception as e:
        # Friends couldn't be retrieved
        print(colors.FAIL + colors.BOLD + str(e) + colors.ENDC)
        print(colors.FAIL + colors.BOLD + "Couldn't retrieve friends. The bot won't unfollow someone random when we start"
                                          " following someone else. So your account might reach the limit (following 2000"
                                          " users)" + colors.ENDC)
        if config.wait_retrieve is False:
            break
        # Back off ten minutes before retrying (rate limits).
        time.sleep(600)
def check():
    """Run one analysis pass over the configured contest hashtags.

    Searches recent tweets for each tag in config.search_tags, and for every
    tweet that looks like a retweet-to-enter contest: retweets it, optionally
    DMs the author, follows mentioned users (unfollowing a random friend when
    near Twitter's 2000-follow cap — mutates the module-level *friends* list),
    and likes the tweet.  Sleeps between API calls to respect rate limits.
    Returns early when an unexpected API error suggests the app is limited.
    """
    print(colors.OKGREEN + "Started Analyzing (" + str(time.gmtime().tm_hour) + ":" + str(time.gmtime().tm_min) + ":" + str(
        time.gmtime().tm_sec) + ")")
    # Retrieving the last 1000 tweets for each tag and appends them into a list
    just_retweet_streak = 0
    searched_tweets = []
    for x in config.search_tags:
        searched_tweets += twitter_api.GetSearch(term=x, count='1000')
    for tweet in searched_tweets:
        if any(x in tweet.text.lower().split() for x in config.retweet_tags):
            # The script only cares about contests that require retweeting. It would be very weird to not have to
            # retweet anything; that usually means that there's a link you gotta open and then fill up a form.
            # This clause checks if the text contains any retweet_tags
            if tweet.retweeted_status is not None:
                # In case it is a retweet, we switch to the original one
                if any(x in tweet.retweeted_status.text.lower().split() for x in config.retweet_tags):
                    tweet = tweet.retweeted_status
                else:
                    continue
            if tweet.user.screen_name.lower() in config.banned_users or any(x in tweet.user.name.lower() for x in config.banned_name_keywords):
                # If it's the original one, we check if the author is banned
                print(colors.WARNING + "Avoided user with ID: " + tweet.user.screen_name + " & Name: " + tweet.user.name + colors.ENDC)
                continue
            try:
                # RETWEET
                # This is ran under a try clause because there's always an error when trying to retweet something
                # already retweeted. So if that's the case, the except is called and we skip this tweet
                # If the tweet wasn't retweeted before, we retweet it and check for other stuff
                twitter_api.PostRetweet(status_id=tweet.id)
                print(colors.OKBLUE + "Retweeted " + str(tweet.id))
                just_retweet_streak += 1
                # MESSAGE
                try:
                    # So we don't skip the tweet if we get the "You cannot send messages to users who are not following you." error
                    if config.use_msgs is True and any(x in tweet.text.lower() for x in config.message_tags):
                        # If the tweet contains any of the message_tags, we send a DM to the author with a random
                        # sentence from the message_text list
                        twitter_api.PostDirectMessage(
                            text=config.message_text[random.randint(0, len(config.message_text) - 1)],
                            screen_name=tweet.user.screen_name)
                        print("DM sent to: " + tweet.user.screen_name)
                        just_retweet_streak = 0
                        # 1 every 86.4s guarantees we won't pass the 1000 DM per day limit
                        time.sleep(config.msg_rate)
                except:
                    pass
                # FOLLOW
                if any(x in tweet.text.lower() for x in config.follow_tags):
                    # If the tweet contains any follow_tags, it automatically follows all the users mentioned in the
                    # tweet (if there's any) + the author
                    addFriends = []
                    friends_count = twitter_api.GetUser(screen_name=screen_name).friends_count
                    if tweet.user.screen_name not in friends:
                        print("Followed: @" + tweet.user.screen_name)
                        twitter_api.CreateFriendship(screen_name=tweet.user.screen_name)
                        addFriends.append(tweet.user.screen_name)
                        just_retweet_streak = 0
                        time.sleep(config.follow_rate - config.retweet_rate if config.follow_rate > config.retweet_rate else 0)
                    for name in tweet.user_mentions:
                        if name.screen_name in friends or name.screen_name in addFriends:
                            continue
                        print("Followed: @" + name.screen_name)
                        twitter_api.CreateFriendship(screen_name=name.screen_name)
                        addFriends.append(name.screen_name)
                        just_retweet_streak = 0
                        time.sleep(config.retweet_rate)
                    # Twitter sets a limit of not following more than 2k people in total (varies depending on followers)
                    # So every time the bot follows a new user, its deletes another one randomly
                    if friends_count >= 2000:
                        while friends_count < twitter_api.GetUser(screen_name=screen_name).friends_count:
                            try:
                                x = friends[random.randint(0, len(friends) - 1)]
                                print("Unfollowed: @" + x)
                                twitter_api.DestroyFriendship(screen_name=x)
                                friends.remove(x)
                            except Exception as e:
                                print(e)
                    # Keep the module-level cache in sync with the follows above.
                    friends.extend(addFriends)
                # LIKE
                try:
                    # So we don't skip the tweet if we get the "You have already favorited this status." error
                    if any(x in tweet.text.lower() for x in config.like_tags):
                        # If the tweets contains any like_tags, it automatically likes the tweet
                        twitter_api.CreateFavorite(status_id=tweet.id)
                        print("Liked: " + str(tweet.id))
                        just_retweet_streak = 0
                except:
                    pass
                # Max is 2400 tweets per day in windows of half an hour. Thus, 36s as interval guarantees as we won't
                # pass that amount
                time.sleep(config.retweet_rate * (just_retweet_streak + 1))
            except Exception as e:
                # In case the error contains sentences that mean the app is probably banned or the user over daily
                # status update limit, we cancel the function
                if "retweeted" not in str(e):
                    print(colors.FAIL + colors.BOLD + str(e) + colors.ENDC)
                    return
                # And continues with the next item
    print(colors.OKGREEN + "Finished Analyzing (" + str(len(searched_tweets)) + " tweets analyzed)")
# Main loop: analyze forever, backing off harder after a failed pass.
while True:
    print("\n")
    try:
        check()
    except Exception as err:
        # Surface the failure, then wait long enough for rate limits to reset.
        print(colors.FAIL + colors.BOLD + str(err) + colors.ENDC)
        time.sleep(100 * len(config.search_tags))
    # This is here in case there were not tweets checked
    time.sleep(2 * len(config.search_tags))
|
imdiegoestevez/Twitter-Giveaways-Bot
|
main.py
|
Python
|
mit
| 8,698
|
# This module is for compatibility only. All functions are defined elsewhere.
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle',
'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort',
'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud',
'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc',
'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']
import numpy.oldnumeric.linear_algebra as LinearAlgebra
import numpy.oldnumeric.random_array as RandomArray
from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \
angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \
diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \
amax as _Nmax, amin as _Nmin, blackman, bartlett, \
squeeze, sinc, median, fliplr, mean as _Nmean, transpose
from numpy.linalg import eig, svd
from numpy.random import rand, randn
import numpy as np
from typeconv import convtypecode
def eye(N, M=None, k=0, typecode=None, dtype=None):
    """ eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
        and everything else is zeros.

        FIX: the original fell off the end and returned None whenever the
        boolean mask already had the requested dtype; now the array itself
        is returned in that case.
    """
    dtype = convtypecode(typecode, dtype)
    if M is None: M = N
    m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)),-k)
    if m.dtype != dtype:
        return m.astype(dtype)
    return m
def tri(N, M=None, k=0, typecode=None, dtype=None):
    """ returns a N-by-M array where all the diagonals starting from
        lower left corner up to the k-th are all ones.

        FIX: the original fell off the end and returned None whenever the
        boolean mask already had the requested dtype; now the array itself
        is returned in that case.
    """
    dtype = convtypecode(typecode, dtype)
    if M is None: M = N
    m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)),-k)
    if m.dtype != dtype:
        return m.astype(dtype)
    return m
def trapz(y, x=None, axis=-1):
    """Trapezoidal-rule integral of *y* (optionally at sample points *x*)."""
    integral = _Ntrapz(y, x, axis=axis)
    return integral
def ptp(x, axis=0):
    """Peak-to-peak (max - min) of *x* along *axis* (default 0)."""
    return _Nptp(x, axis=axis)
def cumprod(x, axis=0):
    """Cumulative product of *x* along *axis* (default 0)."""
    return _Ncumprod(x, axis=axis)
def max(x, axis=0):
    """Maximum of *x* along *axis* (default 0); shadows the builtin on purpose
    for oldnumeric/MLab compatibility."""
    return _Nmax(x, axis=axis)
def min(x, axis=0):
    """Minimum of *x* along *axis* (default 0); shadows the builtin on purpose
    for oldnumeric/MLab compatibility."""
    return _Nmin(x, axis=axis)
def prod(x, axis=0):
    """Product of the elements of *x* along *axis* (default 0)."""
    return _Nprod(x, axis=axis)
def std(x, axis=0):
    """Sample standard deviation of *x* along *axis* (N-1 normalisation).

    numpy's std uses the population (N) normalisation, so the result is
    rescaled by sqrt(N / (N - 1)) to match MLab's behaviour.
    """
    n = asarray(x).shape[axis]
    correction = sqrt(n / (n - 1.))
    return _Nstd(x, axis) * correction
def mean(x, axis=0):
    """Arithmetic mean of *x* along *axis* (default 0)."""
    return _Nmean(x, axis=axis)
# This is exactly the same cov function as in MLab
def cov(m, y=None, rowvar=0, bias=0):
    """Covariance matrix estimate (MLab-compatible).

    Rows of *m* are observations unless *rowvar* is true, in which case
    variables are rows and the inputs are transposed first.  *bias*=0
    normalises by N-1 (sample covariance), non-zero *bias* by N.
    Raises ValueError when *m* and *y* disagree on the observation count.
    Removed the original's pointless `else: y = y` branch.
    """
    if y is None:
        y = m
    if rowvar:
        m = transpose(m)
        y = transpose(y)
    # A single row is promoted to a column so N counts observations.
    if (m.shape[0] == 1):
        m = transpose(m)
    if (y.shape[0] == 1):
        y = transpose(y)
    N = m.shape[0]
    if (y.shape[0] != N):
        raise ValueError("x and y must have the same number of observations")
    # Centre both inputs before forming the cross-product.
    m = m - _Nmean(m,axis=0)
    y = y - _Nmean(y,axis=0)
    if bias:
        fact = N*1.0
    else:
        fact = N-1.0
    return squeeze(dot(transpose(m), conjugate(y)) / fact)
from numpy import sqrt, multiply
def corrcoef(x, y=None):
    """Correlation-coefficient matrix of *x* (optionally cross with *y*)."""
    covariance = cov(x, y)
    variances = diag(covariance)
    # Normalise each covariance entry by the product of the standard deviations.
    return covariance / sqrt(multiply.outer(variances, variances))
# Pull the public oldnumeric helper names into this namespace so mlab
# re-exports them (star-imports are deliberate for this compat shim).
from compat import *
from functions import *
from precision import *
from ufuncs import *
from misc import *
import compat
import precision
import functions
import misc
import ufuncs
import numpy
__version__ = numpy.__version__
del numpy
__all__ += ['__version__']
# Re-export everything the helper modules declare public.
__all__ += compat.__all__
__all__ += precision.__all__
__all__ += functions.__all__
__all__ += ufuncs.__all__
__all__ += misc.__all__
# Drop the module objects themselves; only their names stay exported.
del compat
del functions
del precision
del ufuncs
del misc
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/oldnumeric/mlab.py
|
Python
|
gpl-3.0
| 3,566
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/preferences/look_feel.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 builds using the v2 string API have no QString.fromUtf8; plain str
# objects then pass through unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(820, 519)
Form.setWindowTitle(_("Form"))
self.gridLayout_2 = QtGui.QGridLayout(Form)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.tabWidget = QtGui.QTabWidget(Form)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_9 = QtGui.QGridLayout(self.tab)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.label_7 = QtGui.QLabel(self.tab)
self.label_7.setText(_("Choose &language (requires restart):"))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_9.addWidget(self.label_7, 2, 0, 1, 1)
self.opt_language = QtGui.QComboBox(self.tab)
self.opt_language.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.opt_language.setMinimumContentsLength(20)
self.opt_language.setObjectName(_fromUtf8("opt_language"))
self.gridLayout_9.addWidget(self.opt_language, 2, 1, 1, 1)
self.opt_systray_icon = QtGui.QCheckBox(self.tab)
self.opt_systray_icon.setText(_("Enable system &tray icon (needs restart)"))
self.opt_systray_icon.setObjectName(_fromUtf8("opt_systray_icon"))
self.gridLayout_9.addWidget(self.opt_systray_icon, 3, 0, 1, 1)
self.label_17 = QtGui.QLabel(self.tab)
self.label_17.setText(_("User Interface &layout (needs restart):"))
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_9.addWidget(self.label_17, 1, 0, 1, 1)
self.opt_gui_layout = QtGui.QComboBox(self.tab)
self.opt_gui_layout.setMaximumSize(QtCore.QSize(250, 16777215))
self.opt_gui_layout.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.opt_gui_layout.setMinimumContentsLength(20)
self.opt_gui_layout.setObjectName(_fromUtf8("opt_gui_layout"))
self.gridLayout_9.addWidget(self.opt_gui_layout, 1, 1, 1, 1)
self.opt_disable_animations = QtGui.QCheckBox(self.tab)
self.opt_disable_animations.setToolTip(_("Disable all animations. Useful if you have a slow/old computer."))
self.opt_disable_animations.setText(_("Disable &animations"))
self.opt_disable_animations.setObjectName(_fromUtf8("opt_disable_animations"))
self.gridLayout_9.addWidget(self.opt_disable_animations, 3, 1, 1, 1)
self.opt_disable_tray_notification = QtGui.QCheckBox(self.tab)
self.opt_disable_tray_notification.setText(_("Disable ¬ifications in system tray"))
self.opt_disable_tray_notification.setObjectName(_fromUtf8("opt_disable_tray_notification"))
self.gridLayout_9.addWidget(self.opt_disable_tray_notification, 4, 0, 1, 1)
self.opt_show_splash_screen = QtGui.QCheckBox(self.tab)
self.opt_show_splash_screen.setText(_("Show &splash screen at startup"))
self.opt_show_splash_screen.setObjectName(_fromUtf8("opt_show_splash_screen"))
self.gridLayout_9.addWidget(self.opt_show_splash_screen, 4, 1, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(self.tab)
self.groupBox_2.setTitle(_("&Toolbar"))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_8 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.opt_toolbar_icon_size = QtGui.QComboBox(self.groupBox_2)
self.opt_toolbar_icon_size.setObjectName(_fromUtf8("opt_toolbar_icon_size"))
self.gridLayout_8.addWidget(self.opt_toolbar_icon_size, 0, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setText(_("&Icon size:"))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_8.addWidget(self.label_5, 0, 0, 1, 1)
self.opt_toolbar_text = QtGui.QComboBox(self.groupBox_2)
self.opt_toolbar_text.setObjectName(_fromUtf8("opt_toolbar_text"))
self.gridLayout_8.addWidget(self.opt_toolbar_text, 1, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setText(_("Show &text under icons:"))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_8.addWidget(self.label_8, 1, 0, 1, 1)
self.gridLayout_9.addWidget(self.groupBox_2, 7, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_9.addItem(spacerItem, 8, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setText(_("Interface font:"))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.font_display = QtGui.QLineEdit(self.tab)
self.font_display.setReadOnly(True)
self.font_display.setObjectName(_fromUtf8("font_display"))
self.horizontalLayout.addWidget(self.font_display)
self.gridLayout_9.addLayout(self.horizontalLayout, 6, 0, 1, 1)
self.change_font_button = QtGui.QPushButton(self.tab)
self.change_font_button.setText(_("Change &font (needs restart)"))
self.change_font_button.setObjectName(_fromUtf8("change_font_button"))
self.gridLayout_9.addWidget(self.change_font_button, 6, 1, 1, 1)
self.label_widget_style = QtGui.QLabel(self.tab)
self.label_widget_style.setText(_("User interface &style (needs restart):"))
self.label_widget_style.setObjectName(_fromUtf8("label_widget_style"))
self.gridLayout_9.addWidget(self.label_widget_style, 0, 0, 1, 1)
self.opt_ui_style = QtGui.QComboBox(self.tab)
self.opt_ui_style.setObjectName(_fromUtf8("opt_ui_style"))
self.gridLayout_9.addWidget(self.opt_ui_style, 0, 1, 1, 1)
self.opt_book_list_tooltips = QtGui.QCheckBox(self.tab)
self.opt_book_list_tooltips.setText(_("Show &tooltips in the book list"))
self.opt_book_list_tooltips.setObjectName(_fromUtf8("opt_book_list_tooltips"))
self.gridLayout_9.addWidget(self.opt_book_list_tooltips, 5, 0, 1, 1)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("lt.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab, icon, _fromUtf8(""))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.gridLayout_12 = QtGui.QGridLayout(self.tab_4)
self.gridLayout_12.setObjectName(_fromUtf8("gridLayout_12"))
self.label_3 = QtGui.QLabel(self.tab_4)
self.label_3.setText(_("Note that <b>comments</b> will always be displayed at the end, regardless of the position you assign here."))
self.label_3.setWordWrap(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_12.addWidget(self.label_3, 2, 1, 1, 1)
self.opt_use_roman_numerals_for_series_number = QtGui.QCheckBox(self.tab_4)
self.opt_use_roman_numerals_for_series_number.setText(_("Use &Roman numerals for series"))
self.opt_use_roman_numerals_for_series_number.setChecked(True)
self.opt_use_roman_numerals_for_series_number.setObjectName(_fromUtf8("opt_use_roman_numerals_for_series_number"))
self.gridLayout_12.addWidget(self.opt_use_roman_numerals_for_series_number, 0, 1, 1, 1)
self.groupBox = QtGui.QGroupBox(self.tab_4)
self.groupBox.setTitle(_("Select displayed metadata"))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.df_up_button = QtGui.QToolButton(self.groupBox)
self.df_up_button.setToolTip(_("Move up"))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(I("arrow-up.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.df_up_button.setIcon(icon1)
self.df_up_button.setObjectName(_fromUtf8("df_up_button"))
self.gridLayout_3.addWidget(self.df_up_button, 0, 1, 1, 1)
self.df_down_button = QtGui.QToolButton(self.groupBox)
self.df_down_button.setToolTip(_("Move down"))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(I("arrow-down.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.df_down_button.setIcon(icon2)
self.df_down_button.setObjectName(_fromUtf8("df_down_button"))
self.gridLayout_3.addWidget(self.df_down_button, 2, 1, 1, 1)
self.field_display_order = QtGui.QListView(self.groupBox)
self.field_display_order.setAlternatingRowColors(True)
self.field_display_order.setObjectName(_fromUtf8("field_display_order"))
self.gridLayout_3.addWidget(self.field_display_order, 0, 0, 3, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem1, 1, 1, 1, 1)
self.gridLayout_12.addWidget(self.groupBox, 2, 0, 2, 1)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.label = QtGui.QLabel(self.tab_4)
self.label.setText(_("Default author link template:"))
self.label.setObjectName(_fromUtf8("label"))
self.hboxlayout.addWidget(self.label)
self.opt_default_author_link = QtGui.QLineEdit(self.tab_4)
self.opt_default_author_link.setToolTip(_("<p>Enter a template to be used to create a link for\n"
"an author in the books information dialog. This template will\n"
"be used when no link has been provided for the author using\n"
"Manage Authors. You can use the values {author} and\n"
"{author_sort}, and any template function."))
self.opt_default_author_link.setObjectName(_fromUtf8("opt_default_author_link"))
self.hboxlayout.addWidget(self.opt_default_author_link)
self.gridLayout_12.addLayout(self.hboxlayout, 0, 0, 1, 1)
self.opt_bd_show_cover = QtGui.QCheckBox(self.tab_4)
self.opt_bd_show_cover.setText(_("Show &cover in the book details panel"))
self.opt_bd_show_cover.setObjectName(_fromUtf8("opt_bd_show_cover"))
self.gridLayout_12.addWidget(self.opt_bd_show_cover, 1, 0, 1, 2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(I("book.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab_4, icon3, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.gridLayout_10 = QtGui.QGridLayout(self.tab_2)
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.opt_categories_using_hierarchy = EditWithComplete(self.tab_2)
self.opt_categories_using_hierarchy.setToolTip(_("A comma-separated list of categories in which items containing\n"
"periods are displayed in the tag browser trees. For example, if\n"
"this box contains \'tags\' then tags of the form \'Mystery.English\'\n"
"and \'Mystery.Thriller\' will be displayed with English and Thriller\n"
"both under \'Mystery\'. If \'tags\' is not in this box,\n"
"then the tags will be displayed each on their own line."))
self.opt_categories_using_hierarchy.setObjectName(_fromUtf8("opt_categories_using_hierarchy"))
self.gridLayout_10.addWidget(self.opt_categories_using_hierarchy, 3, 2, 1, 3)
self.label_9 = QtGui.QLabel(self.tab_2)
self.label_9.setText(_("Tags browser category &partitioning method:"))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_10.addWidget(self.label_9, 0, 0, 1, 2)
self.opt_tags_browser_partition_method = QtGui.QComboBox(self.tab_2)
self.opt_tags_browser_partition_method.setToolTip(_("Choose how tag browser subcategories are displayed when\n"
"there are more items than the limit. Select by first\n"
"letter to see an A, B, C list. Choose partitioned to\n"
"have a list of fixed-sized groups. Set to disabled\n"
"if you never want subcategories"))
self.opt_tags_browser_partition_method.setObjectName(_fromUtf8("opt_tags_browser_partition_method"))
self.gridLayout_10.addWidget(self.opt_tags_browser_partition_method, 0, 2, 1, 1)
self.label_10 = QtGui.QLabel(self.tab_2)
self.label_10.setText(_("&Collapse when more items than:"))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_10.addWidget(self.label_10, 0, 3, 1, 1)
self.opt_tags_browser_collapse_at = QtGui.QSpinBox(self.tab_2)
self.opt_tags_browser_collapse_at.setToolTip(_("If a Tag Browser category has more than this number of items, it is divided\n"
"up into subcategories. If the partition method is set to disable, this value is ignored."))
self.opt_tags_browser_collapse_at.setMaximum(10000)
self.opt_tags_browser_collapse_at.setObjectName(_fromUtf8("opt_tags_browser_collapse_at"))
self.gridLayout_10.addWidget(self.opt_tags_browser_collapse_at, 0, 4, 1, 1)
spacerItem2 = QtGui.QSpacerItem(690, 252, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_10.addItem(spacerItem2, 5, 0, 1, 5)
self.label_8111 = QtGui.QLabel(self.tab_2)
self.label_8111.setText(_("Categories not to partition:"))
self.label_8111.setObjectName(_fromUtf8("label_8111"))
self.gridLayout_10.addWidget(self.label_8111, 1, 2, 1, 1)
self.opt_tag_browser_dont_collapse = EditWithComplete(self.tab_2)
self.opt_tag_browser_dont_collapse.setToolTip(_("A comma-separated list of categories that are not to\n"
"be partitioned even if the number of items is larger than\n"
"the value shown above. This option can be used to\n"
"avoid collapsing hierarchical categories that have only\n"
"a few top-level elements."))
self.opt_tag_browser_dont_collapse.setObjectName(_fromUtf8("opt_tag_browser_dont_collapse"))
self.gridLayout_10.addWidget(self.opt_tag_browser_dont_collapse, 1, 3, 1, 2)
self.opt_show_avg_rating = QtGui.QCheckBox(self.tab_2)
self.opt_show_avg_rating.setText(_("Show &average ratings in the tags browser"))
self.opt_show_avg_rating.setChecked(True)
self.opt_show_avg_rating.setObjectName(_fromUtf8("opt_show_avg_rating"))
self.gridLayout_10.addWidget(self.opt_show_avg_rating, 2, 0, 1, 5)
self.label_81 = QtGui.QLabel(self.tab_2)
self.label_81.setText(_("Categories with &hierarchical items:"))
self.label_81.setObjectName(_fromUtf8("label_81"))
self.gridLayout_10.addWidget(self.label_81, 3, 0, 1, 1)
self.opt_tag_browser_old_look = QtGui.QCheckBox(self.tab_2)
self.opt_tag_browser_old_look.setText(_("Use &alternating row colors in the Tag Browser"))
self.opt_tag_browser_old_look.setObjectName(_fromUtf8("opt_tag_browser_old_look"))
self.gridLayout_10.addWidget(self.opt_tag_browser_old_look, 4, 0, 1, 5)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(I("tags.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab_2, icon4, _fromUtf8(""))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.gridLayout_11 = QtGui.QGridLayout(self.tab_3)
self.gridLayout_11.setObjectName(_fromUtf8("gridLayout_11"))
self.opt_separate_cover_flow = QtGui.QCheckBox(self.tab_3)
self.opt_separate_cover_flow.setText(_("Show cover &browser in a separate window (needs restart)"))
self.opt_separate_cover_flow.setObjectName(_fromUtf8("opt_separate_cover_flow"))
self.gridLayout_11.addWidget(self.opt_separate_cover_flow, 0, 0, 1, 2)
self.label_6 = QtGui.QLabel(self.tab_3)
self.label_6.setText(_("&Number of covers to show in browse mode (needs restart):"))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_11.addWidget(self.label_6, 1, 0, 1, 1)
self.opt_cover_flow_queue_length = QtGui.QSpinBox(self.tab_3)
self.opt_cover_flow_queue_length.setObjectName(_fromUtf8("opt_cover_flow_queue_length"))
self.gridLayout_11.addWidget(self.opt_cover_flow_queue_length, 1, 1, 1, 1)
spacerItem3 = QtGui.QSpacerItem(690, 283, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_11.addItem(spacerItem3, 4, 0, 1, 2)
self.opt_cb_fullscreen = QtGui.QCheckBox(self.tab_3)
self.opt_cb_fullscreen.setText(_("When showing cover browser in separate window, show it &fullscreen"))
self.opt_cb_fullscreen.setObjectName(_fromUtf8("opt_cb_fullscreen"))
self.gridLayout_11.addWidget(self.opt_cb_fullscreen, 2, 0, 1, 2)
self.fs_help_msg = QtGui.QLabel(self.tab_3)
self.fs_help_msg.setStyleSheet(_fromUtf8("margin-left: 1.5em"))
self.fs_help_msg.setText(_("You can press the %s keys to toggle full screen mode."))
self.fs_help_msg.setWordWrap(True)
self.fs_help_msg.setObjectName(_fromUtf8("fs_help_msg"))
self.gridLayout_11.addWidget(self.fs_help_msg, 3, 0, 1, 2)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(I("cover_flow.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab_3, icon5, _fromUtf8(""))
self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1)
self.label_7.setBuddy(self.opt_language)
self.label_17.setBuddy(self.opt_gui_layout)
self.label_5.setBuddy(self.opt_toolbar_icon_size)
self.label_8.setBuddy(self.opt_toolbar_text)
self.label_2.setBuddy(self.font_display)
self.label_widget_style.setBuddy(self.opt_ui_style)
self.label.setBuddy(self.opt_default_author_link)
self.label_9.setBuddy(self.opt_tags_browser_partition_method)
self.label_10.setBuddy(self.opt_tags_browser_collapse_at)
self.label_8111.setBuddy(self.opt_tag_browser_dont_collapse)
self.label_81.setBuddy(self.opt_categories_using_hierarchy)
self.label_6.setBuddy(self.opt_cover_flow_queue_length)
self.retranslateUi(Form)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _("Main Interface"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _("Book Details"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _("Tag Browser"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _("Cover Browser"))
from calibre.gui2.complete2 import EditWithComplete
|
yeyanchao/calibre
|
src/calibre/gui2/preferences/look_feel_ui.py
|
Python
|
gpl-3.0
| 19,303
|
#!/usr/bin/env python
# Django command-line management entry point for the "varnish" project.
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set
    # DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "varnish.settings")
    # Imported lazily so merely importing this module does not require Django.
    from django.core.management import execute_from_command_line
    # Dispatch to whichever management command was named on the command line.
    execute_from_command_line(sys.argv)
|
haandol/python-varnish-example
|
manage.py
|
Python
|
apache-2.0
| 250
|
from heuristicSearch.planners.island_astar import IslandAstar
from heuristicSearch.envs.island_env import IslandGridEnvironment
from heuristicSearch.envs.occupancy_grid import OccupancyGrid
from heuristicSearch.graph.node import Node
from heuristicSearch.heuristics.island_heuristic import IslandHeuristic
from heuristicSearch.utils.visualizer import ImageVisualizer
from heuristicSearch.utils.utils import *
from functools import partial
import matplotlib.pyplot as plt
import cv2 as cv
import pickle
import sys
def main():
    """Plan a path on an occupancy-grid map with island-based A* and show it.

    Expects one command-line argument: a folder containing
      image.png       -- occupancy-grid map image
      start_goal.pkl  -- pickled (startPoint, goalPoint) tuple
      islands.pkl     -- pickled island nodes

    Numpy array is accessed as (r, c) while a point is (x, y). The code
    follows (r, c) convention everywhere. Hence, be careful whenever using a
    point with opencv.
    """
    folder = sys.argv[1]
    image = folder + "/image.png"
    start_goal = folder + "/start_goal.pkl"
    islandsFile = folder + "/islands.pkl"
    # Context managers close the pickle files promptly; the original left the
    # handles from pickle.load(open(...)) dangling.
    with open(start_goal, "rb") as f:
        startPoint, goalPoint = pickle.load(f)
    with open(islandsFile, "rb") as f:
        islands = pickle.load(f)
    occGrid = OccupancyGrid()
    occMap = occGrid.getMapFromImage(image)
    viz = ImageVisualizer(occMap)
    viz.incrementalDisplay = True
    print(occMap.shape)
    print(startPoint, goalPoint)
    gridEnv = IslandGridEnvironment(occMap, occMap.shape[0], occMap.shape[1],
            islands)
    gridEnv.setIslandNodes(islands)
    islandHeur = IslandHeuristic()
    # Heuristic is the island heuristic measured with the environment's
    # Euclidean metric.
    gridEnv.setHeuristic(partial(islandHeur.heuristic, env=gridEnv,
        metric=gridEnv.euclideanHeuristic))
    startNode = Node(gridEnv.getIdFromPoint(startPoint))
    startNode.setParent(None)
    goalNode = Node(gridEnv.getIdFromPoint(goalPoint))
    gridEnv.addNode(goalNode)
    gridEnv.goal(goalNode)
    assert gridEnv.isValidPoint(startPoint)
    assert gridEnv.isValidPoint(goalPoint)
    # Island visualization (debug aid).
    #for island in gridEnv.getIslandNodes():
    #    viz.drawCircle(gridEnv.getPointFromId(island.getNodeId()),
    #            gridEnv.islandThresh)
    #    viz.displayImage(1)
    #cv.destroyAllWindows()
    # Planner
    planner = IslandAstar(gridEnv, inflation=1)
    planFound = planner.plan(startNode, goalNode, viz=viz)
    path = []
    if planFound:
        print("Planning successful")
        # Walk parent links back from the goal; the start node itself is not
        # appended (matches the original behavior).
        currNode = goalNode
        while currNode != startNode:
            path.append(currNode)
            currNode = currNode.getParent()
        # Reverse so the path runs start -> goal.
        path = path[::-1]
        print("Solution cost is %d" % path[-1].g)
        pathPoints = [gridEnv.getPointFromId(node.getNodeId()) for node in path]
        viz.displayImage()
        #viz.joinPointsInOrder(pathPoints, thickness=5)
        viz.markPoints(pathPoints, color=100)
        viz.displayImage()
        cv.waitKey(0)

main()
|
shivamvats/graphSearch
|
main_island.py
|
Python
|
mit
| 2,825
|
from django.contrib import admin
from featureflipper.models import Feature
class FeatureAdmin(admin.ModelAdmin):
    """Admin for Feature flags with bulk enable/disable/flip actions.

    The three admin actions share one private helper so the iterate/save/
    report pattern lives in a single place.
    """

    list_display = ('name', 'description', 'status')

    def _apply_to_features(self, request, queryset, method_name, past_tense):
        """Call ``method_name`` on every feature in ``queryset``, save each,
        and report to the user how many were changed."""
        for feature in queryset:
            getattr(feature, method_name)()
            feature.save()
        self.message_user(
            request,
            "Successfully %s %d features." % (past_tense, len(queryset)))

    def enable_features(self, request, queryset):
        self._apply_to_features(request, queryset, 'enable', 'enabled')
    enable_features.short_description = "Enable selected features"

    def disable_features(self, request, queryset):
        self._apply_to_features(request, queryset, 'disable', 'disabled')
    disable_features.short_description = "Disable selected features"

    def flip_features(self, request, queryset):
        self._apply_to_features(request, queryset, 'flip', 'flipped')
    flip_features.short_description = "Flip selected features"

    actions = [enable_features, disable_features, flip_features]

admin.site.register(Feature, FeatureAdmin)
|
tobych/django-feature-flipper
|
featureflipper/admin.py
|
Python
|
apache-2.0
| 1,161
|
"""
Extension 2 (ext2)
Used in Linux systems
"""
from construct import *
Char = SLInt8
UChar = ULInt8
Short = SLInt16
UShort = ULInt16
Long = SLInt32
ULong = ULInt32
def BlockPointer(name):
return Struct(name,
ULong("block_number"),
OnDemandPointer(lambda ctx: ctx["block_number"]),
)
superblock = Struct("superblock",
ULong('inodes_count'),
ULong('blocks_count'),
ULong('reserved_blocks_count'),
ULong('free_blocks_count'),
ULong('free_inodes_count'),
ULong('first_data_block'),
Enum(ULong('log_block_size'),
OneKB = 0,
TwoKB = 1,
FourKB = 2,
),
Long('log_frag_size'),
ULong('blocks_per_group'),
ULong('frags_per_group'),
ULong('inodes_per_group'),
ULong('mtime'),
ULong('wtime'),
UShort('mnt_count'),
Short('max_mnt_count'),
Const(UShort('magic'), 0xEF53),
UShort('state'),
UShort('errors'),
Padding(2),
ULong('lastcheck'),
ULong('checkinterval'),
ULong('creator_os'),
ULong('rev_level'),
Padding(235 * 4),
)
group_descriptor = Struct("group_descriptor",
ULong('block_bitmap'),
ULong('inode_bitmap'),
ULong('inode_table'),
UShort('free_blocks_count'),
UShort('free_inodes_count'),
UShort('used_dirs_count'),
Padding(14),
)
inode = Struct("inode",
FlagsEnum(UShort('mode'),
IXOTH = 0x0001,
IWOTH = 0x0002,
IROTH = 0x0004,
IRWXO = 0x0007,
IXGRP = 0x0008,
IWGRP = 0x0010,
IRGRP = 0x0020,
IRWXG = 0x0038,
IXUSR = 0x0040,
IWUSR = 0x0080,
IRUSR = 0x0100,
IRWXU = 0x01C0,
ISVTX = 0x0200,
ISGID = 0x0400,
ISUID = 0x0800,
IFIFO = 0x1000,
IFCHR = 0x2000,
IFDIR = 0x4000,
IFBLK = 0x6000,
IFREG = 0x8000,
IFLNK = 0xC000,
IFSOCK = 0xA000,
IFMT = 0xF000,
),
UShort('uid'),
ULong('size'),
ULong('atime'),
ULong('ctime'),
ULong('mtime'),
ULong('dtime'),
UShort('gid'),
UShort('links_count'),
ULong('blocks'),
FlagsEnum(ULong('flags'),
SecureDelete = 0x0001,
AllowUndelete = 0x0002,
Compressed = 0x0004,
Synchronous = 0x0008,
),
Padding(4),
StrictRepeater(12, ULong('blocks')),
ULong("indirect1_block"),
ULong("indirect2_block"),
ULong("indirect3_block"),
ULong('version'),
ULong('file_acl'),
ULong('dir_acl'),
ULong('faddr'),
UChar('frag'),
Byte('fsize'),
Padding(10) ,
)
# special inodes
EXT2_BAD_INO = 1
EXT2_ROOT_INO = 2
EXT2_ACL_IDX_INO = 3
EXT2_ACL_DATA_INO = 4
EXT2_BOOT_LOADER_INO = 5
EXT2_UNDEL_DIR_INO = 6
EXT2_FIRST_INO = 11
directory_record = Struct("directory_entry",
ULong("inode"),
UShort("rec_length"),
UShort("name_length"),
Field("name", lambda ctx: ctx["name_length"]),
Padding(lambda ctx: ctx["rec_length"] - ctx["name_length"])
)
print superblock.sizeof()
|
larsks/pydonet
|
lib/pydonet/construct/formats/filesystem/ext2.py
|
Python
|
gpl-2.0
| 3,167
|
"""
.. _tut-inverse-methods:
Source localization with MNE/dSPM/sLORETA/eLORETA
=================================================
The aim of this tutorial is to teach you how to compute and apply a linear
minimum-norm inverse method on evoked/raw/epochs data.
"""
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
###############################################################################
# Process MEG data
data_path = sample.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(raw_fname) # already has an average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('meg', 'eog'), baseline=baseline, reject=reject)
###############################################################################
# Compute regularized noise covariance
# ------------------------------------
# For more details see :ref:`tut_compute_covariance`.
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True)
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
###############################################################################
# Compute the evoked response
# ---------------------------
# Let's just use the MEG channels for simplicity.
evoked = epochs.average().pick('meg')
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag',
time_unit='s')
###############################################################################
# It's also a good idea to look at whitened data:
evoked.plot_white(noise_cov, time_unit='s')
del epochs, raw # to save memory
###############################################################################
# Inverse modeling: MNE/dSPM on evoked and raw data
# -------------------------------------------------
# Here we first read the forward solution. You will likely need to compute
# one for your own data -- see :ref:`tut-forward` for information on how
# to do it.
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
###############################################################################
# Next, we make an MEG inverse operator.
inverse_operator = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
del fwd
# You can write it to disk with::
#
# >>> from mne.minimum_norm import write_inverse_operator
# >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
# inverse_operator)
###############################################################################
# Compute inverse solution
# ------------------------
# We can use this to compute the inverse solution and obtain source time
# courses:
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
stc, residual = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None,
return_residual=True, verbose=True)
###############################################################################
# Visualization
# -------------
# We can look at different dipole activations:
fig, ax = plt.subplots()
ax.plot(1e3 * stc.times, stc.data[::100, :].T)
ax.set(xlabel='time (ms)', ylabel='%s value' % method)
###############################################################################
# Examine the original data and the residual after fitting:
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
ax.texts.clear()
for line in ax.lines:
line.set_color('#98df81')
residual.plot(axes=axes)
###############################################################################
# Here we use peak getter to move visualization to the time point of the peak
# and draw a marker at the maximum peak vertex.
# sphinx_gallery_thumbnail_number = 9
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
surfer_kwargs = dict(
hemi='rh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=10)
brain = stc.plot(**surfer_kwargs)
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6, alpha=0.5)
brain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title',
font_size=14)
# The documentation website's movie is generated with:
# brain.save_movie(..., tmin=0.05, tmax=0.15, interpolation='linear',
# time_dilation=20, framerate=10, time_viewer=True)
###############################################################################
# There are many other ways to visualize and work with source data, see
# for example:
#
# - :ref:`tut-viz-stcs`
# - :ref:`ex-morph-surface`
# - :ref:`ex-morph-volume`
# - :ref:`ex-vector-mne-solution`
# - :ref:`tut-dipole-orientations`
# - :ref:`tut-mne-fixed-free`
# - :ref:`examples using apply_inverse
# <sphx_glr_backreferences_mne.minimum_norm.apply_inverse>`.
|
mne-tools/mne-tools.github.io
|
0.23/_downloads/bbc4594eea14cf3d0473ec5148e21b09/30_mne_dspm_loreta.py
|
Python
|
bsd-3-clause
| 5,669
|
#!/usr/bin/env python
# encoding: utf-8
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
import numpy as np
import unittest
from .ch import Ch
class TestLinalg(unittest.TestCase):
    """Finite-difference checks for chumpy's differentiable linalg ops.

    Each test perturbs the input by a small epsilon and compares the
    empirical change in the output against the analytic derivative from
    ``dr_wrt``.
    """

    def setUp(self):
        # Fixed seed so the finite-difference comparisons are reproducible.
        np.random.seed(0)

    def test_slogdet(self):
        """Analytic derivative of slogdet's log-magnitude matches a
        finite-difference estimate, and the sign output matches numpy."""
        from . import ch
        tmp = ch.random.randn(100).reshape((10,10))
        # print 'chumpy version: ' + str(slogdet(tmp)[1].r)
        # print 'old version:' + str(np.linalg.slogdet(tmp.r)[1])
        eps = 1e-10
        diff = np.random.rand(100) * eps
        diff_reshaped = diff.reshape((10,10))
        # Empirical change in log|det| vs. the predicted linearized change.
        gt = np.linalg.slogdet(tmp.r+diff_reshaped)[1] - np.linalg.slogdet(tmp.r)[1]
        pred = ch.linalg.slogdet(tmp)[1].dr_wrt(tmp).dot(diff)
        #print gt
        #print pred
        diff = gt - pred
        self.assertTrue(np.max(np.abs(diff)) < 1e-12)
        # The sign component should agree with numpy exactly.
        sgn_gt = np.linalg.slogdet(tmp.r)[0]
        sgn_pred = ch.linalg.slogdet(tmp)[0]
        #print sgn_gt
        #print sgn_pred
        diff = sgn_gt - sgn_pred.r
        self.assertTrue(np.max(np.abs(diff)) < 1e-12)

    def test_lstsq(self):
        """chumpy's lstsq matches numpy's for tall/wide A and 1-D/2-D b."""
        from .linalg import lstsq
        shapes = ([10, 3], [3, 10])
        for shape in shapes:
            for b2d in True, False:
                A = (np.random.rand(np.prod(shape))-.5).reshape(shape)
                if b2d:
                    b = np.random.randn(shape[0],2)
                else:
                    b = np.random.randn(shape[0])
                x1, residuals1, rank1, s1 = lstsq(A, b)
                x2, residuals2, rank2, s2 = np.linalg.lstsq(A, b)
                #print x1.r
                #print x2
                #print residuals1.r
                #print residuals2
                self.assertTrue(np.max(np.abs(x1.r-x2)) < 1e-14)
                # numpy returns empty residuals for rank-deficient /
                # underdetermined systems; only compare when present.
                if len(residuals2) > 0:
                    self.assertTrue(np.max(np.abs(residuals1.r-residuals2)) < 1e-14)

    def test_pinv(self):
        """Pinv values match numpy's pinv for tall and wide matrices.

        Derivative checking is disabled (see FIXME below).
        """
        from .linalg import Pinv
        data = (np.random.rand(12)-.5).reshape((3, 4))
        pc_tall = Pinv(data)
        pc_wide = Pinv(data.T)
        pn_tall = np.linalg.pinv(data)
        pn_wide = np.linalg.pinv(data.T)
        tall_correct = np.max(np.abs(pc_tall.r - pn_tall)) < 1e-12
        wide_correct = np.max(np.abs(pc_wide.r - pn_wide)) < 1e-12
        # if not tall_correct or not wide_correct:
        #     print tall_correct
        #     print wide_correct
        #     import pdb; pdb.set_trace()
        self.assertTrue(tall_correct)
        self.assertTrue(wide_correct)
        return # FIXME. how to test derivs?
        # NOTE: unreachable debugging code kept from the original.
        for pc in [pc_tall, pc_wide]:
            self.chkd(pc, pc.mtx)
        import pdb; pdb.set_trace()

    def test_svd(self):
        """Derivatives of U, S and V from Svd match finite differences.

        A single input entry (index ``idx``) is perturbed by ``eps`` and the
        per-output ratio empirical/predicted is checked to be close to 1.
        """
        from .linalg import Svd
        eps = 1e-3
        idx = 10
        data = np.sin(np.arange(300)*100+10).reshape((-1,3))
        # Make the data less degenerate so the SVD is well conditioned.
        data[3,:] = data[3,:]*0+10
        data[:,1] *= 2
        data[:,2] *= 4
        data = data.copy()
        u,s,v = np.linalg.svd(data, full_matrices=False)
        data = Ch(data)
        data2 = data.r.copy()
        data2.ravel()[idx] += eps
        u2,s2,v2 = np.linalg.svd(data2, full_matrices=False)
        svdu, svdd, svdv = Svd(x=data)
        # test singular values
        diff_emp = (s2-s) / eps
        diff_pred = svdd.dr_wrt(data)[:,idx]
        #print diff_emp
        #print diff_pred
        ratio = diff_emp / diff_pred
        #print ratio
        self.assertTrue(np.max(np.abs(ratio - 1.)) < 1e-4)
        # test V
        diff_emp = (v2 - v) / eps
        diff_pred = svdv.dr_wrt(data)[:,idx].reshape(diff_emp.shape)
        ratio = diff_emp / diff_pred
        #print ratio
        self.assertTrue(np.max(np.abs(ratio - 1.)) < 1e-2)
        # test U
        diff_emp = (u2 - u) / eps
        diff_pred = svdu.dr_wrt(data)[:,idx].reshape(diff_emp.shape)
        ratio = diff_emp / diff_pred
        #print ratio
        self.assertTrue(np.max(np.abs(ratio - 1.)) < 1e-2)

    def test_det(self):
        """Det's value equals numpy's det, and its derivative matches a
        directional finite-difference estimate."""
        from .linalg import Det
        mtx1 = Ch(np.sin(2**np.arange(9)).reshape((3,3)))
        mtx1_det = Det(mtx1)
        dr = mtx1_det.dr_wrt(mtx1)
        eps = 1e-5
        mtx2 = mtx1.r.copy()
        input_diff = np.sin(np.arange(mtx2.size)).reshape(mtx2.shape) * eps
        mtx2 += input_diff
        mtx2_det = Det(mtx2)
        output_diff_emp = (np.linalg.det(mtx2) - np.linalg.det(mtx1.r)).ravel()
        output_diff_pred = Det(mtx1).dr_wrt(mtx1).dot(input_diff.ravel())
        #print output_diff_emp
        #print output_diff_pred
        self.assertTrue(np.max(np.abs(output_diff_emp - output_diff_pred)) < eps*1e-4)
        self.assertTrue(np.max(np.abs(mtx1_det.r - np.linalg.det(mtx1.r)).ravel()) == 0)

    def test_inv1(self):
        """Inv's value equals numpy's inv, and its derivative matches a
        directional finite-difference estimate on a 3x3 matrix."""
        from .linalg import Inv
        mtx1 = Ch(np.sin(2**np.arange(9)).reshape((3,3)))
        mtx1_inv = Inv(mtx1)
        dr = mtx1_inv.dr_wrt(mtx1)
        eps = 1e-5
        mtx2 = mtx1.r.copy()
        input_diff = np.sin(np.arange(mtx2.size)).reshape(mtx2.shape) * eps
        mtx2 += input_diff
        mtx2_inv = Inv(mtx2)
        output_diff_emp = (np.linalg.inv(mtx2) - np.linalg.inv(mtx1.r)).ravel()
        output_diff_pred = Inv(mtx1).dr_wrt(mtx1).dot(input_diff.ravel())
        #print output_diff_emp
        #print output_diff_pred
        self.assertTrue(np.max(np.abs(output_diff_emp - output_diff_pred)) < eps*1e-4)
        self.assertTrue(np.max(np.abs(mtx1_inv.r - np.linalg.inv(mtx1.r)).ravel()) == 0)

    def test_inv2(self):
        """Column ``idx`` of Inv's Jacobian matches a one-entry
        finite-difference estimate on a 10x10 matrix."""
        from .linalg import Inv
        eps = 1e-8
        idx = 13
        mtx1 = np.random.rand(100).reshape((10,10))
        mtx2 = mtx1.copy()
        mtx2.ravel()[idx] += eps
        diff_emp = (np.linalg.inv(mtx2) - np.linalg.inv(mtx1)) / eps
        mtx1 = Ch(mtx1)
        diff_pred = Inv(mtx1).dr_wrt(mtx1)[:,13].reshape(diff_emp.shape)
        #print diff_emp
        #print diff_pred
        #print diff_emp - diff_pred
        self.assertTrue(np.max(np.abs(diff_pred.ravel()-diff_emp.ravel())) < 1e-4)

    @unittest.skipIf(np.__version__ < '1.8',
                     "broadcasting for matrix inverse not supported in numpy < 1.8")
    def test_inv3(self):
        """Test linalg.inv with broadcasting support."""
        from .linalg import Inv
        # Stack of three 2x2 matrices inverted in one call.
        mtx1 = Ch(np.sin(2**np.arange(12)).reshape((3,2,2)))
        mtx1_inv = Inv(mtx1)
        dr = mtx1_inv.dr_wrt(mtx1)
        eps = 1e-5
        mtx2 = mtx1.r.copy()
        input_diff = np.sin(np.arange(mtx2.size)).reshape(mtx2.shape) * eps
        mtx2 += input_diff
        mtx2_inv = Inv(mtx2)
        output_diff_emp = (np.linalg.inv(mtx2) - np.linalg.inv(mtx1.r)).ravel()
        output_diff_pred = Inv(mtx1).dr_wrt(mtx1).dot(input_diff.ravel())
        # print output_diff_emp
        # print output_diff_pred
        self.assertTrue(np.max(np.abs(output_diff_emp.ravel() - output_diff_pred.ravel())) < eps*1e-3)
        self.assertTrue(np.max(np.abs(mtx1_inv.r - np.linalg.inv(mtx1.r)).ravel()) == 0)

    def chkd(self, obj, parm, eps=1e-14):
        """Debug helper: print the ratio and difference between a central
        finite-difference derivative of ``obj`` w.r.t. ``parm`` and the
        analytic one.  Restores ``parm`` before returning."""
        backed_up = parm.x
        if True:
            diff = (np.random.rand(parm.size)-.5).reshape(parm.shape)
        else:
            # Alternative: perturb a single entry only.
            diff = np.zeros(parm.shape)
            diff.ravel()[4] = 2.
        dr = obj.dr_wrt(parm)
        # Central difference: evaluate at parm -/+ eps*diff.
        parm.x = backed_up - diff*eps
        r_lower = obj.r
        parm.x = backed_up + diff*eps
        r_upper = obj.r
        diff_emp = (r_upper - r_lower) / (eps*2.)
        diff_pred = dr.dot(diff.ravel()).reshape(diff_emp.shape)
        #print diff_emp
        #print diff_pred
        print(diff_emp / diff_pred)
        print(diff_emp - diff_pred)
        parm.x = backed_up
# Module-level suite for external runners; running the file directly uses
# unittest's own CLI instead.
suite = unittest.TestLoader().loadTestsFromTestCase(TestLinalg)
if __name__ == '__main__':
    unittest.main()
|
mattloper/chumpy
|
chumpy/test_linalg.py
|
Python
|
mit
| 8,103
|
# coding=utf-8
# 1. generate velocity and force nodes of sphere using MATLAB,
# 2. for each force node, get b, solve surrounding velocity boundary condition (pipe and cover, named boundary velocity) using formula from Liron's paper, save .mat file
# 3. read .mat file, for each boundary velocity, solve associated boundary force.
# 4. solve sphere M matrix using boundary force.
# 5. solve problem and check.
import sys
import petsc4py
petsc4py.init(sys.argv)
import numpy as np
# import pickle
# from time import time
# from scipy.io import loadmat
from src.stokes_flow import problem_dic, obj_dic
from src.geo import *
from petsc4py import PETSc
from src import stokes_flow as sf
from src.myio import *
from src.objComposite import *
from src.myvtk import *
from src.support_class import *
from codeStore.helix_common import *
# @profile
def main_fun(**main_kwargs):
    """Solve translation and rotation mobility problems for a helical tail.

    Reads options from the PETSc option database, creates the tail geometry,
    assembles the Stokes-flow matrix once, then solves two rigid-body cases:
    unit translation along z and unit rotation about z.  Force results and
    VTK files are written for each case.  Returns True on completion.
    """
    OptDB = PETSc.Options()
    # Output-file prefix; '-f' on the command line overrides the default.
    fileHandle = OptDB.getString('f', 'tail_U')
    OptDB.setValue('f', fileHandle)
    main_kwargs['fileHandle'] = fileHandle
    # Rectangular sampling region and grid used when exporting velocity fields.
    field_range = np.array([[-3, -3, -3], [3, 3, 3]])
    n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
    main_kwargs['field_range'] = field_range
    main_kwargs['n_grid'] = n_grid
    main_kwargs['region_type'] = 'rectangle'
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    matrix_method = problem_kwargs['matrix_method']
    pickProblem = problem_kwargs['pickProblem']
    fileHandle = problem_kwargs['fileHandle']
    save_vtk = problem_kwargs['save_vtk']
    if not problem_kwargs['restart']:
        print_case_info(**problem_kwargs)
        tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
        problem = problem_dic[matrix_method](**problem_kwargs)
        if 'stokesletsInPipe' in matrix_method:
            # Stokeslets-in-pipe methods need the precomputed pipe force data.
            forcepipe = problem_kwargs['forcepipe']
            problem.set_prepare(forcepipe)
        for tobj in tail_obj_list:
            problem.add_obj(tobj)
        # # dbg
        # problem.show_u_nodes()
        # assert 1 == 2
        if pickProblem:
            problem.pickmyself('%s_tran' % fileHandle, ifcheck=True)
        problem.print_info()
        problem.create_matrix()
        # 1. translation: unit velocity along z, zero angular velocity.
        for tobj in tail_obj_list:
            tobj.set_rigid_velocity(np.array((0, 0, 1, 0, 0, 0)))
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself('%s_tran' % fileHandle, pick_M=False, mat_destroy=False)
        print_single_ecoli_force_result(problem, part='tail', prefix='tran', **problem_kwargs)
        problem.vtk_self('%s_tran' % fileHandle)
        if save_vtk:
            problem.vtk_velocity('%s_tran' % fileHandle)
        # 2. rotation: unit angular velocity about z, zero translation.
        for tobj in tail_obj_list:
            tobj.set_rigid_velocity(np.array((0, 0, 0, 0, 0, 1)))
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself('%s_rota' % fileHandle, pick_M=False, mat_destroy=False)
        print_single_ecoli_force_result(problem, part='tail', prefix='rota', **problem_kwargs)
        problem.vtk_self('%s_rota' % fileHandle)
        if save_vtk:
            problem.vtk_velocity('%s_rota' % fileHandle)
    return True
def self_repeat_tail(**main_kwargs):
    """Like main_fun, but for a self-repeating tail built from start/body/end
    parts.

    Creates the repeated-tail geometry, pairs each part with its full tail
    object, assembles the Stokes-flow matrix, then solves the unit
    translation and unit rotation cases, writing forces and VTK output for
    each.  Returns True on completion.
    """
    OptDB = PETSc.Options()
    # Output-file prefix; '-f' on the command line overrides the default.
    fileHandle = OptDB.getString('f', 'tail_U')
    OptDB.setValue('f', fileHandle)
    main_kwargs['fileHandle'] = fileHandle
    # Rectangular sampling region and grid used when exporting velocity fields.
    field_range = np.array([[-3, -3, -3], [3, 3, 3]])
    n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
    main_kwargs['field_range'] = field_range
    main_kwargs['n_grid'] = n_grid
    main_kwargs['region_type'] = 'rectangle'
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    matrix_method = problem_kwargs['matrix_method']
    pickProblem = problem_kwargs['pickProblem']
    fileHandle = problem_kwargs['fileHandle']
    save_vtk = problem_kwargs['save_vtk']
    if not problem_kwargs['restart']:
        print_case_info(**problem_kwargs)
        _ = create_selfRepeat_tail(np.zeros(3), **problem_kwargs)
        tail_list, tail_start_list, tail_body0_list, tail_end_list = _
        part_obj_list = list(tube_flatten((tail_start_list, tail_body0_list, tail_end_list)))
        problem = problem_dic[matrix_method](**problem_kwargs)
        if 'stokesletsInPipe' in matrix_method:
            # Stokeslets-in-pipe methods need the precomputed pipe force data.
            forcepipe = problem_kwargs['forcepipe']
            problem.set_prepare(forcepipe)
        for t1_list in (tail_start_list, tail_body0_list, tail_end_list):
            # NOTE(review): each obj_pair is a (part, tail) tuple from zip;
            # presumably this matrix method's add_obj accepts such pairs —
            # confirm against the problem class (main_fun passes bare objects).
            for obj_pair in zip(t1_list, tail_list, ):
                problem.add_obj(obj_pair)
        # # dbg
        # tgeo = base_geo()
        # tnodes = np.vstack([t1.get_u_geo().get_all_nodes() for t1 in tail_list])
        # tgeo.set_nodes(tnodes, 0)
        # tgeo.show_nodes()
        # problem.show_u_nodes()
        # assert 1 == 2
        if pickProblem:
            problem.pickmyself('%s_tran' % fileHandle, ifcheck=True)
        problem.print_info()
        problem.create_matrix()
        # 1. translation: unit velocity along z, zero angular velocity.
        for tobj in part_obj_list:
            tobj.set_rigid_velocity(np.array((0, 0, 1, 0, 0, 0)))
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself('%s_tran' % fileHandle, pick_M=False, mat_destroy=False)
        print_single_ecoli_force_result(problem, part='tail', prefix='tran', **problem_kwargs)
        problem.vtk_self('%s_tran' % fileHandle)
        if save_vtk:
            problem.vtk_velocity('%s_tran' % fileHandle)
        # 2. rotation: unit angular velocity about z, zero translation.
        for tobj in part_obj_list:
            tobj.set_rigid_velocity(np.array((0, 0, 0, 0, 0, 1)))
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself('%s_rota' % fileHandle, pick_M=False, mat_destroy=False)
        print_single_ecoli_force_result(problem, part='tail', prefix='rota', **problem_kwargs)
        problem.vtk_self('%s_rota' % fileHandle)
        if save_vtk:
            problem.vtk_velocity('%s_rota' % fileHandle)
    return True
def self_rotate_tail(**main_kwargs):
    """Solve the resistance problem of a rotating helical tail alone.

    Runs two sub-problems with one of the *selfRotate* matrix methods:
    (1) unit translation, (2) unit rotation (arguments to
    ``set_rigid_velocity``).  For each, forces are reported via
    ``print_single_ecoli_force_result`` and geometry/velocity fields are
    optionally dumped to VTK.  Always returns ``True``.
    """
    OptDB = PETSc.Options()
    fileHandle = OptDB.getString('f', 'tail_U')
    OptDB.setValue('f', fileHandle)
    main_kwargs['fileHandle'] = fileHandle
    # sampling region and resolution for the optional VTK velocity field
    field_range = np.array([[-3, -3, -3], [3, 3, 3]])
    n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
    main_kwargs['field_range'] = field_range
    main_kwargs['n_grid'] = n_grid
    main_kwargs['region_type'] = 'rectangle'
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    matrix_method = problem_kwargs['matrix_method']
    pickProblem = problem_kwargs['pickProblem']
    fileHandle = problem_kwargs['fileHandle']
    save_vtk = problem_kwargs['save_vtk']
    n_tail = problem_kwargs['n_tail']
    if not problem_kwargs['restart']:
        print_case_info(**problem_kwargs)
        tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
        tail_obj = tail_obj_list[0]
        # the selfRotate matrix methods replicate a single tail n_tail times
        # about problem_norm through problem_center
        problem_kwargs['problem_center'] = tail_obj.get_u_geo().get_center()
        problem_kwargs['problem_norm'] = tail_obj.get_u_geo().get_geo_norm()
        problem_kwargs['problem_n_copy'] = n_tail
        problem = problem_dic[matrix_method](**problem_kwargs)
        problem.add_obj(tail_obj)
        # # dbg
        # problem.show_u_nodes()
        # err_msg = 'self_rotate_3d_petsc function is modified. '
        # assert 1 == 2, err_msg
        if pickProblem:
            problem.pickmyself('%s_tran' % fileHandle, ifcheck=True)
        problem.print_info()
        problem.create_matrix()
        # 1. translation
        problem.set_rigid_velocity(1, 0)
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself('%s_tran' % fileHandle, pick_M=False, mat_destroy=False)
        print_single_ecoli_force_result(problem, part='tail', prefix='tran', **problem_kwargs)
        problem.vtk_self('%s_tran' % fileHandle)
        if save_vtk:
            problem.vtk_velocity('%s_tran' % fileHandle)
        # 2. rotation
        problem.set_rigid_velocity(0, 1)
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself('%s_rota' % fileHandle, pick_M=False, mat_destroy=False)
        print_single_ecoli_force_result(problem, part='tail', prefix='rota', **problem_kwargs)
        problem.vtk_self('%s_rota' % fileHandle)
        if save_vtk:
            problem.vtk_velocity('%s_rota' % fileHandle)
    return True
def dbg_SelfRepeat_FatHelix():
    """Debug helper: build a SelfRepeat_FatHelix geometry and display all its nodes."""
    n_repeat = 3  # repeat the tail this many times
    helix_geo = SelfRepeat_FatHelix(n_repeat)
    helix_geo.create_deltatheta(dth=0.2, radius=0.1, R1=1, R2=1, B=0.2,
                                n_c=3, with_cover=1)
    helix_geo.show_all_nodes()
if __name__ == '__main__':
    OptDB = PETSc.Options()
    # Dispatch on the matrix method supplied through the -sm option.  The
    # default 'rs_stokeslets' is deliberately outside the supported set, so
    # the user must pick one of the methods below explicitly.
    matrix_method = OptDB.getString('sm', 'rs_stokeslets')
    assert matrix_method in ('pf', 'pf_selfRepeat',
                             'pf_selfRotate', 'rs_selfRotate', 'lg_rs_selfRotate'), \
        'unsupported matrix method: %s' % matrix_method
    # Single consistent if/elif chain.  The previous version used a bare
    # `if` for pf_selfRepeat and hung the final `elif` off the second `if`
    # only, which obscured the (mutually exclusive) dispatch.
    if matrix_method == 'pf_selfRepeat':
        self_repeat_tail()
    elif matrix_method in ('pf_selfRotate', 'rs_selfRotate', 'lg_rs_selfRotate'):
        self_rotate_tail()
    elif matrix_method == 'pf':
        main_fun()
|
pcmagic/stokes_flow
|
ecoli_in_pipe/tail_U.py
|
Python
|
mit
| 9,601
|
#!/usr/bin/python
#
# This utility covers two needs of the admin:
#
# 1) generate authentication URL
# 2) submit PIN to complete registration
import base
import sys
import random
def do_init(initiative_name):
print "Initializing: %s" % initiative_name
# this at least go interactive and return a message/error
base.Disku(initiative_name, create=True)
return 0
def do_generate(iname):
url, random_name = inner_do_generate(iname)
print "url:", url
print "random associated ID:", random_name
return 0
def inner_do_generate(iname, supply_random=False):
    """Request an OAuth registration URL for initiative *iname*.

    A random numeric ID (or *supply_random* when given) is associated with
    the temporary token pair and persisted, so the PIN entered later can be
    matched back to the right tokens.  Returns ``(url, random_name)``.
    """
    twiface = base.twitt(iname)
    random_name = str(supply_random) if supply_random else str(random.randint(1, 9999))
    twiface.diskconf.log("Generating token for %s..." % random_name)
    url, oauth_token, oauth_secret = twiface.get_url_registration()
    # redundant: this is the old logfile mechanism
    twiface.store_oauth_tmp_token(oauth_token, oauth_secret, random_name)
    tokens = {'oauth_token': oauth_token, 'oauth_secret': oauth_secret}
    twiface.diskconf.log("... url %s TMP name associated %s" % (url, random_name))
    twiface.diskconf.append_temporary(random_name, tokens)
    twiface.diskconf.log("Appended token in %s" % twiface.diskconf.tmptokenf)
    return url, random_name
def do_complete(iname, pin, urltoken):
    """Finish an OAuth registration with the PIN and the temporary URL token.

    Both *pin* and *urltoken* are validated as integers first.  Returns the
    registered username (falsy on failure); the outcome is also logged.
    """
    pincode = unicode(int(pin))
    random_token = unicode(int(urltoken))
    twiface = base.twitt(iname)
    stored_tokens = twiface.diskconf.pop_temporary(random_token)
    username = twiface.complete_registration(pincode, stored_tokens)
    if not username:
        twiface.diskconf.log("do_complete fail with %s and %s" %
                             (pin, urltoken))
        return username
    twiface.diskconf.log("username: %s registered!" % username)
    return username
def do_check(initiative_name):
    """Open the existing initiative to verify its on-disk state is readable."""
    # TODO print some stuff - stats - etc
    base.Disku(initiative_name, create=False)
def help(swname):
print " %s init | generate | complete <initiative_NAME>" % swname
print "\tinit: initialize an application name"
print "\tgenerate: generate a registration link"
print "\tcomplete: +need the PIN as argument + Token from the URL"
return 0
if __name__ == '__main__':
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help' or sys.argv[1] == 'help':
quit(help(sys.argv[0]))
if len(sys.argv) < 3:
print "you've selected command: %s without specify the initiative name" % sys.argv[2]
quit(help(sys.argv[0]))
if len(sys.argv) != 3 or (sys.argv[2] == 'complete' and len(sys.argv) == 5):
quit(help(sys.argv[0]))
if sys.argv[1] == 'init':
quit(do_init(sys.argv[2]))
elif sys.argv[1] == 'generate':
quit(do_generate(sys.argv[2]))
elif sys.argv[1] == 'complete':
quit(do_complete(sys.argv[2], sys.argv[3], sys.argv[4]))
else:
quit(help(sys.argv[0]))
|
vecna/mic-CHECK
|
adminutils.py
|
Python
|
cc0-1.0
| 2,975
|
import requests
from at_helper.version import Version
class Modpack:
    """A single ATLauncher modpack identified by its slug.

    API data is fetched lazily on first access and cached on the instance
    (``version_data``), so at most one HTTP request is made per object.
    """

    slug = None
    version_data = None
    api = 'https://api.atlauncher.com/v1/'

    def __init__(self, slug):
        """
        Creates a modpack object, using the given slug.

        Args:
            slug: Slug for the modpack, same as the `safeName`
                from the ATLauncher API.
        """
        self.slug = slug

    def _getData(self):
        """
        Gets and parses version data from the ATLauncher API.

        Returns:
            A dict corresponding to the "data" in the API. Notably, the "versions"
            array has been replaced with a list of Version objects.

        Throws:
            RuntimeError: If we're unable to access to the ATLauncher API.
        """
        if not self.version_data:
            response = requests.get(self.api + 'pack/' + self.slug + '/')
            # Parse the payload at most once; .get() also tolerates a body
            # without an "error" key, where the old subscript lookup raised
            # KeyError instead of the intended RuntimeError.
            payload = response.json() if response.status_code == 200 else None
            if payload is None or payload.get('error'):
                raise RuntimeError(response.status_code, 'Bad response code from ATLauncher API.')
            self.version_data = payload['data']
            # BUG FIX: materialize as a list.  Under Python 3, map() returns
            # a one-shot iterator, so any second call to versions() would
            # see an exhausted (empty) sequence.
            self.version_data['versions'] = [Version(self.slug, v)
                                             for v in self.version_data['versions']]
        return self.version_data

    def versions(self):
        """
        Get modpack versions.

        Returns:
            A list of Version objects appropriate to the modpack.
        """
        return self._getData()['versions']

    def isPublic(self):
        """
        Determines whether the modpack in question is public or not.

        Returns:
            True if the pack is public, false if it is not.
        """
        return self._getData()['type'] == 'public'

    def name(self):
        """
        Gets the name of the modpack.

        Returns:
            String of the modpack name.
        """
        return self._getData()['name']

    def description(self):
        """
        Gets the modpack description.

        Returns:
            String of the description.
        """
        return self._getData()['description']

    def websites(self):
        """
        Gets websites for the modpack.

        Returns:
            A dict with keys "support" for the support website, and "modpack"
            for the primary modpack website.
        """
        return {
            'support': self._getData()['supportURL'],
            'modpack': self._getData()['websiteURL']
        }
|
MCProHosting/at-helper
|
at_helper/modpack.py
|
Python
|
mit
| 2,463
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
BOOLEANS_TRUE = ['y', 'yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['n', 'no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG=True
except ImportError:
HAS_SYSLOG=False
try:
# Python 2
from itertools import imap
except ImportError:
# Python 3
imap = map
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = str
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = str
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = str
try:
dict.iteritems
except AttributeError:
# Python 3
def iteritems(d):
return d.items()
else:
# Python 2
def iteritems(d):
return d.iteritems()
try:
reduce
except NameError:
# Python 3
from functools import reduce
try:
NUMBERTYPES = (int, long, float)
except NameError:
# Python 3
NUMBERTYPES = (int, float)
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
from collections import Sequence, Mapping
except ImportError:
# python2.5
Sequence = (list, tuple)
Mapping = (dict,)
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
from ansible.module_utils.six import PY2, PY3, b, binary_type, text_type, string_types
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
try:
from ast import literal_eval
except ImportError:
# a replacement for literal_eval that works with python 2.4. from:
# https://mail.python.org/pipermail/python-list/2009-September/551880.html
# which is essentially a cut/paste from an earlier (2.6) version of python's
# ast.py
from compiler import ast, parse
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.node
def _convert(node):
if isinstance(node, ast.Const) and isinstance(node.value, (basestring, int, float, long, complex)):
return node.value
elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.nodes))
elif isinstance(node, ast.List):
return list(map(_convert, node.nodes))
elif isinstance(node, ast.Dict):
return dict((_convert(k), _convert(v)) for k, v in node.items())
elif isinstance(node, ast.Name):
if node.name in _safe_names:
return _safe_names[node.name]
elif isinstance(node, ast.UnarySub):
return -_convert(node.expr)
raise ValueError('malformed string')
return _convert(node_or_string)
_literal_eval = literal_eval
# Backwards compat. There were present in basic.py before
from ansible.module_utils.pycompat24 import get_exception
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
# Arguments shared by every module that manipulates files.  Modules opt in
# via AnsibleModule(add_file_common_args=True), so ownership / permission /
# selinux handling is implemented once (see load_file_common_arguments).
FILE_COMMON_ARGUMENTS=dict(
    src = dict(),
    mode = dict(type='raw'),
    owner = dict(),
    group = dict(),
    seuser = dict(),
    serole = dict(),
    selevel = dict(),
    setype = dict(),
    follow = dict(type='bool', default=False),
    # not taken by the file module, but other modules call file so it must ignore them.
    content = dict(no_log=True),
    backup = dict(),
    force = dict(),
    remote_src = dict(), # used by assemble
    regexp = dict(), # used by assemble
    delimiter = dict(), # used by assemble
    directory_mode = dict(), # used by copy
    unsafe_writes = dict(type='bool'), # should be available to any module using atomic_move
)

# Matches command-line flags that likely carry a password (pass, passwd,
# password, with 0-2 leading dashes) so they can be censored in logs.
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')

# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8)      # file mode permission bits
EXEC_PERM_BITS = int('00111', 8) # execute permission bits
DEFAULT_PERM = int('0666', 8)    # default file permission bits
def get_platform():
    """Return the OS name as reported by the interpreter, e.g. 'Linux' or 'Darwin'."""
    return platform.system()
def get_distribution():
    ''' return the distribution name '''
    # Only meaningful on Linux; every other platform yields None.
    if platform.system() == 'Linux':
        try:
            # extend the stdlib's known distro list with Arch, which
            # platform.linux_distribution() does not recognize by itself
            supported_dists = platform._supported_dists + ('arch',)
            distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                # fall back to /etc/system-release (Amazon Linux and
                # relatives); anything found there that is not Amazon is
                # lumped together as 'OtherLinux'
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            distribution = platform.dist()[0].capitalize()
    else:
        distribution = None
    return distribution
def get_distribution_version():
    ''' return the distribution version '''
    # Mirrors get_distribution(): Linux only, None everywhere else.
    if platform.system() == 'Linux':
        try:
            distribution_version = platform.linux_distribution()[1]
            if not distribution_version and os.path.isfile('/etc/system-release'):
                # /etc/system-release fallback, e.g. on Amazon Linux
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            distribution_version = platform.dist()[1]
    else:
        distribution_version = None
    return distribution_version
def get_all_subclasses(cls):
    '''
    used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ return only direct sub classes. This one go down into the class tree.
    '''
    # BUG FIX: the previous implementation removed items from `to_visit`
    # while iterating over it, which skips every other entry per pass (the
    # outer `while` papered over that with extra passes) and could record a
    # class twice when it is reachable through multiple bases.  A plain
    # worklist walk with a `seen` set fixes both problems.
    subclasses = []
    seen = set()
    to_visit = list(cls.__subclasses__())
    while to_visit:
        sc = to_visit.pop()
        if sc in seen:
            continue
        seen.add(sc)
        subclasses.append(sc)
        to_visit.extend(sc.__subclasses__())
    return subclasses
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform. See User
    module for an example.
    '''
    this_platform = get_platform()
    distribution = get_distribution()
    subclass = None

    # get the most specific superclass for this platform
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc
    if subclass is None:
        # no distribution-specific implementation found; fall back to a
        # platform-generic subclass (one whose distribution is None)
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc
    if subclass is None:
        # nothing matched at all -- instantiate the base class itself
        subclass = cls

    # allocate an instance of the chosen subclass; the caller's __init__
    # then runs on it as usual
    return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8'):
    ''' Recursively convert unicode text in a json-style structure to byte str.

    Only handles the container types the json module returns (dict, list,
    tuple); anything else is passed through untouched.
    '''
    if isinstance(d, unicode):
        return d.encode(encoding)
    if isinstance(d, dict):
        return dict((json_dict_unicode_to_bytes(k, encoding),
                     json_dict_unicode_to_bytes(v, encoding))
                    for k, v in iteritems(d))
    if isinstance(d, list):
        return [json_dict_unicode_to_bytes(item, encoding) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_unicode_to_bytes(item, encoding) for item in d)
    return d
def json_dict_bytes_to_unicode(d, encoding='utf-8'):
    ''' Recursively convert byte str in a json-style structure to unicode text.

    Only handles the container types the json module returns (dict, list,
    tuple); anything else is passed through untouched.  (The original
    docstring said "to byte str", a copy-paste of its sibling.)
    '''
    if isinstance(d, bytes):
        return unicode(d, encoding)
    if isinstance(d, dict):
        return dict((json_dict_bytes_to_unicode(k, encoding),
                     json_dict_bytes_to_unicode(v, encoding))
                    for k, v in iteritems(d))
    if isinstance(d, list):
        return [json_dict_bytes_to_unicode(item, encoding) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_bytes_to_unicode(item, encoding) for item in d)
    return d
def return_values(obj):
    """ Return stringified values from datastructures. For use with removing
    sensitive values pre-jsonification."""
    # Strings: yield non-empty ones, always as utf-8 encoded bytes.
    if isinstance(obj, basestring):
        if obj:
            if isinstance(obj, bytes):
                yield obj
            else:
                # Unicode objects should all convert to utf-8
                # (still must deal with surrogateescape on python3)
                yield obj.encode('utf-8')
        return
    if isinstance(obj, SEQUENCETYPE):
        for element in obj:
            for inner in return_values(element):
                yield inner
        return
    if isinstance(obj, Mapping):
        # only the values of a mapping are considered sensitive, not keys
        for _key, val in obj.items():
            for inner in return_values(val):
                yield inner
        return
    if isinstance(obj, (bool, NoneType)):
        # must be tested before NUMBERTYPES because bools are also ints
        return
    if isinstance(obj, NUMBERTYPES):
        yield str(obj)
        return
    raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def remove_values(value, no_log_strings):
    """ Remove strings in no_log_strings from value. If value is a container
    type, then remove a lot more

    Strings are censored in place (matches replaced by ``********`` or the
    whole value by a sentinel), containers are processed recursively, and
    numbers/bools/None are blanked entirely when they match.
    """
    if isinstance(value, basestring):
        # work on a byte-string copy so substring matching against the
        # (byte) no_log_strings is consistent
        if isinstance(value, unicode):
            # This should work everywhere on python2. Need to check
            # surrogateescape on python3
            bytes_value = value.encode('utf-8')
            value_is_unicode = True
        else:
            bytes_value = value
            value_is_unicode = False
        if bytes_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            bytes_value = bytes_value.replace(omit_me, '*' * 8)
        if value_is_unicode:
            # convert back to the caller's original text type
            value = unicode(bytes_value, 'utf-8', errors='replace')
        else:
            value = bytes_value
    elif isinstance(value, SEQUENCETYPE):
        return [remove_values(elem, no_log_strings) for elem in value]
    elif isinstance(value, Mapping):
        return dict((k, remove_values(v, no_log_strings)) for k, v in value.items())
    elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
        # scalars cannot be partially censored: any (sub)match blanks the
        # whole value
        stringy_value = str(value)
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    elif isinstance(value, datetime.datetime):
        # datetimes are serialized; NOTE(review): no_log matching is not
        # applied to the resulting isoformat string
        value = value.isoformat()
    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
    return value
def heuristic_log_sanitize(data, no_log_values=None):
    ''' Remove strings that look like passwords from log messages

    :arg data: string to sanitize; scanned right-to-left for
        ``user:pass@host`` and ``scheme://user:pass@host`` shapes
    :kwarg no_log_values: optional collection of extra strings to censor
        via remove_values()
    :returns: the sanitized string
    '''
    # Currently filters:
    # user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds

    # begin: start of a passwd containing string
    # end: end of a passwd containing string
    # sep: char between user and passwd
    # prev_begin: where in the overall string to start a search for
    #   a passwd
    # sep_search_end: where in the string to end a search for the sep
    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break

        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password
                    # here. Return the remaining data
                    output.insert(0, data[0:begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin

    output = ''.join(output)
    if no_log_values:
        output = remove_values(output, no_log_values)
    return output
def is_executable(path):
    '''is the given path executable?

    Limitations:

    * Does not account for FSACLs.
    * Most times we really want to know "Can the current user execute this
      file"  This function does not tell us that, only if an execute bit is set.
    '''
    # Mask the file's mode with the union of all three execute bits; the
    # result is non-zero (truthy) when any of user/group/other may execute.
    execute_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    mode = os.stat(path)[stat.ST_MODE]
    return execute_bits & mode
def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module. Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously. It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    if _ANSIBLE_ARGS is not None:
        # another AnsibleModule already read the args; reuse the cached copy
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline

        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                # the argument is a path: read the serialized args from it
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                # the argument is the JSON payload itself
                buffer = sys.argv[1]
                if PY3:
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if PY2:
                buffer = sys.stdin.read()
            else:
                buffer = sys.stdin.buffer.read()
        # cache so later AnsibleModule instantiations don't re-read stdin
        _ANSIBLE_ARGS = buffer

    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)

    if PY2:
        params = json_dict_unicode_to_bytes(params)

    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)
def env_fallback(*args, **kwargs):
    ''' Load value from environment '''
    # Return the value of the first environment variable (tried in the
    # given order) that is actually set; raise if none of them are.
    for name in args:
        try:
            return os.environ[name]
        except KeyError:
            continue
    raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
class AnsibleFallbackNotFound(Exception):
    """Raised by fallback callables (e.g. env_fallback) when no value could be found."""
    pass
class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None):
        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/* for examples

        Parameters are loaded (from file/CLI/stdin via _load_params),
        validated against argument_spec, and exposed as self.params; no_log
        values are collected so they can be censored from any later output.
        '''
        self.argument_spec = argument_spec
        self.supports_check_mode = supports_check_mode
        self.check_mode = False
        self.no_log = no_log
        self.cleanup_files = []
        self._debug = False
        self._diff = False
        self._verbosity = 0
        # May be used to set modifications to the environment for any
        # run_command invocation
        self.run_command_environ_update = {}

        self.aliases = {}
        # internal '_ansible_*' control options accepted in addition to the
        # module's own argument_spec
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility']

        if add_file_common_args:
            # merge in the shared file-handling options without overriding
            # anything the module declared itself
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in self.argument_spec:
                    self.argument_spec[k] = v

        self._load_params()
        self._set_fallbacks()

        # append to legal_inputs and then possibly check against them
        try:
            self.aliases = self._handle_aliases()
        except Exception:
            e = get_exception()
            # Use exceptions here because it isn't safe to call fail_json until no_log is processed
            print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
            sys.exit(1)

        # Save parameter values that should never be logged
        self.no_log_values = set()
        # Use the argspec to determine which args are no_log
        for arg_name, arg_opts in self.argument_spec.items():
            if arg_opts.get('no_log', False):
                # Find the value for the no_log'd param
                no_log_object = self.params.get(arg_name, None)
                if no_log_object:
                    self.no_log_values.update(return_values(no_log_object))

        # check the locale as set by the current environment, and reset to
        # a known valid (LANG=C) if it's an invalid/unavailable locale
        self._check_locale()

        self._check_arguments(check_invalid_arguments)

        # check exclusive early
        if not bypass_checks:
            self._check_mutually_exclusive(mutually_exclusive)

        self._set_defaults(pre=True)

        # maps an argument_spec 'type' string to its checker/caster method
        self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
                'str': self._check_type_str,
                'list': self._check_type_list,
                'dict': self._check_type_dict,
                'bool': self._check_type_bool,
                'int': self._check_type_int,
                'float': self._check_type_float,
                'path': self._check_type_path,
                'raw': self._check_type_raw,
                'jsonarg': self._check_type_jsonarg,
                'json': self._check_type_jsonarg,
            }
        if not bypass_checks:
            self._check_required_arguments()
            self._check_argument_types()
            self._check_argument_values()
            self._check_required_together(required_together)
            self._check_required_one_of(required_one_of)
            self._check_required_if(required_if)

        self._set_defaults(pre=False)
        if not self.no_log and self._verbosity >= 3:
            self._log_invocation()

        # finally, make sure we're in a sane working dir
        self._set_cwd()
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(path)
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(path):
path = os.path.realpath(path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc,out,err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
def _to_filesystem_str(self, path):
'''Returns filesystem path as a str, if it wasn't already.
Used in selinux interactions because it cannot accept unicode
instances, and specifying complex args in a playbook leaves
you with unicode instances. This method currently assumes
that your filesystem encoding is UTF-8.
'''
if isinstance(path, unicode):
path = path.encode("utf-8")
return path
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(self._to_filesystem_str(path), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, filename):
filename = os.path.expanduser(filename)
st = os.lstat(filename)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
    def is_special_selinux_path(self, path):
        """
        Returns a tuple containing (True, selinux_context) if the given path is on a
        NFS or other 'special' fs  mount point, otherwise the return will be (False, None).
        """
        try:
            f = open('/proc/mounts', 'r')
            mount_data = f.readlines()
            f.close()
        except:
            # Best effort: if /proc/mounts is unreadable, treat as not special.
            return (False, None)
        path_mount_point = self.find_mount_point(path)
        for line in mount_data:
            # /proc/mounts format: device mountpoint fstype options dump/pass
            (device, mount_point, fstype, options, rest) = line.split(' ', 4)
            if path_mount_point == mount_point:
                # substring match against the configured special-fs list
                for fs in self._selinux_special_fs:
                    if fs in fstype:
                        special_context = self.selinux_context(path_mount_point)
                        return (True, special_context)
        return (False, None)
    def set_default_selinux_context(self, path, changed):
        """Reset *path* to its policy-default SELinux context; return the
        (possibly updated) changed flag."""
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        context = self.selinux_default_context(path)
        return self.set_context_if_different(path, context, False)
    def set_context_if_different(self, path, context, changed, diff=None):
        """Apply SELinux *context* (a [user, role, type(, level)] list;
        None entries keep the current value) to *path* if it differs.

        Honors check mode, records before/after into *diff* when given, and
        returns the updated changed flag.  Paths on 'special' filesystems
        (e.g. NFS) keep the mount point's context instead.
        """
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        cur_context = self.selinux_context(path)
        new_context = list(cur_context)
        # Iterate over the current context instead of the
        # argument context, which may have selevel.
        (is_special_se, sp_context) = self.is_special_selinux_path(path)
        if is_special_se:
            new_context = sp_context
        else:
            for i in range(len(cur_context)):
                if len(context) > i:
                    if context[i] is not None and context[i] != cur_context[i]:
                        new_context[i] = context[i]
                    elif context[i] is None:
                        new_context[i] = cur_context[i]
        if cur_context != new_context:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['secontext'] = cur_context
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['secontext'] = new_context
            try:
                if self.check_mode:
                    return True
                rc = selinux.lsetfilecon(self._to_filesystem_str(path),
                                         str(':'.join(new_context)))
            except OSError:
                e = get_exception()
                self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
            if rc != 0:
                self.fail_json(path=path, msg='set selinux context failed')
            changed = True
        return changed
    def set_owner_if_different(self, path, owner, changed, diff=None):
        """Chown *path* to *owner* (name or numeric uid) if it differs.

        Symlinks themselves are changed (lchown).  Honors check mode,
        records before/after into *diff* when given, returns the updated
        changed flag, and fail_json's on unknown users or chown failure.
        """
        path = os.path.expanduser(path)
        if owner is None:
            return changed
        orig_uid, orig_gid = self.user_and_group(path)
        try:
            uid = int(owner)
        except ValueError:
            # not numeric: resolve the user name to a uid
            try:
                uid = pwd.getpwnam(owner).pw_uid
            except KeyError:
                self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
        if orig_uid != uid:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['owner'] = orig_uid
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['owner'] = uid
            if self.check_mode:
                return True
            try:
                # -1 leaves the group untouched
                os.lchown(path, uid, -1)
            except OSError:
                self.fail_json(path=path, msg='chown failed')
            changed = True
        return changed
    def set_group_if_different(self, path, group, changed, diff=None):
        """Chgrp *path* to *group* (name or numeric gid) if it differs.

        Mirror image of set_owner_if_different: lchown, check-mode aware,
        diff-recording, fail_json on unknown group or chgrp failure.
        """
        path = os.path.expanduser(path)
        if group is None:
            return changed
        orig_uid, orig_gid = self.user_and_group(path)
        try:
            gid = int(group)
        except ValueError:
            # not numeric: resolve the group name to a gid
            try:
                gid = grp.getgrnam(group).gr_gid
            except KeyError:
                self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
        if orig_gid != gid:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['group'] = orig_gid
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['group'] = gid
            if self.check_mode:
                return True
            try:
                # -1 leaves the owner untouched
                os.lchown(path, -1, gid)
            except OSError:
                self.fail_json(path=path, msg='chgrp failed')
            changed = True
        return changed
    def set_mode_if_different(self, path, mode, changed, diff=None):
        """Chmod *path* to *mode* if it differs.

        *mode* may be an int, an octal string (e.g. '0644'), or a chmod-style
        symbolic string (e.g. 'u+rwX').  Honors check mode, records
        before/after into *diff* when given, and returns the updated changed
        flag.  Symlinks use lchmod where available; otherwise the perms of
        the link target are carefully restored if chmod followed the link.
        """
        path = os.path.expanduser(path)
        path_stat = os.lstat(path)
        if mode is None:
            return changed
        if not isinstance(mode, int):
            try:
                # octal string first ('0644')
                mode = int(mode, 8)
            except Exception:
                try:
                    # fall back to symbolic form ('u+rwX,g-w')
                    mode = self._symbolic_mode_to_octal(path_stat, mode)
                except Exception:
                    e = get_exception()
                    self.fail_json(path=path,
                                   msg="mode must be in octal or symbolic form",
                                   details=str(e))
                if mode != stat.S_IMODE(mode):
                    # prevent mode from having extra info or being an invalid long number
                    self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
        prev_mode = stat.S_IMODE(path_stat.st_mode)
        if prev_mode != mode:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['mode'] = oct(prev_mode)
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['mode'] = oct(mode)
            if self.check_mode:
                return True
            # FIXME: comparison against string above will cause this to be executed
            # every time
            try:
                if hasattr(os, 'lchmod'):
                    os.lchmod(path, mode)
                else:
                    if not os.path.islink(path):
                        os.chmod(path, mode)
                    else:
                        # Attempt to set the perms of the symlink but be
                        # careful not to change the perms of the underlying
                        # file while trying
                        underlying_stat = os.stat(path)
                        os.chmod(path, mode)
                        new_underlying_stat = os.stat(path)
                        if underlying_stat.st_mode != new_underlying_stat.st_mode:
                            os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
            except OSError:
                e = get_exception()
                if os.path.islink(path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                    pass
                elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
                    pass
                else:
                    raise e
            except Exception:
                e = get_exception()
                self.fail_json(path=path, msg='chmod failed', details=str(e))
            path_stat = os.lstat(path)
            new_mode = stat.S_IMODE(path_stat.st_mode)
            if new_mode != prev_mode:
                changed = True
        return changed
    def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
        """Convert a chmod-style symbolic mode (e.g. 'u+rwX,g-w,o=r') into
        an octal mode, starting from the file's current permission bits.

        Raises ValueError for any clause the regex does not recognise.
        """
        new_mode = stat.S_IMODE(path_stat.st_mode)
        # users: any of u/g/o/a; operator: one of -+=; perms: rwxXst- chars
        # or a single u/g/o (copy another class's bits)
        mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
        for mode in symbolic_mode.split(','):
            match = mode_re.match(mode)
            if match:
                users = match.group('users')
                operator = match.group('operator')
                perms = match.group('perms')
                if users == 'a':
                    users = 'ugo'
                # apply the clause once per user class it names
                for user in users:
                    mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
                    new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
            else:
                raise ValueError("bad symbolic permission for mode: %s" % mode)
        return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
    def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
        """Translate the perms part of one symbolic clause (e.g. 'rwX' or
        'g') into the octal bits for the given user class ('u'/'g'/'o').

        'X' grants execute only when the target is a directory or already
        has some execute bit; 'u'/'g'/'o' as a perm copies that class's
        current bits, shifted into position.
        """
        prev_mode = stat.S_IMODE(path_stat.st_mode)
        is_directory = stat.S_ISDIR(path_stat.st_mode)
        has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
        apply_X_permission = is_directory or has_x_permissions
        # Permission bits constants documented at:
        # http://docs.python.org/2/library/stat.html#stat.S_ISUID
        if apply_X_permission:
            X_perms = {
                'u': {'X': stat.S_IXUSR},
                'g': {'X': stat.S_IXGRP},
                'o': {'X': stat.S_IXOTH}
            }
        else:
            X_perms = {
                'u': {'X': 0},
                'g': {'X': 0},
                'o': {'X': 0}
            }
        # per-class lookup: each perm char maps to the octal bits it grants
        user_perms_to_modes = {
            'u': {
                'r': stat.S_IRUSR,
                'w': stat.S_IWUSR,
                'x': stat.S_IXUSR,
                's': stat.S_ISUID,
                't': 0,
                'u': prev_mode & stat.S_IRWXU,
                'g': (prev_mode & stat.S_IRWXG) << 3,
                'o': (prev_mode & stat.S_IRWXO) << 6 },
            'g': {
                'r': stat.S_IRGRP,
                'w': stat.S_IWGRP,
                'x': stat.S_IXGRP,
                's': stat.S_ISGID,
                't': 0,
                'u': (prev_mode & stat.S_IRWXU) >> 3,
                'g': prev_mode & stat.S_IRWXG,
                'o': (prev_mode & stat.S_IRWXO) << 3 },
            'o': {
                'r': stat.S_IROTH,
                'w': stat.S_IWOTH,
                'x': stat.S_IXOTH,
                's': 0,
                't': stat.S_ISVTX,
                'u': (prev_mode & stat.S_IRWXU) >> 6,
                'g': (prev_mode & stat.S_IRWXG) >> 3,
                'o': prev_mode & stat.S_IRWXO }
        }
        # Insert X_perms into user_perms_to_modes
        for key, value in X_perms.items():
            user_perms_to_modes[key].update(value)
        # OR together the bits for every perm char requested
        # (bare 'reduce' is a builtin on Python 2 / imported at file top)
        or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
        return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def set_file_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
    def add_path_info(self, kwargs):
        '''
        for results that are files, supplement the info about the file
        in the return path with stats about the file path.
        '''
        # 'path' wins over 'dest' as the file key
        path = kwargs.get('path', kwargs.get('dest', None))
        if path is None:
            return kwargs
        if os.path.exists(path):
            (uid, gid) = self.user_and_group(path)
            kwargs['uid'] = uid
            kwargs['gid'] = gid
            # fall back to the numeric id when there is no name for it
            try:
                user = pwd.getpwuid(uid)[0]
            except KeyError:
                user = str(uid)
            try:
                group = grp.getgrgid(gid)[0]
            except KeyError:
                group = str(gid)
            kwargs['owner'] = user
            kwargs['group'] = group
            st = os.lstat(path)
            kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
            # secontext not yet supported
            if os.path.islink(path):
                kwargs['state'] = 'link'
            elif os.path.isdir(path):
                kwargs['state'] = 'directory'
            elif os.stat(path).st_nlink > 1:
                kwargs['state'] = 'hard'
            else:
                kwargs['state'] = 'file'
            if HAVE_SELINUX and self.selinux_enabled():
                kwargs['secontext'] = ':'.join(self.selinux_context(path))
            kwargs['size'] = st[stat.ST_SIZE]
        else:
            kwargs['state'] = 'absent'
        return kwargs
    def _check_locale(self):
        '''
        Uses the locale module to test the currently set locale
        (per the LANG and LC_CTYPE environment settings)
        '''
        try:
            # setting the locale to '' uses the default locale
            # as it would be returned by locale.getdefaultlocale()
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error:
            # fallback to the 'C' locale, which may cause unicode
            # issues but is preferable to simply failing because
            # of an unknown locale
            locale.setlocale(locale.LC_ALL, 'C')
            os.environ['LANG'] = 'C'
            os.environ['LC_ALL'] = 'C'
            os.environ['LC_MESSAGES'] = 'C'
        except Exception:
            e = get_exception()
            self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} #alias:canon
for (k,v) in self.argument_spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if type(aliases) != list:
raise Exception('internal error: aliases must be a list')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in self.params:
self.params[k] = self.params[alias]
return aliases_results
    def _check_arguments(self, check_invalid_arguments):
        """Consume internal '_ansible_*' control parameters and validate the rest.

        Sets the corresponding module switches (check_mode, no_log, debug,
        diff, verbosity, selinux special-fs list, syslog facility, version,
        module name) and deletes each _ansible_* key from self.params.
        When check_invalid_arguments is true, any remaining key not in
        self._legal_inputs aborts via fail_json.  Finally, exits as skipped
        when check mode is requested but unsupported by this module.
        """
        self._syslog_facility = 'LOG_USER'
        # iterate over a copy because _ansible_* keys are deleted in-loop
        for (k,v) in list(self.params.items()):
            if k == '_ansible_check_mode' and v:
                self.check_mode = True
            elif k == '_ansible_no_log':
                self.no_log = self.boolean(v)
            elif k == '_ansible_debug':
                self._debug = self.boolean(v)
            elif k == '_ansible_diff':
                self._diff = self.boolean(v)
            elif k == '_ansible_verbosity':
                self._verbosity = v
            elif k == '_ansible_selinux_special_fs':
                self._selinux_special_fs = v
            elif k == '_ansible_syslog_facility':
                self._syslog_facility = v
            elif k == '_ansible_version':
                self.ansible_version = v
            elif k == '_ansible_module_name':
                self._name = v
            elif check_invalid_arguments and k not in self._legal_inputs:
                self.fail_json(msg="unsupported parameter for module: %s" % k)
            #clean up internal params:
            if k.startswith('_ansible_'):
                del self.params[k]
        if self.check_mode and not self.supports_check_mode:
            self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [ self._count_terms([field]) for field in check ]
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self):
''' ensure all required arguments are present '''
missing = []
for (k,v) in self.argument_spec.items():
required = v.get('required', False)
if required and k not in self.params:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
    def _check_required_if(self, spec):
        ''' ensure that parameters which conditionally required are present '''
        if spec is None:
            return
        # each entry: (key, value, [params required when params[key] == value])
        for (key, val, requirements) in spec:
            missing = []
            if key in self.params and self.params[key] == val:
                for check in requirements:
                    count = self._count_terms((check,))
                    if count == 0:
                        missing.append(check)
            if len(missing) > 0:
                self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
    def _check_argument_values(self):
        ''' ensure all arguments have the requested values, and there are no stray arguments '''
        for (k,v) in self.argument_spec.items():
            choices = v.get('choices',None)
            if choices is None:
                continue
            if isinstance(choices, SEQUENCETYPE):
                if k in self.params:
                    if self.params[k] not in choices:
                        # PyYaml converts certain strings to bools.  If we can unambiguously convert back, do so before checking the value.  If we can't figure this out, module author is responsible.
                        lowered_choices = None
                        if self.params[k] == 'False':
                            lowered_choices = _lenient_lowercase(choices)
                            FALSEY = frozenset(BOOLEANS_FALSE)
                            overlap = FALSEY.intersection(choices)
                            if len(overlap) == 1:
                                # Extract from a set
                                (self.params[k],) = overlap
                        if self.params[k] == 'True':
                            if lowered_choices is None:
                                lowered_choices = _lenient_lowercase(choices)
                            TRUTHY = frozenset(BOOLEANS_TRUE)
                            overlap = TRUTHY.intersection(choices)
                            if len(overlap) == 1:
                                (self.params[k],) = overlap
                        # re-check after the possible bool-string normalization above
                        if self.params[k] not in choices:
                            choices_str=",".join([str(c) for c in choices])
                            msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
                            self.fail_json(msg=msg)
            else:
                self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
    def safe_eval(self, str, locals=None, include_exceptions=False):
        """Evaluate *str* as a Python literal (ast.literal_eval), refusing
        method calls and imports; non-strings pass through unchanged.

        Returns the value, or (value, exception_or_None) when
        include_exceptions is true.  NOTE(review): the parameter shadows the
        builtin 'str' — kept as-is because the name is part of the public
        signature; 'locals' is accepted but unused.
        """
        # do not allow method calls to modules
        if not isinstance(str, basestring):
            # already templated to a datastructure, perhaps?
            if include_exceptions:
                return (str, None)
            return str
        if re.search(r'\w\.\w+\(', str):
            if include_exceptions:
                return (str, None)
            return str
        # do not allow imports
        if re.search(r'import \w+', str):
            if include_exceptions:
                return (str, None)
            return str
        try:
            result = literal_eval(str)
            if include_exceptions:
                return (result, None)
            else:
                return result
        except Exception:
            e = get_exception()
            if include_exceptions:
                return (str, e)
            return str
    def _check_type_str(self, value):
        """Coerce *value* to a string (py2: unicode or str pass through)."""
        if isinstance(value, basestring):
            return value
        # Note: This could throw a unicode error if value's __str__() method
        # returns non-ascii.  Have to port utils.to_bytes() if that happens
        return str(value)
    def _check_type_list(self, value):
        """Coerce *value* to a list: strings split on commas, numbers wrap
        into a one-element string list; raise TypeError otherwise."""
        if isinstance(value, list):
            return value
        if isinstance(value, basestring):
            return value.split(",")
        elif isinstance(value, int) or isinstance(value, float):
            return [ str(value) ]
        raise TypeError('%s cannot be converted to a list' % type(value))
    def _check_type_dict(self, value):
        """Coerce *value* to a dict.

        Strings starting with '{' are tried as JSON, then as a Python
        literal; otherwise 'k1=v1 k2=v2' / comma-separated key=value pairs
        are parsed (with quote and backslash-escape support).  Raises
        TypeError when no interpretation succeeds.
        """
        if isinstance(value, dict):
            return value
        if isinstance(value, basestring):
            if value.startswith("{"):
                try:
                    return json.loads(value)
                except:
                    (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                    if exc is not None:
                        raise TypeError('unable to evaluate string as dictionary')
                    return result
            elif '=' in value:
                # hand-rolled tokenizer: split on unquoted ',' or ' ',
                # honoring quotes and backslash escapes
                fields = []
                field_buffer = []
                in_quote = False
                in_escape = False
                for c in value.strip():
                    if in_escape:
                        field_buffer.append(c)
                        in_escape = False
                    elif c == '\\':
                        in_escape = True
                    elif not in_quote and c in ('\'', '"'):
                        in_quote = c
                    elif in_quote and in_quote == c:
                        in_quote = False
                    elif not in_quote and c in (',', ' '):
                        field = ''.join(field_buffer)
                        if field:
                            fields.append(field)
                        field_buffer = []
                    else:
                        field_buffer.append(c)
                # flush the trailing token
                field = ''.join(field_buffer)
                if field:
                    fields.append(field)
                return dict(x.split("=", 1) for x in fields)
            else:
                raise TypeError("dictionary requested, could not parse JSON or key=value")
        raise TypeError('%s cannot be converted to a dict' % type(value))
    def _check_type_bool(self, value):
        """Coerce *value* to a bool via self.boolean(); raise TypeError for
        anything that is not already bool, string or int."""
        if isinstance(value, bool):
            return value
        if isinstance(value, basestring) or isinstance(value, int):
            return self.boolean(value)
        raise TypeError('%s cannot be converted to a bool' % type(value))
    def _check_type_int(self, value):
        """Coerce *value* to an int; int passes through, strings are parsed
        (ValueError propagates), anything else raises TypeError."""
        if isinstance(value, int):
            return value
        if isinstance(value, basestring):
            return int(value)
        raise TypeError('%s cannot be converted to an int' % type(value))
    def _check_type_float(self, value):
        """Coerce *value* to a float; float passes through, strings are
        parsed (ValueError propagates), anything else raises TypeError."""
        if isinstance(value, float):
            return value
        if isinstance(value, basestring):
            return float(value)
        raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
    def _check_type_jsonarg(self, value):
        # Return a jsonified string.  Sometimes the controller turns a json
        # string into a dict/list so transform it back into json here
        # (py2: 'unicode' covers text, bytes covers str)
        if isinstance(value, (unicode, bytes)):
            return value.strip()
        else:
            if isinstance(value, (list, tuple, dict)):
                return json.dumps(value)
        raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
    def _check_argument_types(self):
        ''' ensure all arguments have the requested type '''
        for (k, v) in self.argument_spec.items():
            wanted = v.get('type', None)
            if k not in self.params:
                continue
            if wanted is None:
                # Mostly we want to default to str.
                # For values set to None explicitly, return None instead as
                # that allows a user to unset a parameter
                if self.params[k] is None:
                    continue
                wanted = 'str'
            value = self.params[k]
            if value is None:
                continue
            # dispatch to the matching _check_type_* coercion method
            try:
                type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
            except KeyError:
                self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
            try:
                self.params[k] = type_checker(value)
            except (TypeError, ValueError):
                self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s" % (k, type(value), wanted))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.items():
default = v.get('default', None)
if pre == True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
    def _set_fallbacks(self):
        """For unset parameters with a 'fallback' spec entry of the form
        (strategy, args..., kwargs_dict), call the strategy to produce the
        value; AnsibleFallbackNotFound means 'no fallback available'."""
        for k,v in self.argument_spec.items():
            fallback = v.get('fallback', (None,))
            fallback_strategy = fallback[0]
            fallback_args = []
            fallback_kwargs = {}
            if k not in self.params and fallback_strategy is not None:
                # remaining tuple items: a dict becomes kwargs, anything else args
                for item in fallback[1:]:
                    if isinstance(item, dict):
                        fallback_kwargs = item
                    else:
                        fallback_args = item
                try:
                    self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
                except AnsibleFallbackNotFound:
                    continue
    def _load_params(self):
        ''' read the input and set the params attribute.

        This method is for backwards compatibility.  The guts of the function
        were moved out in 2.1 so that custom modules could read the parameters.
        '''
        # debug overrides to read args from file or cmdline
        # (delegates to the module-level _load_params helper)
        self.params = _load_params()
    def _log_to_syslog(self, msg):
        """Send *msg* to syslog at INFO level, tagged 'ansible-<module>',
        using the configured facility (default LOG_USER). No-op when the
        syslog module is unavailable."""
        if HAS_SYSLOG:
            module = 'ansible-%s' % self._name
            facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
            syslog.openlog(str(module), 0, facility)
            syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log(msg)
    def log(self, msg, log_args=None):
        """Log *msg* (with no_log values removed) to systemd journal when
        available, falling back to syslog.  Honors self.no_log.

        Journal always receives text; syslog receives bytes on py2 and text
        on py3.  *log_args* become extra uppercased journal fields.
        """
        if not self.no_log:
            if log_args is None:
                log_args = dict()
            module = 'ansible-%s' % self._name
            if isinstance(module, bytes):
                module = module.decode('utf-8', 'replace')
            # 6655 - allow for accented characters
            if not isinstance(msg, (bytes, unicode)):
                raise TypeError("msg should be a string (got %s)" % type(msg))
            # We want journal to always take text type
            # syslog takes bytes on py2, text type on py3
            if isinstance(msg, bytes):
                journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
            else:
                # TODO: surrogateescape is a danger here on Py3
                journal_msg = remove_values(msg, self.no_log_values)
            if PY3:
                syslog_msg = journal_msg
            else:
                syslog_msg = journal_msg.encode('utf-8', 'replace')
            if has_journal:
                journal_args = [("MODULE", os.path.basename(__file__))]
                for arg in log_args:
                    journal_args.append((arg.upper(), str(log_args[arg])))
                try:
                    journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
                except IOError:
                    # fall back to syslog since logging to journal failed
                    self._log_to_syslog(syslog_msg)
            else:
                self._log_to_syslog(syslog_msg)
    def _log_invocation(self):
        ''' log that ansible ran the module '''
        # TODO: generalize a separate log function and make log_invocation use it
        # Sanitize possible password argument when logging.
        log_args = dict()
        passwd_keys = ['password', 'login_password']
        for param in self.params:
            canon = self.aliases.get(param, param)
            arg_opts = self.argument_spec.get(canon, {})
            no_log = arg_opts.get('no_log', False)
            if self.boolean(no_log):
                log_args[param] = 'NOT_LOGGING_PARAMETER'
            elif param in passwd_keys:
                log_args[param] = 'NOT_LOGGING_PASSWORD'
            else:
                param_val = self.params[param]
                # normalize to a utf-8 byte string before sanitizing (py2)
                if not isinstance(param_val, basestring):
                    param_val = str(param_val)
                elif isinstance(param_val, unicode):
                    param_val = param_val.encode('utf-8')
                log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
        msg = []
        for arg in log_args:
            arg_val = log_args[arg]
            if not isinstance(arg_val, basestring):
                arg_val = str(arg_val)
            elif isinstance(arg_val, unicode):
                arg_val = arg_val.encode('utf-8')
            msg.append('%s=%s' % (arg, arg_val))
        if msg:
            msg = 'Invoked with %s' % ' '.join(msg)
        else:
            msg = 'Invoked'
        self.log(msg, log_args=log_args)
    def _set_cwd(self):
        """Return a usable working directory, chdir()ing to $HOME or the
        temp dir when the current one is inaccessible (e.g. under sudo).
        Returns None when no readable directory could be entered."""
        try:
            cwd = os.getcwd()
            if not os.access(cwd, os.F_OK|os.R_OK):
                # NOTE(review): bare 'raise' with no active exception raises
                # a generic error here, which the bare except below catches.
                raise
            return cwd
        except:
            # we don't have access to the cwd, probably because of sudo.
            # Try and move to a neutral location to prevent errors
            for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
                try:
                    if os.access(cwd, os.F_OK|os.R_OK):
                        os.chdir(cwd)
                        return cwd
                except:
                    pass
        # we won't error here, as it may *not* be a problem,
        # and we don't want to break modules unnecessarily
        return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s' % arg)
return bin_path
    def boolean(self, arg):
        ''' return a bool for the arg '''
        # None and actual bools pass through unchanged
        if arg is None or type(arg) == bool:
            return arg
        if isinstance(arg, basestring):
            arg = arg.lower()
        # membership in the module-level truthy/falsy word lists
        if arg in BOOLEANS_TRUE:
            return True
        elif arg in BOOLEANS_FALSE:
            return False
        else:
            self.fail_json(msg='Boolean %s not in either boolean list' % arg)
    def jsonify(self, data):
        """Serialize *data* to JSON, trying utf-8 then latin-1 for byte
        strings; handles old simplejson without an encoding kwarg by first
        converting byte strings to unicode.  fail_json on undecodable input."""
        for encoding in ("utf-8", "latin-1"):
            try:
                return json.dumps(data, encoding=encoding)
            # Old systems using old simplejson module does not support encoding keyword.
            except TypeError:
                try:
                    new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
                except UnicodeDecodeError:
                    continue
                return json.dumps(new_data)
            except UnicodeDecodeError:
                continue
        self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.add_path_info(kwargs)
if not 'changed' in kwargs:
kwargs['changed'] = False
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(0)
    def fail_json(self, **kwargs):
        ''' return from the module, with an error message '''
        self.add_path_info(kwargs)
        # NOTE(review): this assert disappears under 'python -O'; callers
        # must always pass msg=...
        assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
        kwargs['failed'] = True
        if 'invocation' not in kwargs:
            kwargs['invocation'] = {'module_args': self.params}
        # strip any values registered as no_log before emitting
        kwargs = remove_values(kwargs, self.no_log_values)
        self.do_cleanup_files()
        print('\n%s' % self.jsonify(kwargs))
        sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
    def digest_from_file(self, filename, algorithm):
        ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
        if not os.path.exists(filename):
            return None
        if os.path.isdir(filename):
            self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
        # preserve old behaviour where the third parameter was a hash algorithm object
        if hasattr(algorithm, 'hexdigest'):
            digest_method = algorithm
        else:
            try:
                digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
            except KeyError:
                self.fail_json(msg="Could not hash file '%s' with algorithm '%s'.  Available algorithms: %s" %
                               (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
        # stream the file in 64 KiB chunks to bound memory use
        blocksize = 64 * 1024
        infile = open(filename, 'rb')
        block = infile.read(blocksize)
        while block:
            digest_method.update(block)
            block = infile.read(blocksize)
        infile.close()
        return digest_method.hexdigest()
    def md5(self, filename):
        ''' Return MD5 hex digest of local file using digest_from_file().

        Do not use this function unless you have no other choice for:
            1) Optional backwards compatibility
            2) Compatibility with a third party protocol

        This function will not work on systems complying with FIPS-140-2.

        Most uses of this function can use the module.sha1 function instead.
        '''
        if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
            raise ValueError('MD5 not available.  Possibly running in FIPS mode')
        return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
    def backup_local(self, fn):
        '''Make a date-marked backup of *fn* next to it.

        Returns the backup path on success, or '' when *fn* does not exist
        (it does not return True/False); calls fail_json if the copy fails.
        '''
        backupdest = ''
        if os.path.exists(fn):
            # backups named basename-YYYY-MM-DD@HH:MM:SS~
            ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
            backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
            try:
                shutil.copy2(fn, backupdest)
            except (shutil.Error, IOError):
                e = get_exception()
                self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
        return backupdest
    def cleanup(self, tmpfile):
        """Best-effort unlink of *tmpfile*; failures are reported to stderr
        rather than raised."""
        if os.path.exists(tmpfile):
            try:
                os.unlink(tmpfile)
            except OSError:
                e = get_exception()
                sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
    def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest, returns true on success
        it uses os.rename to ensure this as it is an atomic operation, rest of the function is
        to work around limitations, corner cases and ensure selinux context is saved if possible'''
        context = None
        dest_stat = None
        if os.path.exists(dest):
            try:
                # copy the destination's mode/ownership onto src first so the
                # rename preserves the existing file's attributes
                dest_stat = os.stat(dest)
                os.chmod(src, dest_stat.st_mode & PERM_BITS)
                os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
            except OSError:
                e = get_exception()
                if e.errno != errno.EPERM:
                    raise
            if self.selinux_enabled():
                context = self.selinux_context(dest)
        else:
            if self.selinux_enabled():
                context = self.selinux_default_context(dest)
        creating = not os.path.exists(dest)
        try:
            login_name = os.getlogin()
        except OSError:
            # not having a tty can cause the above to fail, so
            # just get the LOGNAME environment variable instead
            login_name = os.environ.get('LOGNAME', None)
        # if the original login_name doesn't match the currently
        # logged-in user, or if the SUDO_USER environment variable
        # is set, then this user has switched their credentials
        switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')
        try:
            # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
            os.rename(src, dest)
        except (IOError, OSError):
            e = get_exception()
            if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]:
                # only try workarounds for errno 18 (cross device), 1 (not permitted),  13 (permission denied)
                # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
                self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
            else:
                # fallback: stage a temp file in the destination directory and
                # rename that into place (rename within one fs is atomic)
                dest_dir = os.path.dirname(dest)
                dest_file = os.path.basename(dest)
                try:
                    tmp_dest = tempfile.NamedTemporaryFile(
                        prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
                except (OSError, IOError):
                    e = get_exception()
                    self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (dest_dir, e))
                try: # leaves tmp file behind when sudo and  not root
                    if switched_user and os.getuid() != 0:
                        # cleanup will happen by 'rm' of tempdir
                        # copy2 will preserve some metadata
                        shutil.copy2(src, tmp_dest.name)
                    else:
                        shutil.move(src, tmp_dest.name)
                    if self.selinux_enabled():
                        self.set_context_if_different(
                            tmp_dest.name, context, False)
                    try:
                        tmp_stat = os.stat(tmp_dest.name)
                        if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                            os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
                    except OSError:
                        e = get_exception()
                        if e.errno != errno.EPERM:
                            raise
                    os.rename(tmp_dest.name, dest)
                except (shutil.Error, OSError, IOError):
                    e = get_exception()
                    # sadly there are some situations where we cannot ensure atomicity, but only if
                    # the user insists and we get the appropriate error we update the file unsafely
                    if unsafe_writes and e.errno == errno.EBUSY:
                        #TODO: issue warning that this is an unsafe operation, but doing it cause user insists
                        try:
                            try:
                                out_dest = open(dest, 'wb')
                                in_src = open(src, 'rb')
                                shutil.copyfileobj(in_src, out_dest)
                            finally:  # assuring closed files in 2.4 compatible way
                                if out_dest:
                                    out_dest.close()
                                if in_src:
                                    in_src.close()
                        except (shutil.Error, OSError, IOError):
                            e = get_exception()
                            self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e))
                    else:
                        self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
                self.cleanup(tmp_dest.name)
        if creating:
            # make sure the file has the correct permissions
            # based on the current value of umask
            umask = os.umask(0)
            os.umask(umask)
            os.chmod(dest, DEFAULT_PERM & ~umask)
            if switched_user:
                os.chown(dest, os.getuid(), os.getgid())
        if self.selinux_enabled():
            # rename might not preserve context
            self.set_context_if_different(dest, context, False)
    def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
        '''
        Execute a command, returns rc, stdout, and stderr.
        :arg args: is the command to run
            * If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
            * If args is a string and use_unsafe_shell=True it runs with shell=True.
        :kw check_rc: Whether to call fail_json in case of non zero RC.
            Default False
        :kw close_fds: See documentation for subprocess.Popen(). Default True
        :kw executable: See documentation for subprocess.Popen(). Default None
        :kw data: If given, information to write to the stdin of the command
        :kw binary_data: If False, append a newline to the data. Default False
        :kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
            the same directory can also be found
        :kw cwd: If given, working directory to run the command inside
        :kw use_unsafe_shell: See `args` parameter. Default False
        :kw prompt_regex: Regex string (not a compiled regex) which can be
            used to detect prompts in the stdout which would otherwise cause
            the execution to hang (especially if no input data is specified)
        :kwarg environ_update: dictionary to *update* os.environ with
        '''
        # Normalize args into either a list (shell=False) or a single
        # string (shell=True), depending on use_unsafe_shell.
        shell = False
        if isinstance(args, list):
            if use_unsafe_shell:
                args = " ".join([pipes.quote(x) for x in args])
                shell = True
        elif isinstance(args, basestring) and use_unsafe_shell:
            shell = True
        elif isinstance(args, string_types):
            # On python2.6 and below, shlex has problems with text type
            # On python3, shlex needs a text type.
            if PY2 and isinstance(args, text_type):
                args = args.encode('utf-8')
            elif PY3 and isinstance(args, binary_type):
                args = args.decode('utf-8', errors='surrogateescape')
            args = shlex.split(args)
        else:
            msg = "Argument 'args' to run_command must be list or string"
            self.fail_json(rc=257, cmd=args, msg=msg)
        # Pre-compile the prompt regex (bytes pattern, since stdout is bytes).
        prompt_re = None
        if prompt_regex:
            if isinstance(prompt_regex, text_type):
                if PY3:
                    prompt_regex = prompt_regex.encode('utf-8', errors='surrogateescape')
                elif PY2:
                    prompt_regex = prompt_regex.encode('utf-8')
            try:
                prompt_re = re.compile(prompt_regex, re.MULTILINE)
            except re.error:
                self.fail_json(msg="invalid prompt regular expression given to run_command")
        # expand things like $HOME and ~
        if not shell:
            args = [ os.path.expandvars(os.path.expanduser(x)) for x in args if x is not None ]
        rc = 0
        msg = None
        st_in = None
        # Manipulate the environ we'll send to the new process
        # (original values saved so they can be restored after the call)
        old_env_vals = {}
        # We can set this from both an attribute and per call
        for key, val in self.run_command_environ_update.items():
            old_env_vals[key] = os.environ.get(key, None)
            os.environ[key] = val
        if environ_update:
            for key, val in environ_update.items():
                old_env_vals[key] = os.environ.get(key, None)
                os.environ[key] = val
        if path_prefix:
            old_env_vals['PATH'] = os.environ['PATH']
            os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
        # If using test-module and explode, the remote lib path will resemble ...
        #   /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
        # If using ansible or ansible-playbook with a remote system ...
        #   /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
        # Clean out python paths set by ansiballz
        if 'PYTHONPATH' in os.environ:
            pypaths = os.environ['PYTHONPATH'].split(':')
            pypaths = [x for x in pypaths \
                        if not x.endswith('/ansible_modlib.zip') \
                        and not x.endswith('/debug_dir')]
            os.environ['PYTHONPATH'] = ':'.join(pypaths)
            if not os.environ['PYTHONPATH']:
                del os.environ['PYTHONPATH']
        # create a printable version of the command for use
        # in reporting later, which strips out things like
        # passwords from the args list
        to_clean_args = args
        if PY2:
            if isinstance(args, text_type):
                to_clean_args = args.encode('utf-8')
        else:
            if isinstance(args, binary_type):
                to_clean_args = args.decode('utf-8', errors='replace')
        if isinstance(args, (text_type, binary_type)):
            to_clean_args = shlex.split(to_clean_args)
        clean_args = []
        is_passwd = False
        for arg in to_clean_args:
            if is_passwd:
                # previous arg was a password flag: mask this value
                is_passwd = False
                clean_args.append('********')
                continue
            if PASSWD_ARG_RE.match(arg):
                sep_idx = arg.find('=')
                if sep_idx > -1:
                    # '--password=secret' form: mask everything after '='
                    clean_args.append('%s=********' % arg[:sep_idx])
                    continue
                else:
                    # '--password secret' form: mask the *next* arg
                    is_passwd = True
            arg = heuristic_log_sanitize(arg, self.no_log_values)
            clean_args.append(arg)
        clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
        if data:
            st_in = subprocess.PIPE
        kwargs = dict(
            executable=executable,
            shell=shell,
            close_fds=close_fds,
            stdin=st_in,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if cwd and os.path.isdir(cwd):
            kwargs['cwd'] = cwd
        # store the pwd
        prev_dir = os.getcwd()
        # make sure we're in the right working directory
        if cwd and os.path.isdir(cwd):
            try:
                os.chdir(cwd)
            except (OSError, IOError):
                e = get_exception()
                self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
        try:
            if self._debug:
                if isinstance(args, list):
                    running = ' '.join(args)
                else:
                    running = args
                self.log('Executing: ' + running)
            cmd = subprocess.Popen(args, **kwargs)
            # the communication logic here is essentially taken from that
            # of the _communicate() function in ssh.py
            stdout = b('')
            stderr = b('')
            rpipes = [cmd.stdout, cmd.stderr]
            if data:
                if not binary_data:
                    data += '\n'
                if isinstance(data, text_type):
                    if PY3:
                        errors = 'surrogateescape'
                    else:
                        errors = 'strict'
                    data = data.encode('utf-8', errors=errors)
                cmd.stdin.write(data)
                cmd.stdin.close()
            # select()-based read loop: drain stdout/stderr until both pipes
            # hit EOF and the child has exited
            while True:
                rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
                if cmd.stdout in rfd:
                    dat = os.read(cmd.stdout.fileno(), 9000)
                    stdout += dat
                    if dat == b(''):
                        rpipes.remove(cmd.stdout)
                if cmd.stderr in rfd:
                    dat = os.read(cmd.stderr.fileno(), 9000)
                    stderr += dat
                    if dat == b(''):
                        rpipes.remove(cmd.stderr)
                # if we're checking for prompts, do it now
                if prompt_re:
                    if prompt_re.search(stdout) and not data:
                        return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
                # only break out if no pipes are left to read or
                # the pipes are completely read and
                # the process is terminated
                if (not rpipes or not rfd) and cmd.poll() is not None:
                    break
                # No pipes are left to read but process is not yet terminated
                # Only then it is safe to wait for the process to be finished
                # NOTE: Actually cmd.poll() is always None here if rpipes is empty
                elif not rpipes and cmd.poll() == None:
                    cmd.wait()
                    # The process is terminated. Since no pipes to read from are
                    # left, there is no need to call select() again.
                    break
            cmd.stdout.close()
            cmd.stderr.close()
            rc = cmd.returncode
        except (OSError, IOError):
            e = get_exception()
            self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
        # deliberate catch-all boundary: any other failure is reported via
        # fail_json with a traceback instead of crashing the module
        except:
            self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
        # Restore env settings
        for key, val in old_env_vals.items():
            if val is None:
                del os.environ[key]
            else:
                os.environ[key] = val
        if rc != 0 and check_rc:
            msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
            self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
        # reset the pwd
        os.chdir(prev_dir)
        return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def pretty_bytes(self,size):
ranges = (
(1<<70, 'ZB'),
(1<<60, 'EB'),
(1<<50, 'PB'),
(1<<40, 'TB'),
(1<<30, 'GB'),
(1<<20, 'MB'),
(1<<10, 'KB'),
(1, 'Bytes')
)
for limit, suffix in ranges:
if size >= limit:
break
return '%.2f %s' % (float(size)/ limit, suffix)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
    """Return the directory holding this module, with symlinks resolved."""
    real_path = os.path.realpath(__file__)
    return os.path.split(real_path)[0]
|
ramondelafuente/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 86,695
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Export syntax-highlighted text as HTML.
"""
import lydocument
import textformats
import ly.document
import ly.colorize
def html_text(text, mode=None, scheme='editor', inline=True, number_lines=False, full_html=True,
        wrap_tag="pre", wrap_attrib="id", wrap_attrib_name="document"):
    """Convert plain text to HTML, guessing the mode when none is given."""
    doc = ly.document.Document(text, mode)
    cursor = ly.document.Cursor(doc)
    return html(cursor, scheme, inline, number_lines, full_html,
                wrap_tag, wrap_attrib, wrap_attrib_name)
def html_inline(cursor, scheme='editor', inline=True, number_lines=False,
        full_html=True, wrap_tag="pre", wrap_attrib="id", wrap_attrib_name="document"):
    """Return an (by default) inline-styled HTML document for the cursor's selection."""
    ly_cursor = lydocument.cursor(cursor)
    return html(ly_cursor, scheme, inline, number_lines, full_html,
                wrap_tag, wrap_attrib, wrap_attrib_name)
def html_document(document, scheme='editor', inline=False, number_lines=False, full_html=True,
        wrap_tag="pre", wrap_attrib="id", wrap_attrib_name="document"):
    """Return a (by default) css-styled HTML document for the full document."""
    ly_doc = lydocument.Document(document)
    cursor = lydocument.Cursor(ly_doc)
    return html(cursor, scheme, inline, number_lines, full_html,
                wrap_tag, wrap_attrib, wrap_attrib_name)
def html(cursor, scheme='editor', inline=False, number_lines=False, full_html=True,
        wrap_tag="pre", wrap_attrib="id", wrap_attrib_name="document"):
    """Return a HTML document with the syntax-highlighted region.

    The tokens are marked with <span> tags. The cursor is a
    ly.document.Cursor instance. The specified text formats scheme is used
    (by default 'editor'). If inline is True, the span tags have inline
    style attributes; if False, they carry class attributes and a
    stylesheet is included. Set number_lines to True to add line numbers.
    """
    # look up the colors/styles of the requested highlighting scheme
    scheme_data = textformats.formatData(scheme)
    writer = ly.colorize.HtmlWriter()
    writer.set_wrapper_tag(wrap_tag)
    writer.set_wrapper_attribute(wrap_attrib)
    writer.document_id = wrap_attrib_name
    writer.inline_style = inline
    writer.number_lines = number_lines
    writer.full_html = full_html
    writer.fgcolor = scheme_data.baseColors['text'].name()
    writer.bgcolor = scheme_data.baseColors['background'].name()
    writer.css_scheme = scheme_data.css_scheme()
    return writer.html(cursor)
|
dliessi/frescobaldi
|
frescobaldi_app/highlight2html.py
|
Python
|
gpl-2.0
| 3,289
|
# -*- coding: utf-8 -*-
import numpy as np
import numpy.ma as ma
import itertools
import scipy.optimize
from pytmatrix.psd import GammaPSD
import csv
import datetime
from netCDF4 import Dataset
from ..DropSizeDistribution import DropSizeDistribution
from ..io import common
import os
def read_arm_jwd_b1(filename):
    """
    Takes a filename pointing to an ARM JWD netcdf file and returns
    a drop size distribution object.
    Usage:
    dsd = read_arm_jwd_b1(filename)
    Returns:
    DropSizeDistribution object
    """
    # ArmJwdReader() either returns an instance (always truthy) or raises,
    # so the historical `if reader: ... else: return None` guard and the
    # unreachable `del (reader)` after the returns have been removed.
    reader = ArmJwdReader(filename)
    return DropSizeDistribution(reader)
class ArmJwdReader(object):
    """
    This class reads and parses parsivel disdrometer data from ARM netcdf
    files. These conform to document (Need Document).
    Use the read_arm_jwd_b1() function to interface with this.
    """
    def __init__(self, filename):
        """
        Handles setting up a reader.

        Opens the netCDF file, builds per-bin metadata (diameter, spread,
        bin edges) and fills ``self.fields`` with variable dictionaries
        that DropSizeDistribution expects.

        :param filename: path to an ARM JWD b1-level netCDF file.
        """
        self.fields = {}
        self.info = {}
        self.nc_dataset = Dataset(filename)
        self.filename = filename
        # base_time + time_offset gives seconds since the epoch per sample
        time = np.ma.array(
            self.nc_dataset.variables["time_offset"][:]
            + self.nc_dataset.variables["base_time"][:]
        )
        self.time = self._get_epoch_time(time)
        Nd = np.ma.array(self.nc_dataset.variables["nd"][:])
        velocity = np.ma.array(self.nc_dataset.variables["fall_vel"][:])
        rain_rate = np.ma.array(self.nc_dataset.variables["rain_rate"][:])
        self.diameter = np.ma.array(
            self.nc_dataset.variables["mean_diam_drop_class"][:]
        )
        self.spread = np.ma.array(self.nc_dataset.variables["delta_diam"][:])
        # TODO: Move this to new metadata utility, and just add information from raw netcdf where appropriate
        # bin edges reconstructed from bin centers + half-widths, with 0
        # prepended as the lower edge of the first bin
        self.bin_edges = common.var_to_dict(
            "bin_edges",
            np.hstack((0, self.diameter + np.array(self.spread) / 2)),
            "mm",
            "Boundaries of bin sizes",
        )
        self.spread = common.var_to_dict(
            "spread", self.spread, "mm", "Bin size spread of bins"
        )
        self.diameter = common.var_to_dict(
            "diameter", self.diameter, "mm", "Particle diameter of bins"
        )
        self.fields["Nd"] = common.var_to_dict(
            "Nd", Nd, "m^-3 mm^-1", "Liquid water particle concentration"
        )
        self.fields["velocity"] = common.var_to_dict(
            "velocity", velocity, "m s^-1", "Terminal fall velocity for each bin"
        )
        self.fields["rain_rate"] = common.var_to_dict(
            "rain_rate", rain_rate, "mm h^-1", "Rain rate"
        )
        self.fields["num_drop"] = common.var_to_dict(
            "num_drop", self.nc_dataset.variables["num_drop"][:], "#", "Number of Drops"
        )
        self.fields["d_max"] = common.var_to_dict(
            "d_max",
            self.nc_dataset.variables["d_max"][:],
            "mm",
            "Diameter of largest drop",
        )
        self.fields["liq_water"] = common.var_to_dict(
            "liq_water",
            self.nc_dataset.variables["liq_water"][:],
            "gm/m^3",
            "Liquid water content",
        )
        self.fields["n_0"] = common.var_to_dict(
            "n_0",
            self.nc_dataset.variables["n_0"][:],
            "1/(m^3-mm)",
            "Distribution Intercept",
        )
        self.fields["lambda"] = common.var_to_dict(
            "lambda",
            self.nc_dataset.variables["lambda"][:],
            "1/mm",
            "Distribution Slope",
        )
        # copy all global netCDF attributes into the info dict
        for key in self.nc_dataset.ncattrs():
            self.info[key] = self.nc_dataset.getncattr(key)
    def _get_epoch_time(self, sample_times):
        """Convert time to epoch time and return a dictionary.

        :param sample_times: array of sample times in seconds since the
            epoch (already base_time + time_offset).
        :returns: dict with 'data', 'units', 'standard_name', 'long_name'.
        """
        eptime = {
            "data": sample_times,
            "units": common.EPOCH_UNITS,
            "standard_name": "Time",
            "long_name": "Time (UTC)",
        }
        return eptime
|
josephhardinee/PyDisdrometer
|
pydsd/aux_readers/ARM_JWD_Reader.py
|
Python
|
lgpl-2.1
| 4,075
|
__all__ = ['DelimitedTextReader', 'DelimitedPointsReaderBase', 'XYZTextReader']
__displayname__ = 'Delimited File I/O'
import sys
import numpy as np
import pandas as pd
from .. import _helpers, interface
from ..base import ReaderBase
if sys.version_info < (3,):
from StringIO import StringIO
else:
from io import StringIO
class DelimitedTextReader(ReaderBase):
    """This reader will take in any delimited text file and make a ``vtkTable``
    from it. This is not much different than the default .txt or .csv reader in
    ParaView, however it gives us room to use our own extensions and a little
    more flexibility in the structure of the files we import.
    """
    __displayname__ = 'Delimited Text Reader'
    __category__ = 'reader'
    extensions = 'dat csv txt text ascii xyz tsv ntab'
    description = 'PVGeo: Delimited Text Files'
    def __init__(self, nOutputPorts=1, outputType='vtkTable', **kwargs):
        ReaderBase.__init__(
            self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
        )
        # Parameters to control the file read:
        # - if these are set/changed, we must re-perform the read
        self.__delimiter = kwargs.get('delimiter', ' ')
        # NOTE: despite the name, this flag means "split on any whitespace"
        self.__use_tab = kwargs.get('use_tab', False)
        self.__skipRows = kwargs.get('skiprows', 0)
        self.__comments = kwargs.get('comments', '!')
        self.__has_titles = kwargs.get('has_titles', True)
        # Data objects to hold the read data for access by the pipeline methods
        self._data = []
        self._titles = []
    def _get_delimiter(self):
        """For internal use only! Returns None (any whitespace) when the
        whitespace flag is set, otherwise the configured delimiter."""
        if self.__use_tab:
            return None
        return self.__delimiter
    def get_split_on_white_space(self):
        """Returns the status of how the delimiter interprets whitespace"""
        return self.__use_tab
    #### Methods for performing the read ####
    def _get_file_contents(self, idx=None):
        """This grabs the lines of the input data file as a string array. This
        allows us to load the file contents, parse the header then use numpy or
        pandas to parse the data."""
        if idx is not None:
            filenames = [self.get_file_names(idx=idx)]
        else:
            filenames = self.get_file_names()
        contents = []
        for f in filenames:
            try:
                # read each file as an array of raw lines, skipping the
                # configured number of leading rows
                contents.append(
                    np.genfromtxt(
                        f, dtype=str, delimiter='\n', comments=self.__comments
                    )[self.__skipRows : :]
                )
            except (IOError, OSError) as fe:
                raise _helpers.PVGeoError(str(fe))
        if idx is not None:
            return contents[0]
        return contents
    def _extract_header(self, content):
        """Override this. Removes header from single file's content.

        Returns a (titles, remaining_content) tuple; when the file has no
        title row, placeholder names 'Field 0', 'Field 1', ... are generated.
        """
        if len(np.shape(content)) > 2:
            raise _helpers.PVGeoError(
                "`_extract_header()` can only handle a single file's content"
            )
        idx = 0
        if self.__has_titles:
            titles = content[idx].split(self._get_delimiter())
            idx += 1
        else:
            cols = len(content[idx].split(self._get_delimiter()))
            titles = []
            for i in range(cols):
                titles.append('Field %d' % i)
        return titles, content[idx::]
    def _extract_headers(self, contents):
        """Should NOT be overridden. This is a convenience method to iteratively
        get all file contents. You should override ``_extract_header``.
        """
        ts = []
        for i, c in enumerate(contents):
            titles, newcontent = self._extract_header(c)
            contents[i] = newcontent
            ts.append(titles)
        # Check that the titles are the same across files:
        ts = np.unique(np.asarray(ts), axis=0)
        if len(ts) > 1:
            raise _helpers.PVGeoError(
                'Data array titles varied across file timesteps. This data is invalid as a timeseries.'
            )
        return ts[0], contents
    def _file_contents_to_data_frame(self, contents):
        """Should NOT need to be overridden. After ``_extract_headers`` handles
        removing the file header from the file contents, this method will parse
        the remainder of the contents into a pandas DataFrame with column names
        generated from the titles resulting from ``_extract_headers``.
        """
        data = []
        for content in contents:
            if self.get_split_on_white_space():
                df = pd.read_csv(
                    StringIO("\n".join(content)),
                    names=self.get_titles(),
                    delim_whitespace=self.get_split_on_white_space(),
                )
            else:
                df = pd.read_csv(
                    StringIO("\n".join(content)),
                    names=self.get_titles(),
                    sep=self._get_delimiter(),
                )
            data.append(df)
        return data
    def _read_up_front(self):
        """Should not need to be overridden.

        Performs the full read: loads file contents, strips the headers,
        parses into DataFrames, then clears the dirty flag.
        """
        # Perform Read
        contents = self._get_file_contents()
        self._titles, contents = self._extract_headers(contents)
        self._data = self._file_contents_to_data_frame(contents)
        self.need_to_read(flag=False)
        return 1
    #### Methods for accessing the data read in #####
    def _get_raw_data(self, idx=0):
        """This will return the proper data for the given timestep as a dataframe"""
        return self._data[idx]
    #### Algorithm Methods ####
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to get data for current timestep and populate the
        output data object.
        """
        # Get output:
        output = self.GetOutputData(outInfo, 0)
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        if self.need_to_read():
            self._read_up_front()
        # Generate the data object
        interface.data_frame_to_table(self._get_raw_data(idx=i), output)
        return 1
    #### Seters and Geters ####
    def set_delimiter(self, deli):
        """The input file's delimiter. To use a tab delimiter please use
        ``set_split_on_white_space()``
        Args:
            deli (str): a string delimiter/separator
        """
        if deli != self.__delimiter:
            self.__delimiter = deli
            self.Modified()
    def set_split_on_white_space(self, flag):
        """Set a boolean flag to override the ``set_delimiter()`` and use any
        white space as a delimiter.
        """
        if flag != self.__use_tab:
            self.__use_tab = flag
            self.Modified()
    def set_skip_rows(self, skip):
        """Set the integer number of rows to skip at the top of the file."""
        if skip != self.__skipRows:
            self.__skipRows = skip
            self.Modified()
    def get_skip_rows(self):
        """Get the integer number of rows to skip at the top of the file."""
        return self.__skipRows
    def set_comments(self, identifier):
        """The character identifier for comments within the file."""
        if identifier != self.__comments:
            self.__comments = identifier
            self.Modified()
    def set_has_titles(self, flag):
        """Set the boolean for if the delimited file has header titles for the
        data arrays.
        """
        if self.__has_titles != flag:
            self.__has_titles = flag
            self.Modified()
    def has_titles(self):
        """Get the boolean for if the delimited file has header titles for the
        data arrays.
        """
        return self.__has_titles
    def get_titles(self):
        """Get the list of data array titles parsed from the file header."""
        return self._titles
###############################################################################
class DelimitedPointsReaderBase(DelimitedTextReader):
    """A base class for delimited text readers that produce ``vtkPolyData``
    points.
    """
    __displayname__ = 'Delimited Points Reader Base'
    __category__ = 'base'
    # extensions are inherited from DelimitedTextReader
    description = 'PVGeo: Delimited Points' # Should be overridden
    def __init__(self, **kwargs):
        DelimitedTextReader.__init__(self, outputType='vtkPolyData', **kwargs)
        # whether to also expose the Z coordinate as a point data array
        self.__copy_z = kwargs.get('copy_z', False)
    def set_copy_z(self, flag):
        """Set whether or not to copy the Z-component of the points to the
        Point Data"""
        if self.__copy_z != flag:
            self.__copy_z = flag
            self.Modified()
    def get_copy_z(self):
        """Get the status of whether or not to copy the Z-component of the
        points to the Point Data
        """
        return self.__copy_z
    #### Algorithm Methods ####
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to get data for current timestep and populate the
        output data object.

        Reads (if needed), then converts the timestep's DataFrame into a
        vtkPolyData point set on the output.
        """
        # Get output:
        output = self.GetOutputData(outInfo, 0)
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        if self.need_to_read():
            self._read_up_front()
        # Generate the PolyData output
        data = self._get_raw_data(idx=i)
        output.DeepCopy(interface.points_to_poly_data(data, copy_z=self.get_copy_z()))
        return 1
###############################################################################
class XYZTextReader(DelimitedTextReader):
    """A makeshift reader for XYZ files where titles have comma delimiter and
    data has space delimiter.
    """
    __displayname__ = 'XYZ Text Reader'
    __category__ = 'reader'
    # extensions are inherited from DelimitedTextReader
    description = 'PVGeo: XYZ Delimited Text Files where header has comma delimiter.'
    def __init__(self, **kwargs):
        DelimitedTextReader.__init__(self, **kwargs)
        # XYZ files use '#' for comments rather than the parent default '!'
        self.set_comments(kwargs.get('comments', '#'))
    # Simply override the extract titles functionality
    def _extract_header(self, content):
        """Internal helper to parse header details for XYZ files.

        The header's titles are comma-separated while the data rows use the
        inherited delimiter; returns (titles, remaining_content).
        """
        titles = content[0][2::].split(', ') # first two characters of header is '! '
        return titles, content[1::]
|
banesullivan/ParaViewGeophysics
|
PVGeo/readers/delimited.py
|
Python
|
bsd-3-clause
| 10,331
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2016年10月1日
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
'''
import json
import time
import six
def nowtime_str():
    """Return the current epoch time in milliseconds as a float."""
    now_seconds = time.time()
    return 1000 * now_seconds
def get_jdata(txtdata):
    """Parse the body of an HTTP response object as JSON.

    :param txtdata: a response-like object whose ``content`` attribute
        holds the JSON payload as ``bytes`` or ``str``.
    :returns: the decoded JSON object (dict/list/...).
    :raises ValueError: if the payload is not valid JSON.
    """
    raw = txtdata.content
    # Decide by the payload's actual type instead of the interpreter
    # version (the old `if six.PY3:` check crashed when `content` was
    # already text, and needlessly depended on `six`).
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    jsonobj = json.loads(raw)
    return jsonobj
|
changbindu/rufeng-finance
|
src/tushare/tushare/trader/utils.py
|
Python
|
lgpl-3.0
| 406
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.