in_chat_with_support = []
waiting_support_approval = []
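# In-memory session state (resets when the bot restarts):
# in_chat_with_support holds ids of users in an active support chat;
# waiting_support_approval holds ids of users pending manual admin approval.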
@bot.message_handler(commands=['support'])
def support(message):
userlang = redisserver.get("settings:user:language:" + str(message.from_user.id))
userid = message.from_user.id
banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
if banlist:
return
if message.from_user.id not in in_chat_with_support:
if userid not in waiting_support_approval:
bot.reply_to(message, language[userlang]["WAITING_APPROVAL_MESSENGER_MSG"], parse_mode="HTML")
waiting_support_approval.append(message.from_user.id)
bot.send_message("-" + str(SUPPORT_GP), "User " + str(message.from_user.id) + " - " + str(message.from_user.first_name) + str(message.from_user.first_name) + " is waiting for your approval to join the support chat!")
else:
bot.reply_to(message, "Please wait. Your chat request hasnt been still manually accepted! \nIf you want to leave, type /leave")
return
else:
bot.reply_to(message, language[userlang]["ALREADY_IN_MESSENGER_MSG"], parse_mode="HTML")
@bot.message_handler(commands=['leave'])
def leave_support(message):
userlang = redisserver.get("settings:user:language:" + str(message.from_user.id))
userid = message.from_user.id
banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
if banlist:
return
if message.from_user.id in in_chat_with_support:
bot.reply_to(message, language[userlang]["LEFT_MESSENGER_MSG"], parse_mode="HTML")
in_chat_with_support.remove(message.from_user.id)
bot.send_message("-" + str(SUPPORT_GP), "User " + str(message.from_user.id) + " - " + str(message.from_user.first_name) + str(message.from_user.first_name) + " left the support chat.")
elif userid in waiting_support_approval:
waiting_support_approval.remove(userid)
bot.reply_to(message, language[userlang]["LEFT_MESSENGER_MSG"], parse_mode="HTML")
bot.send_message("-" + str(SUPPORT_GP), "User " + str(message.from_user.id) + " - " + str(message.from_user.first_name) + str(message.from_user.first_name) + " left the support chat while waiting for approval")
else:
bot.reply_to(message, language[userlang]["NOT_IN_MESSENGER_MSG"], parse_mode="HTML")
@bot.message_handler(commands=['force_user_leave'])
def forceleave(message):
if message.from_user.id in ADMINS_IDS:
if len(message.text.split()) < 2:
bot.reply_to(message, "Dude, enter an ID.", parse_mode="Markdown")
return
userid = int(message.text.split()[1])
if userid in in_chat_with_support:
bot.reply_to(message, "Kicked user from chat.", parse_mode="HTML")
bot.send_message(userid, "You have forced to leave chat by admin.")
in_chat_with_support.remove(userid)
else:
bot.reply_to(message, "User not in chat.")
else:
bot.send_message(message.chat.id, "You dont have permission.")
@bot.message_handler(commands=['accept_chat'])
def acceptchat(message):
if message.from_user.id in ADMINS_IDS:
if len(message.text.split()) < 2:
bot.reply_to(message, "Dude, enter an ID.", parse_mode="Markdown")
return
userid = int(message.text.split()[1])
userlang = redisserver.get("settings:user:language:" + str(userid))
if userid in waiting_support_approval:
bot.reply_to(message, "Accepted chat request", parse_mode="HTML")
bot.send_message(userid, language[userlang]["ACCEPTED_MESSENGER_MSG"])
waiting_support_approval.remove(userid)
in_chat_with_support.append(userid)
else:
bot.reply_to(message, "User not in approval list")
else:
bot.send_message(message.chat.id, "You dont have permission.")
@bot.message_handler(commands=['deny_chat'])
def denychat(message):
if message.from_user.id in ADMINS_IDS:
if len(message.text.split()) < 2:
bot.reply_to(message, "Dude, enter an ID.", parse_mode="Markdown")
return
userid = int(message.text.split()[1])
userlang = redisserver.get("settings:user:language:" + str(userid))
if userid in waiting_support_approval:
bot.reply_to(message, "Denied chat request", parse_mode="HTML")
bot.send_message(userid, language[userlang]["DENIED_MESSENGER_MSG"])
waiting_support_approval.remove(userid)
else:
bot.reply_to(message, "User not in approval list")
else:
bot.send_message(message.chat.id, "You dont have permission.")
# MESSAGE HANDLING SYSTEM IN MESSAGE_HANDLER PLUGIN
|
{
"content_hash": "a9e9a65980666395179d05c36e90dcce",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 223,
"avg_line_length": 51.48837209302326,
"alnum_prop": 0.6933152664859982,
"repo_name": "WebShark025/TheZigZagProject",
"id": "001d3a37dadc9c182f92378b070aded3a4a58452",
"size": "4428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/messenger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90778"
},
{
"name": "Shell",
"bytes": "635"
}
],
"symlink_target": ""
}
|
__title__ = 'sparx'
__description__ = 'Simplified Data Munging, Wrangling and Preparation Library'
__url__ = 'http://cleverinsight.co'
__version__ = (0, 0, 1)
__build__ = 0
__author__ = 'Bastin Robins J'
__author_email__ = 'robin@cleverinsight.co'
__license__ = 'Proprietary'
__copyright__ = 'Copyright 2017 CleverInsight'
|
{
"content_hash": "c39615cd174eac50c384af8f9a9790bc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 35.55555555555556,
"alnum_prop": 0.659375,
"repo_name": "CleverInsight/sparx",
"id": "d54744d4b5cd1e14fdf3bc2fbd3eb58176b44c16",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparx/__version__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "8630"
}
],
"symlink_target": ""
}
|
from groupflow_shared import *
from mininet.net import *
from mininet.node import OVSSwitch, UserSwitch
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.node import Node, RemoteController
from mininet.topo import Topo
from scipy.stats import truncnorm
from numpy.random import randint, uniform
from subprocess import *
import sys
import os
import signal
from time import sleep, time
from datetime import datetime
from multiprocessing import Process
import numpy as np
class FlowTrackerTestTopo( Topo ):
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
h0 = self.addHost('h0', ip='10.0.0.1')
h1 = self.addHost('h1', ip='10.0.0.2')
s0 = self.addSwitch('s0')
s1 = self.addSwitch('s1')
# Add links
self.addLink(s0, h0, bw = 1000, use_htb = True)
self.addLink(s1, h1, bw = 1000, use_htb = True)
self.addLink(s0, s1, bw = 5, use_htb = True)
def mcastConfig(self, net):
# Configure hosts for multicast support
net.get('h0').cmd('route add -net 224.0.0.0/4 h0-eth0')
net.get('h1').cmd('route add -net 224.0.0.0/4 h1-eth0')
def get_host_list(self):
return ['h0', 'h1']
def get_switch_list(self):
return ['s0', 's1']
def flowtrackerTest(topo, hosts = [], interactive = False, util_link_weight = 10, link_weight_type = 'linear'):
# Launch the external controller
pox_arguments = ['pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', 'forwarding.l2_learning']
print 'Launching external controller: ' + str(pox_arguments[0])
print 'Launch arguments:'
print ' '.join(pox_arguments)
with open(os.devnull, "w") as fnull:
pox_process = Popen(pox_arguments, stdout=fnull, stderr=fnull, shell=False, close_fds=True)
# Allow time for the log file to be generated
sleep(1)
# External controller
net = Mininet(topo, controller=RemoteController, switch=OVSSwitch, link=TCLink, build=False, autoSetMacs=True)
# pox = RemoteController('pox', '127.0.0.1', 6633)
net.addController('pox', RemoteController, ip = '127.0.0.1', port = 6633)
net.start()
#for switch_name in topo.get_switch_list():
# net.get(switch_name).controlIntf = net.get(switch_name).intf('lo')
# net.get(switch_name).cmd('route add -host 127.0.0.1 dev lo')
# net.get('pox').cmd('route add -host ' + net.get(switch_name).IP() + ' dev lo')
topo.mcastConfig(net)
print 'Network configuration:'
print net.get('h0').cmd('ifconfig')
print net.get('h0').cmd('route')
print net.get('h1').cmd('ifconfig')
print net.get('h1').cmd('route')
sleep_time = 2
print 'Waiting ' + str(sleep_time) + ' seconds to allow for controller topology discovery'
sleep(sleep_time) # Allow time for the controller to detect the topology
if not interactive:
net.ping([net.get('h0'), net.get('h1')])
net.iperf([net.get('h0'), net.get('h1')], l4Type='UDP')
net.iperf([net.get('h1'), net.get('h0')], l4Type='UDP')
else:
print 'Launching test applications...'
sender_proc = None
sender_log = open('sender_log.txt', 'w')
receiver_proc = None
receiver_log = open('receiver_log.txt', 'w')
# Launch multicast sender and receiver
sender_command = ['python', 'multicast_sender.py']
receiver_command = ['python', 'multicast_receiver.py']
receiver_proc = net.get('h1').popen(' '.join(receiver_command), stdout=receiver_log, stderr=receiver_log, close_fds=True, shell=True)
sender_proc = net.get('h0').popen(' '.join(sender_command), stdout=sender_log, stderr=sender_log, close_fds=True, shell=True)
print 'Launched test applications'
sleep(15)
print 'Terminating test applications'
sender_log.flush()
receiver_log.flush()
sender_proc.terminate()
sender_proc.wait()
sender_proc = None
receiver_proc.terminate()
receiver_proc.wait()
receiver_proc = None
print 'Terminating controller'
pox_process.send_signal(signal.SIGINT)
sleep(3)
print 'Waiting for controller termination...'
pox_process.send_signal(signal.SIGKILL)
pox_process.wait()
print 'Controller terminated'
pox_process = None
net.stop()
# write_final_stats_log(log_file_name, flow_log_path, event_log_path, membership_mean, membership_std_dev, membership_avg_bound, test_groups, test_group_launch_times, topo)
topos = { 'mcast_test': ( lambda: FlowTrackerTestTopo() ) }
if __name__ == '__main__':
setLogLevel( 'info' )
# Interactive mode with barebones topology
print 'Launching default multicast test topology'
topo = FlowTrackerTestTopo()
hosts = topo.get_host_list()
flowtrackerTest(topo, hosts, False)
# Make extra sure the network terminated cleanly
call(['python', 'kill_running_test.py'])
|
{
"content_hash": "4ac3841f66c2b843225f6e63a23bdf4b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 176,
"avg_line_length": 37.093525179856115,
"alnum_prop": 0.6274243599689682,
"repo_name": "alexcraig/GroupFlow",
"id": "1ebfa46c9d5d54074a8f67c915a8b28c9de17735",
"size": "5178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupflow_scripts/bw_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Makefile",
"bytes": "5585"
},
{
"name": "Python",
"bytes": "704943"
},
{
"name": "Shell",
"bytes": "5111"
}
],
"symlink_target": ""
}
|
from heat.engine import clients
from heat.openstack.common import log as logging
from heat.engine import properties
from heat.engine.resources.neutron import neutron
from heat.engine.resources.neutron import router
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
logger = logging.getLogger(__name__)
class FloatingIP(neutron.NeutronResource):
PROPERTIES = (
FLOATING_NETWORK_ID, VALUE_SPECS, PORT_ID, FIXED_IP_ADDRESS,
) = (
'floating_network_id', 'value_specs', 'port_id', 'fixed_ip_address',
)
properties_schema = {
FLOATING_NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('ID of network to allocate floating IP from.'),
required=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the "floatingip" object in the '
'creation request. Parameters are often specific to installed '
'hardware or extensions.'),
default={}
),
PORT_ID: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port with at least one IP address to '
'associate with this floating IP.')
),
FIXED_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address to use if the port has multiple addresses.')
),
}
attributes_schema = {
'router_id': _('ID of the router used as gateway, set when associated '
'with a port.'),
'tenant_id': _('The tenant owning this floating IP.'),
'floating_network_id': _('ID of the network in which this IP is '
'allocated.'),
'fixed_ip_address': _('IP address of the associated port, if '
'specified.'),
'floating_ip_address': _('The allocated address of this IP.'),
'port_id': _('ID of the port associated with this IP.'),
'show': _('All attributes.')
}
def add_dependencies(self, deps):
super(FloatingIP, self).add_dependencies(deps)
# depend on any RouterGateway in this template with the same
# network_id as this floating_network_id
for resource in self.stack.itervalues():
if (resource.has_interface('OS::Neutron::RouterGateway') and
resource.properties.get(router.RouterGateway.NETWORK_ID) ==
self.properties.get(self.FLOATING_NETWORK_ID)):
deps += (self, resource)
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
fip = self.neutron().create_floatingip({
'floatingip': props})['floatingip']
self.resource_id_set(fip['id'])
def _show_resource(self):
return self.neutron().show_floatingip(self.resource_id)['floatingip']
def handle_delete(self):
client = self.neutron()
try:
client.delete_floatingip(self.resource_id)
except NeutronClientException as ex:
self._handle_not_found_exception(ex)
class FloatingIPAssociation(neutron.NeutronResource):
PROPERTIES = (
FLOATINGIP_ID, PORT_ID, FIXED_IP_ADDRESS,
) = (
'floatingip_id', 'port_id', 'fixed_ip_address',
)
properties_schema = {
FLOATINGIP_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the floating IP to associate.'),
required=True
),
PORT_ID: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port with at least one IP address to '
'associate with this floating IP.')
),
FIXED_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address to use if the port has multiple addresses.')
),
}
def handle_create(self):
props = self.prepare_properties(self.properties, self.name)
floatingip_id = props.pop(self.FLOATINGIP_ID)
self.neutron().update_floatingip(floatingip_id, {
'floatingip': props})['floatingip']
self.resource_id_set('%s:%s' % (floatingip_id, props[self.PORT_ID]))
def handle_delete(self):
if not self.resource_id:
return
client = self.neutron()
(floatingip_id, port_id) = self.resource_id.split(':')
try:
client.update_floatingip(
floatingip_id,
{'floatingip': {'port_id': None}})
except NeutronClientException as ex:
self._handle_not_found_exception(ex)
def resource_mapping():
if clients.neutronclient is None:
return {}
return {
'OS::Neutron::FloatingIP': FloatingIP,
'OS::Neutron::FloatingIPAssociation': FloatingIPAssociation,
}
|
{
"content_hash": "16331a13f73cb92586c1e13f1d52a5a1",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 35.8705035971223,
"alnum_prop": 0.594263939029282,
"repo_name": "ntt-sic/heat",
"id": "b86e3c47b8ca716e658ea704eed4d21b5f073907",
"size": "5606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/neutron/floatingip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3336181"
},
{
"name": "Shell",
"bytes": "22168"
}
],
"symlink_target": ""
}
|
"""
Code for generating an ensemble submission for the SeeClickFix contest hosted on Kaggle. It loads base
submission files generated by our team's (Bryan Gregory and Miroslaw Horbal) individual models, then combines them
using segment-based averaging.
Ensemble weights for each segment of data are stored in SETTINGS.json along with the filepaths for the base input
submission files. Note that base submission 0 corresponds to Bryan's model and base submission 1 is Miroslaw's model.
This relies on already generated submission files from the base models, so base models must be run prior to performing
the ensemble.
Requires: pandas > 0.13
          numpy
"""
__author__ = ['Bryan Gregory','Miroslaw Horbal']
__email__ = ['bryan.gregory1@gmail.com','miroslaw@gmail.com']
__date__ = '01-04-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
#External modules
import sys
import pandas as pd
import numpy as np
from datetime import datetime
def main():
#---Load environment settings from SETTINGS.json in root directory and build filepaths for all base submissions---#
settings = utils.load_settings('SETTINGS.json')
base_filepaths = (settings['file_bryan_submission'],
settings['file_miroslaw_submission'])
segment_weights = settings['ensemble_segment_weights']
segments = segment_weights.keys()
targets = segment_weights[segments[0]].keys()
#---Output the segment weights to be used for ensemble averaging of base submissions---#
log.info('==========ENSEMBLE WEIGHTS (B,M)============')
for segment in segment_weights:
log.info(segment.upper()+':')
for target in segment_weights[segment]:
log.info(' '+target.upper()+' -- ['+segment_weights[segment][target]['0']+','+
segment_weights[segment][target]['1']+']')
#---Load each base submission to a list of dataframes---#
base_subs = []
for file in base_filepaths:
try:
base_subs.append(pd.read_csv(file).set_index(['id'], drop=False).sort())
log.info('Base submission successfully loaded: %s.' % file)
except IOError:
log.info('Base submission file does not exist: %s. Run base model to generate, or update filepath.' %file)
sys.exit('---Exiting---')
utils.line_break()
#---Load id's labeled with segments to a dataframe used for segment based averaging---#
file = settings['file_segment_ids']
try:
segment_ids = pd.read_csv(file)
log.info('Segment IDs successfully loaded from: %s.' % file)
except IOError:
log.info('Segment IDs file does not exist: %s. Update filepath in SETTINGS.json.' % file)
utils.line_break()
#---Transform base predictions to log space prior to averaging, if selected in settings---#
if settings['avg_log_space'] == 'y':
log.info('Transforming base predictions to log space prior to averaging.')
for i in range(len(base_subs)):
for target in targets:
base_subs[i][target] = np.log(base_subs[i][target]+1)
utils.line_break()
#---Apply segment based weights to each base submission then combine them to create ensemble submission---#
log.info('Applying segment weights to base submissions then combining to create ensemble.')
for i in range(len(base_subs)):
#Merge the segment labels from the segment id's file with the base submission dataframe
base_subs[i] = base_subs[i].merge(segment_ids,on='id',how='inner')
for segment in segments:
for target in targets:
base_subs[i][target][base_subs[i]['Segment'] == segment] \
*= float(segment_weights[segment][target][str(i)])
del base_subs[i]['Segment']
ensemble_sub = base_subs[0].ix[:]
for i in range(len(base_subs)-1):
for target in targets:
ensemble_sub[target] += base_subs[i+1][target]
utils.line_break()
#---Transform ensemble predictions back to normal, if use log space averaging was selected in settings---#
if settings['avg_log_space'] == 'y':
log.info('Transforming ensemble predictions back to normal from log space.')
for target in targets:
ensemble_sub[target] = np.exp(ensemble_sub[target])-1
utils.line_break()
#---Apply any final target scalars to ensemble predictions---#
for target in targets:
ensemble_sub[target] *= float(settings['target_scalars'][target])
#---Output ensemble submission to directory set in SETTINGS.json, appending creation date and time---#
timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
filename = settings['dir_ensemble_submissions']+'ensemble_predictions_'+timestamp+'.csv'
ensemble_sub.to_csv(filename, index=False)
log.info('Ensemble submission saved: %s' % filename)
utils.line_break()
#End main
log.info('Program executed successfully without error! Exiting.')
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "8756884aed85ed6a1e0921b07ab72096",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 119,
"avg_line_length": 45.771929824561404,
"alnum_prop": 0.6487159831353009,
"repo_name": "theusual/kaggle-seeclickfix-ensemble",
"id": "7dfc412b6202ea5b8604de6e69690a980846794c",
"size": "5218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5835"
}
],
"symlink_target": ""
}
|
import tfmpl.plots.confusion_matrix
|
{
"content_hash": "817ccb3d65cab10559f43ccb2178b406",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 35,
"alnum_prop": 0.8857142857142857,
"repo_name": "cheind/tf-matplotlib",
"id": "41a07b36d7d4fed1de9e6b810d0e192385848aec",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfmpl/plots/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26160"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
with open('README.txt') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='Implot',
version='1.0.0',
description='Implot Driver',
long_description=readme,
author='Imperial Creativity Laboratories',
author_email='hilalyamine@gmail.com',
url='',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
|
{
"content_hash": "424247288541737fe5b3e009906bde45",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 53,
"avg_line_length": 21.8,
"alnum_prop": 0.6536697247706422,
"repo_name": "aelbouchti/Implot",
"id": "c10e769753902a54780fe1cb7cdef492dbd0ef96",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8745"
}
],
"symlink_target": ""
}
|
"""
For creating healpix maps from catalogs.
"""
import os
from os.path import join
import shutil
import tempfile
import subprocess
from collections import Counter
from collections import OrderedDict as odict
import fitsio
import numpy as np
import numpy.lib.recfunctions as recfuncs
import healpy as hp
from scipy.interpolate import interp1d
from scipy.optimize import brentq
import ugali.utils.skymap
import ugali.utils.binning
from ugali.utils.projector import cel2gal, gal2cel
from ugali.utils.healpix import ang2pix, pix2ang, superpixel, read_map
from ugali.utils.shell import mkdir
from ugali.utils.logger import logger
from ugali.utils.config import Config
from ugali.utils.constants import MAGLIMS
class Maglims(object):
""" Object for deriving magnitude limits from the catalog """
def __init__(self, config):
self.config = Config(config)
self._setup()
def _setup(self):
self.nside_catalog = self.config['coords']['nside_catalog']
self.nside_mask = self.config['coords']['nside_mask']
self.nside_pixel = self.config['coords']['nside_pixel']
self.filenames = self.config.getFilenames()
self.footfile = self.config['data']['footprint']
try:
self.footprint = fitsio.read(self.footfile)['I'].ravel()
except:
logger.warn("Couldn't open %s; will pass through."%self.footfile)
self.footprint = self.footfile
def run(self,field=None,simple=False,force=False):
"""
Loop through pixels containing catalog objects and calculate
the magnitude limit. This gets a bit convoluted due to all
the different pixel resolutions...
"""
if field is None: fields = [1,2]
else: fields = [field]
for filenames in self.filenames.compress(~self.filenames.mask['catalog']).data:
infile = filenames['catalog']
for f in fields:
outfile = filenames['mask_%i'%f]
if os.path.exists(outfile) and not force:
logger.info("Found %s; skipping..."%outfile)
continue
pixels,maglims=self.calculate(infile,f,simple)
logger.info("Creating %s"%outfile)
outdir = mkdir(os.path.dirname(outfile))
data = odict()
data['PIXEL']=pixels
data['MAGLIM']=maglims.astype('f4')
ugali.utils.healpix.write_partial_map(outfile,data,
self.nside_pixel)
def calculate(self, infile, field=1, simple=False):
logger.info("Calculating magnitude limit from %s"%infile)
#manglefile = self.config['mangle']['infile_%i'%field]
#footfile = self.config['data']['footprint']
#try:
# footprint = fitsio.read(footfile)['I'].ravel()
#except:
# logger.warn("Couldn't open %s; will try again."%footfile)
# footprint = footfile
mag_column = self.config['catalog']['mag_%i_field'%field]
magerr_column = self.config['catalog']['mag_err_%i_field'%field]
# For simple maglims
release = self.config['data']['release'].lower()
band = self.config['catalog']['mag_%i_band'%field]
pixel_pix_name = 'PIX%i'%self.nside_pixel
# If the data already has a healpix pixel assignment then use it
# Otherwise recalculate...
try:
data = fitsio.read(infile,columns=[pixel_pix_name])
except ValueError as e:
logger.info(str(e))
columns=[self.config['catalog']['lon_field'],
self.config['catalog']['lat_field']]
data = fitsio.read(infile,columns=columns)[columns]
pix = ang2pix(self.nside_pixel,data[columns[0]],data[columns[1]])
data = recfuncs.rec_append_fields(data,pixel_pix_name,pix)
#mask_pixels = np.arange( hp.nside2npix(self.nside_mask), dtype='int')
mask_maglims = np.zeros(hp.nside2npix(self.nside_mask))
out_pixels = np.zeros(0,dtype='int')
out_maglims = np.zeros(0)
# Find the objects in each pixel
pixel_pix = data[pixel_pix_name]
mask_pix = ugali.utils.skymap.superpixel(pixel_pix,self.nside_pixel,self.nside_mask)
count = Counter(mask_pix)
pixels = sorted(count.keys())
pix_digi = np.digitize(mask_pix,pixels).argsort()
idx = 0
min_num = 500
signal_to_noise = 10.
magerr_lim = 1/signal_to_noise
for pix in pixels:
# Calculate the magnitude limit in each pixel
num = count[pix]
objs = data[pix_digi[idx:idx+num]]
idx += num
if simple:
# Set constant magnitude limits
logger.debug("Simple magnitude limit for %s"%infile)
mask_maglims[pix] = MAGLIMS[release][band]
elif num < min_num:
logger.info('Found <%i objects in pixel %i'%(min_num,pix))
mask_maglims[pix] = 0
else:
mag = objs[mag_column]
magerr = objs[magerr_column]
# Estimate the magnitude limit as suggested by:
# https://deswiki.cosmology.illinois.edu/confluence/display/DO/SVA1+Release+Document
# (https://desweb.cosmology.illinois.edu/confluence/display/Operations/SVA1+Doc)
maglim = np.median(mag[(magerr>0.9*magerr_lim)&(magerr<1.1*magerr_lim)])
# Alternative method to estimate the magnitude limit by fitting median
#mag_min, mag_max = mag.min(),mag.max()
#mag_bins = np.arange(mag_min,mag_max,0.1) #0.1086?
#x,y = ugali.utils.binning.binnedMedian(mag,magerr,mag_bins)
#x,y = x[~np.isnan(y)],y[~np.isnan(y)]
#magerr_med = interp1d(x,y)
#mag0 = np.median(x)
#maglim = brentq(lambda a: magerr_med(a)-magerr_lim,x.min(),x.max(),disp=False)
# Median from just objects near magerr cut
mask_maglims[pix] = maglim
logger.debug("%i (n=%i): maglim=%g"%(pix,num,mask_maglims[pix]))
subpix = ugali.utils.skymap.subpixel(pix, self.nside_mask, self.nside_pixel)
maglims = np.zeros(len(subpix)) + mask_maglims[pix]
out_pixels = np.append(out_pixels,subpix)
out_maglims = np.append(out_maglims,maglims)
# Remove empty pixels
logger.info("Removing empty pixels")
idx = np.nonzero(out_maglims > 0)[0]
out_pixels = out_pixels[idx]
out_maglims = out_maglims[idx]
# Remove pixels outside the footprint
if self.footfile:
logger.info("Checking footprint against %s"%self.footfile)
lon,lat = pix2ang(self.nside_pixel,out_pixels)
if self.config['coords']['coordsys'] == 'gal':
ra,dec = gal2cel(lon,lat)
else:
ra,dec = lon,lat
footprint = inFootprint(self.footprint,ra,dec)
idx = np.nonzero(footprint)[0]
out_pixels = out_pixels[idx]
out_maglims = out_maglims[idx]
logger.info("MAGLIM = %.3f +/- %.3f"%(np.mean(out_maglims),np.std(out_maglims)))
return out_pixels,out_maglims
def inFootprint(footprint,ra,dec):
"""
Check if a set of ra,dec combinations is in the footprint.
Careful, input files must be in celestial coordinates.
filename : Either healpix map or mangle polygon file
ra,dec : Celestial coordinates
Returns:
inside : boolean array of coordinates in footprint
"""
if footprint is None:
return np.ones(len(ra),dtype=bool)
try:
if isinstance(footprint,str) and os.path.exists(footprint):
filename = footprint
#footprint = hp.read_map(filename,verbose=False)
#footprint = fitsio.read(filename)['I'].ravel()
footprint = read_map(filename)
nside = hp.npix2nside(len(footprint))
pix = ang2pix(nside,ra,dec)
inside = (footprint[pix] > 0)
except IOError:
logger.warning("Failed to load healpix footprint; trying to use mangle...")
inside = inMangle(filename,ra,dec)
return inside
def inMangle(polyfile,ra,dec):
coords = tempfile.NamedTemporaryFile(suffix='.txt',delete=False)
logger.debug("Writing coordinates to %s"%coords.name)
np.savetxt(coords, np.array( [ra,dec] ).T, fmt='%.6g' )
coords.close()
weights = tempfile.NamedTemporaryFile(suffix='.txt',delete=False)
cmd = "polyid -W %s %s %s"%(polyfile,coords.name,weights.name)
logger.debug(cmd)
subprocess.call(cmd,shell=True)
tmp = tempfile.NamedTemporaryFile(suffix='.txt',delete=False)
cmd = """awk '{if($3==""){$3=0} print $1, $2, $3}' %s > %s"""%(weights.name,tmp.name)
logger.debug(cmd)
subprocess.call(cmd,shell=True)
data = np.loadtxt(tmp.name,unpack=True,skiprows=1)[-1]
for f in [coords,weights,tmp]:
logger.debug("Removing %s"%f.name)
os.remove(f.name)
return data > 0
#def simple_maglims(config,dirname='simple',force=False):
# """
# Create simple, uniform magnitude limits based on nominal
# survey depth.
# """
# DeprecationWarning("'simple_maglims' is deprecated")
# filenames = config.getFilenames()
# release = config['data']['release'].lower()
# #band_1 = config['isochrone']['mag_1_field']
# #band_2 = config['isochrone']['mag_2_field']
# band_1 = config['catalog']['mag_1_field']
# band_2 = config['catalog']['mag_2_field']
# mask_1 = filenames['mask_1'].compressed()
# mask_2 = filenames['mask_2'].compressed()
# basedir,basename = os.path.split(config['mask']['dirname'])
# if basename == dirname:
# raise Exception("Input and output directory are the same.")
# outdir = mkdir(os.path.join(basedir,dirname))
#
# for band, infiles in [(band_1,mask_1),(band_2,mask_2)]:
# maglim = MAGLIMS[release][band]
# for infile in infiles:
# basename = os.path.basename(infile)
# outfile = join(outdir,basename)
# logger.debug('Reading %s...'%infile)
# f = pyfits.open(infile)
# f[1].data['MAGLIM'][:] = maglim
# logger.debug('Writing %s...'%outfile)
# f.writeto(outfile,clobber=True)
def split(config,dirname='split',force=False):
""" Take a pre-existing maglim map and divide it into
chunks consistent with the catalog pixels. """
config = Config(config)
filenames = config.getFilenames()
#healpix = filenames['pix'].compressed()
# Check that things are ok
basedir,basename = os.path.split(config['mask']['dirname'])
#if basename == dirname:
# msg = "Input and output directory are the same."
# raise Exception(msg)
outdir = mkdir(os.path.join(basedir,dirname))
nside_catalog = config['coords']['nside_catalog']
nside_pixel = config['coords']['nside_pixel']
release = config['data']['release'].lower()
band1 = config['catalog']['mag_1_band']
band2 = config['catalog']['mag_2_band']
# Read the magnitude limits
maglimdir = config['maglim']['dirname']
maglimfile_1 = join(maglimdir,config['maglim']['filename_1'])
logger.info("Reading %s..."%maglimfile_1)
maglim1 = read_map(maglimfile_1)
maglimfile_2 = join(maglimdir,config['maglim']['filename_2'])
logger.info("Reading %s..."%maglimfile_2)
maglim2 = read_map(maglimfile_2)
# Read the footprint
footfile = config['data']['footprint']
logger.info("Reading %s..."%footfile)
footprint = read_map(footfile)
# Output mask names
mask1 = os.path.basename(config['mask']['basename_1'])
mask2 = os.path.basename(config['mask']['basename_2'])
for band,maglim,base in [(band1,maglim1,mask1),(band2,maglim2,mask2)]:
nside_maglim = hp.npix2nside(len(maglim))
if nside_maglim != nside_pixel:
msg = "Mask nside different from pixel nside"
logger.warning(msg)
#raise Exception(msg)
pixels = np.nonzero(maglim>0)[0]
superpix = superpixel(pixels,nside_maglim,nside_catalog)
healpix = np.unique(superpix)
for hpx in healpix:
outfile = join(outdir,base)%hpx
if os.path.exists(outfile) and not force:
logger.warning("Found %s; skipping..."%outfile)
continue
pix = pixels[superpix == hpx]
print(hpx, len(pix))
logger.info('Writing %s...'%outfile)
data = odict()
data['PIXEL']=pix
data['MAGLIM']=maglim[pix].astype('f4')
data['FRACDET']=footprint[pix].astype('f4')
ugali.utils.healpix.write_partial_map(outfile,data,nside_pixel)
if __name__ == "__main__":
from optparse import OptionParser
usage = "Usage: %prog [options] input"
description = "python script"
parser = OptionParser(usage=usage,description=description)
(opts, args) = parser.parse_args()
|
{
"content_hash": "982800764ef205b091055c9e811b1d1e",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 100,
"avg_line_length": 39.642011834319526,
"alnum_prop": 0.5916113142771848,
"repo_name": "kadrlica/ugali",
"id": "0b1b2454b1bcfdd70e7a03d9cf2133a8e9c4a076",
"size": "13421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ugali/preprocess/maglims.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "355304"
},
{
"name": "Python",
"bytes": "952323"
}
],
"symlink_target": ""
}
|
__author__ = 'sulantha'
from Utils.DbUtils import DbUtils
import Config.PipelineConfig as pc
from Pipelines.ADNI_T1.ADNI_T1_Helper import ADNI_T1_Helper
from Utils.PipelineLogger import PipelineLogger
import distutils.dir_util
import distutils.file_util
import shutil
import subprocess
from Manager.QSubJob import QSubJob
from Manager.QSubJobHanlder import QSubJobHandler
import socket,os
import ast
from Pipelines.Helpers.PETHelper import PETHelper
class ProcessingItemObj:
def __init__(self, processingItem):
self.processing_rid = processingItem[0]
self.study = processingItem[1]
self.subject_rid = processingItem[2]
self.modality = processingItem[3]
self.scan_date = processingItem[4].strftime("%Y-%m-%d")
self.scan_time = str(processingItem[5])
self.s_identifier = processingItem[6]
self.i_identifier = processingItem[7]
self.root_folder = processingItem[8]
self.converted_folder = processingItem[9]
self.version = processingItem[10]
self.table_id = processingItem[17]
self.parameters = processingItem[19]
self.manual_xfm = processingItem[20]
self.qc = processingItem[21]
class ADNI_V2_AV1451:
def __init__(self):
self.DBClient = DbUtils()
self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
self.PETHelper = PETHelper()
def process(self, processingItem):
processingItemObj = ProcessingItemObj(processingItem)
matching_t1 = ADNI_T1_Helper().getMatchingT1(processingItemObj)
if not matching_t1:
PipelineLogger.log('root', 'error', 'PET cannot be processed; no matching T1 found. - {0} - {1} - {2}.'.format(processingItemObj.subject_rid, processingItemObj.modality, processingItemObj.scan_date))
return 0
processed = ADNI_T1_Helper().checkProcessed(matching_t1)
if not processed:
PipelineLogger.log('root', 'error', 'PET cannot be processed due to matching T1 not being processed - {0}'.format(matching_t1))
return 0
else:
PipelineLogger.log('root', 'INFO', '+++++++++ PET ready to be processed. Will check for initial xfm. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
if processingItemObj.manual_xfm == 'Req_man_reg':
coregDone = self.PETHelper.checkIfAlreadyDone(processingItemObj, matching_t1)
if coregDone:
manualXFM = coregDone
setPPTableSQL = "UPDATE {0}_{1}_Pipeline SET MANUAL_XFM = '{2}' WHERE RECORD_ID = {3}".format(processingItemObj.study, processingItemObj.modality, manualXFM, processingItemObj.table_id)
self.DBClient.executeNoResult(setPPTableSQL)
processingItemObj.manual_xfm = manualXFM
self.processPET(processingItemObj, processed)
else:
self.PETHelper.requestCoreg(processingItemObj, matching_t1)
PipelineLogger.log('root', 'INFO', 'Manual XFM was not found. A request to create one may have been added. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
return 0
else:
self.processPET(processingItemObj, processed)
def getScanType(self, processingItemObj):
r = self.DBClient.executeAllResults("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
"AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
"AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
processingItemObj.subject_rid,
processingItemObj.scan_date,
processingItemObj.s_identifier,
processingItemObj.i_identifier))
return r[0][0]
def processPET(self, processingItemObj, matchT1Path):
petFileName = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(processingItemObj.converted_folder, processingItemObj.study,
processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
processingItemObj.s_identifier, processingItemObj.i_identifier,
self.getScanType(processingItemObj))
processedFolder = '{0}/processed'.format(processingItemObj.root_folder)
logDir = '{0}/logs'.format(processingItemObj.root_folder)
PipelineLogger.log('manager', 'info', 'PET processing starting for {0}'.format(petFileName))
try:
distutils.dir_util.mkpath(logDir)
except Exception as e:
PipelineLogger.log('manager', 'error', 'Error in creating log folder \n {0}'.format(e))
return 0
id = '{0}{1}{2}{3}'.format(processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''), processingItemObj.s_identifier, processingItemObj.i_identifier)
paramStrd = ast.literal_eval(processingItemObj.parameters)
paramStrt = ' '.join(['[\"{0}\"]=\"{1}\"'.format(k, v) for k,v in paramStrd.items()])
paramStr = '({0})'.format(paramStrt)
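# paramStr is a bash associative-array literal; e.g. an (illustrative)
# parameters dict {'fwhm': '8'} becomes '(["fwhm"]="8")', which is passed
# to the processing script below.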
petCMD = "source /opt/minc-1.9.15/minc-toolkit-config.sh; Pipelines/ADNI_AV1451/ADNI_V2_AV1451_Process {0} {1} {2} {3} {4} {5} '{6}' {7} {8}".format(id, petFileName, processedFolder, matchT1Path, 'auto' if processingItemObj.manual_xfm == '' else processingItemObj.manual_xfm, logDir, paramStr,socket.gethostname(), 50500)
try:
processedFolder_del = '{0}/processed_del'.format(processingItemObj.root_folder)
os.rename(processedFolder, processedFolder_del)
shutil.rmtree(processedFolder_del)
except Exception as e:
PipelineLogger.log('manager', 'error', 'Error in deleting old processing folder. \n {0}'.format(e))
try:
distutils.dir_util.mkpath(processedFolder)
except Exception as e:
PipelineLogger.log('manager', 'error', 'Error in creating processing folder. \n {0}'.format(e))
return 0
PipelineLogger.log('manager', 'debug', 'Command : {0}'.format(petCMD))
p = subprocess.Popen(petCMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
out, err = p.communicate()
PipelineLogger.log('manager', 'debug', 'Process Log Output : \n{0}'.format(out))
PipelineLogger.log('manager', 'debug', 'Process Log Err : \n{0}'.format(err))
QSubJobHandler.submittedJobs[id] = QSubJob(id, '23:00:00', processingItemObj, 'av1451')
return 1
|
{
"content_hash": "e25f34eb5a73ed457933983a09334494",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 329,
"avg_line_length": 58.21848739495798,
"alnum_prop": 0.6134526558891455,
"repo_name": "sulantha2006/Processing_Pipeline",
"id": "e2b3a599c2543242b038d85a840f2d788ca4a534",
"size": "6928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pipelines/ADNI_AV1451/ADNI_V2_AV1451.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "M",
"bytes": "713"
},
{
"name": "MATLAB",
"bytes": "13833"
},
{
"name": "Perl",
"bytes": "141400"
},
{
"name": "Python",
"bytes": "365424"
},
{
"name": "Shell",
"bytes": "201923"
}
],
"symlink_target": ""
}
|
from base import BaseHandler
from models import *
class RatePostHandler(BaseHandler):
"""Blog handler that is used to render blog posts and
to add blog comments"""
def get(self, direction, post_id):
have_error = False
self.post = Post.by_id(int(post_id))
if not self.user:
self.redirect('/login')
return
if not self.post:
self.redirect('/')
return
if self.user_owns_post(self.post):
error = "error_owner"
have_error = True
elif Rate.get_rate_by_user(self.user, self.post) > 0:
error = "already_rated"
have_error = True
if have_error:
# self.redirect('/' + post_id)
self.redirect('/%s?error=%s' % (post_id, error))
return
r = Rate.register(self.user.key,
self.post.key,
direction)
r.put()
self.redirect('/' + post_id)
|
{
"content_hash": "d0b4939ff82dfbe631ced8903df51f41",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 61,
"avg_line_length": 26.36842105263158,
"alnum_prop": 0.5129740518962076,
"repo_name": "kevink1986/my-first-blog",
"id": "8eef1d18881cb39dd550e8d679c5ab8396af2a02",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/rate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8173"
},
{
"name": "HTML",
"bytes": "8653"
},
{
"name": "JavaScript",
"bytes": "6829"
},
{
"name": "Python",
"bytes": "956255"
}
],
"symlink_target": ""
}
|
"""Classes and functions for accessing Helsinki area traffic data for more info
see http://developer.reittiopas.fi/pages/en/http-get-interface.php"""
# -*- coding: utf-8 -*-
from collections import defaultdict
from datetime import datetime
from pytz import timezone
from requests import get, exceptions
class HslUrls(object):
"""Helper class for building up HSL urls"""
def __init__(self, user, password):
"""
Initializer
:param user: username registered for HSL API
:param password: associated password
"""
self.user = user
self.password = password
self.baseurl = "http://api.reittiopas.fi/hsl/prod/?request="
def nearby_stops(self, longitude, latitude):
"""Get an URL to request nearby bus stops
:param longitude: longitude of the location
:param latitude: latitude of the location
:return: URL
"""
url = "%sstops_area&epsg_in=4326&" \
"center_coordinate=%s,%s&user=%s&pass=%s" % \
(self.baseurl, latitude, longitude, self.user, self.password)
return url
def stop_info(self, stop_code):
"""
Get a URL to fetch information about a bus stop
:param stop_code: code of the stop
:return: URL
"""
url = "%sstop&epsg_out=4326&code=%s&user=%s&pass=%s" % (
self.baseurl, stop_code, self.user, self.password)
return url
def multiple_stop_info(self, stop_codes):
"""
Get a URL to fetch information about multiple bus stops
:param stop_codes: codes of the stops
:return: URL
"""
code_str = "&code=".join([str(x) for x in stop_codes])
url = "%sstop&epsg_out=4326&code=%s&user=%s&pass=%s" % (
self.baseurl, code_str, self.user, self.password)
return url
def lines_info(self, lines):
"""
Builds up URL to query line info
:param lines: array of stop codes
:return: URL
"""
lines_str = "|".join(lines)
url = "%slines&epsg_out=4326&query=%s&user=%s&pass=%s" % (
self.baseurl, lines_str, self.user, self.password)
return url
def geocode_address(self, query):
"""
Builds up URL to search for stops around a search term
(the API recognizes places, addresses, etc.);
api.reittiopas.fi/hsl/prod/?request=geocode&key=
:param query: search term
"""
url = "%sgeocode&key=%s&user=%s&pass=%s" % (self.baseurl,
query, self.user,
self.password)
return url
def hsl_time_to_time(hsltime):
"""
Converts an HSL API timestamp to hh.mm format
:param hsltime: HSL API timestamp (hour may be bigger than 24!)
:return: timestamp in hh.mm format
"""
return "%02d.%02d" % (hsltime / 100 % 24, hsltime % 100)
# 1 = Helsinki internal bus lines
# 2 = trams
# 3 = Espoo internal bus lines
# 4 = Vantaa internal bus lines
# 5 = regional bus lines
# 6 = metro
# 7 = ferry
# 8 = U-lines
# 12 = commuter trains
# 21 = Helsinki service lines
# 22 = Helsinki night buses
# 23 = Espoo service lines
# 24 = Vantaa service lines
# 25 = region night buses
# 36 = Kirkkonummi internal bus lines
# 39 = Kerava internal bus lines
def vehicle_emoji(veh):
"""Maps a vehicle type id to an emoji
:param veh: vehicle type id
:return: vehicle type emoji
"""
if veh == 2:
return u"\U0001F68B"
elif veh == 6:
return u"\U0001f687"
elif veh == 7:
return u"\U000026F4"
elif veh == 12:
return u"\U0001F686"
else:
return u"\U0001F68C"
# 1 = Helsinki internal bus lines
# 2 = trams
# 3 = Espoo internal bus lines
# 4 = Vantaa internal bus lines
# 5 = regional bus lines
# 6 = metro
# 7 = ferry
# 8 = U-lines
# 12 = commuter trains
# 21 = Helsinki service lines
# 22 = Helsinki night buses
# 23 = Espoo service lines
# 24 = Vantaa service lines
# 25 = region night buses
# 36 = Kirkkonummi internal bus lines
# 39 = Kerava internal bus lines
def vehicle_map(veh):
"""Maps a vehicle type id to a name
:param veh: vehicle type id
:return: vehicle type name
"""
if veh == 2:
return "Tram"
elif veh == 6:
return "Metro"
elif veh == 7:
return "Ferry"
elif veh == 12:
return "Train"
else:
return "Bus"
def relative_minutes(stoptime, comparison_time=None):
"""
Change timestamp of HSL API to relative time
:param stoptime: API timestamp for a vehicle passing a stop
(note that hour can be more than 24!)
:param comparison_time: datetime to compare the departure time to
if None use current timestamp
:return: human-readable relative departure time, e.g. "in 5 minutes"
"""
if comparison_time:
usertime = comparison_time
else:
usertime = datetime.now(tz=timezone("Europe/Helsinki"))
sth = stoptime / 100
if sth >= 24 and usertime.hour < 12:
nowagg = (usertime.hour + 24) * 60 + usertime.minute
else:
nowagg = usertime.hour * 60 + usertime.minute
stm = stoptime % 100
stoptimeagg = sth * 60 + stm
delta = stoptimeagg - nowagg
if delta == 0:
return "Right now"
else:
return "in %d minutes" % (delta)
class HslRequests(object):
"""Class for making requests to HSL API"""
def __init__(self, user, password):
"""
Initializer
:param user: HSL API username
:param password: HSL API password
"""
self.urls = HslUrls(user, password)
self.last_error = None
def stop_summary(self, stop_code, buses=3):
"""
Provides a summary of bus stop information, including departures
:param stop_code: HSL API stop code
:return: String containing bus and departures info
"""
(stop_info, line_data) = self._stop_info_lines_info(stop_code)
stop = stop_info[0]
if line_data:
lines = dict(
[(x["code"], "%s %s" % (x["code_short"], x["line_end"])) for x
in line_data])
else:
return ("Helsinki area has no such stop.",
"Helsinki area has no such stop.")
stop_line = stop["code_short"] + " " + stop["name_fi"] + " " \
+ stop["address_fi"]
if stop["departures"]:
departure_line = "\n".join(
["%s %s" % (hsl_time_to_time(x["time"]), lines[x["code"]]) for x
in stop["departures"][:buses]])
else:
departure_line = ""
return "\n".join([stop_line, departure_line])
def relative_time(self, stop_code, buses=3, end_line=""):
"""
Provides a summary of bus stop information, including departures;
used by the Alexa skill and the Telegram bot
:param buses: how many buses(or trams, ferries) to return
:param stop_code: HSL API stop code
:return: String containing bus and departures info
"""
(stop_info, linfo) = self._stop_info_lines_info(stop_code)
sinfo = stop_info[0]
if linfo:
lines = dict([(x["code"], "%s %s" % (
vehicle_map(x["transport_type_id"]), x["code_short"])) for x in
linfo])
summary_lines = dict(
[(x["code"], "%s %s %s" % (
x["code_short"], vehicle_emoji(x["transport_type_id"]),
x["line_end"])) for x
in linfo])
else:
return ("Helsinki area has no such stop.",
"Helsinki area has no such stop.",
None)
actual_code = sinfo["code_short"]
stop_line = u"For stop {0:s}".format(actual_code)
card_stop_line = sinfo["name_fi"] + " " \
+ sinfo["address_fi"]
all_departures = sorted(
reduce(lambda x, y: x + y, [x["departures"] for x in stop_info]),
key=lambda x: x['time'])
if all_departures and len(all_departures) > 0:
departure_line = (
["%s %s" % (lines[x["code"]], relative_minutes(x["time"])) for x
in
all_departures[:buses]])
summary_line = "\n".join(
["%s %s" % (
hsl_time_to_time(x["time"]), summary_lines[x["code"]]) for x
in
all_departures[:buses]])
else:
departure_line = ["No departures within next 60 minutes"]
summary_line = "No departures within next 60 minutes"
if len(departure_line) == 1:
speech = "%s: %s" % (stop_line, departure_line[0])
elif len(departure_line) == 2:
speech = "%s: Next departures are %s and %s" % (
stop_line, departure_line[0], departure_line[1])
elif len(departure_line) == 3:
speech = "%s: Next departures are %s, %s, and %s" % (
stop_line, departure_line[0], departure_line[1],
departure_line[2])
else:
speech = "%s: Next departures are %s" % (
stop_line, ",".join(departure_line))
card = "\n".join([card_stop_line + end_line, summary_line])
return (speech, card, actual_code)
def _stop_info_lines_info(self, stop_code):
"""
Helper function
:param stop_code: HSL API bus stop code
"""
try:
stop_info = self._stop_info_json(stop_code)
except:
stop_info = "Error"
if stop_info == "Error":
return "Error", None
lines_info = self._lines_info(_stop_buses(stop_info))
return (stop_info, lines_info)
def _stop_info_json(self, stop_code):
"""
Helper function
:param stop_code: HSL API bus stop code
"""
url = self.urls.stop_info(stop_code)
try:
response = get(url)
except exceptions.RequestException:
return "Error"
return response.json()
def _lines_info(self, lines):
"""
Helper function to fetch line information of listed lines
:param lines: list of HSL API line codes
"""
url = self.urls.lines_info(lines)
try:
response = get(url)
except exceptions.RequestException:
return "Error"
return response.json()
def stop_lines_summary(self, stop_code):
"""Return bus code, name, address, and lines going from this stop
:param stop_code: HSL API stop code
:return: comma separated string of stop info
"""
(stop_info, linfo) = self._stop_info_lines_info(stop_code)
sinfo = stop_info[0]
if linfo:
linecodes = dict([(x["code"], x["code_short"]) for x in linfo])
else:
return "Helsinki area has no bus stop " + stop_code
dld = dict([x.split(":") for x in sinfo["lines"]])
stop_line = sinfo["code_short"] + " " + sinfo["name_fi"] + " " + sinfo[
"address_fi"]
ends_lines = [(dld[k].split(",")[0], linecodes[k]) for k in dld.keys()]
ddl = defaultdict(list)
for last, code in ends_lines:
ddl[last].append(code)
sumsum = ", ".join(
["%s %s" % (", ".join(sorted(ddl[k])), k) for k in
sorted(ddl.keys())])
return "\n".join([stop_line, sumsum])
def _location_stops(self, longitude, latitude):
"""
Fetch stops at location using HSL API
:param longitude: longitude of the location
:param latitude: latitude of the location
:return: newline-separated list of nearby stops
"""
url = self.urls.nearby_stops(longitude, latitude)
try:
response = get(url)
except exceptions.RequestException:
return "Error"
return response.json()
def _search_result_stops(self, query):
"""
Fetch stops by search term
:param query:
:return: JSON returned from HSL API
"""
url = self.urls.geocode_address(query)
try:
response = get(url)
except exceptions.RequestException:
return "Error"
return response.json()
def stops_for_query(self, query):
"""
Return stops matching a search term
:param query: search term
"""
stops = self._search_result_stops(query)
if stops == "Error":
return "No stops nearby this location"
return ("\n".join(
["%s %s %s" % (
x["details"]["shortCode"], x["name"], x["details"]["address"])
for x
in
stops if x["locType"] == "stop"]),
["%s" % x["details"]["shortCode"] for x in stops if
x["locType"] == "stop"])
def stops_for_location(self, longitude, latitude):
"""
Get stops at location
:param longitude: longitude of the location
:param latitude: latitude of the location
:return: newline-separated list of nearby stops
"""
stops = self._location_stops(longitude, latitude)
if stops == "Error":
return "No stops nearby this location"
return ("\n".join(
["%s %s %s %dm" % (x["codeShort"], x["name"], x["address"], x["dist"]) for x in
stops]), ["%s" % x["codeShort"] for x in stops])
def city_code(city):
"""
:param city: city is a custom slot type in the alexa skill configuration
possible values are:
Helsinki
Helsingfors
Espoo
Esbo
Vantaa
Vanda
Kauniainen
Grankulla
:return: a short code as used in HSL bus stop codes: "" for Helsinki, "E" for Espoo,
"V" for Vantaa and "Ka" for Kauniainen
"""
lc_city = city.lower()
if lc_city == "helsinki" or lc_city == "helsingfors":
return ""
elif lc_city == "espoo" or lc_city == "esbo":
return "E"
elif lc_city == "vantaa" or lc_city == "vanda":
return "V"
elif lc_city == "kauniainen" or lc_city == "grankulla":
return "Ka"
else: # silently default to Helsinki
return ""
def normalize_stopcode(code):
"""
Make stopcode a four digit, zero padded string
:param code: raw stop code
:return: normalized code
"""
return format(int(code), '04')
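# e.g. normalize_stopcode("42") -> "0042"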
def _stop_buses(json):
"""
Helper function for enumerating buses going through the stop
:param json: HSL API bus stop code
"""
# lines = json[0]["lines"]
lines = reduce(lambda x, y: x + y, [x["lines"] for x in json])
return [x.split(":")[0] for x in lines]
|
{
"content_hash": "5b3c9ea9e6c71e11b76cca69aeb0f23b",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 91,
"avg_line_length": 32.06318082788671,
"alnum_prop": 0.5492287830400218,
"repo_name": "timokoola/timoechobot",
"id": "46334ab3160d4066068e0758add57d5cc36cc8fa",
"size": "14717",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "departures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52779"
},
{
"name": "JavaScript",
"bytes": "15800"
},
{
"name": "Makefile",
"bytes": "688"
},
{
"name": "Python",
"bytes": "2659232"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
}
|
painting_possible = True
try:
from skimage.filter import sobel
except Exception:
try:
from scipy.ndimage.filters import sobel
except Exception:
painting_possible = False
# -----------------------------------------------------------------------
import numpy as np
def to_painting(image, saturation=1.4, black=0.006):
""" transforms any photo into some kind of painting """
edges = sobel(image.mean(axis=2))
darkening = black * (255 * np.dstack(3 * [edges]))
painting = saturation * image - darkening
return np.maximum(0, np.minimum(255, painting)).astype("uint8")
def painting(clip, saturation=1.4, black=0.006):
"""
Transforms any photo into some kind of painting. Saturation
tells at which point the colors of the result should be
flashy. ``black`` gives the amount of black lines wanted.
Requires Scikit-image or Scipy installed.
"""
return clip.image_transform(lambda im: to_painting(im, saturation, black))
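# Illustrative usage (argument values here are just examples):
#     painted_clip = painting(my_clip, saturation=1.6, black=0.01)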
# ------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
if not painting_possible:
doc = painting.__doc__
def painting(clip, saturation=None, black=None):
raise IOError("fx painting needs scikit-image or scipy")
painting.__doc__ = doc
# -----------------------------------------------------------------------
|
{
"content_hash": "0060abae33e442084ccfe1e376b3c624",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 32,
"alnum_prop": 0.5922619047619048,
"repo_name": "kerstin/moviepy",
"id": "c1411387463bd3be0a41ebecc8c9f489b4424de8",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviepy/video/fx/painting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "265264"
}
],
"symlink_target": ""
}
|
"""
A Printer which converts an expression into its LaTeX equivalent.
"""
from sympy.core import S, C, Basic, Add, Mul, Wild, var
from printer import Printer
from conventions import split_super_sub
from sympy.simplify import fraction
from sympy import Interval
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import prec_to_dps
import re, warnings
class LatexPrinter(Printer):
printmethod = "_latex"
_default_settings = {
"order": None,
"mode": "plain",
"itex": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"mul_symbol": None,
"inv_trig_style": "abbreviated",
"mat_str": "smallmatrix",
"mat_delim": "(",
}
def __init__(self, settings=None):
if settings is not None and 'inline' in settings and not settings['inline']:
# Change to "good" defaults for inline=False
settings['mat_str'] = 'bmatrix'
settings['mat_delim'] = None
Printer.__init__(self, settings)
if 'inline' in self._settings:
warnings.warn("'inline' is deprecated, please use 'mode'. "
"'mode' can be one of 'inline', 'plain', 'equation', or "
"'equation*'.")
if self._settings['inline']:
self._settings['mode'] = 'inline'
else:
self._settings['mode'] = 'equation*'
if 'mode' in self._settings:
valid_modes = ['inline', 'plain', 'equation', \
'equation*']
if self._settings['mode'] not in valid_modes:
raise ValueError, "'mode' must be one of 'inline', 'plain', " \
"'equation' or 'equation*'"
mul_symbol_table = {
None : r" ",
"ldot" : r" \,.\, ",
"dot" : r" \cdot ",
"times" : r" \times "
}
self._settings['mul_symbol_latex'] = \
mul_symbol_table[self._settings['mul_symbol']]
self._delim_dict = {'(':')','[':']'}
def doprint(self, expr):
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and expr is not S.NegativeOne))
def _needs_function_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _mul_is_clean(self, expr):
for arg in expr.args:
if arg.is_Function:
return False
return True
def _pow_is_clean(self, expr):
return not self._needs_brackets(expr.base)
def _do_exponent(self, expr, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
def _print_Add(self, expr, order=None):
terms = self._as_ordered_terms(expr, order=order)
tex = self._print(terms[0])
for term in terms[1:]:
coeff = term.as_coeff_mul()[0]
if coeff >= 0:
tex += " +"
tex += " " + self._print(term)
return tex
def _print_Real(self, expr):
# Based on the corresponding method in StrPrinter
dps = prec_to_dps(expr._prec)
str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
separator = r" \times "
if self._settings['mul_symbol'] is not None:
separator = self._settings['mul_symbol_latex']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \intfy"
else:
return str_real
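# For example (a sketch): a Real that mpmath renders as "2.5e+20" becomes
# 2.5 \times 10^{20}, while "+inf" and "-inf" map to \infty and -\infty.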
def _print_Mul(self, expr):
coeff, tail = expr.as_coeff_Mul()
if not coeff.is_negative:
tex = ""
else:
coeff = -coeff
tex = "- "
numer, denom = fraction(tail)
separator = self._settings['mul_symbol_latex']
def convert(expr):
if not expr.is_Mul:
return str(self._print(expr))
else:
_tex = last_term_tex = ""
if self.order:
args = sorted(expr.args, key=Basic.sorted_key)
else:
args = expr.args
for term in args:
pretty = self._print(term)
if term.is_Add:
term_tex = (r"\left(%s\right)" % pretty)
else:
term_tex = str(pretty)
# between two digits, \times must always be used,
# to avoid confusion
if separator == " " and \
re.search("[0-9][} ]*$", last_term_tex) and \
re.match("[{ ]*[-+0-9]", term_tex):
_tex += r" \times "
elif _tex:
_tex += separator
_tex += term_tex
last_term_tex = term_tex
return _tex
if denom is S.One:
if numer.is_Add:
_tex = r"\left(%s\right)" % convert(numer)
else:
_tex = r"%s" % convert(numer)
if coeff is not S.One:
tex += str(self._print(coeff))
# between two digits, \times must always be used, to avoid
# confusion
if separator == " " and re.search("[0-9][} ]*$", tex) and \
re.match("[{ ]*[-+0-9]", _tex):
tex += r" \times " + _tex
else:
tex += separator + _tex
else:
tex += _tex
else:
if numer is S.One:
if coeff.is_Integer:
numer *= coeff.p
elif coeff.is_Rational:
if coeff.p != 1:
numer *= coeff.p
denom *= coeff.q
elif coeff is not S.One:
tex += str(self._print(coeff)) + " "
else:
if coeff.is_Rational and coeff.p == 1:
denom *= coeff.q
elif coeff is not S.One:
tex += str(self._print(coeff)) + " "
tex += r"\frac{%s}{%s}" % \
(convert(numer), convert(denom))
return tex
def _print_Pow(self, expr):
# Treat x**(Rational(1,n)) as special case
if expr.exp.is_Rational\
and abs(expr.exp.p) == 1\
and expr.exp.q != 1:
base = self._print(expr.base)
expq = expr.exp.q
if expq == 2:
tex = r"\sqrt{%s}" % base
elif self._settings['itex']:
tex = r"\root{%d}{%s}" % (expq,base)
else:
tex = r"\sqrt[%d]{%s}" % (expq,base)
if expr.exp.is_negative:
return r"\frac{1}{%s}" % tex
else:
return tex
elif self._settings['fold_frac_powers'] \
and expr.exp.is_Rational \
and expr.exp.q != 1:
base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q
return r"%s^{%s/%s}" % (base, p, q)
else:
if expr.base.is_Function:
return self._print(expr.base, self._print(expr.exp))
else:
if expr.exp == S.NegativeOne:
#solves issue 1030
#As Mul always simplify 1/x to x**-1
#The objective is achieved with this hack
#first we get the latex for -1 * expr,
#which is a Mul expression
tex = self._print(S.NegativeOne * expr).strip()
#the result comes with a minus and a space, so we remove
if tex[:1] == "-":
return tex[1:].strip()
if self._needs_brackets(expr.base):
tex = r"\left(%s\right)^{%s}"
else:
tex = r"%s^{%s}"
return tex % (self._print(expr.base),
self._print(expr.exp))
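# For example (a sketch): x**Rational(1, 3) prints as \sqrt[3]{x},
# x**Rational(-1, 2) prints as \frac{1}{\sqrt{x}}, and with
# fold_frac_powers=True, x**Rational(2, 3) prints as x^{2/3}.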
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in l[1], l[0], l[2]])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
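# For example (a sketch): Sum(x, (x, 1, 10)) prints as \sum_{x=1}^{10} x,
# while multiple limits are rendered as inequalities inside \substack.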
def _print_Derivative(self, expr):
dim = len(expr.variables)
if dim == 1:
tex = r"\frac{\partial}{\partial %s}" % \
self._print(expr.variables[0])
else:
multiplicity, i, tex = [], 1, ""
current = expr.variables[0]
for symbol in expr.variables[1:]:
if symbol == current:
i = i + 1
else:
multiplicity.append((current, i))
current, i = symbol, 1
else:
multiplicity.append((current, i))
for x, i in multiplicity:
if i == 1:
tex += r"\partial %s" % self._print(x)
else:
tex += r"\partial^{%s} %s" % (i, self._print(x))
tex = r"\frac{\partial^{%s}}{%s} " % (dim, tex)
if isinstance(expr.expr, C.AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(expr.expr))
else:
return r"%s %s" % (tex, self._print(expr.expr))
def _print_Integral(self, expr):
tex, symbols = "", []
for lim in reversed(expr.limits):
symbol = lim[0]
tex += r"\int"
if len(lim) > 1:
if self._settings['mode'] in ['equation','equation*'] \
and not self._settings['itex']:
tex += r"\limits"
if len(lim) == 3:
tex += "_{%s}^{%s}" % (self._print(lim[1]),
self._print(lim[2]))
if len(lim) == 2:
tex += "^{%s}" % (self._print(lim[1]))
symbols.insert(0, "d%s" % self._print(symbol))
return r"%s %s\,%s" % (tex,
str(self._print(expr.function)), " ".join(symbols))
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
tex = r"\lim_{%s \to %s}" % (self._print(z),
self._print(z0))
if isinstance(e, C.AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(e))
else:
return r"%s %s" % (tex, self._print(e))
def _print_Function(self, expr, exp=None):
func = expr.func.__name__
if hasattr(self, '_print_' + func):
return getattr(self, '_print_' + func)(expr, exp)
else:
args = [ str(self._print(arg)) for arg in expr.args ]
# How inverse trig functions should be displayed, formats are:
# abbreviated: asin, full: arcsin, power: sin^-1
inv_trig_style = self._settings['inv_trig_style']
# If we are dealing with a power-style inverse trig function
inv_trig_power_case = False
# If it is applicable to fold the argument brackets
can_fold_brackets = self._settings['fold_func_brackets'] and \
len(args) == 1 and \
not self._needs_function_brackets(expr.args[0])
inv_trig_table = ["asin", "acos", "atan", "acot"]
# If the function is an inverse trig function, handle the style
if func in inv_trig_table:
if inv_trig_style == "abbreviated":
func = func # the abbreviated form is already the function's own name
elif inv_trig_style == "full":
func = "arc" + func[1:]
elif inv_trig_style == "power":
func = func[1:]
inv_trig_power_case = True
# Can never fold brackets if we're raised to a power
if exp is not None:
can_fold_brackets = False
if inv_trig_power_case:
name = r"\operatorname{%s}^{-1}" % func
elif exp is not None:
name = r"\operatorname{%s}^{%s}" % (func, exp)
else:
name = r"\operatorname{%s}" % func
if can_fold_brackets:
name += r"%s"
else:
name += r"\left(%s\right)"
if inv_trig_power_case and exp is not None:
name += r"^{%s}" % exp
return name % ",".join(args)
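# For example (a sketch), printing asin(x) under each inv_trig_style:
#   abbreviated -> \operatorname{asin}\left(x\right)
#   full        -> \operatorname{arcsin}\left(x\right)
#   power       -> \operatorname{sin}^{-1}\left(x\right)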
def _print_Poly(self, expr):
return self._print(expr.as_expr())
def _print_floor(self, expr, exp=None):
tex = r"\lfloor{%s}\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\lceil{%s}\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\lvert{%s}\rvert" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_re(self, expr, exp=None):
if self._needs_brackets(expr.args[0]):
tex = r"\Re\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\Re{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_im(self, expr, exp=None):
if self._needs_brackets(expr.args[0]):
tex = r"\Im\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\Im{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_exp(self, expr, exp=None):
tex = r"e^{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_gamma(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\operatorname{\Gamma}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{\Gamma}%s" % tex
def _print_Factorial(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!" % self._print(x)
else:
tex = self._print(x) + "!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Binomial(self, expr, exp=None):
tex = r"{{%s}\choose{%s}}" % (self._print(expr[0]),
self._print(expr[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
tex = r"{\left(%s\right)}^{\left(%s\right)}" % \
(self._print(expr[0]), self._print(expr[1]))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
tex = r"{\left(%s\right)}_{\left(%s\right)}" % \
(self._print(expr[0]), self._print(expr[1]))
return self._do_exponent(tex, exp)
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
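# For example (a sketch): Rational(-3, 4) prints as - \frac{3}{4}, while
# Rational(5) falls through to plain integer printing.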
def _print_Infinity(self, expr):
return r"\infty"
def _print_NegativeInfinity(self, expr):
return r"-\infty"
def _print_ComplexInfinity(self, expr):
return r"\tilde{\infty}"
def _print_ImaginaryUnit(self, expr):
return r"\mathbf{\imath}"
def _print_NaN(self, expr):
return r"\bot"
def _print_Pi(self, expr):
return r"\pi"
def _print_Exp1(self, expr):
return r"e"
def _print_EulerGamma(self, expr):
return r"\gamma"
def _print_Order(self, expr):
return r"\operatorname{\mathcal{O}}\left(%s\right)" % \
self._print(expr.args[0])
def _print_Symbol(self, expr):
name, supers, subs = split_super_sub(expr.name)
# translate name, supers and subs to tex keywords
greek = set([ 'alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta',
'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu',
'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon',
'phi', 'chi', 'psi', 'omega' ])
other = set( ['aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth',
'hbar', 'hslash', 'mho' ])
def translate(s):
tmp = s.lower()
if tmp in greek or tmp in other:
return "\\" + s
else:
return s
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
# glue all items together:
if len(supers) > 0:
name += "^{%s}" % " ".join(supers)
if len(subs) > 0:
name += "_{%s}" % " ".join(subs)
return name
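# For example (a sketch): Symbol('alpha_1') splits into the name 'alpha' and
# the subscript '1', translating to \alpha_{1}.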
def _print_Relational(self, expr):
if self._settings['itex']:
lt = r"\lt"
else:
lt = "<"
charmap = {
"==" : "=",
"<" : lt,
"<=" : r"\leq",
"!=" : r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
def _print_Piecewise(self, expr):
ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c)) \
for e, c in expr.args[:-1]]
if expr.args[-1].cond == True:
ecpairs.append(r"%s & \text{otherwise}" % \
self._print(expr.args[-1].expr))
else:
ecpairs.append(r"%s & \text{for}\: %s" % \
(self._print(expr.args[-1].cond),
self._print(expr.args[-1].expr)))
tex = r"\begin{cases} %s \end{cases}"
return tex % r" \\".join(ecpairs)
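# For example (a sketch): Piecewise((x, x < 1), (x**2, True)) prints as
# \begin{cases} x & \text{for}\: x < 1 \\x^{2} & \text{otherwise} \end{cases}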
def _print_Matrix(self, expr):
lines = []
for line in range(expr.rows):
lines.append(" & ".join([ self._print(i) for i in expr[line,:] ]))
out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
out_str = out_str.replace('%MATSTR%', self._settings['mat_str'])
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
out_str = r'\left' + left_delim + out_str + \
r'\right' + right_delim
return out_str % r"\\".join(lines)
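# For example (a sketch): with the default settings (mat_str='smallmatrix',
# mat_delim='('), a 1x2 matrix [x, y] prints as
# \left(\begin{smallmatrix}x & y\end{smallmatrix}\right).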
def _print_tuple(self, expr):
return r"\begin{pmatrix}%s\end{pmatrix}" % \
r", & ".join([ self._print(i) for i in expr ])
def _print_list(self, expr):
return r"\begin{bmatrix}%s\end{bmatrix}" % \
r", & ".join([ self._print(i) for i in expr ])
def _print_dict(self, expr):
items = []
keys = expr.keys()
keys.sort(Basic.compare_pretty)
for key in keys:
val = expr[key]
items.append("%s : %s" % (self._print(key), self._print(val)))
return r"\begin{Bmatrix}%s\end{Bmatrix}" % r", & ".join(items)
def _print_DiracDelta(self, expr):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (\
self._print(expr.args[1]), self._print(expr.args[0]))
return tex
def _print_Interval(self, i):
if i.start == i.end:
return r"\left{%s\right}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
def _print_Union(self, u):
other_sets, singletons = [], []
for set in u.args:
if isinstance(set, Interval) and set.measure == 0:
singletons.append(set.start)
else:
other_sets.append(set)
S2 = r"%s" % \
r" \cup ".join([ self._print_Interval(i) for i in other_sets ])
if len(singletons) > 0:
S1 = r"\left\{%s\right\}" % \
r", ".join([ self._print(i) for i in singletons ])
S = r"%s \cup %s" % (S1, S2)
else:
S = S2
return S
def _print_EmptySet(self, e):
return r"\emptyset"
def latex(expr, **settings):
r"""Convert the given expression to LaTeX representation.
You can specify how the generated code will be delimited using
the 'mode' keyword. 'mode' can be one of 'plain', 'inline',
'equation' or 'equation*'. If 'mode' is set to 'plain', then
the resulting code will not be delimited at all (this is the
default). If 'mode' is set to 'inline' then inline LaTeX $ $ will be
used. If 'mode' is set to 'equation' or 'equation*', the resulting
code will be enclosed in the 'equation' or 'equation*' environment
(remember to import 'amsmath' for 'equation*'), unless the 'itex'
option is set. In the latter case, the $$ $$ syntax is used.
>>> from sympy import latex, Rational
>>> from sympy.abc import x, y, mu, tau
>>> latex((2*tau)**Rational(7,2))
'8 \\sqrt{2} \\tau^{\\frac{7}{2}}'
>>> latex((2*mu)**Rational(7,2), mode='plain')
'8 \\sqrt{2} \\mu^{\\frac{7}{2}}'
>>> latex((2*tau)**Rational(7,2), mode='inline')
'$8 \\sqrt{2} \\tau^{\\frac{7}{2}}$'
>>> latex((2*mu)**Rational(7,2), mode='equation*')
'\\begin{equation*}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation*}'
>>> latex((2*mu)**Rational(7,2), mode='equation')
'\\begin{equation}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation}'
>>> latex((2*mu)**Rational(7,2), mode='equation', itex=True)
'$$8 \\sqrt{2} \\mu^{\\frac{7}{2}}$$'
Besides all Basic based expressions, you can recursively
convert Python containers (lists, tuples and dicts) and
also SymPy matrices:
>>> latex([2/x, y], mode='inline')
'$\\begin{bmatrix}\\frac{2}{x}, & y\\end{bmatrix}$'
"""
return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
"""Prints LaTeX representation of the given expression."""
print latex(expr, **settings)
|
{
"content_hash": "14eb5dc77a15328f3fdc4b88739d143a",
"timestamp": "",
"source": "github",
"line_count": 757,
"max_line_length": 84,
"avg_line_length": 33.51651254953765,
"alnum_prop": 0.4717799148667823,
"repo_name": "pernici/sympy",
"id": "b08c3d85303ca6b527cf6a20fff42902a1bdfec3",
"size": "25372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/printing/latex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6531741"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Video.autoPublish'
db.add_column('portal_video', 'autoPublish',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Video.autoPublish'
db.delete_column('portal_video', 'autoPublish')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'portal.channel': {
'Meta': {'object_name': 'Channel'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'portal.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'timecode': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Video']"})
},
'portal.hotfolder': {
'Meta': {'object_name': 'Hotfolder'},
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'defaultName': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'folderName': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'portal.video': {
'Meta': {'object_name': 'Video'},
'assemblyid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'encodingDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'linkURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mp3Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp3URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mp4Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp4URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'oggSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'oggURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'originalFile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'torrentURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videoThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'webmSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'webmURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['portal']
|
{
"content_hash": "c9badfe109cfdc20926f1deb89ac69ac",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 182,
"avg_line_length": 76.5,
"alnum_prop": 0.5459884436866534,
"repo_name": "LambdaCast/LambdaCast",
"id": "3f6a001c3aabd7393e641cb22f4f4a468a27944f",
"size": "10581",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "portal/migrations/0012_auto__add_field_video_autoPublish.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "58222"
},
{
"name": "HTML",
"bytes": "60231"
},
{
"name": "JavaScript",
"bytes": "122667"
},
{
"name": "Python",
"bytes": "928261"
}
],
"symlink_target": ""
}
|
"""
Form utilities.
`TrimCharField` - a char field which truncates its value to max_length before
validation.
"""
from __future__ import unicode_literals
from django.forms import CharField
from django.forms import ModelForm
from django.forms.models import ModelFormMetaclass
from django.core.exceptions import ImproperlyConfigured
class TrimCharField(CharField):
"""
A char field which truncates its value to `max_length` before validation.
"""
def to_python(self, value):
py_value = super(TrimCharField, self).to_python(value)
# max_length can be 0, so test against None explicitly.
if self.max_length is not None:
py_value = py_value[:self.max_length]
return py_value
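# For example (a sketch): TrimCharField(max_length=3).clean('abcdef') returns
# 'abc' instead of raising a max_length validation error.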
class TrimCharFieldsModelFormMetaclass(ModelFormMetaclass):
"""
A metaclass for ModelForm which replaces `CharField` with `TrimCharField`
for fields listed in `Meta.trim_fields`.
Example:
class UserProfileForm(six.with_metaclass(
TrimCharFieldsModelFormMetaclass, forms.ModelForm)):
class Meta:
model = UserProfile
fields = ('real_name', 'about', )
trim_fields = ('about', )
# the rest of Meta and UserProfileForm
In this example two fields (`real_name` and `about`) are taken from
UserProfile. We want the `about` field to trim the posted value to
`max_length` instead of raising a validation error, so we add it to
`trim_fields`; the metaclass then replaces its `CharField` with a
`TrimCharField`, preserving all of the `CharField`'s parameters
(`label`, `required`, `widget`, etc.).
"""
def __new__(cls, name, bases, attrs):
new_class = super(TrimCharFieldsModelFormMetaclass, cls).__new__(
cls, name, bases, attrs)
# Raise an exception in case the metaclass is used with a class that is
# not derived from ModelForm.
if not issubclass(new_class, ModelForm):
raise TypeError("Class '{}' must be derived from ModelForm".
format(name))
# Ignore NewBase class inserted by six. Also we're not interested in
# classes without Meta.
if name == 'NewBase' or 'Meta' not in attrs:
return new_class
trim_fields = attrs['Meta'].__dict__.get('trim_fields', [])
for f in trim_fields:
if f not in new_class.base_fields:
raise ImproperlyConfigured(
"Trim field '{}' doesn't exist in base_fields.".format(f))
field = new_class.base_fields[f]
if not isinstance(field, CharField):
raise TypeError(
"Trim field '{}' must be CharField, {} given.".format(
f, field.__class__.__name__))
trimField = TrimCharField(
required=field.required,
label=field.label,
initial=field.initial,
widget=field.widget,
error_messages=field.error_messages,
validators=field.validators,
localize=field.localize,
max_length=field.max_length,
min_length=field.min_length
)
new_class.base_fields[f] = trimField
return new_class
|
{
"content_hash": "071201e1c601ca3bac693ba2654872bf",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 35.45054945054945,
"alnum_prop": 0.6053936763794172,
"repo_name": "ivanyu/django-juice",
"id": "c9bd9655378e02742397d031f5a9326319178352",
"size": "3250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "juice/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24262"
}
],
"symlink_target": ""
}
|
"""
Font Bakery reporters/serialize can report the events of the Font Bakery
CheckRunner Protocol to a serializable document, e.g. for usage with `json.dumps`.
Separation of Concerns Disclaimer:
While created specifically for checking fonts and font-families this
module has no domain knowledge about fonts. It can be used for any kind
of (document) checking. Please keep it so. It will be valuable for other
domains as well.
Domain specific knowledge should be encoded only in the Profile (Checks,
Conditions) and MAYBE in *customized* reporters e.g. subclasses.
"""
from fontbakery.checkrunner import (
DEBUG
, SECTIONSUMMARY
, ENDCHECK
, START
, END
)
from fontbakery.reporters import FontbakeryReporter
from fontbakery.checkrunner import Status
class SerializeReporter(FontbakeryReporter):
"""
usage:
>> sr = SerializeReporter(runner=runner, collect_results_by='font')
>> sr.run()
>> import json
>> print(json.dumps(sr.getdoc(), sort_keys=True, indent=4))
"""
def __init__(self, loglevels,
succinct=None,
collect_results_by=None,
**kwd):
super().__init__(**kwd)
self.succinct = succinct
self.loglevels = loglevels
self._results_by = collect_results_by
self._items = {}
self._doc = None
# used when self._results_by is set
# this way we minimize our knowledge of the profile
self._max_cluster_by_index = None
self._observed_checks = {}
@staticmethod
def _set_metadata(identity, item):
section, check, iterargs = identity
# If section is None this is the main doc.
# If check is None this is `section`
# otherwise this `check`
pass
def omit_loglevel(self, msg) -> bool:
"""Determine if message is below log level."""
return self.loglevels and (
self.loglevels[0] > Status(msg)
)
def _register(self, event):
super()._register(event)
status, message, identity = event
section, check, iterargs = identity
key = self._get_key(identity)
# "not item" is True when item is empty
item = self._items.get(key, {})
if not item:
self._items[key] = item
# init
if status in (START, END) and not item:
item.update(dict(result=None, sections=[]))
if self._results_by:
# give the consumer a clue that/how the sections
# are structured differently.
item['clusteredBy'] = self._results_by
if status == SECTIONSUMMARY:
item.update(dict(key=key, result=None, checks=[]))
if check:
item.update(dict(key=key, result=None, logs=[]))
if self._results_by:
if self._results_by == '*check':
if check.id not in self._observed_checks:
self._observed_checks[check.id] = len(self._observed_checks)
index = self._observed_checks[check.id]
value = check.id
else:
index = dict(iterargs).get(self._results_by, None)
value = None
if self.runner:
value = self.runner.get_iterarg(self._results_by, index)
if index is not None:
if self._max_cluster_by_index is not None:
self._max_cluster_by_index = max(index, self._max_cluster_by_index)
else:
self._max_cluster_by_index = index
item['clustered'] = {
'name': self._results_by
, 'index': index # None if this check did not require self.results_by
}
if value: # Not set if self.runner was not defined on initialization
item['clustered']['value'] = value
self._set_metadata(identity, item)
if check:
item['description'] = check.description
if check.rationale:
item['rationale'] = check.rationale
if check.severity:
item['severity'] = check.severity
if item["key"][2] != ():
item['filename'] = self.runner.get_iterarg(*item["key"][2][0])
if status == END:
item['result'] = message # is a Counter
if status == SECTIONSUMMARY:
_, item['result'] = message # message[1] is a Counter
if status == ENDCHECK:
item['result'] = message.name # is a Status
if status >= DEBUG:
item['logs'].append({'status': status.name,
'message': f'{message}',
'traceback': getattr(message, 'traceback', None)
})
def getdoc(self):
if not self._ended:
raise Exception('Can\'t create doc before END status was received.')
if self._doc is not None:
return self._doc
doc = self._items[self._get_key((None, None, None))]
seen = set()
# this puts all in the original order
for identity in self._order:
key = self._get_key(identity)
section, _, _ = identity
sectionKey = self._get_key((section, None, None))
sectionDoc = self._items[sectionKey]
check = self._items[key]
if self._results_by:
if not len(sectionDoc['checks']):
clusterlen = self._max_cluster_by_index + 1
if self._results_by != '*check':
# + 1 for rests bucket
clusterlen += 1
sectionDoc['checks'] = [[] for _ in range(clusterlen)]
index = check['clustered']['index']
if index is None:
# last element collects unclustered
index = -1
sectionDoc['checks'][index].append(check)
else:
sectionDoc['checks'].append(check)
if sectionKey not in seen:
seen.add(sectionKey)
doc['sections'].append(sectionDoc)
self._doc = doc
return doc
def write(self):
import json
with open(self.output_file, "w") as fh:
json.dump(self.getdoc(), fh, sort_keys=True, indent=4)
print(f'A report in JSON format has been saved to "{self.output_file}"')
|
{
"content_hash": "7c72cab746f2f4a434e6e04ded34d566",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 95,
"avg_line_length": 39.49122807017544,
"alnum_prop": 0.5239152969050792,
"repo_name": "googlefonts/fontbakery",
"id": "87fe7a3c38dc83a263b64167553248442c5de2c2",
"size": "6753",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "Lib/fontbakery/reporters/serialize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "12240"
},
{
"name": "PureBasic",
"bytes": "27550"
},
{
"name": "Python",
"bytes": "1580453"
},
{
"name": "Shell",
"bytes": "1379"
}
],
"symlink_target": ""
}
|
"""Constants for the DLNA MediaServer integration."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Final
from homeassistant.components.media_player import const as _mp_const
LOGGER = logging.getLogger(__package__)
DOMAIN: Final = "dlna_dms"
DEFAULT_NAME: Final = "DLNA Media Server"
SOURCE_SEP: Final = "/"
ROOT_OBJECT_ID: Final = "0"
PATH_SEP: Final = "/"
PATH_SEARCH_FLAG: Final = "?"
PATH_OBJECT_ID_FLAG: Final = ":"
# Only request the metadata needed to build a browse response
DLNA_BROWSE_FILTER: Final = [
"id",
"upnp:class",
"dc:title",
"res",
"@childCount",
"upnp:albumArtURI",
]
# Get all metadata when resolving, for the use of media_players
DLNA_RESOLVE_FILTER: Final = "*"
# Metadata needed to resolve a path
DLNA_PATH_FILTER: Final = ["id", "upnp:class", "dc:title"]
DLNA_SORT_CRITERIA: Final = ["+upnp:class", "+upnp:originalTrackNumber", "+dc:title"]
PROTOCOL_HTTP: Final = "http-get"
PROTOCOL_RTSP: Final = "rtsp-rtp-udp"
PROTOCOL_ANY: Final = "*"
STREAMABLE_PROTOCOLS: Final = [PROTOCOL_HTTP, PROTOCOL_RTSP, PROTOCOL_ANY]
# Map UPnP object class to media_player media class
MEDIA_CLASS_MAP: Mapping[str, str] = {
"object": _mp_const.MEDIA_CLASS_URL,
"object.item": _mp_const.MEDIA_CLASS_URL,
"object.item.imageItem": _mp_const.MEDIA_CLASS_IMAGE,
"object.item.imageItem.photo": _mp_const.MEDIA_CLASS_IMAGE,
"object.item.audioItem": _mp_const.MEDIA_CLASS_MUSIC,
"object.item.audioItem.musicTrack": _mp_const.MEDIA_CLASS_MUSIC,
"object.item.audioItem.audioBroadcast": _mp_const.MEDIA_CLASS_MUSIC,
"object.item.audioItem.audioBook": _mp_const.MEDIA_CLASS_PODCAST,
"object.item.videoItem": _mp_const.MEDIA_CLASS_VIDEO,
"object.item.videoItem.movie": _mp_const.MEDIA_CLASS_MOVIE,
"object.item.videoItem.videoBroadcast": _mp_const.MEDIA_CLASS_TV_SHOW,
"object.item.videoItem.musicVideoClip": _mp_const.MEDIA_CLASS_VIDEO,
"object.item.playlistItem": _mp_const.MEDIA_CLASS_TRACK,
"object.item.textItem": _mp_const.MEDIA_CLASS_URL,
"object.item.bookmarkItem": _mp_const.MEDIA_CLASS_URL,
"object.item.epgItem": _mp_const.MEDIA_CLASS_EPISODE,
"object.item.epgItem.audioProgram": _mp_const.MEDIA_CLASS_MUSIC,
"object.item.epgItem.videoProgram": _mp_const.MEDIA_CLASS_VIDEO,
"object.container": _mp_const.MEDIA_CLASS_DIRECTORY,
"object.container.person": _mp_const.MEDIA_CLASS_ARTIST,
"object.container.person.musicArtist": _mp_const.MEDIA_CLASS_ARTIST,
"object.container.playlistContainer": _mp_const.MEDIA_CLASS_PLAYLIST,
"object.container.album": _mp_const.MEDIA_CLASS_ALBUM,
"object.container.album.musicAlbum": _mp_const.MEDIA_CLASS_ALBUM,
"object.container.album.photoAlbum": _mp_const.MEDIA_CLASS_ALBUM,
"object.container.genre": _mp_const.MEDIA_CLASS_GENRE,
"object.container.genre.musicGenre": _mp_const.MEDIA_CLASS_GENRE,
"object.container.genre.movieGenre": _mp_const.MEDIA_CLASS_GENRE,
"object.container.channelGroup": _mp_const.MEDIA_CLASS_CHANNEL,
"object.container.channelGroup.audioChannelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
"object.container.channelGroup.videoChannelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
"object.container.epgContainer": _mp_const.MEDIA_CLASS_DIRECTORY,
"object.container.storageSystem": _mp_const.MEDIA_CLASS_DIRECTORY,
"object.container.storageVolume": _mp_const.MEDIA_CLASS_DIRECTORY,
"object.container.storageFolder": _mp_const.MEDIA_CLASS_DIRECTORY,
"object.container.bookmarkFolder": _mp_const.MEDIA_CLASS_DIRECTORY,
}
|
{
"content_hash": "0a1ec256a4ce221ca04715dec571dd21",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 85,
"avg_line_length": 46.166666666666664,
"alnum_prop": 0.7256317689530686,
"repo_name": "GenericStudent/home-assistant",
"id": "8c260272d5fa780dc75c83cb50adfce2b2ae12f5",
"size": "3601",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/dlna_dms/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
"""BiasOnly algorithm, where the compress bias only."""
from typing import List
import tensorflow as tf
from tensorflow_model_optimization.python.core.common.keras.compression import algorithm
# TODO(tfmot): This algorithm is a showcase for bias-only compression. If we
# find a better algorithm with broader compressible-weight coverage, we can
# remove this algorithm.
class BiasOnly(algorithm.WeightCompressor):
"""Define how to apply BiasOnly algorithm."""
# TODO(tfmot): communicate that `pretrained_weight` will sometimes
# be a dummy tensor and sometimes be actual pretrained values during
# its actual usage.
def init_training_weights(
self, pretrained_weight: tf.Tensor):
bias_mean = tf.reduce_mean(pretrained_weight)
bias_shape = tf.shape(pretrained_weight)
# TODO(tfmot): note that it does not suffice to just have the initializer
# to derive the shape from, in the case of a constant initializer.
# The unit test fail without providing the shape.
self.add_training_weight(
name='bias_mean',
shape=bias_mean.shape,
dtype=bias_mean.dtype,
initializer=tf.keras.initializers.Constant(bias_mean))
self.add_training_weight(
name='bias_shape',
shape=bias_shape.shape,
dtype=bias_shape.dtype,
initializer=tf.keras.initializers.Constant(bias_shape))
def decompress_weights(
self, bias_mean: tf.Tensor, bias_shape: tf.Tensor) -> tf.Tensor:
return tf.broadcast_to(bias_mean, bias_shape)
def project_training_weights(
self, bias_mean: tf.Tensor, bias_shape: tf.Tensor) -> tf.Tensor:
return self.decompress_weights(bias_mean, bias_shape)
def get_compressible_weights(
self, original_layer: tf.keras.layers.Layer) -> List[tf.Variable]:
# This returns the bias variables themselves (not names), so the return
# type is List[tf.Variable] rather than List[str].
if isinstance(original_layer, (tf.keras.layers.Conv2D, tf.keras.layers.Dense)):
return [original_layer.bias]
return []
def compress_model(self, to_optimize: tf.keras.Model) -> tf.keras.Model:
"""Model developer API for optimizing a model."""
# pylint: disable=protected-access
if not isinstance(to_optimize, tf.keras.Sequential) \
and not to_optimize._is_graph_network:
raise ValueError(
'`compress_model` can only either be a tf.keras Sequential or '
'Functional model.')
# pylint: enable=protected-access
def _optimize_layer(layer):
# Require layer to be built so that the average of bias can be
# initialized.
if not layer.built:
raise ValueError(
'Applying BiasOnly currently requires passing in a built model')
return algorithm.create_layer_for_training(layer, algorithm=self)
return tf.keras.models.clone_model(
to_optimize, clone_function=_optimize_layer)
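# Usage sketch (an illustrative example, not part of the original module;
# assumes `model` is a built Keras Sequential or Functional model):
#   algo = BiasOnly()
#   training_model = algo.compress_model(model)
#   # train `training_model`, then export it as the compression API dictates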
|
{
"content_hash": "19977d1b90633fa328c98361503622a2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 88,
"avg_line_length": 39.09722222222222,
"alnum_prop": 0.7023090586145648,
"repo_name": "tensorflow/model-optimization",
"id": "c5e0929ba67bceecd71ef60fab346dd19ee7fe52",
"size": "3504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_model_optimization/python/core/common/keras/compression/algorithms/bias_only.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1770"
},
{
"name": "Jupyter Notebook",
"bytes": "285964"
},
{
"name": "Python",
"bytes": "1700675"
},
{
"name": "Shell",
"bytes": "8525"
},
{
"name": "Starlark",
"bytes": "84517"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='PublishedWork',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('summary', models.TextField(blank=True, help_text="This is shown at the top of the published work's own page and in search results.", null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PublishedWorkIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PublishedWorkTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LongStory',
fields=[
('publishedwork_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='publish.PublishedWork')),
('body', wagtail.core.fields.StreamField((('chapter', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=True))))),))),
('generate_navigation', models.BooleanField(default=True, help_text='This determines whether a navigation menu for chapters will be generated for this page', verbose_name='Generate a navigation menu')),
],
options={
'abstract': False,
},
bases=('publish.publishedwork',),
),
migrations.CreateModel(
name='Poem',
fields=[
('publishedwork_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='publish.PublishedWork')),
('body', wagtail.core.fields.RichTextField()),
],
options={
'abstract': False,
},
bases=('publish.publishedwork',),
),
migrations.CreateModel(
name='ShortStory',
fields=[
('publishedwork_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='publish.PublishedWork')),
('body', wagtail.core.fields.RichTextField()),
],
options={
'abstract': False,
},
bases=('publish.publishedwork',),
),
migrations.AddField(
model_name='publishedworktag',
name='content_object',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='publish.PublishedWork'),
),
migrations.AddField(
model_name='publishedworktag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='publish_publishedworktag_items', to='taggit.Tag'),
),
migrations.AddField(
model_name='publishedwork',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='publish.PublishedWorkTag', to='taggit.Tag', verbose_name='Tags'),
),
]
|
{
"content_hash": "370c499151b68bd492a80cfdde2315fc",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 227,
"avg_line_length": 44.772277227722775,
"alnum_prop": 0.5919946926138877,
"repo_name": "Taywee/amberherbert.com",
"id": "40dd267a27f2c9ea24248ec20e95a3f5b93d7d06",
"size": "4595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publish/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1920"
},
{
"name": "HTML",
"bytes": "9937"
},
{
"name": "Python",
"bytes": "32371"
}
],
"symlink_target": ""
}
|
import logging
from conary import dbstore
from mint.scripts.db2db import db2db
log = logging.getLogger(__name__)
def switchToPostgres(cfg):
if cfg.dbDriver in ('postgresql', 'pgpool'):
return
sourceTuple = (cfg.dbDriver, cfg.dbPath)
destTuple = ('postgresql', 'postgres@localhost:5439/mint')
finalTuple = ('pgpool', 'postgres@localhost.localdomain:6432/mint')
log.info("Migrating mint database from %s::%s to %s::%s",
*(sourceTuple + destTuple))
db2db.move_database(sourceTuple, destTuple)
# Update rbuilder-generated.conf
log.info("Changing configured mint database to %s::%s", *finalTuple)
cfg.dbDriver = finalTuple[0]
cfg.dbPath = finalTuple[1]
cfg.writeGeneratedConfig()
|
{
"content_hash": "a15b58903e9b4de5ef55427a036501fa",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 29.84,
"alnum_prop": 0.6890080428954424,
"repo_name": "sassoftware/mint",
"id": "afdc5e6b8c7e5fbbd8c90072b442896b700fb8a1",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mint/scripts/db2db/migrate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50165"
},
{
"name": "Genshi",
"bytes": "58741"
},
{
"name": "HTML",
"bytes": "2814"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Makefile",
"bytes": "92418"
},
{
"name": "NASL",
"bytes": "582"
},
{
"name": "PLpgSQL",
"bytes": "5358"
},
{
"name": "Puppet",
"bytes": "17914"
},
{
"name": "Python",
"bytes": "3239135"
},
{
"name": "Ruby",
"bytes": "9268"
},
{
"name": "Shell",
"bytes": "24834"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('press_links', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EntryTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('title', models.CharField(max_length=255, verbose_name='title')),
('source', models.CharField(max_length=255, verbose_name='the source for the entry', blank=True)),
('excerpt', models.TextField(verbose_name='Excerpt', blank=True)),
('master', models.ForeignKey(related_name='translations', editable=False, to='press_links.Entry', null=True)),
],
options={
'managed': True,
'db_table': 'press_links_entry_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'Press Entry Translation',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LinkTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('link', models.CharField(max_length=255, verbose_name='link address (add http:// for external link)')),
('link_text', models.CharField(max_length=255, verbose_name='text for link')),
('master', models.ForeignKey(related_name='translations', editable=False, to='press_links.Link', null=True)),
],
options={
'managed': True,
'db_table': 'press_links_link_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'Press Link Translation',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='linktranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='entrytranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.RemoveField(
model_name='entry',
name='excerpt',
),
migrations.RemoveField(
model_name='entry',
name='source',
),
migrations.RemoveField(
model_name='entry',
name='title',
),
migrations.RemoveField(
model_name='link',
name='link',
),
migrations.RemoveField(
model_name='link',
name='link_text',
),
]
|
{
"content_hash": "1fc5c21d39e886e9eedc73650f056ed9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 126,
"avg_line_length": 39.294871794871796,
"alnum_prop": 0.5314845024469821,
"repo_name": "iberben/django-press-links",
"id": "1eae2d685ed29ce00e6ac4ef7707d261337035be",
"size": "3089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "press_links/migrations/0002_auto_20151123_1552.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4145"
},
{
"name": "Python",
"bytes": "12402"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from mock import MagicMock, patch
from api.probe_results.models import ProbeResult
class TestProbeResult(TestCase):
def setUp(self):
self.type = "type"
self.name = "name"
self.doc = MagicMock()
self.status = 1
self.file_web = MagicMock()
self.proberesult = ProbeResult(self.type, self.name, self.doc,
self.status, self.file_web)
def tearDown(self):
del self.proberesult
@patch("api.probe_results.models.IrmaFormatter")
def test_get_details_formatted(self, m_IrmaFormatter):
self.proberesult.get_details()
m_IrmaFormatter.format.assert_called_once()
self.assertEqual(m_IrmaFormatter.format.call_args[0][0],
self.name)
@patch("api.probe_results.models.IrmaFormatter")
def test_get_details_not_formatted(self, m_IrmaFormatter):
self.proberesult.get_details(formatted=False)
m_IrmaFormatter.format.assert_not_called()
|
{
"content_hash": "7fc1076cf6f60ca17a0f160cacfeef2e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 70,
"avg_line_length": 34.46666666666667,
"alnum_prop": 0.6450676982591876,
"repo_name": "quarkslab/irma",
"id": "b87fff7fe144ccec8a13be5e2bbeb4c7023aecca",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/tests/api/probe_results/test_models_proberesult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "79"
},
{
"name": "CSS",
"bytes": "86535"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "2366"
},
{
"name": "HTML",
"bytes": "26577"
},
{
"name": "JavaScript",
"bytes": "1774854"
},
{
"name": "Jinja",
"bytes": "2672"
},
{
"name": "Less",
"bytes": "13774"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "15660"
},
{
"name": "Python",
"bytes": "797592"
},
{
"name": "Shell",
"bytes": "61907"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import setup, find_packages
def _requires_from_file(filename):
return open(filename).read().splitlines()
try:
with open('README.rst') as f:
readme = f.read()
except IOError:
readme = ''
setup(
name="trie-search",
version='0.3.0',
url='https://github.com/nkmrtty/trie-search',
author='Tatsuya Nakamura',
author_email='nkmrtty.com@gmail.com',
maintainer='Tatsuya Nakamura',
maintainer_email='nkmrtty.com@gmail.com',
description='Trie-search is a package for text pattern search using marisa-trie',
long_description=readme,
packages=find_packages(),
install_requires=_requires_from_file('requirements.txt'),
license="MIT",
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
], )
|
{
"content_hash": "418159af7d199048cc4af566bf930b4f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 30.07894736842105,
"alnum_prop": 0.6465441819772528,
"repo_name": "nkmrtty/trie-search",
"id": "5e31eb93ee9d7c7e010dd6e66a943ca5d8461e19",
"size": "1181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4720"
}
],
"symlink_target": ""
}
|
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_tlb_store_protected(BaseBERITestCase):
@attr('tlb')
def test_epc_correct(self):
self.assertRegisterEqual(self.MIPS.a7, 0, "EPC not set to address of illegal store instruction.")
@attr('tlb')
def test_cause_correct(self):
self.assertRegisterMaskEqual(self.MIPS.a6, 0x7c, 0x4, "Cause not set to expected value.")
@attr('tlb')
def test_badvaddr_correct(self):
self.assertRegisterEqual(self.MIPS.s0, 0x0, "BadVAddr not set to expected value.")
@attr('tlb')
def test_store_blocked(self):
self.assertRegisterEqual(self.MIPS.a5, 0xfedcba9876543210, "Store to protected memory not blocked.")
|
{
"content_hash": "a3b647e9fe78c2ea544ecf0e95364c59",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 108,
"avg_line_length": 37.25,
"alnum_prop": 0.7087248322147651,
"repo_name": "8l/beri",
"id": "ab7611a2891b21cfcce41741049cc00930b88411",
"size": "1672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/tlb/test_tlb_store_protected.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
}
|
import logging
import os
from scli.constants import EbLocalDir, OptionSettingFile, ParameterName, \
ParameterSource, ServiceRegion, ServiceDefault
from scli.resources import ValidationMessage
from scli.exception import ValidationError
log = logging.getLogger("cli")
class Parameter(object):
'''
Parameter stores a parameter value used by operations.
'''
def __init__(self, name, value, source):
self._name = name
self._source = source
self._value = value
@property
def name(self):
return self._name
@property
def value(self):
return self._value
@property
def source(self):
return self._source
@name.setter
def name(self, name):
self._name = name
@value.setter
def value(self, value):
self._value = value
@source.setter
def source(self, source):
self._source = source
class ParameterPool(object):
'''
A collection of runtime parameters.
'''
def __init__(self):
self._pool = dict()
@property
def command(self): # one pool can have at most one command
return self._pool[ParameterName.Command].value
    @property
    def parameter_names(self):
        return set(self._pool.keys())
@property
def parameters(self):
return self._pool
def __getitem__(self, name):
return self._pool[name]
def get(self, name):
return self._pool[name]
def get_value(self, name):
return self._pool[name].value
def get_source(self, name):
return self._pool[name].source
def put(self, param, force = False):
        '''
        Add a new parameter to the pool.

        The parameter is always added or updated when it is not yet present in
        the pool or when force is True. Otherwise the existing entry is only
        replaced when the new parameter's source has equal or higher priority
        than the one already in the pool.
        '''
if not isinstance(param, Parameter):
raise AttributeError("Cannot add item that's not instance of Parameter.")
if param.name not in self._pool \
or force\
or param.source == self._pool[param.name].source \
or ParameterSource.is_ahead(param.source, self._pool[param.name].source):
self._pool[param.name] = param
def update(self, name, value = None, source = None):
if name in self._pool:
if value is not None:
self._pool[name].value = value
if source is not None:
self._pool[name].source = source
else:
self.put(Parameter(name, value, source))
def has(self, name):
return name in self._pool
def remove(self, name):
if self.has(name):
del self._pool[name]
    def validate(self, source = None):
        validator = ParameterValidator()
        validator.validate(self, source)
class ParameterValidator(object):
_validators = dict()
def __init__(self):
self._validators[ParameterName.ApplicationName] = \
self.validate_application_name
self._validators[ParameterName.ApplicationVersionName] = \
self.validate_application_version_name
self._validators[ParameterName.EnvironmentName] = \
self.validate_environment_name
self._validators[ParameterName.SolutionStack] = self.validate_solution_stack
self._validators[ParameterName.ServiceEndpoint] = self.validate_endpoint
self._validators[ParameterName.Region] = self.validate_region
#-------------------------------
# Helper method
#-------------------------------
@classmethod
def validate_alphanumeric(cls, value, min_size = None, max_size = None):
if value is not None:
size = len(value)
if min_size is not None and size < min_size:
return False
elif max_size is not None and size > max_size:
return False
else:
return value.isalnum()
else:
return False
@classmethod
def _validate_string(cls, value, name):
if len(value) < 1:
raise ValidationError(ValidationMessage.EmptyString.format(name))
@classmethod
def _validate_integer(cls, param, max_value = None, min_value = None):
try:
value = int(param)
except ValueError:
raise ValidationError(ValidationMessage.InvalidNumber.format(param))
if max_value is not None and max_value < value:
raise ValidationError(ValidationMessage.NumberTooBig.format(value))
if min_value is not None and min_value > value:
raise ValidationError(ValidationMessage.NumberTooSmall.format(value))
#-------------------------------
# Validation method
#-------------------------------
    @classmethod
    def validate(cls, parameter_pool, source = None):
        ''' Validate parameters in the pool whose source equals the specified
        source. When source is None, validate all of them. '''
        for name, parameter in list(parameter_pool.parameters.items()):
            if source is None or parameter.source == source:
                try:
                    cls._validators[name](parameter_pool, source)
                except KeyError:
                    continue  # skip parameters that have no validator
@classmethod
def validate_application_name(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.ApplicationName):
name = parameter_pool.get_value(ParameterName.ApplicationName)
cls._validate_string(name, ParameterName.ApplicationName)
@classmethod
def validate_application_version_name(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.ApplicationVersionName):
name = parameter_pool.get_value(ParameterName.ApplicationVersionName)
cls._validate_string(name, ParameterName.ApplicationVersionName)
@classmethod
def validate_environment_name(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.EnvironmentName):
name = parameter_pool.get_value(ParameterName.EnvironmentName)
cls._validate_string(name, ParameterName.EnvironmentName)
@classmethod
def validate_solution_stack(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.SolutionStack):
name = parameter_pool.get_value(ParameterName.SolutionStack)
cls._validate_string(name, ParameterName.SolutionStack)
@classmethod
def validate_region(cls, parameter_pool, source):
        if parameter_pool.has(ParameterName.Region):
            region = parameter_pool.get_value(ParameterName.Region)
            if region not in ServiceRegion:
                raise ValidationError(ValidationMessage.InvalidRegion.format(region))
@classmethod
def validate_endpoint(cls, parameter_pool, source):
if parameter_pool.has(ParameterName.ServiceEndpoint):
name = parameter_pool.get_value(ParameterName.ServiceEndpoint)
cls._validate_string(name, ParameterName.ServiceEndpoint)
class DefaultParameterValue(object):
@classmethod
def fill_default(cls, parameter_pool):
cls.fill_version_name(parameter_pool)
cls.fill_option_setting_file_name(parameter_pool)
cls.fill_connection_timeout(parameter_pool)
cls.fill_wait_timeout(parameter_pool)
cls.fill_update_timeout(parameter_pool)
cls.fill_poll_delay(parameter_pool)
@classmethod
def fill_version_name(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.ApplicationVersionName,
ServiceDefault.DEFAULT_VERSION_NAME,
ParameterSource.Default
))
@classmethod
def fill_option_setting_file_name(cls, parameter_pool):
path = os.path.join(EbLocalDir.Path, OptionSettingFile.Name)
parameter_pool.put(Parameter(ParameterName.OptionSettingFile,
path,
ParameterSource.Default
))
@classmethod
def fill_connection_timeout(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.ServiceConnectionTimeout,
ServiceDefault.CONNECTION_TIMEOUT_IN_SEC,
ParameterSource.Default
))
@classmethod
def fill_wait_timeout(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.WaitForFinishTimeout,
ServiceDefault.WAIT_TIMEOUT_IN_SEC,
ParameterSource.Default
))
@classmethod
def fill_update_timeout(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.WaitForUpdateTimeout,
ServiceDefault.UPDATE_TIMEOUT_IN_SEC,
ParameterSource.Default
))
@classmethod
def fill_poll_delay(cls, parameter_pool):
parameter_pool.put(Parameter(ParameterName.PollDelay,
ServiceDefault.POLL_DELAY_IN_SEC,
ParameterSource.Default
))
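# A minimal usage sketch, not part of the original module. It assumes
# ParameterSource exposes a Default member plus at least one higher-priority
# source (CliArgument here is a hypothetical name) and that
# ParameterSource.is_ahead(a, b) is True when source `a` outranks `b`:
#
#   pool = ParameterPool()
#   DefaultParameterValue.fill_default(pool)
#   pool.put(Parameter(ParameterName.Region, 'us-east-1', ParameterSource.Default))
#   pool.put(Parameter(ParameterName.Region, 'eu-west-1', ParameterSource.CliArgument))
#   pool.get_value(ParameterName.Region)  # 'eu-west-1' -- higher priority wins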
|
{
"content_hash": "f43ec63869a4eb10fb1848f861fc1127",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 87,
"avg_line_length": 36.27472527472528,
"alnum_prop": 0.5874987377562355,
"repo_name": "JoaoVasques/aws-devtool",
"id": "25d942784748313cf201395412176225a7db64ef",
"size": "10635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eb/linux/python3/scli/parameter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "31565"
},
{
"name": "Python",
"bytes": "6266200"
},
{
"name": "Ruby",
"bytes": "159495"
},
{
"name": "Shell",
"bytes": "1895"
}
],
"symlink_target": ""
}
|
from google.cloud import webrisk_v1
def sample_search_hashes():
# Create a client
client = webrisk_v1.WebRiskServiceClient()
# Initialize request argument(s)
request = webrisk_v1.SearchHashesRequest(
threat_types="SOCIAL_ENGINEERING_EXTENDED_COVERAGE",
)
# Make the request
response = client.search_hashes(request=request)
# Handle the response
print(response)
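    # Hedged variant, not part of the generated sample: SearchHashesRequest
    # also accepts a hash_prefix field (the first 4-32 bytes of the SHA-256
    # digest of a canonicalized URL), which is what a real lookup would send:
    #
    #   import hashlib
    #   digest = hashlib.sha256(b"http://example.com/").digest()
    #   request = webrisk_v1.SearchHashesRequest(
    #       hash_prefix=digest[:4],
    #       threat_types=["MALWARE"],
    #   )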
# [END webrisk_v1_generated_WebRiskService_SearchHashes_sync]
|
{
"content_hash": "7d9b93dddafd2589b016e4a423310d6c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 24.894736842105264,
"alnum_prop": 0.7167019027484144,
"repo_name": "googleapis/python-webrisk",
"id": "bcbf772c9043558c299a7e267bced7f2c610061a",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/webrisk_v1_generated_web_risk_service_search_hashes_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "433503"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_vdom_radius_server
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_vdom_radius_server.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_vdom_radius_server_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_vdom_radius_server': {
'name': 'default_name_3',
'radius_server_vdom': 'test_value_4',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_vdom_radius_server.fortios_system(input_data, fos_instance)
expected_data = {
'name': 'default_name_3',
'radius-server-vdom': 'test_value_4',
'status': 'enable'
}
set_method_mock.assert_called_with('system', 'vdom-radius-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_vdom_radius_server_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_vdom_radius_server': {
'name': 'default_name_3',
'radius_server_vdom': 'test_value_4',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_vdom_radius_server.fortios_system(input_data, fos_instance)
expected_data = {
'name': 'default_name_3',
'radius-server-vdom': 'test_value_4',
'status': 'enable'
}
set_method_mock.assert_called_with('system', 'vdom-radius-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_vdom_radius_server_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_vdom_radius_server': {
'name': 'default_name_3',
'radius_server_vdom': 'test_value_4',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_vdom_radius_server.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'vdom-radius-server', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_vdom_radius_server_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_vdom_radius_server': {
'name': 'default_name_3',
'radius_server_vdom': 'test_value_4',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_vdom_radius_server.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'vdom-radius-server', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_vdom_radius_server_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_vdom_radius_server': {
'name': 'default_name_3',
'radius_server_vdom': 'test_value_4',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_vdom_radius_server.fortios_system(input_data, fos_instance)
expected_data = {
'name': 'default_name_3',
'radius-server-vdom': 'test_value_4',
'status': 'enable'
}
set_method_mock.assert_called_with('system', 'vdom-radius-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_vdom_radius_server_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_vdom_radius_server': {
'random_attribute_not_valid': 'tag',
'name': 'default_name_3',
'radius_server_vdom': 'test_value_4',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_vdom_radius_server.fortios_system(input_data, fos_instance)
expected_data = {
'name': 'default_name_3',
'radius-server-vdom': 'test_value_4',
'status': 'enable'
}
set_method_mock.assert_called_with('system', 'vdom-radius-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
{
"content_hash": "744cf588a9ecf4a7b81f803ce2fcac71",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 142,
"avg_line_length": 37.5615763546798,
"alnum_prop": 0.648,
"repo_name": "thaim/ansible",
"id": "ea96bca2947a110a89203afa4998c33b18a0f0b1",
"size": "8321",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_system_vdom_radius_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for MachineLearningServicesMgmtClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(MachineLearningServicesMgmtClientConfiguration, self).__init__(**kwargs)
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", "2022-10-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-machinelearningservices/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
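# Hedged usage sketch, not part of the generated file; it assumes the
# azure-identity package is installed alongside this SDK:
#
#   from azure.identity.aio import DefaultAzureCredential
#   config = MachineLearningServicesMgmtClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )
#   config.api_version  # "2022-10-01"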
|
{
"content_hash": "b70e7df11a5ad7de822c76e2703709b3",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 116,
"avg_line_length": 52.5625,
"alnum_prop": 0.724435196195006,
"repo_name": "Azure/azure-sdk-for-python",
"id": "88eada53ff687700a98af8b5449fdf5b2f4b082d",
"size": "3832",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0100_usermessage_remove_is_me_message'),
]
operations = [
migrations.CreateModel(
name='MutedTopic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic_name', models.CharField(max_length=60)),
('recipient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zerver.Recipient')),
('stream', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zerver.Stream')),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='mutedtopic',
unique_together={('user_profile', 'stream', 'topic_name')},
),
]
|
{
"content_hash": "f0ea9f372810487928a7557bfeac9308",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 126,
"avg_line_length": 39.25,
"alnum_prop": 0.6096451319381255,
"repo_name": "shubhamdhama/zulip",
"id": "bdb956993ed3a2bb0c6a7b830cfc847f427d0f09",
"size": "1148",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0101_muted_topic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400387"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "721395"
},
{
"name": "JavaScript",
"bytes": "3095896"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71124"
},
{
"name": "Python",
"bytes": "6896725"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
from handsome.MicropolygonMesh import MicropolygonMesh, Vertex, Position
from handsome.Pixel import FloatPixel, array_view, pixel_view
from handsome.Tile import Tile
from handsome.TileCache import TileCache
from handsome.util import point, render_mesh
from handsome.capi import generate_numpy_begin, generate_numpy_span
import numpy as np
import os
def main():
from handsome.util import save_array_as_image
canvas = make_canvas({
'extents' : (512, 512),
'color' : '#fff',
})
surface = generate_surface()
shader = generate_texture_shader()
mesh = generate_mesh(surface, shader)
cache = render_mesh(mesh)
cache.composite_into(canvas)
buffer = array_view(canvas.downsample(1))
buffer = np.clip(buffer, 0., 1.)
buffer = (255 * buffer).astype(np.uint8)
save_array_as_image(pixel_view(buffer), 'render/006_texture.tiff', 'RGBA')
def generate_surface():
lower_left = point(0, 0, 1, 1,)
# up = 128 * np.array([0, 1, 0, 0], dtype=np.float32)
# right = 128 * np.array([1, 0, 0, 0], dtype=np.float32)
# up = np.array([256, 512, 0, 0], dtype=np.float32)
# right = np.array([256, 0, 0, 0], dtype=np.float32)
up = np.array([256, 512, 0, 0], dtype=np.float32)
right = np.array([256, 0, 0, 0], dtype=np.float32)
def surface(u, v):
return lower_left + u * right + v * up
return surface
def generate_texture_shader():
dir_path, base_path = os.path.split(__file__)
texture_path = os.path.abspath(os.path.join(dir_path, 'render', 'texture.tiff'))
# texture_path = os.path.abspath(os.path.join(dir_path, 'render', 'texture_black.tif'))
texture = read_texture(texture_path)
c_sample_texture = load_sampler_lib()['sample_texture']
texture_start, texture_end = generate_numpy_span(texture)
rows, columns = texture.shape
def shader(u, v):
nonlocal texture # forces closure so texture isn't garbage collected
return c_sample_texture(texture_start, texture_end, columns, rows, u, v)
# return sample_texture(texture, u, v)
return shader
def generate_mesh(surface, shader):
# shape = (128, 128)
shape = (256, 256)
mesh = MicropolygonMesh(shape)
shape = mesh.buffer.shape
u_steps = np.linspace(0, 1, shape[1], endpoint=True)
v_steps = np.linspace(0, 1, shape[0], endpoint=True)
points = [ [ surface(u, v) for u in u_steps ] for v in v_steps ]
mesh.buffer[:,:]['position'] = points
colors = [ [ shader(u, v) for u in u_steps ] for v in v_steps ]
mesh.buffer[:,:]['color'] = colors
return mesh
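    # NOTE: everything below this return is unreachable; it appears to be an
    # alternative C-accelerated sampling path kept here for reference.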
sample_texture_to_mesh = load_sampler_lib()['sample_texture_to_mesh']
mesh_start = generate_numpy_begin(mesh.buffer)
mesh_width, mesh_height = mesh.buffer.shape
dir_path, base_path = os.path.split(__file__)
texture_path = os.path.abspath(os.path.join(dir_path, 'render', 'texture.tiff'))
# texture_path = os.path.abspath(os.path.join(dir_path, 'render', 'texture_black.tif'))
texture = read_texture(texture_path)
texture_start = generate_numpy_begin(texture)
texture_width, texture_height = texture.shape
sample_texture_to_mesh(
mesh_start, mesh_width, mesh_height,
texture_start, texture_width, texture_height,
)
return mesh
def read_texture(path):
from PIL import Image
image = Image.open(path)
if image.mode != 'RGBA':
image = image.convert('RGBA')
out = np.array(image).astype(np.float32) / 255.
out = np.squeeze(out.view(FloatPixel))
return out
black = pixel_view(np.array([ 0., 0., 0., 0. ], dtype=np.float32))
def sample_texture(texture, s, t):
from math import floor
t = 1. - t
if not (0. <= s < 1.):
return black
if not (0. <= t < 1.):
return black
shape = texture.shape
s = s * (shape[0] - 1)
s_index = floor(s)
s_frac = s - s_index
s_index = int(s_index)
t = t * (shape[1] - 1)
t_index = floor(t)
t_frac = t - t_index
t_index = int(t_index)
texture = array_view(texture)
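    # Bilinear blend: fetch the four neighbouring texels and weight them by
    # the fractional offsets, so (s_frac, t_frac) == (0, 0) returns the
    # top-left texel exactly.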
    top_left = texture[t_index, s_index, :]
    top_right = texture[t_index, s_index + 1, :]
    bottom_left = texture[t_index + 1, s_index, :]
    bottom_right = texture[t_index + 1, s_index + 1, :]
    u = s_frac
    up = 1 - s_frac
    v = t_frac
    vp = 1 - t_frac
    out = up * vp * top_left
    out += u * vp * top_right
    out += up * v * bottom_left
    out += u * v * bottom_right
    return out
def make_canvas(canvas, sample_rate=4):
from sweatervest.util import parse_color
extents = canvas['extents']
color = canvas.get('color', None)
if color is None:
color = np.array([1, 1, 1, 1], dtype=np.float32).view(dtype=FloatPixel)
elif isinstance(color, str):
color = [ c / 255. for c in parse_color(color) ]
color = np.array(color, dtype=np.float32).view(dtype=FloatPixel)
elif isinstance(color, (tuple, list)):
color = [ c / 255. if isinstance(c, int) else c for c in color ]
color = np.array(color, dtype=np.float32).view(dtype=FloatPixel)
out = Tile((0, 0), extents, sample_rate, dtype=FloatPixel)
out.buffer[:,:] = color
return out
def import_from_dll(name, path):
from importlib.machinery import ExtensionFileLoader
return ExtensionFileLoader(name, path).load_module()
def load_sampler_lib():
import ctypes
from ctypes import c_void_p, c_int, c_float
so_path = build_so('__main__.006_shader', [ '006/shader.cpp' ])
lib = ctypes.cdll.LoadLibrary(so_path)
class Sample(ctypes.Structure):
_fields_ = [
('R', ctypes.c_float),
('G', ctypes.c_float),
('B', ctypes.c_float),
('A', ctypes.c_float),
]
out = { }
out['sample_texture'] = lib['sample_texture']
out['sample_texture'].argtypes = (c_void_p, c_void_p, c_int, c_int, c_float, c_float)
out['sample_texture'].restype = Sample
out['sample_texture_to_mesh'] = lib['sample_texture_to_mesh']
out['sample_texture_to_mesh'].argtypes = (c_void_p, c_int, c_int, c_void_p, c_int, c_int)
return out
def find_handsome_include_dir():
import handsome
handsome_dir, _ = os.path.split(handsome.__file__)
cpp_dir = os.path.join(handsome_dir, '..', 'src', 'cpp')
cpp_dir = os.path.abspath(cpp_dir)
return cpp_dir
def build_so(module_name, sources, setup_args=None):
from distutils.dist import Distribution
from distutils.errors import DistutilsArgError
from distutils.extension import Extension
from shutil import copy2
setup_args = generate_setup_args(setup_args)
dist = Distribution(setup_args)
ext = Extension(
name = module_name,
sources = sources,
include_dirs = [ find_handsome_include_dir() ],
extra_compile_args = [ '-std=c++11' ],
)
if dist.ext_modules is None:
dist.ext_modules = [ ext ]
else:
dist.ext_modules.append(ext)
target_dir, _ = os.path.split(os.path.abspath(__file__))
build = dist.get_command_obj('build')
build.build_base = os.path.join(target_dir, 'build')
cfgfiles = dist.find_config_files()
dist.parse_config_files(cfgfiles)
    ok = dist.parse_command_line()
if not ok:
raise RuntimeError('Build cannot continue')
command = dist.get_command_obj("build_ext")
dist.run_commands()
so_path = os.path.abspath(command.get_outputs()[0])
_, so_name = os.path.split(so_path)
target_path = os.path.join(target_dir, so_name)
if os.path.isfile(target_path):
os.unlink(target_path)
copy2(so_path, target_path)
return target_path
def generate_setup_args(setup_args=None):
setup_args = { } if setup_args is None else dict(setup_args)
script_args = setup_args.get('script_args')
if script_args is None:
script_args = [ ]
args = [ "--quiet", "build_ext" ]
setup_args['script_name'] = None
setup_args['script_args'] = args + script_args
return setup_args
if __name__ == '__main__':
main()
|
{
"content_hash": "c76a37c0540d58955f3d117a07f2bf9e",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 93,
"avg_line_length": 26.80655737704918,
"alnum_prop": 0.6175391389432485,
"repo_name": "bracket/handsome",
"id": "8e33b04dc90afff8aa468d51ef90fc7bb85d42e8",
"size": "8176",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/006_texture.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "7291"
},
{
"name": "C++",
"bytes": "74603"
},
{
"name": "Python",
"bytes": "64252"
}
],
"symlink_target": ""
}
|
sizeof_workid = 8
import base58 as _base58
from binascii import b2a_hex as _b2a_hex
from hashlib import sha256 as _sha256
from struct import pack as _pack
from time import time as _time
from blktemplate import _Transaction, request as _request
MAX_BLOCK_VERSION = 4
coinbase_size_limit = 100
def _dblsha256(data):
return _sha256(_sha256(data).digest()).digest()
def init_generation3(tmpl, script, override_cb=False):
if (not tmpl.cbtxn is None) and not (override_cb and ('generate' in tmpl.mutations)):
return (0, False)
if len(script) >= 0xfd:
return (0, True)
sh = b''
h = tmpl.height
while h > 127:
sh += _pack('<B', h & 0xff)
h >>= 8
sh += _pack('<B', h)
sh = _pack('<B', len(sh)) + sh
if getattr(tmpl, 'auxs', None):
auxcat = b''
for aux in tmpl.auxs.values():
auxcat += aux
if len(auxcat):
sh += _pack('<B', len(auxcat)) + auxcat
if len(sh) > coinbase_size_limit:
return (0, True)
data = b''
data += b"\x01\0\0\0" # txn ver
data += b"\x01" # input count
data += b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" # prevout
data += b"\xff\xff\xff\xff" # index (-1)
data += _pack('<B', len(sh)) # scriptSig length
data += sh
data += b"\xff\xff\xff\xff" # sequence
data += b"\x01" # output count
data += _pack('<Q', tmpl.cbvalue)
data += _pack('<B', len(script))
data += script
data += b'\0\0\0\0' # lock time
if tmpl.txns_datasz + len(data) > tmpl.sizelimit:
return (0, True)
txn = _Transaction(None)
txn.data = data
tmpl.cbtxn = txn
tmpl.mutations.add('coinbase/append')
tmpl.mutations.add('coinbase')
tmpl.mutations.add('generate')
return (tmpl.cbvalue, True)
init_generation2 = init_generation3
def init_generation(tmpl, script, override_cb=False):
return init_generation2(tmpl, script, override_cb)[0]
def _hash_transactions(tmpl):
for txn in tmpl.txns:
if hasattr(txn, 'hash_'):
continue
txn.hash_ = _dblsha256(txn.data)
return True
def _build_merkle_branches(tmpl):
if hasattr(tmpl, '_mrklbranch'):
return True
if not _hash_transactions(tmpl):
return False
branchcount = len(tmpl.txns).bit_length()
branches = []
merklehashes = [None] + [txn.hash_ for txn in tmpl.txns]
while len(branches) < branchcount:
branches.append(merklehashes[1])
if len(merklehashes) % 2:
merklehashes.append(merklehashes[-1])
merklehashes = [None] + [_dblsha256(merklehashes[i] + merklehashes[i + 1]) for i in range(2, len(merklehashes), 2)]
tmpl._mrklbranch = branches
return True
def _build_merkle_root(tmpl, coinbase):
if not _build_merkle_branches(tmpl):
return None
lhs = _dblsha256(coinbase)
for rhs in tmpl._mrklbranch:
lhs = _dblsha256(lhs + rhs)
return lhs
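# Illustrative walk-through (not part of the module): with transactions
# t1, t2, t3 and h = _dblsha256, _build_merkle_branches stores
# [h(t1), h(h(t2) + h(t3))], so for a coinbase cb the root computed above is
# h(h(h(cb) + h(t1)) + h(h(t2) + h(t3))) -- the coinbase can be varied
# without rehashing any other branch.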
_cbScriptSigLen = 4 + 1 + 36
def _append_cb(tmpl, append, appended_at_offset = None):
coinbase = tmpl.cbtxn.data
# The following can be done better in both Python 2 and Python 3, but this way works with both
origLen = ord(coinbase[_cbScriptSigLen:_cbScriptSigLen+1])
appendsz = len(append)
if origLen > coinbase_size_limit - appendsz:
return None
if len(tmpl.cbtxn.data) + tmpl.txns_datasz + appendsz > tmpl.sizelimit:
return None
cbExtraNonce = _cbScriptSigLen + 1 + origLen
if not appended_at_offset is None:
appended_at_offset[0] = cbExtraNonce
newLen = origLen + appendsz
coinbase = coinbase[:_cbScriptSigLen] + chr(newLen).encode('ascii') + coinbase[_cbScriptSigLen+1:cbExtraNonce] + append + coinbase[cbExtraNonce:]
return coinbase
def append_coinbase_safe2(tmpl, append, extranoncesz = 0, merkle_only = False):
if 'coinbase/append' not in tmpl.mutations and 'coinbase' not in tmpl.mutations:
raise RuntimeError('Coinbase appending not allowed by template')
datasz = len(tmpl.cbtxn.data)
if extranoncesz == sizeof_workid:
extranoncesz += 1
elif not merkle_only:
if extranoncesz < sizeof_workid:
extranoncesz = sizeof_workid
availsz = coinbase_size_limit - extranoncesz - ord(tmpl.cbtxn.data[_cbScriptSigLen:_cbScriptSigLen+1])
current_blocksize = len(tmpl.cbtxn.data) + tmpl.txns_datasz
if current_blocksize > tmpl.sizelimit:
return 0
availsz2 = tmpl.sizelimit - current_blocksize
if availsz2 < availsz:
availsz = availsz2
if len(append) > availsz:
return availsz
    newcb = _append_cb(tmpl, append)
    if newcb is None:
        raise RuntimeError('Append failed')
    tmpl.cbtxn.data = newcb
    return availsz
append_coinbase_safe = append_coinbase_safe2
def _extranonce(tmpl, workid):
coinbase = tmpl.cbtxn.data
if not workid:
return coinbase
extradata = _pack('<Q', workid)
coinbase = _append_cb(tmpl, extradata)
return coinbase
def _set_times(tmpl, usetime = None, out_expire = None, can_roll_ntime = False):
    time_passed = int(usetime - tmpl._time_rcvd)
    timehdr = tmpl.curtime + time_passed
    if timehdr > tmpl.maxtime:
        timehdr = tmpl.maxtime
    if not out_expire is None:
        out_expire[0] = tmpl.expires - time_passed - 1
        if can_roll_ntime:
            # If the caller can roll the time header, we need to expire before reaching the maxtime
            maxtime_expire_limit = (tmpl.maxtime - timehdr) + 1
            if out_expire[0] > maxtime_expire_limit:
                out_expire[0] = maxtime_expire_limit
    # The return must come last, so that out_expire is filled in first.
    return _pack('<I', timehdr)
def _sample_data(tmpl, dataid):
cbuf = _pack('<I', tmpl.version)
cbuf += tmpl.prevblk
cbtxndata = _extranonce(tmpl, dataid)
if not cbtxndata:
return None
merkleroot = _build_merkle_root(tmpl, cbtxndata)
if not merkleroot:
return None
cbuf += merkleroot
cbuf += _pack('<I', tmpl.curtime)
cbuf += tmpl.diffbits
return cbuf
def get_data(tmpl, usetime = None, out_expire = None):
if usetime is None: usetime = _time()
    if (not (time_left(tmpl, usetime) and work_left(tmpl))) or tmpl.cbtxn is None:
        return (None, None)
dataid = tmpl.next_dataid
tmpl.next_dataid += 1
cbuf = _sample_data(tmpl, dataid)
if cbuf is None:
return (None, None)
cbuf = cbuf[:68] + _set_times(tmpl, usetime, out_expire) + cbuf[68+4:]
return (cbuf, dataid)
def get_mdata(tmpl, usetime = None, out_expire = None, extranoncesz = sizeof_workid, can_roll_ntime = True):
if usetime is None: usetime = _time()
if not (True
and time_left(tmpl, usetime)
and (not tmpl.cbtxn is None)
and _build_merkle_branches(tmpl)
):
return None
if extranoncesz == sizeof_workid:
# Avoid overlapping with blkmk_get_data use
extranoncesz += 1
cbuf = _pack('<I', tmpl.version)
cbuf += tmpl.prevblk
dummy = b'\0' * extranoncesz
cbextranonceoffset = [None]
cbtxn = _append_cb(tmpl, dummy, cbextranonceoffset)
if cbtxn is None:
return None
cbuf += b'\0' * 0x20
cbuf += _set_times(tmpl, usetime, out_expire, can_roll_ntime)
cbuf += tmpl.diffbits
return (cbuf, cbtxn, cbextranonceoffset[0], tmpl._mrklbranch)
def time_left(tmpl, nowtime = None):
if nowtime is None: nowtime = _time()
age = (nowtime - tmpl._time_rcvd)
if age >= tmpl.expires:
return 0
return tmpl.expires - age
def work_left(tmpl):
if not tmpl.version:
return 0
if 'coinbase/append' not in tmpl.mutations and 'coinbase' not in tmpl.mutations:
return 1
return 0xffffffffffffffff - tmpl.next_dataid
def _varintEncode(n):
if n < 0xfd:
return _pack('<B', n)
# NOTE: Technically, there are more encodings for numbers bigger than
# 16-bit, but transaction counts can't be that high with version 2 Bitcoin
# blocks
return b'\xfd' + _pack('<H', n)
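# For example: _varintEncode(0xfc) == b'\xfc', while
# _varintEncode(0x1234) == b'\xfd\x34\x12' (0xfd marker + little-endian u16).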
def _assemble_submission2_internal(tmpl, data, extranonce, nonce, foreign):
data = data[:76]
data += _pack('!I', nonce)
if foreign or ('submit/truncate' not in tmpl.mutations or extranonce):
data += _varintEncode(1 + len(tmpl.txns))
# Essentially _extranonce
if extranonce:
data += _append_cb(tmpl, extranonce)
else:
data += tmpl.cbtxn.data
if foreign or ('submit/coinbase' not in tmpl.mutations):
for i in range(len(tmpl.txns)):
data += tmpl.txns[i].data
return _b2a_hex(data).decode('ascii')
def _assemble_submission2(tmpl, data, extranonce, dataid, nonce, foreign):
if dataid:
if extranonce:
raise RuntimeError('Cannot specify both extranonce and dataid')
        extranonce = _pack('<Q', dataid)
elif extranonce and len(extranonce) == sizeof_workid:
# Avoid overlapping with blkmk_get_data use
extranonce += b'\0'
return _assemble_submission2_internal(tmpl, data, extranonce, nonce, foreign)
def propose(tmpl, caps, foreign):
jreq = _request(caps)
jparams = jreq['params'][0]
jparams['mode'] = 'proposal'
if (not getattr(tmpl, 'workid', None) is None) and not foreign:
jparams['workid'] = tmpl.workid
dataid = 0
if 'coinbase/append' in tmpl.mutations or 'coinbase' in tmpl.mutations:
dataid = 1
sdata = _sample_data(tmpl, dataid)
blkhex = _assemble_submission2(tmpl, sdata, None, dataid, 0, foreign)
jparams['data'] = blkhex
return jreq
def _submit(tmpl, data, extranonce, dataid, nonce, foreign):
blkhex = _assemble_submission2(tmpl, data, extranonce, dataid, nonce, foreign)
info = {}
if (not getattr(tmpl, 'workid', None) is None) and not foreign:
info['workid'] = tmpl.workid
return {
'id': 0,
'method': 'submitblock',
'params': [
blkhex,
info
]
}
def submit(tmpl, data, dataid, nonce, foreign=False):
return _submit(tmpl, data, None, dataid, nonce, foreign)
def submit_foreign(tmpl, data, dataid, nonce):
return _submit(tmpl, data, None, dataid, nonce, True)
def submitm(tmpl, data, extranonce, nonce, foreign=False):
return _submit(tmpl, data, extranonce, None, nonce, foreign)
def address_to_script(addr):
    addrbin = _base58.b58decode(addr, 25)
    if addrbin is None:
        raise RuntimeError('Invalid address')
    addrver = _base58.get_bcaddress_version(addr)
    # Base58Check layout is 1 version byte + 20-byte hash160 + 4 checksum
    # bytes; only the hash160 goes into the script.
    hash160 = addrbin[1:-4]
    if addrver == 0 or addrver == 111:
        # Bitcoin pubkey hash or Testnet pubkey hash
        return (b''
            + b'\x76'   # OP_DUP
            + b'\xa9'   # OP_HASH160
            + b'\x14'   # push 20 bytes
            + hash160
            + b'\x88'   # OP_EQUALVERIFY
            + b'\xac')  # OP_CHECKSIG
    if addrver == 5 or addrver == 196:
        # Bitcoin script hash or Testnet script hash
        return (b''
            + b'\xa9'   # OP_HASH160
            + b'\x14'   # push 20 bytes
            + hash160
            + b'\x87')  # OP_EQUAL
    raise RuntimeError('Invalid address version')
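# Rough lifecycle sketch (illustrative only; assumes `tmpl` is a blktemplate
# template already populated from a getblocktemplate response):
#
#   script = address_to_script('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')
#   init_generation(tmpl, script)     # install a generation txn paying `script`
#   data, dataid = get_data(tmpl)     # 76-byte header (nonce appended at submit)
#   # ... search for a winning nonce over `data` ...
#   rpc_req = submit(tmpl, data, dataid, nonce)  # JSON-RPC submitblock request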
|
{
"content_hash": "b338b5417fb580e05c0439b990402093",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 146,
"avg_line_length": 27.736263736263737,
"alnum_prop": 0.6835380348652932,
"repo_name": "luke-jr/python-blkmaker",
"id": "c1324bbf2f0e1f9b07a7a43102d7316fbcd9a39c",
"size": "10285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blkmaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17592"
}
],
"symlink_target": ""
}
|
try:
range = xrange
except NameError:
pass
class MatrixPointer:
def __init__(self, data, size=None, offset=(0,0), transpose=False):
"""Create a matrix pointer.
Keyword arguments:
        data -- array, matrix, or matrix pointer;
size -- tuple (width, height) with size of current image fragment;
default is None, but you can use it only when data is a matrix;
offset -- tuple (x, y) with offset of current image fragment
from the left top element of the matrix; default is (0, 0);
transpose -- True if matrix needs to be transposed and False otherwise;
default is False.
"""
if size is None:
if isinstance(data, MatrixPointer) and transpose:
raise ValueError('You cannot transpose existing MatrixPointer')
elif isinstance(data, MatrixPointer):
size = data.get_size()
elif type(data) is not list or len(data) == 0 \
or type(data[0]) is not list:
raise ValueError('If size is not set, data should be a matrix')
            elif transpose:
                size = (len(data), len(data[0]))
            else:
                size = (len(data[0]), len(data))
if isinstance(data, MatrixPointer):
self.__data = data.get_data(True)
self.__original_size = data.get_size(True)
self.__size = size
self.__offset = data.get_offset(offset)
else:
self.__data = self.__flatten(data)
self.__original_size = size
self.__size = size
self.__offset = offset
if transpose:
self.__transpose()
def __flatten(self, lst):
"""Convert lst to one-dimensional list.
Works with non-lists, lists and matrices.
"""
if type(lst) is not list:
return [lst]
elif type(lst[0]) is not list:
return lst
return sum(lst, [])
def __transpose(self):
"""Transpose the matrix."""
self.__data = [d for i in range(self.__original_size[1])
for d in self.__data[i:len(self.__data):
self.__original_size[1]]]
def get_data(self, source=False):
"""Get list with data, current pointer points to.
Keyword arguments:
source -- whether you need the source data (True),
or cropped fragment (False); default is False.
"""
if source:
return self.__data
if self.__size == (0, 0):
return None
return [e for e in self.get_generator()]
def __get_generator(self):
return (e
for y in range(self.__offset[1], self.__offset[1]+self.__size[1])
for e in self.__data[self.__original_size[0]*y+self.__offset[0]:
self.__original_size[0]*y+self.__offset[0]+self.__size[0]])
    def get_generator(self):
        """Get a generator over the data the current pointer points to."""
        if self.__size == (0, 0):
            # Raising StopIteration from a plain method breaks callers that
            # iterate the result; return an empty iterator instead.
            return iter(())
        if self.__offset == (0, 0) and self.__size == self.__original_size:
            return iter(self.__data)
        return self.__get_generator()
def get_offset(self, initial_offset=(0,0), y=0):
"""Get current pointer offset and add new offset to it.
Useful for the case, when you create new pointer
on the basis of existing one.
Keyword arguments:
        initial_offset -- tuple (x, y) with offset from the current pointer's
            upper left corner, or a horizontal offset alone;
        y -- vertical offset, used when `initial_offset` is a horizontal offset.
"""
if type(initial_offset) is tuple:
return (self.__offset[0]+initial_offset[0],
self.__offset[1]+initial_offset[1])
return (self.__offset[0]+initial_offset,
self.__offset[1]+y)
def get_size(self, original=False):
"""Get width and height of current matrix"""
return self.__original_size if original else self.__size
def split_vertical(self, width):
"""Split the matrix vertically.
Get two pointers:
- to first `width` columns of the matrix;
- to other columns of the matrix.
"""
height = 0 if width == 0 else self.__size[1]
left = MatrixPointer(self, (width, height))
height = 0 if width == self.__size[0] else self.__size[1]
right = MatrixPointer(self, (self.__size[0]-width, height),
(width, 0))
return (left, right)
def split_horizontal(self, height):
"""Split the matrix horizontally.
Get two pointers:
- to first `height` rows of the matrix;
- to other rows of the matrix.
"""
width = 0 if height == 0 else self.__size[0]
top = MatrixPointer(self, (width, height))
width = 0 if height == self.__size[1] else self.__size[0]
bottom = MatrixPointer(self, (width, self.__size[1]-height),
(0, height))
return (top, bottom)
def __sync_generators(self, *matrices):
"""Create synchronized generator based on length of current matrix.
Positional arguments:
matrices -- matrices,
which are needed to be iterated during the operation.
"""
        if len(matrices) == 1:
            g = matrices[0].get_generator()
            for v in self.get_generator():
                yield [v, next(g)]
            return  # PEP 479: raising StopIteration inside a generator is an error
generators = [m.get_generator() for m in matrices]
for v in self.get_generator():
yield [v]+[next(g) for g in generators]
def map(self, f, *matrices):
"""Apply function to matrices and return list with processed values.
Keyword arguments:
f -- function to apply;
Positional arguments:
matrices -- matrices,
which are needed to be iterated during the operation.
"""
return [f(*v) for v in self.__sync_generators(*matrices)]
def reduce(self, f, current_value=None, *matrices):
"""Apply function to matrices using accumulator and return the result.
Keyword arguments:
f -- function to apply;
Positional arguments:
current_value -- initial value,
which will be passed to f in the first operation.
matrices -- matrices,
which are needed to be iterated during the operation.
"""
for v in self.__sync_generators(*matrices):
current_value = (f(current_value, *v))
return current_value
def __getitem__(self, index):
y, x = index
return self.__data[self.__original_size[0] * (self.__offset[1] + y) + \
self.__offset[0] + x]
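# A minimal usage sketch, not part of the module:
#
#   m = MatrixPointer([[1, 2, 3], [4, 5, 6]])
#   left, right = m.split_vertical(1)
#   left.get_data()                # [1, 4]
#   right.get_data()               # [2, 3, 5, 6]
#   m.map(lambda a, b: a + b, m)   # [2, 4, 6, 8, 10, 12]
#   m.reduce(lambda acc, v: (acc or 0) + v)  # 21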
|
{
"content_hash": "b38cc95e94d98b5df92c4bda75ef561c",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 34.389162561576356,
"alnum_prop": 0.5489184930525712,
"repo_name": "char-lie/patterns_recognition",
"id": "670803a3e8a91214565575b7370e2880572f45d7",
"size": "6981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/image/MatrixPointer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63699"
},
{
"name": "Shell",
"bytes": "360"
}
],
"symlink_target": ""
}
|
def convertTabs(code, x):
return code.replace('\t', ' ' * x)
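# For example, convertTabs('a\tb', 4) returns 'a    b'.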
|
{
"content_hash": "f297299436cd4ad88b09e22238c62ad0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 32.5,
"alnum_prop": 0.5846153846153846,
"repo_name": "RevansChen/online-judge",
"id": "825403d237ce3b1c7dd052ca41920ca1a670456d",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codefights/arcade/python-arcade/level-2/14.Convert-Tabs/Python/solution1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
}
|
import base64
import mock
import re
import os
from collections import defaultdict
from typing import Any, Dict, Iterable, List, Optional, Tuple
from django.test import TestCase
from django.http import HttpResponse, HttpRequest
from django.conf import settings
from zerver.forms import OurAuthenticationForm
from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, \
do_reactivate_user, do_reactivate_realm, do_set_realm_property
from zerver.lib.exceptions import JsonableError
from zerver.lib.initial_password import initial_password
from zerver.lib.test_helpers import (
HostRequestMock,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.response import json_response, json_success
from zerver.lib.users import get_api_key
from zerver.lib.user_agent import parse_user_agent
from zerver.lib.request import \
REQ, has_request_variables, RequestVariableMissingError, \
RequestVariableConversionError, RequestConfusingParmsError
from zerver.lib.webhooks.common import UnexpectedWebhookEventType
from zerver.decorator import (
api_key_only_webhook_view,
authenticated_json_view,
authenticated_rest_api_view,
authenticated_uploads_api_view,
authenticate_notify, cachify,
get_client_name, internal_notify_view, is_local_addr,
rate_limit, validate_api_key,
return_success_on_head_request, to_not_negative_int_or_none,
zulip_login_required
)
from zerver.lib.cache import ignore_unhashable_lru_cache, dict_to_items_tuple, items_tuple_to_dict
from zerver.lib.validator import (
check_string, check_dict, check_dict_only, check_bool, check_float, check_int, check_list, Validator,
check_variable_type, equals, check_none_or, check_url, check_short_string,
check_string_fixed_length, check_capped_string, check_color, to_non_negative_int,
check_string_or_int_list, check_string_or_int
)
from zerver.models import \
get_realm, get_user, UserProfile, Realm
import ujson
class DecoratorTestCase(TestCase):
def test_get_client_name(self) -> None:
class Request:
def __init__(self, GET: Dict[str, str], POST: Dict[str, str], META: Dict[str, str]) -> None:
self.GET = GET
self.POST = POST
self.META = META
req = Request(
GET=dict(),
POST=dict(),
META=dict(),
)
self.assertEqual(get_client_name(req, is_browser_view=True), 'website')
self.assertEqual(get_client_name(req, is_browser_view=False), 'Unspecified')
req = Request(
GET=dict(),
POST=dict(),
META=dict(HTTP_USER_AGENT='Mozilla/bla bla bla'),
)
self.assertEqual(get_client_name(req, is_browser_view=True), 'website')
self.assertEqual(get_client_name(req, is_browser_view=False), 'Mozilla')
req = Request(
GET=dict(),
POST=dict(),
META=dict(HTTP_USER_AGENT='ZulipDesktop/bla bla bla'),
)
self.assertEqual(get_client_name(req, is_browser_view=True), 'ZulipDesktop')
self.assertEqual(get_client_name(req, is_browser_view=False), 'ZulipDesktop')
req = Request(
GET=dict(),
POST=dict(),
META=dict(HTTP_USER_AGENT='ZulipMobile/bla bla bla'),
)
self.assertEqual(get_client_name(req, is_browser_view=True), 'ZulipMobile')
self.assertEqual(get_client_name(req, is_browser_view=False), 'ZulipMobile')
req = Request(
GET=dict(client='fancy phone'),
POST=dict(),
META=dict(),
)
self.assertEqual(get_client_name(req, is_browser_view=True), 'fancy phone')
self.assertEqual(get_client_name(req, is_browser_view=False), 'fancy phone')
def test_REQ_aliases(self) -> None:
@has_request_variables
def double(request: HttpRequest,
x: int=REQ(whence='number', aliases=['x', 'n'], converter=int)) -> int:
return x + x
class Request:
GET = {} # type: Dict[str, str]
POST = {} # type: Dict[str, str]
request = Request()
request.POST = dict(bogus='5555')
with self.assertRaises(RequestVariableMissingError):
double(request)
request.POST = dict(number='3')
self.assertEqual(double(request), 6)
request.POST = dict(x='4')
self.assertEqual(double(request), 8)
request.POST = dict(n='5')
self.assertEqual(double(request), 10)
request.POST = dict(number='6', x='7')
with self.assertRaises(RequestConfusingParmsError) as cm:
double(request)
self.assertEqual(str(cm.exception), "Can't decide between 'number' and 'x' arguments")
def test_REQ_converter(self) -> None:
def my_converter(data: str) -> List[int]:
lst = ujson.loads(data)
if not isinstance(lst, list):
raise ValueError('not a list')
if 13 in lst:
raise JsonableError('13 is an unlucky number!')
return [int(elem) for elem in lst]
@has_request_variables
def get_total(request: HttpRequest, numbers: Iterable[int]=REQ(converter=my_converter)) -> int:
return sum(numbers)
class Request:
GET = {} # type: Dict[str, str]
POST = {} # type: Dict[str, str]
request = Request()
with self.assertRaises(RequestVariableMissingError):
get_total(request)
request.POST['numbers'] = 'bad_value'
with self.assertRaises(RequestVariableConversionError) as cm:
get_total(request)
self.assertEqual(str(cm.exception), "Bad value for 'numbers': bad_value")
request.POST['numbers'] = ujson.dumps('{fun: unfun}')
with self.assertRaises(JsonableError) as cm:
get_total(request)
self.assertEqual(str(cm.exception), 'Bad value for \'numbers\': "{fun: unfun}"')
request.POST['numbers'] = ujson.dumps([2, 3, 5, 8, 13, 21])
with self.assertRaises(JsonableError) as cm:
get_total(request)
self.assertEqual(str(cm.exception), "13 is an unlucky number!")
request.POST['numbers'] = ujson.dumps([1, 2, 3, 4, 5, 6])
result = get_total(request)
self.assertEqual(result, 21)
def test_REQ_converter_and_validator_invalid(self) -> None:
with self.assertRaisesRegex(AssertionError, "converter and validator are mutually exclusive"):
@has_request_variables
def get_total(request: HttpRequest,
numbers: Iterable[int]=REQ(validator=check_list(check_int),
converter=lambda x: [])) -> int:
return sum(numbers) # nocoverage -- isn't intended to be run
def test_REQ_validator(self) -> None:
@has_request_variables
def get_total(request: HttpRequest,
numbers: Iterable[int]=REQ(validator=check_list(check_int))) -> int:
return sum(numbers)
class Request:
GET = {} # type: Dict[str, str]
POST = {} # type: Dict[str, str]
request = Request()
with self.assertRaises(RequestVariableMissingError):
get_total(request)
request.POST['numbers'] = 'bad_value'
with self.assertRaises(JsonableError) as cm:
get_total(request)
self.assertEqual(str(cm.exception), 'Argument "numbers" is not valid JSON.')
request.POST['numbers'] = ujson.dumps([1, 2, "what?", 4, 5, 6])
with self.assertRaises(JsonableError) as cm:
get_total(request)
self.assertEqual(str(cm.exception), 'numbers[2] is not an integer')
request.POST['numbers'] = ujson.dumps([1, 2, 3, 4, 5, 6])
result = get_total(request)
self.assertEqual(result, 21)
def test_REQ_str_validator(self) -> None:
@has_request_variables
def get_middle_characters(request: HttpRequest,
value: str=REQ(str_validator=check_string_fixed_length(5))) -> str:
return value[1:-1]
class Request:
GET = {} # type: Dict[str, str]
POST = {} # type: Dict[str, str]
request = Request()
with self.assertRaises(RequestVariableMissingError):
get_middle_characters(request)
request.POST['value'] = 'long_value'
with self.assertRaises(JsonableError) as cm:
get_middle_characters(request)
self.assertEqual(str(cm.exception), 'value has incorrect length 10; should be 5')
request.POST['value'] = 'valid'
result = get_middle_characters(request)
self.assertEqual(result, 'ali')
def test_REQ_argument_type(self) -> None:
@has_request_variables
def get_payload(request: HttpRequest,
payload: Dict[str, Any]=REQ(argument_type='body')) -> Dict[str, Any]:
return payload
class MockRequest:
body = {} # type: Any
request = MockRequest()
request.body = 'notjson'
with self.assertRaises(JsonableError) as cm:
get_payload(request)
self.assertEqual(str(cm.exception), 'Malformed JSON')
request.body = '{"a": "b"}'
self.assertEqual(get_payload(request), {'a': 'b'})
# Test we properly handle an invalid argument_type.
with self.assertRaises(Exception) as cm:
@has_request_variables
def test(request: HttpRequest,
payload: Any=REQ(argument_type="invalid")) -> None:
# Any is ok; exception should occur in decorator:
pass # nocoverage # this function isn't meant to be called
test(request)
def test_api_key_only_webhook_view(self) -> None:
@api_key_only_webhook_view('ClientName')
def my_webhook(request: HttpRequest, user_profile: UserProfile) -> str:
return user_profile.email
@api_key_only_webhook_view('ClientName')
def my_webhook_raises_exception(request: HttpRequest, user_profile: UserProfile) -> None:
raise Exception("raised by webhook function")
@api_key_only_webhook_view('ClientName')
def my_webhook_raises_exception_unexpected_event(
request: HttpRequest, user_profile: UserProfile) -> None:
raise UnexpectedWebhookEventType("helloworld", "test_event")
webhook_bot_email = 'webhook-bot@zulip.com'
webhook_bot_realm = get_realm('zulip')
webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
webhook_bot_api_key = get_api_key(webhook_bot)
webhook_client_name = "ZulipClientNameWebhook"
request = HostRequestMock()
request.POST['api_key'] = 'not_existing_api_key'
with self.assertRaisesRegex(JsonableError, "Invalid API key"):
my_webhook(request) # type: ignore # mypy doesn't seem to apply the decorator
# Start a valid request here
request.POST['api_key'] = webhook_bot_api_key
with mock.patch('logging.warning') as mock_warning:
with self.assertRaisesRegex(JsonableError,
"Account is not associated with this subdomain"):
api_result = my_webhook(request) # type: ignore # mypy doesn't seem to apply the decorator
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(webhook_bot_email, 'zulip', ''))
with mock.patch('logging.warning') as mock_warning:
with self.assertRaisesRegex(JsonableError,
"Account is not associated with this subdomain"):
request.host = "acme." + settings.EXTERNAL_HOST
api_result = my_webhook(request) # type: ignore # mypy doesn't seem to apply the decorator
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(webhook_bot_email, 'zulip', 'acme'))
request.host = "zulip.testserver"
# Test when content_type is application/json and request.body
# is valid JSON; exception raised in the webhook function
# should be re-raised
with mock.patch('zerver.decorator.webhook_logger.exception') as mock_exception:
with self.assertRaisesRegex(Exception, "raised by webhook function"):
request.body = "{}"
request.content_type = 'application/json'
my_webhook_raises_exception(request) # type: ignore # mypy doesn't seem to apply the decorator
# Test when content_type is not application/json; exception raised
# in the webhook function should be re-raised
with mock.patch('zerver.decorator.webhook_logger.exception') as mock_exception:
with self.assertRaisesRegex(Exception, "raised by webhook function"):
request.body = "notjson"
request.content_type = 'text/plain'
my_webhook_raises_exception(request) # type: ignore # mypy doesn't seem to apply the decorator
# Test when content_type is application/json but request.body
# is not valid JSON; invalid JSON should be logged and the
# exception raised in the webhook function should be re-raised
with mock.patch('zerver.decorator.webhook_logger.exception') as mock_exception:
with self.assertRaisesRegex(Exception, "raised by webhook function"):
request.body = "invalidjson"
request.content_type = 'application/json'
request.META['HTTP_X_CUSTOM_HEADER'] = 'custom_value'
my_webhook_raises_exception(request) # type: ignore # mypy doesn't seem to apply the decorator
message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
custom_http_headers:
{custom_headers}
body:
{body}
"""
message = message.strip(' ')
mock_exception.assert_called_with(message.format(
email=webhook_bot_email,
realm=webhook_bot_realm.string_id,
client_name=webhook_client_name,
path_info=request.META.get('PATH_INFO'),
content_type=request.content_type,
custom_headers="HTTP_X_CUSTOM_HEADER: custom_value\n",
body=request.body,
))
# Test when an unexpected webhook event occurs
with mock.patch('zerver.decorator.webhook_unexpected_events_logger.exception') as mock_exception:
exception_msg = "The 'test_event' event isn't currently supported by the helloworld webhook"
with self.assertRaisesRegex(UnexpectedWebhookEventType, exception_msg):
request.body = "invalidjson"
request.content_type = 'application/json'
request.META['HTTP_X_CUSTOM_HEADER'] = 'custom_value'
my_webhook_raises_exception_unexpected_event(request) # type: ignore # mypy doesn't seem to apply the decorator
message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
custom_http_headers:
{custom_headers}
body:
{body}
"""
message = message.strip(' ')
mock_exception.assert_called_with(message.format(
email=webhook_bot_email,
realm=webhook_bot_realm.string_id,
client_name=webhook_client_name,
path_info=request.META.get('PATH_INFO'),
content_type=request.content_type,
custom_headers="HTTP_X_CUSTOM_HEADER: custom_value\n",
body=request.body,
))
with self.settings(RATE_LIMITING=True):
with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
api_result = my_webhook(request) # type: ignore # mypy doesn't seem to apply the decorator
# Verify rate limiting was attempted.
self.assertTrue(rate_limit_mock.called)
# Verify decorator set the magic _email field used by some of our back end logging.
self.assertEqual(request._email, webhook_bot_email)
        # Verify the main purpose of the decorator, which is that it passed in the
        # user_profile to my_webhook, allowing it to return the correct
        # email for the bot (despite the API caller only knowing the API key).
self.assertEqual(api_result, webhook_bot_email)
# Now deactivate the user
webhook_bot.is_active = False
webhook_bot.save()
with self.assertRaisesRegex(JsonableError, "Account is deactivated"):
my_webhook(request) # type: ignore # mypy doesn't seem to apply the decorator
        # Reactivate the user, but deactivate their realm.
webhook_bot.is_active = True
webhook_bot.save()
webhook_bot.realm.deactivated = True
webhook_bot.realm.save()
with self.assertRaisesRegex(JsonableError, "This organization has been deactivated"):
my_webhook(request) # type: ignore # mypy doesn't seem to apply the decorator
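
# A minimal sketch (added for illustration, not part of the original suite) of
# the pattern the test above exercises: api_key_only_webhook_view authenticates
# the bot via the api_key request parameter and passes the resulting
# UserProfile into the wrapped view. The integration name and body here are
# hypothetical.
@api_key_only_webhook_view('ExampleIntegration')
def _example_incoming_webhook(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    # A real integration would parse request.body here and send a Zulip
    # message on behalf of the bot; we just acknowledge the request.
    return json_success()
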
class SkipRateLimitingTest(ZulipTestCase):
def test_authenticated_rest_api_view(self) -> None:
@authenticated_rest_api_view(skip_rate_limiting=False)
def my_rate_limited_view(request: HttpRequest, user_profile: UserProfile) -> str:
return json_success() # nocoverage # mock prevents this from being called
@authenticated_rest_api_view(skip_rate_limiting=True)
def my_unlimited_view(request: HttpRequest, user_profile: UserProfile) -> str:
return json_success()
request = HostRequestMock(host="zulip.testserver")
request.META['HTTP_AUTHORIZATION'] = self.encode_credentials(self.example_email("hamlet"))
request.method = 'POST'
with mock.patch('zerver.decorator.rate_limit') as rate_limit_mock:
result = my_unlimited_view(request) # type: ignore # mypy doesn't seem to apply the decorator
self.assert_json_success(result)
self.assertFalse(rate_limit_mock.called)
with mock.patch('zerver.decorator.rate_limit') as rate_limit_mock:
result = my_rate_limited_view(request) # type: ignore # mypy doesn't seem to apply the decorator
# Don't assert json_success, since it'll be the rate_limit mock object
self.assertTrue(rate_limit_mock.called)
def test_authenticated_uploads_api_view(self) -> None:
@authenticated_uploads_api_view(skip_rate_limiting=False)
def my_rate_limited_view(request: HttpRequest, user_profile: UserProfile) -> str:
return json_success() # nocoverage # mock prevents this from being called
@authenticated_uploads_api_view(skip_rate_limiting=True)
def my_unlimited_view(request: HttpRequest, user_profile: UserProfile) -> str:
return json_success()
request = HostRequestMock(host="zulip.testserver")
request.method = 'POST'
request.POST['api_key'] = get_api_key(self.example_user("hamlet"))
with mock.patch('zerver.decorator.rate_limit') as rate_limit_mock:
result = my_unlimited_view(request) # type: ignore # mypy doesn't seem to apply the decorator
self.assert_json_success(result)
self.assertFalse(rate_limit_mock.called)
with mock.patch('zerver.decorator.rate_limit') as rate_limit_mock:
result = my_rate_limited_view(request) # type: ignore # mypy doesn't seem to apply the decorator
# Don't assert json_success, since it'll be the rate_limit mock object
self.assertTrue(rate_limit_mock.called)
def test_authenticated_json_view(self) -> None:
def my_view(request: HttpRequest, user_profile: UserProfile) -> str:
return json_success()
my_rate_limited_view = authenticated_json_view(my_view, skip_rate_limiting=False)
my_unlimited_view = authenticated_json_view(my_view, skip_rate_limiting=True)
request = HostRequestMock(host="zulip.testserver")
request.method = 'POST'
request.is_authenticated = True # type: ignore # HostRequestMock doesn't have is_authenticated
request.user = self.example_user("hamlet")
with mock.patch('zerver.decorator.rate_limit') as rate_limit_mock:
result = my_unlimited_view(request) # type: ignore # mypy doesn't seem to apply the decorator
self.assert_json_success(result)
self.assertFalse(rate_limit_mock.called)
with mock.patch('zerver.decorator.rate_limit') as rate_limit_mock:
result = my_rate_limited_view(request) # type: ignore # mypy doesn't seem to apply the decorator
# Don't assert json_success, since it'll be the rate_limit mock object
self.assertTrue(rate_limit_mock.called)
class DecoratorLoggingTestCase(ZulipTestCase):
def test_authenticated_rest_api_view_logging(self) -> None:
@authenticated_rest_api_view(webhook_client_name="ClientName")
def my_webhook_raises_exception(request: HttpRequest, user_profile: UserProfile) -> None:
raise Exception("raised by webhook function")
webhook_bot_email = 'webhook-bot@zulip.com'
webhook_bot_realm = get_realm('zulip')
request = HostRequestMock()
request.META['HTTP_AUTHORIZATION'] = self.encode_credentials(webhook_bot_email)
request.method = 'POST'
request.host = "zulip.testserver"
request.body = '{}'
request.POST['payload'] = '{}'
request.content_type = 'text/plain'
with mock.patch('zerver.decorator.webhook_logger.exception') as mock_exception:
with self.assertRaisesRegex(Exception, "raised by webhook function"):
my_webhook_raises_exception(request) # type: ignore # mypy doesn't seem to apply the decorator
message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
custom_http_headers:
{custom_headers}
body:
{body}
"""
message = message.strip(' ')
mock_exception.assert_called_with(message.format(
email=webhook_bot_email,
realm=webhook_bot_realm.string_id,
client_name='ZulipClientNameWebhook',
path_info=request.META.get('PATH_INFO'),
content_type=request.content_type,
custom_headers=None,
body=request.body,
))
def test_authenticated_rest_api_view_logging_unexpected_event(self) -> None:
@authenticated_rest_api_view(webhook_client_name="ClientName")
def my_webhook_raises_exception(request: HttpRequest, user_profile: UserProfile) -> None:
raise UnexpectedWebhookEventType("helloworld", "test_event")
webhook_bot_email = 'webhook-bot@zulip.com'
webhook_bot_realm = get_realm('zulip')
request = HostRequestMock()
request.META['HTTP_AUTHORIZATION'] = self.encode_credentials(webhook_bot_email)
request.method = 'POST'
request.host = "zulip.testserver"
request.body = '{}'
request.POST['payload'] = '{}'
request.content_type = 'text/plain'
with mock.patch('zerver.decorator.webhook_unexpected_events_logger.exception') as mock_exception:
exception_msg = "The 'test_event' event isn't currently supported by the helloworld webhook"
with self.assertRaisesRegex(UnexpectedWebhookEventType, exception_msg):
my_webhook_raises_exception(request) # type: ignore # mypy doesn't seem to apply the decorator
message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
custom_http_headers:
{custom_headers}
body:
{body}
"""
message = message.strip(' ')
mock_exception.assert_called_with(message.format(
email=webhook_bot_email,
realm=webhook_bot_realm.string_id,
client_name='ZulipClientNameWebhook',
path_info=request.META.get('PATH_INFO'),
content_type=request.content_type,
custom_headers=None,
body=request.body,
))
def test_authenticated_rest_api_view_with_non_webhook_view(self) -> None:
@authenticated_rest_api_view()
def non_webhook_view_raises_exception(request: HttpRequest, user_profile: UserProfile=None) -> None:
raise Exception("raised by a non-webhook view")
request = HostRequestMock()
request.META['HTTP_AUTHORIZATION'] = self.encode_credentials("aaron@zulip.com")
request.method = 'POST'
request.host = "zulip.testserver"
request.body = '{}'
request.content_type = 'application/json'
with mock.patch('zerver.decorator.webhook_logger.exception') as mock_exception:
with self.assertRaisesRegex(Exception, "raised by a non-webhook view"):
non_webhook_view_raises_exception(request)
self.assertFalse(mock_exception.called)
def test_authenticated_rest_api_view_errors(self) -> None:
user_profile = self.example_user("hamlet")
api_key = get_api_key(user_profile)
credentials = "%s:%s" % (user_profile.email, api_key)
api_auth = 'Digest ' + base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
result = self.client_post('/api/v1/external/zendesk', {},
HTTP_AUTHORIZATION=api_auth)
self.assert_json_error(result, "This endpoint requires HTTP basic authentication.")
api_auth = 'Basic ' + base64.b64encode("foo".encode('utf-8')).decode('utf-8')
result = self.client_post('/api/v1/external/zendesk', {},
HTTP_AUTHORIZATION=api_auth)
self.assert_json_error(result, "Invalid authorization header for basic auth",
status_code=401)
result = self.client_post('/api/v1/external/zendesk', {})
self.assert_json_error(result, "Missing authorization header for basic auth",
status_code=401)
class RateLimitTestCase(TestCase):
def errors_disallowed(self) -> Any:
        # Due to what is probably a hack in rate_limit(),
        # some tests will give a false positive (or succeed
        # for the wrong reason) unless we complain
        # about logging errors. There might be a more elegant
        # way to make logging errors fail than what I'm doing here.
class TestLoggingErrorException(Exception):
pass
return mock.patch('logging.error', side_effect=TestLoggingErrorException)
def test_internal_local_clients_skip_rate_limiting(self) -> None:
class Client:
name = 'internal'
class Request:
client = Client()
META = {'REMOTE_ADDR': '127.0.0.1'}
req = Request()
def f(req: Any) -> str:
return 'some value'
f = rate_limit()(f)
with self.settings(RATE_LIMITING=True):
with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
with self.errors_disallowed():
self.assertEqual(f(req), 'some value')
self.assertFalse(rate_limit_mock.called)
def test_debug_clients_skip_rate_limiting(self) -> None:
class Client:
name = 'internal'
class Request:
client = Client()
META = {'REMOTE_ADDR': '3.3.3.3'}
req = Request()
def f(req: Any) -> str:
return 'some value'
f = rate_limit()(f)
with self.settings(RATE_LIMITING=True):
with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
with self.errors_disallowed():
with self.settings(DEBUG_RATE_LIMITING=True):
self.assertEqual(f(req), 'some value')
self.assertFalse(rate_limit_mock.called)
def test_rate_limit_setting_of_false_bypasses_rate_limiting(self) -> None:
class Client:
name = 'external'
class Request:
client = Client()
META = {'REMOTE_ADDR': '3.3.3.3'}
user = 'stub' # any non-None value here exercises the correct code path
req = Request()
def f(req: Any) -> str:
return 'some value'
f = rate_limit()(f)
with self.settings(RATE_LIMITING=False):
with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
with self.errors_disallowed():
self.assertEqual(f(req), 'some value')
self.assertFalse(rate_limit_mock.called)
def test_rate_limiting_happens_in_normal_case(self) -> None:
class Client:
name = 'external'
class Request:
client = Client()
META = {'REMOTE_ADDR': '3.3.3.3'}
user = 'stub' # any non-None value here exercises the correct code path
req = Request()
def f(req: Any) -> str:
return 'some value'
f = rate_limit()(f)
with self.settings(RATE_LIMITING=True):
with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
with self.errors_disallowed():
self.assertEqual(f(req), 'some value')
self.assertTrue(rate_limit_mock.called)
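
# For reference (illustrative, not from the original file): rate_limit() is a
# decorator factory, so the `f = rate_limit()(f)` calls in the tests above are
# equivalent to decorating the view directly:
#
#     @rate_limit()
#     def my_view(request: HttpRequest) -> str:
#         return 'some value'
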
class ValidatorTestCase(TestCase):
def test_check_string(self) -> None:
x = "hello" # type: Any
self.assertEqual(check_string('x', x), None)
x = 4
self.assertEqual(check_string('x', x), 'x is not a string')
def test_check_string_fixed_length(self) -> None:
x = "hello" # type: Any
self.assertEqual(check_string_fixed_length(5)('x', x), None)
x = 4
self.assertEqual(check_string_fixed_length(5)('x', x), 'x is not a string')
x = "helloz"
self.assertEqual(check_string_fixed_length(5)('x', x), 'x has incorrect length 6; should be 5')
x = "hi"
self.assertEqual(check_string_fixed_length(5)('x', x), 'x has incorrect length 2; should be 5')
def test_check_capped_string(self) -> None:
x = "hello" # type: Any
self.assertEqual(check_capped_string(5)('x', x), None)
x = 4
self.assertEqual(check_capped_string(5)('x', x), 'x is not a string')
x = "helloz"
self.assertEqual(check_capped_string(5)('x', x), 'x is too long (limit: 5 characters)')
x = "hi"
self.assertEqual(check_capped_string(5)('x', x), None)
def test_check_short_string(self) -> None:
x = "hello" # type: Any
self.assertEqual(check_short_string('x', x), None)
x = 'x' * 201
self.assertEqual(check_short_string('x', x), "x is too long (limit: 50 characters)")
x = 4
self.assertEqual(check_short_string('x', x), 'x is not a string')
def test_check_bool(self) -> None:
x = True # type: Any
self.assertEqual(check_bool('x', x), None)
x = 4
self.assertEqual(check_bool('x', x), 'x is not a boolean')
def test_check_int(self) -> None:
x = 5 # type: Any
self.assertEqual(check_int('x', x), None)
x = [{}]
self.assertEqual(check_int('x', x), 'x is not an integer')
def test_to_non_negative_int(self) -> None:
self.assertEqual(to_non_negative_int('5'), 5)
        with self.assertRaisesRegex(ValueError, 'argument is negative'):
            to_non_negative_int('-1')
        with self.assertRaisesRegex(ValueError, re.escape('5 is too large (max 4)')):
            to_non_negative_int('5', max_int_size=4)
        with self.assertRaisesRegex(ValueError, re.escape('%s is too large (max %s)' % (2**32, 2**32-1))):
            to_non_negative_int(str(2**32))
def test_check_to_not_negative_int_or_none(self) -> None:
self.assertEqual(to_not_negative_int_or_none('5'), 5)
self.assertEqual(to_not_negative_int_or_none(None), None)
with self.assertRaises(ValueError):
to_not_negative_int_or_none('-5')
def test_check_float(self) -> None:
x = 5.5 # type: Any
self.assertEqual(check_float('x', x), None)
x = 5
self.assertEqual(check_float('x', x), 'x is not a float')
x = [{}]
self.assertEqual(check_float('x', x), 'x is not a float')
def test_check_color(self) -> None:
x = ['#000099', '#80ffaa', '#80FFAA', '#abcd12', '#ffff00', '#ff0', '#f00'] # valid
y = ['000099', '#80f_aa', '#80fraa', '#abcd1234', 'blue'] # invalid
z = 5 # invalid
for hex_color in x:
error = check_color('color', hex_color)
self.assertEqual(error, None)
for hex_color in y:
error = check_color('color', hex_color)
self.assertEqual(error, 'color is not a valid hex color code')
error = check_color('color', z)
self.assertEqual(error, 'color is not a string')
def test_check_list(self) -> None:
x = 999 # type: Any
error = check_list(check_string)('x', x)
self.assertEqual(error, 'x is not a list')
x = ["hello", 5]
error = check_list(check_string)('x', x)
self.assertEqual(error, 'x[1] is not a string')
x = [["yo"], ["hello", "goodbye", 5]]
error = check_list(check_list(check_string))('x', x)
self.assertEqual(error, 'x[1][2] is not a string')
x = ["hello", "goodbye", "hello again"]
error = check_list(check_string, length=2)('x', x)
self.assertEqual(error, 'x should have exactly 2 items')
def test_check_dict(self) -> None:
keys = [
('names', check_list(check_string)),
('city', check_string),
] # type: List[Tuple[str, Validator]]
x = {
'names': ['alice', 'bob'],
'city': 'Boston',
} # type: Any
error = check_dict(keys)('x', x)
self.assertEqual(error, None)
x = 999
error = check_dict(keys)('x', x)
self.assertEqual(error, 'x is not a dict')
x = {}
error = check_dict(keys)('x', x)
self.assertEqual(error, 'names key is missing from x')
x = {
'names': ['alice', 'bob', {}]
}
error = check_dict(keys)('x', x)
self.assertEqual(error, 'x["names"][2] is not a string')
x = {
'names': ['alice', 'bob'],
'city': 5
}
error = check_dict(keys)('x', x)
self.assertEqual(error, 'x["city"] is not a string')
x = {
'names': ['alice', 'bob'],
'city': 'Boston'
}
error = check_dict(value_validator=check_string)('x', x)
self.assertEqual(error, 'x contains a value that is not a string')
x = {
'city': 'Boston'
}
error = check_dict(value_validator=check_string)('x', x)
self.assertEqual(error, None)
# test dict_only
x = {
'names': ['alice', 'bob'],
'city': 'Boston',
}
error = check_dict_only(keys)('x', x)
self.assertEqual(error, None)
x = {
'names': ['alice', 'bob'],
'city': 'Boston',
'state': 'Massachusetts',
}
error = check_dict_only(keys)('x', x)
self.assertEqual(error, 'Unexpected arguments: state')
# Test optional keys
optional_keys = [
('food', check_list(check_string)),
('year', check_int)
]
x = {
'names': ['alice', 'bob'],
'city': 'Boston',
'food': ['Lobster Spaghetti']
}
error = check_dict(keys)('x', x)
self.assertEqual(error, None) # since _allow_only_listed_keys is False
error = check_dict_only(keys)('x', x)
self.assertEqual(error, 'Unexpected arguments: food')
error = check_dict_only(keys, optional_keys)('x', x)
self.assertEqual(error, None)
x = {
'names': ['alice', 'bob'],
'city': 'Boston',
'food': 'Lobster Spaghetti'
}
error = check_dict_only(keys, optional_keys)('x', x)
self.assertEqual(error, 'x["food"] is not a list')
def test_encapsulation(self) -> None:
# There might be situations where we want deep
# validation, but the error message should be customized.
# This is an example.
def check_person(val: Any) -> Optional[str]:
error = check_dict([
('name', check_string),
('age', check_int),
])('_', val)
if error:
return 'This is not a valid person'
return None
person = {'name': 'King Lear', 'age': 42}
self.assertEqual(check_person(person), None)
nonperson = 'misconfigured data'
self.assertEqual(check_person(nonperson), 'This is not a valid person')
def test_check_variable_type(self) -> None:
x = 5 # type: Any
self.assertEqual(check_variable_type([check_string, check_int])('x', x), None)
x = 'x'
self.assertEqual(check_variable_type([check_string, check_int])('x', x), None)
x = [{}]
self.assertEqual(check_variable_type([check_string, check_int])('x', x), 'x is not an allowed_type')
def test_equals(self) -> None:
x = 5 # type: Any
self.assertEqual(equals(5)('x', x), None)
self.assertEqual(equals(6)('x', x), 'x != 6 (5 is wrong)')
def test_check_none_or(self) -> None:
x = 5 # type: Any
self.assertEqual(check_none_or(check_int)('x', x), None)
x = None
self.assertEqual(check_none_or(check_int)('x', x), None)
x = 'x'
self.assertEqual(check_none_or(check_int)('x', x), 'x is not an integer')
def test_check_url(self) -> None:
url = "http://127.0.0.1:5002/" # type: Any
self.assertEqual(check_url('url', url), None)
url = "http://zulip-bots.example.com/"
self.assertEqual(check_url('url', url), None)
url = "http://127.0.0"
self.assertEqual(check_url('url', url), 'url is not a URL')
url = 99.3
self.assertEqual(check_url('url', url), 'url is not a string')
def test_check_string_or_int_list(self) -> None:
x = "string" # type: Any
self.assertEqual(check_string_or_int_list('x', x), None)
x = [1, 2, 4]
self.assertEqual(check_string_or_int_list('x', x), None)
x = None
self.assertEqual(check_string_or_int_list('x', x), 'x is not a string or an integer list')
x = [1, 2, '3']
self.assertEqual(check_string_or_int_list('x', x), 'x[2] is not an integer')
def test_check_string_or_int(self) -> None:
x = "string" # type: Any
self.assertEqual(check_string_or_int('x', x), None)
x = 1
self.assertEqual(check_string_or_int('x', x), None)
x = None
self.assertEqual(check_string_or_int('x', x), 'x is not a string or integer')
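
# A short sketch (added for illustration; it uses only validators already
# imported by this module) of how the validators above compose: every
# validator takes (var_name, val) and returns None on success or an error
# string, so container validators just extend the variable name.
def _demo_validator_composition() -> None:  # nocoverage # illustrative only
    validator = check_list(check_dict([('name', check_string)]))
    assert validator('people', [{'name': 'alice'}]) is None
    assert validator('people', [{'name': 42}]) == 'people[0]["name"] is not a string'
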
class DeactivatedRealmTest(ZulipTestCase):
def test_send_deactivated_realm(self) -> None:
"""
rest_dispatch rejects requests in a deactivated realm, both /json and api
"""
realm = get_realm("zulip")
do_deactivate_realm(get_realm("zulip"))
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_error_contains(result, "Not logged in", status_code=401)
# Even if a logged-in session was leaked, it still wouldn't work
realm.deactivated = False
realm.save()
self.login(self.example_email("hamlet"))
realm.deactivated = True
realm.save()
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_error_contains(result, "has been deactivated", status_code=400)
result = self.api_post(self.example_email("hamlet"),
"/api/v1/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_error_contains(result, "has been deactivated", status_code=401)
def test_fetch_api_key_deactivated_realm(self) -> None:
"""
authenticated_json_view views fail in a deactivated realm
"""
realm = get_realm("zulip")
user_profile = self.example_user('hamlet')
email = user_profile.email
test_password = "abcd1234"
user_profile.set_password(test_password)
self.login(email)
realm.deactivated = True
realm.save()
result = self.client_post("/json/fetch_api_key", {"password": test_password})
self.assert_json_error_contains(result, "has been deactivated", status_code=400)
def test_webhook_deactivated_realm(self) -> None:
"""
Using a webhook while in a deactivated realm fails
"""
do_deactivate_realm(get_realm("zulip"))
user_profile = self.example_user("hamlet")
api_key = get_api_key(user_profile)
url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
data = self.webhook_fixture_data('jira', 'created_v2')
result = self.client_post(url, data,
content_type="application/json")
self.assert_json_error_contains(result, "has been deactivated", status_code=400)
class LoginRequiredTest(ZulipTestCase):
def test_login_required(self) -> None:
"""
Verifies the zulip_login_required decorator blocks deactivated users.
"""
user_profile = self.example_user('hamlet')
email = user_profile.email
# Verify fails if logged-out
result = self.client_get('/accounts/accept_terms/')
self.assertEqual(result.status_code, 302)
# Verify succeeds once logged-in
self.login(email)
result = self.client_get('/accounts/accept_terms/')
self.assert_in_response("I agree to the", result)
# Verify fails if user deactivated (with session still valid)
user_profile.is_active = False
user_profile.save()
result = self.client_get('/accounts/accept_terms/')
self.assertEqual(result.status_code, 302)
# Verify succeeds if user reactivated
do_reactivate_user(user_profile)
self.login(email)
result = self.client_get('/accounts/accept_terms/')
self.assert_in_response("I agree to the", result)
# Verify fails if realm deactivated
user_profile.realm.deactivated = True
user_profile.realm.save()
result = self.client_get('/accounts/accept_terms/')
self.assertEqual(result.status_code, 302)
class FetchAPIKeyTest(ZulipTestCase):
def test_fetch_api_key_success(self) -> None:
email = self.example_email("cordelia")
self.login(email)
result = self.client_post("/json/fetch_api_key", {"password": initial_password(email)})
self.assert_json_success(result)
def test_fetch_api_key_email_address_visibility(self) -> None:
user_profile = self.example_user("cordelia")
email = user_profile.email
do_set_realm_property(user_profile.realm, "email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
self.login(email)
result = self.client_post("/json/fetch_api_key",
{"password": initial_password(email)})
self.assert_json_success(result)
def test_fetch_api_key_wrong_password(self) -> None:
email = self.example_email("cordelia")
self.login(email)
result = self.client_post("/json/fetch_api_key", {"password": "wrong_password"})
self.assert_json_error_contains(result, "password is incorrect")
class InactiveUserTest(ZulipTestCase):
def test_send_deactivated_user(self) -> None:
"""
rest_dispatch rejects requests from deactivated users, both /json and api
"""
user_profile = self.example_user('hamlet')
email = user_profile.email
self.login(email)
do_deactivate_user(user_profile)
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_error_contains(result, "Not logged in", status_code=401)
# Even if a logged-in session was leaked, it still wouldn't work
do_reactivate_user(user_profile)
self.login(email)
user_profile.is_active = False
user_profile.save()
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_error_contains(result, "Account is deactivated", status_code=400)
result = self.api_post(self.example_email("hamlet"),
"/api/v1/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_error_contains(result, "Account is deactivated", status_code=401)
def test_fetch_api_key_deactivated_user(self) -> None:
"""
authenticated_json_view views fail with a deactivated user
"""
user_profile = self.example_user('hamlet')
email = user_profile.email
test_password = "abcd1234"
user_profile.set_password(test_password)
user_profile.save()
self.login(email, password=test_password)
user_profile.is_active = False
user_profile.save()
result = self.client_post("/json/fetch_api_key", {"password": test_password})
self.assert_json_error_contains(result, "Account is deactivated", status_code=400)
def test_login_deactivated_user(self) -> None:
"""
logging in fails with an inactive user
"""
user_profile = self.example_user('hamlet')
do_deactivate_user(user_profile)
result = self.login_with_return(self.example_email("hamlet"))
self.assert_in_response(
"Your account is no longer active.",
result)
def test_login_deactivated_mirror_dummy(self) -> None:
"""
logging in fails with an inactive user
"""
user_profile = self.example_user('hamlet')
user_profile.is_mirror_dummy = True
user_profile.save()
password = initial_password(user_profile.email)
request = mock.MagicMock()
request.get_host.return_value = 'zulip.testserver'
# Test a mirror-dummy active user.
form = OurAuthenticationForm(request,
data={'username': user_profile.email,
'password': password})
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
self.assertTrue(form.is_valid())
# Test a mirror-dummy deactivated user.
do_deactivate_user(user_profile)
user_profile.save()
form = OurAuthenticationForm(request,
data={'username': user_profile.email,
'password': password})
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
self.assertFalse(form.is_valid())
self.assertIn("Please enter a correct email", str(form.errors))
# Test a non-mirror-dummy deactivated user.
user_profile.is_mirror_dummy = False
user_profile.save()
form = OurAuthenticationForm(request,
data={'username': user_profile.email,
'password': password})
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
self.assertFalse(form.is_valid())
self.assertIn("Your account is no longer active", str(form.errors))
def test_webhook_deactivated_user(self) -> None:
"""
Deactivated users can't use webhooks
"""
user_profile = self.example_user('hamlet')
do_deactivate_user(user_profile)
api_key = get_api_key(user_profile)
url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
data = self.webhook_fixture_data('jira', 'created_v2')
result = self.client_post(url, data,
content_type="application/json")
self.assert_json_error_contains(result, "Account is deactivated", status_code=400)
class TestIncomingWebhookBot(ZulipTestCase):
def setUp(self) -> None:
zulip_realm = get_realm('zulip')
self.webhook_bot = get_user('webhook-bot@zulip.com', zulip_realm)
def test_webhook_bot_permissions(self) -> None:
result = self.api_post("webhook-bot@zulip.com",
"/api/v1/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": self.example_email("othello")})
self.assert_json_success(result)
post_params = {"anchor": 1, "num_before": 1, "num_after": 1}
result = self.api_get("webhook-bot@zulip.com", "/api/v1/messages", dict(post_params))
self.assert_json_error(result, 'This API is not available to incoming webhook bots.',
status_code=401)
class TestValidateApiKey(ZulipTestCase):
def setUp(self) -> None:
zulip_realm = get_realm('zulip')
self.webhook_bot = get_user('webhook-bot@zulip.com', zulip_realm)
self.default_bot = get_user('default-bot@zulip.com', zulip_realm)
def test_validate_api_key_if_profile_does_not_exist(self) -> None:
with self.assertRaises(JsonableError):
validate_api_key(HostRequestMock(), 'email@doesnotexist.com', 'api_key')
def test_validate_api_key_if_api_key_does_not_match_profile_api_key(self) -> None:
with self.assertRaises(JsonableError):
validate_api_key(HostRequestMock(), self.webhook_bot.email, 'not_32_length')
with self.assertRaises(JsonableError):
# We use default_bot's key but webhook_bot's email address to test
# the logic when an API key is passed and it doesn't belong to the
# user whose email address has been provided.
api_key = get_api_key(self.default_bot)
validate_api_key(HostRequestMock(), self.webhook_bot.email, api_key)
def test_validate_api_key_if_profile_is_not_active(self) -> None:
self._change_is_active_field(self.default_bot, False)
with self.assertRaises(JsonableError):
api_key = get_api_key(self.default_bot)
validate_api_key(HostRequestMock(), self.default_bot.email, api_key)
self._change_is_active_field(self.default_bot, True)
def test_validate_api_key_if_profile_is_incoming_webhook_and_is_webhook_is_unset(self) -> None:
with self.assertRaises(JsonableError):
api_key = get_api_key(self.webhook_bot)
validate_api_key(HostRequestMock(), self.webhook_bot.email, api_key)
def test_validate_api_key_if_profile_is_incoming_webhook_and_is_webhook_is_set(self) -> None:
api_key = get_api_key(self.webhook_bot)
profile = validate_api_key(HostRequestMock(host="zulip.testserver"),
self.webhook_bot.email, api_key,
is_webhook=True)
self.assertEqual(profile.id, self.webhook_bot.id)
def test_validate_api_key_if_email_is_case_insensitive(self) -> None:
api_key = get_api_key(self.default_bot)
profile = validate_api_key(HostRequestMock(host="zulip.testserver"), self.default_bot.email.upper(), api_key)
self.assertEqual(profile.id, self.default_bot.id)
def test_valid_api_key_if_user_is_on_wrong_subdomain(self) -> None:
with self.settings(RUNNING_INSIDE_TORNADO=False):
api_key = get_api_key(self.default_bot)
with mock.patch('logging.warning') as mock_warning:
with self.assertRaisesRegex(JsonableError,
"Account is not associated with this subdomain"):
validate_api_key(HostRequestMock(host=settings.EXTERNAL_HOST),
self.default_bot.email, api_key)
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(self.default_bot.email, 'zulip', ''))
with mock.patch('logging.warning') as mock_warning:
with self.assertRaisesRegex(JsonableError,
"Account is not associated with this subdomain"):
validate_api_key(HostRequestMock(host='acme.' + settings.EXTERNAL_HOST),
self.default_bot.email, api_key)
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(self.default_bot.email, 'zulip', 'acme'))
def _change_is_active_field(self, profile: UserProfile, value: bool) -> None:
profile.is_active = value
profile.save()
class TestInternalNotifyView(TestCase):
BORING_RESULT = 'boring'
class Request:
def __init__(self, POST: Dict[str, Any], META: Dict[str, Any]) -> None:
self.POST = POST
self.META = META
self.method = 'POST'
def internal_notify(self, is_tornado: bool, req: HttpRequest) -> HttpResponse:
boring_view = lambda req: self.BORING_RESULT
return internal_notify_view(is_tornado)(boring_view)(req)
def test_valid_internal_requests(self) -> None:
secret = 'random'
req = self.Request(
POST=dict(secret=secret),
META=dict(REMOTE_ADDR='127.0.0.1'),
) # type: HttpRequest
with self.settings(SHARED_SECRET=secret):
self.assertTrue(authenticate_notify(req))
self.assertEqual(self.internal_notify(False, req), self.BORING_RESULT)
self.assertEqual(req._email, 'internal')
with self.assertRaises(RuntimeError):
self.internal_notify(True, req)
req._tornado_handler = 'set'
with self.settings(SHARED_SECRET=secret):
self.assertTrue(authenticate_notify(req))
self.assertEqual(self.internal_notify(True, req), self.BORING_RESULT)
self.assertEqual(req._email, 'internal')
with self.assertRaises(RuntimeError):
self.internal_notify(False, req)
def test_internal_requests_with_broken_secret(self) -> None:
secret = 'random'
req = self.Request(
POST=dict(secret=secret),
META=dict(REMOTE_ADDR='127.0.0.1'),
)
with self.settings(SHARED_SECRET='broken'):
self.assertFalse(authenticate_notify(req))
self.assertEqual(self.internal_notify(True, req).status_code, 403)
def test_external_requests(self) -> None:
secret = 'random'
req = self.Request(
POST=dict(secret=secret),
META=dict(REMOTE_ADDR='3.3.3.3'),
)
with self.settings(SHARED_SECRET=secret):
self.assertFalse(authenticate_notify(req))
self.assertEqual(self.internal_notify(True, req).status_code, 403)
def test_is_local_address(self) -> None:
self.assertTrue(is_local_addr('127.0.0.1'))
self.assertTrue(is_local_addr('::1'))
self.assertFalse(is_local_addr('42.43.44.45'))
class TestHumanUsersOnlyDecorator(ZulipTestCase):
def test_human_only_endpoints(self) -> None:
post_endpoints = [
"/api/v1/users/me/apns_device_token",
"/api/v1/users/me/android_gcm_reg_id",
"/api/v1/users/me/enter-sends",
"/api/v1/users/me/hotspots",
"/api/v1/users/me/presence",
"/api/v1/users/me/tutorial_status",
"/api/v1/report/send_times",
"/api/v1/report/narrow_times",
"/api/v1/report/unnarrow_times",
]
for endpoint in post_endpoints:
result = self.api_post('default-bot@zulip.com', endpoint)
self.assert_json_error(result, "This endpoint does not accept bot requests.")
patch_endpoints = [
"/api/v1/settings",
"/api/v1/settings/display",
"/api/v1/settings/notifications",
"/api/v1/users/me/profile_data"
]
for endpoint in patch_endpoints:
result = self.api_patch('default-bot@zulip.com', endpoint)
self.assert_json_error(result, "This endpoint does not accept bot requests.")
delete_endpoints = [
"/api/v1/users/me/apns_device_token",
"/api/v1/users/me/android_gcm_reg_id",
]
for endpoint in delete_endpoints:
result = self.api_delete('default-bot@zulip.com', endpoint)
self.assert_json_error(result, "This endpoint does not accept bot requests.")
class TestAuthenticatedJsonPostViewDecorator(ZulipTestCase):
def test_authenticated_json_post_view_if_everything_is_correct(self) -> None:
user_email = self.example_email('hamlet')
user_realm = get_realm('zulip')
self._login(user_email, user_realm)
response = self._do_test(user_email)
self.assertEqual(response.status_code, 200)
def test_authenticated_json_post_view_with_get_request(self) -> None:
user_email = self.example_email('hamlet')
user_realm = get_realm('zulip')
self._login(user_email, user_realm)
with mock.patch('logging.warning') as mock_warning:
result = self.client_get(r'/json/subscriptions/exists', {'stream': 'Verona'})
self.assertEqual(result.status_code, 405)
            mock_warning.assert_called_once()  # Check that we logged the Method Not Allowed warning.
self.assertEqual(mock_warning.call_args_list[0][0],
('Method Not Allowed (%s): %s', 'GET', '/json/subscriptions/exists'))
def test_authenticated_json_post_view_if_subdomain_is_invalid(self) -> None:
user_email = self.example_email('hamlet')
user_realm = get_realm('zulip')
self._login(user_email, user_realm)
with mock.patch('logging.warning') as mock_warning, \
mock.patch('zerver.decorator.get_subdomain', return_value=''):
self.assert_json_error_contains(self._do_test(user_email),
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(user_email, 'zulip', ''))
with mock.patch('logging.warning') as mock_warning, \
mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
self.assert_json_error_contains(self._do_test(user_email),
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(user_email, 'zulip', 'acme'))
def test_authenticated_json_post_view_if_user_is_incoming_webhook(self) -> None:
user_email = 'webhook-bot@zulip.com'
user_realm = get_realm('zulip')
        self._login(user_email, user_realm, password="test")  # we set a password because the user is a bot
self.assert_json_error_contains(self._do_test(user_email), "Webhook bots can only access webhooks")
def test_authenticated_json_post_view_if_user_is_not_active(self) -> None:
user_email = self.example_email('hamlet')
user_realm = get_realm('zulip')
self._login(user_email, user_realm, password="test")
# Get user_profile after _login so that we have the latest data.
user_profile = get_user(user_email, user_realm)
        # We deactivate the user manually because do_deactivate_user removes the user's session.
user_profile.is_active = False
user_profile.save()
self.assert_json_error_contains(self._do_test(user_email), "Account is deactivated")
do_reactivate_user(user_profile)
def test_authenticated_json_post_view_if_user_realm_is_deactivated(self) -> None:
user_email = self.example_email('hamlet')
user_realm = get_realm('zulip')
user_profile = get_user(user_email, user_realm)
self._login(user_email, user_realm)
        # We deactivate the user's realm manually because do_deactivate_user removes the user's session.
user_profile.realm.deactivated = True
user_profile.realm.save()
self.assert_json_error_contains(self._do_test(user_email), "This organization has been deactivated")
do_reactivate_realm(user_profile.realm)
def _do_test(self, user_email: str) -> HttpResponse:
stream_name = "stream name"
self.common_subscribe_to_streams(user_email, [stream_name])
data = {"password": initial_password(user_email), "stream": stream_name}
return self.client_post(r'/json/subscriptions/exists', data)
def _login(self, user_email: str, user_realm: Realm, password: str=None) -> None:
if password:
user_profile = get_user(user_email, user_realm)
user_profile.set_password(password)
user_profile.save()
self.login(user_email, password)
class TestAuthenticatedJsonViewDecorator(ZulipTestCase):
def test_authenticated_json_view_if_subdomain_is_invalid(self) -> None:
user_email = self.example_email("hamlet")
self.login(user_email)
with mock.patch('logging.warning') as mock_warning, \
mock.patch('zerver.decorator.get_subdomain', return_value=''):
self.assert_json_error_contains(self._do_test(str(user_email)),
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(user_email, 'zulip', ''))
with mock.patch('logging.warning') as mock_warning, \
mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
self.assert_json_error_contains(self._do_test(str(user_email)),
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
"User {} ({}) attempted to access API on wrong "
"subdomain ({})".format(user_email, 'zulip', 'acme'))
def _do_test(self, user_email: str) -> HttpResponse:
data = {"password": initial_password(user_email)}
return self.client_post(r'/accounts/webathena_kerberos_login/', data)
class TestZulipLoginRequiredDecorator(ZulipTestCase):
def test_zulip_login_required_if_subdomain_is_invalid(self) -> None:
user_email = self.example_email("hamlet")
self.login(user_email)
with mock.patch('zerver.decorator.get_subdomain', return_value='zulip'):
result = self.client_get('/accounts/accept_terms/')
self.assertEqual(result.status_code, 200)
with mock.patch('zerver.decorator.get_subdomain', return_value=''):
result = self.client_get('/accounts/accept_terms/')
self.assertEqual(result.status_code, 302)
with mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
result = self.client_get('/accounts/accept_terms/')
self.assertEqual(result.status_code, 302)
def test_2fa_failure(self) -> None:
@zulip_login_required
def test_view(request: HttpRequest) -> HttpResponse:
return HttpResponse('Success')
request = HttpRequest()
request.META['SERVER_NAME'] = 'localhost'
request.META['SERVER_PORT'] = 80
request.META['PATH_INFO'] = ''
request.user = hamlet = self.example_user('hamlet')
request.user.is_verified = lambda: False
self.login(hamlet.email)
request.session = self.client.session
request.get_host = lambda: 'zulip.testserver'
response = test_view(request)
content = getattr(response, 'content')
self.assertEqual(content.decode(), 'Success')
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
request = HttpRequest()
request.META['SERVER_NAME'] = 'localhost'
request.META['SERVER_PORT'] = 80
request.META['PATH_INFO'] = ''
request.user = hamlet = self.example_user('hamlet')
request.user.is_verified = lambda: False
self.login(hamlet.email)
request.session = self.client.session
request.get_host = lambda: 'zulip.testserver'
self.create_default_device(request.user)
response = test_view(request)
status_code = getattr(response, 'status_code')
self.assertEqual(status_code, 302)
url = getattr(response, 'url')
response_url = url.split("?")[0]
self.assertEqual(response_url, settings.HOME_NOT_LOGGED_IN)
def test_2fa_success(self) -> None:
@zulip_login_required
def test_view(request: HttpRequest) -> HttpResponse:
return HttpResponse('Success')
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
request = HttpRequest()
request.META['SERVER_NAME'] = 'localhost'
request.META['SERVER_PORT'] = 80
request.META['PATH_INFO'] = ''
request.user = hamlet = self.example_user('hamlet')
request.user.is_verified = lambda: True
self.login(hamlet.email)
request.session = self.client.session
request.get_host = lambda: 'zulip.testserver'
self.create_default_device(request.user)
response = test_view(request)
content = getattr(response, 'content')
self.assertEqual(content.decode(), 'Success')
class TestRequireDecorators(ZulipTestCase):
def test_require_server_admin_decorator(self) -> None:
user_email = self.example_email('hamlet')
user_realm = get_realm('zulip')
self.login(user_email)
result = self.client_get('/activity')
self.assertEqual(result.status_code, 302)
user_profile = get_user(user_email, user_realm)
user_profile.is_staff = True
user_profile.save()
result = self.client_get('/activity')
self.assertEqual(result.status_code, 200)
def test_require_non_guest_user_decorator(self) -> None:
guest_user = self.example_user('polonius')
self.login(guest_user.email)
result = self.common_subscribe_to_streams(guest_user.email, ["Denmark"])
self.assert_json_error(result, "Not allowed for guest users")
def test_require_member_or_admin_decorator(self) -> None:
result = self.api_get("outgoing-webhook@zulip.com", '/api/v1/bots')
self.assert_json_error(result, "This endpoint does not accept bot requests.")
guest_user = self.example_user('polonius')
self.login(guest_user.email)
result = self.client_get('/json/bots')
self.assert_json_error(result, "Not allowed for guest users")
class ReturnSuccessOnHeadRequestDecorator(ZulipTestCase):
def test_returns_200_if_request_method_is_head(self) -> None:
class HeadRequest:
method = 'HEAD'
request = HeadRequest()
@return_success_on_head_request
def test_function(request: HttpRequest) -> HttpResponse:
            return json_response(msg=u'from_test_function')  # nocoverage # this function isn't meant to be called
response = test_function(request)
self.assert_json_success(response)
self.assertNotEqual(ujson.loads(response.content).get('msg'), u'from_test_function')
def test_returns_normal_response_if_request_method_is_not_head(self) -> None:
class HeadRequest:
method = 'POST'
request = HeadRequest()
@return_success_on_head_request
def test_function(request: HttpRequest) -> HttpResponse:
return json_response(msg=u'from_test_function')
response = test_function(request)
self.assertEqual(ujson.loads(response.content).get('msg'), u'from_test_function')
class RestAPITest(ZulipTestCase):
def test_method_not_allowed(self) -> None:
self.login(self.example_email("hamlet"))
result = self.client_patch('/json/users')
self.assertEqual(result.status_code, 405)
self.assert_in_response('Method Not Allowed', result)
def test_options_method(self) -> None:
self.login(self.example_email("hamlet"))
result = self.client_options('/json/users')
self.assertEqual(result.status_code, 204)
self.assertEqual(str(result['Allow']), 'GET, POST')
result = self.client_options('/json/streams/15')
self.assertEqual(result.status_code, 204)
self.assertEqual(str(result['Allow']), 'DELETE, PATCH')
def test_http_accept_redirect(self) -> None:
result = self.client_get('/json/users',
HTTP_ACCEPT='text/html')
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/login/?next=/json/users"))
class CacheTestCase(ZulipTestCase):
def test_cachify_basics(self) -> None:
@cachify
def add(w: Any, x: Any, y: Any, z: Any) -> Any:
return w + x + y + z
for i in range(2):
self.assertEqual(add(1, 2, 4, 8), 15)
self.assertEqual(add('a', 'b', 'c', 'd'), 'abcd')
def test_cachify_is_per_call(self) -> None:
def test_greetings(greeting: str) -> Tuple[List[str], List[str]]:
result_log = [] # type: List[str]
work_log = [] # type: List[str]
@cachify
def greet(first_name: str, last_name: str) -> str:
msg = '%s %s %s' % (greeting, first_name, last_name)
work_log.append(msg)
return msg
result_log.append(greet('alice', 'smith'))
result_log.append(greet('bob', 'barker'))
result_log.append(greet('alice', 'smith'))
result_log.append(greet('cal', 'johnson'))
return (work_log, result_log)
work_log, result_log = test_greetings('hello')
self.assertEqual(work_log, [
'hello alice smith',
'hello bob barker',
'hello cal johnson',
])
self.assertEqual(result_log, [
'hello alice smith',
'hello bob barker',
'hello alice smith',
'hello cal johnson',
])
work_log, result_log = test_greetings('goodbye')
self.assertEqual(work_log, [
'goodbye alice smith',
'goodbye bob barker',
'goodbye cal johnson',
])
self.assertEqual(result_log, [
'goodbye alice smith',
'goodbye bob barker',
'goodbye alice smith',
'goodbye cal johnson',
])
class TestUserAgentParsing(ZulipTestCase):
def test_user_agent_parsing(self) -> None:
"""Test for our user agent parsing logic, using a large data set."""
user_agents_parsed = defaultdict(int) # type: Dict[str, int]
user_agents_path = os.path.join(settings.DEPLOY_ROOT, "zerver/tests/fixtures/user_agents_unique")
for line in open(user_agents_path).readlines():
line = line.strip()
match = re.match('^(?P<count>[0-9]+) "(?P<user_agent>.*)"$', line)
self.assertIsNotNone(match)
groupdict = match.groupdict()
count = groupdict["count"]
user_agent = groupdict["user_agent"]
ret = parse_user_agent(user_agent)
user_agents_parsed[ret["name"]] += int(count)
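
# Illustrative note (inferred from the usage above, not from the original
# file): parse_user_agent maps a raw User-Agent string to a dict that at
# least contains a "name" key, e.g.
#     parse_user_agent('ZulipMobile/26.22.145 (iOS 10.3.1)')["name"]
#     # -> 'ZulipMobile'
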
class TestIgnoreUnhashableLRUCache(ZulipTestCase):
def test_cache_hit(self) -> None:
@ignore_unhashable_lru_cache()
def f(arg: Any) -> Any:
return arg
def get_cache_info() -> Tuple[int, int, int]:
info = getattr(f, 'cache_info')()
hits = getattr(info, 'hits')
misses = getattr(info, 'misses')
currsize = getattr(info, 'currsize')
return hits, misses, currsize
def clear_cache() -> None:
getattr(f, 'cache_clear')()
# Check hashable argument.
result = f(1)
hits, misses, currsize = get_cache_info()
# First one should be a miss.
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
self.assertEqual(result, 1)
result = f(1)
hits, misses, currsize = get_cache_info()
# Second one should be a hit.
self.assertEqual(hits, 1)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
self.assertEqual(result, 1)
# Check unhashable argument.
result = f({1: 2})
hits, misses, currsize = get_cache_info()
# Cache should not be used.
self.assertEqual(hits, 1)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
self.assertEqual(result, {1: 2})
# Clear cache.
clear_cache()
hits, misses, currsize = get_cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
def test_cache_hit_dict_args(self) -> None:
@ignore_unhashable_lru_cache()
@items_tuple_to_dict
def g(arg: Any) -> Any:
return arg
def get_cache_info() -> Tuple[int, int, int]:
info = getattr(g, 'cache_info')()
hits = getattr(info, 'hits')
misses = getattr(info, 'misses')
currsize = getattr(info, 'currsize')
return hits, misses, currsize
def clear_cache() -> None:
getattr(g, 'cache_clear')()
        # dict_to_items_tuple is applied here rather than stacked as a
        # decorator on the definition of g, so that get_cache_info and
        # clear_cache above can still reach g's lru_cache attributes.
f = dict_to_items_tuple(g)
# Check hashable argument.
result = f(1)
hits, misses, currsize = get_cache_info()
# First one should be a miss.
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
self.assertEqual(result, 1)
result = f(1)
hits, misses, currsize = get_cache_info()
# Second one should be a hit.
self.assertEqual(hits, 1)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
self.assertEqual(result, 1)
# Check dict argument.
result = f({1: 2})
hits, misses, currsize = get_cache_info()
# First one is a miss
self.assertEqual(hits, 1)
self.assertEqual(misses, 2)
self.assertEqual(currsize, 2)
self.assertEqual(result, {1: 2})
result = f({1: 2})
hits, misses, currsize = get_cache_info()
# Second one should be a hit.
self.assertEqual(hits, 2)
self.assertEqual(misses, 2)
self.assertEqual(currsize, 2)
self.assertEqual(result, {1: 2})
# Clear cache.
clear_cache()
hits, misses, currsize = get_cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
"""Basic message object for the email package object model."""
__all__ = ['Message']
import re
import uu
import base64
import binascii
from io import BytesIO, StringIO
# Intrapackage imports
from email import utils
from email import errors
from email._policybase import compat32
from email import charset as _charset
from email._encoded_words import decode_b
Charset = _charset.Charset
SEMISPACE = '; '
# Regular expression that matches `special' characters in parameters, the
# existence of which forces quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _splitparam(param):
# Split header parameters. BAW: this may be too simple. It isn't
# strictly RFC 2045 (section 5.1) compliant, but it catches most headers
    # found in the wild. We may eventually need a full-fledged parser.
# RDM: we might have a Header here; for now just stringify it.
a, sep, b = str(param).partition(';')
if not sep:
return a.strip(), None
return a.strip(), b.strip()
def _formatparam(param, value=None, quote=True):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true. If value is a
three tuple (charset, language, value), it will be encoded according
to RFC2231 rules. If it contains non-ascii characters it will likewise
be encoded according to RFC2231 rules, using the utf-8 charset and
a null language.
"""
if value is not None and len(value) > 0:
# A tuple is used for RFC 2231 encoded parameter values where items
# are (charset, language, value). charset is a string, not a Charset
# instance. RFC 2231 encoded values are never quoted, per RFC.
if isinstance(value, tuple):
# Encode as per RFC 2231
param += '*'
value = utils.encode_rfc2231(value[2], value[0], value[1])
return '%s=%s' % (param, value)
else:
try:
value.encode('ascii')
except UnicodeEncodeError:
param += '*'
value = utils.encode_rfc2231(value, 'utf-8', '')
return '%s=%s' % (param, value)
# BAW: Please check this. I think that if quote is set it should
# force quoting even if not necessary.
if quote or tspecials.search(value):
return '%s="%s"' % (param, utils.quote(value))
else:
return '%s=%s' % (param, value)
else:
return param
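
# Illustrative examples of the branches above (added comments, not in the
# original module):
#     _formatparam('charset', 'utf-8')           -> 'charset="utf-8"'   (quote=True default)
#     _formatparam('title', 'a;b', quote=False)  -> 'title="a;b"'       (';' is a tspecial)
# A (charset, language, value) tuple takes the RFC 2231 branch instead,
# yielding e.g. "title*=utf-8''..." with no quoting.
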
def _parseparam(s):
# RDM This might be a Header, so for now stringify it.
s = ';' + str(s)
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + '=' + f[i+1:].strip()
plist.append(f.strip())
s = s[end:]
return plist
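
# For example (illustrative comment, not in the original module):
#     _parseparam('text/plain; charset="utf-8"')
#     # -> ['text/plain', 'charset="utf-8"']
# The inner loop above skips semicolons inside quoted strings, so a
# parameter like filename="a;b.txt" is kept intact.
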
def _unquotevalue(value):
# This is different than utils.collapse_rfc2231_value() because it doesn't
# try to convert the value to a unicode. Message.get_param() and
# Message.get_params() are both currently defined to return the tuple in
# the face of RFC 2231 parameters.
if isinstance(value, tuple):
return value[0], value[1], utils.unquote(value[2])
else:
return utils.unquote(value)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrence of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
def __init__(self, policy=compat32):
self.policy = policy
self._headers = []
self._unixfrom = None
self._payload = None
self._charset = None
# Defaults for multipart messages
self.preamble = self.epilogue = None
self.defects = []
# Default content type
self._default_type = 'text/plain'
def __str__(self):
"""Return the entire formatted message as a string.
This includes the headers, body, and envelope header.
"""
return self.as_string()
def as_string(self, unixfrom=False, maxheaderlen=0):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This is a convenience method and may not generate the message exactly
as you intend. For more flexibility, use the flatten() method of a
Generator instance.
"""
from email.generator import Generator
fp = StringIO()
g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
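# Editor's note (hedged sketch, not part of the original module): for more
# control than as_string() offers, drive the Generator directly; here msg is
# any Message instance:
#
#   from email.generator import Generator
#   from io import StringIO
#   fp = StringIO()
#   Generator(fp, mangle_from_=True, maxheaderlen=78).flatten(msg)
#   text = fp.getvalue()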
def is_multipart(self):
"""Return True if the message consists of multiple parts."""
return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
self._unixfrom = unixfrom
def get_unixfrom(self):
return self._unixfrom
#
# Payload manipulation.
#
def attach(self, payload):
"""Add the given payload to the current payload.
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead.
"""
if self._payload is None:
self._payload = [payload]
else:
self._payload.append(payload)
def get_payload(self, i=None, decode=False):
"""Return a reference to the payload.
The payload will either be a list object or a string. If you mutate
the list object, you modify the message's payload in place. Optional
i returns that index into the payload.
Optional decode is a flag indicating whether the payload should be
decoded or not, according to the Content-Transfer-Encoding header
(default is False).
When True and the message is not a multipart, the payload will be
decoded if this header's value is `quoted-printable' or `base64'. If
some other encoding is used, or the header is missing, or if the
payload has bogus data (i.e. bogus base64 or uuencoded data), the
payload is returned as-is.
If the message is a multipart and the decode flag is True, then None
is returned.
"""
# Here is the logic table for this code, based on the email5.0.0 code:
# i decode is_multipart result
# ------ ------ ------------ ------------------------------
# None True True None
# i True True None
# None False True _payload (a list)
# i False True _payload element i (a Message)
# i False False error (not a list)
# i True False error (not a list)
# None False False _payload
# None True False _payload decoded (bytes)
# Note that Barry planned to factor out the 'decode' case, but that
# isn't so easy now that we handle the 8 bit data, which needs to be
# converted in both the decode and non-decode path.
if self.is_multipart():
if decode:
return None
if i is None:
return self._payload
else:
return self._payload[i]
# For backward compatibility, use isinstance and this error message
# instead of the more logical is_multipart test.
if i is not None and not isinstance(self._payload, list):
raise TypeError('Expected list, got %s' % type(self._payload))
payload = self._payload
# cte might be a Header, so for now stringify it.
cte = str(self.get('content-transfer-encoding', '')).lower()
# payload may be bytes here.
if isinstance(payload, str):
if utils._has_surrogates(payload):
bpayload = payload.encode('ascii', 'surrogateescape')
if not decode:
try:
payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
except LookupError:
payload = bpayload.decode('ascii', 'replace')
elif decode:
try:
bpayload = payload.encode('ascii')
except UnicodeError:
# This won't happen for RFC compliant messages (messages
# containing only ASCII codepoints in the unicode input).
# If it does happen, turn the string into bytes in a way
# guaranteed not to fail.
bpayload = payload.encode('raw-unicode-escape')
if not decode:
return payload
if cte == 'quoted-printable':
return utils._qdecode(bpayload)
elif cte == 'base64':
# XXX: this is a bit of a hack; decode_b should probably be factored
# out somewhere, but I haven't figured out where yet.
value, defects = decode_b(b''.join(bpayload.splitlines()))
for defect in defects:
self.policy.handle_defect(self, defect)
return value
elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
in_file = BytesIO(bpayload)
out_file = BytesIO()
try:
uu.decode(in_file, out_file, quiet=True)
return out_file.getvalue()
except uu.Error:
# Some decoding problem
return bpayload
if isinstance(payload, str):
return bpayload
return payload
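# Editor's illustration (hedged sketch, not part of the original module): for
# a non-multipart part whose Content-Transfer-Encoding is base64,
# get_payload(decode=True) yields the decoded bytes:
#
#   msg = Message()
#   msg['Content-Transfer-Encoding'] = 'base64'
#   msg.set_payload('aGVsbG8=')
#   msg.get_payload(decode=True)   # -> b'hello'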
def set_payload(self, payload, charset=None):
"""Set the payload to the given value.
Optional charset sets the message's default character set. See
set_charset() for details.
"""
self._payload = payload
if charset is not None:
self.set_charset(charset)
def set_charset(self, charset):
"""Set the charset of the payload to a given character set.
charset can be a Charset instance, a string naming a character set, or
None. If it is a string it will be converted to a Charset instance.
If charset is None, the charset parameter will be removed from the
Content-Type field. Anything else will generate a TypeError.
The message will be assumed to be of type text/* encoded with
charset.input_charset. It will be converted to charset.output_charset
and encoded properly, if needed, when generating the plain text
representation of the message. MIME headers (MIME-Version,
Content-Type, Content-Transfer-Encoding) will be added as needed.
"""
if charset is None:
self.del_param('charset')
self._charset = None
return
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
if 'MIME-Version' not in self:
self.add_header('MIME-Version', '1.0')
if 'Content-Type' not in self:
self.add_header('Content-Type', 'text/plain',
charset=charset.get_output_charset())
else:
self.set_param('charset', charset.get_output_charset())
if charset != charset.get_output_charset():
self._payload = charset.body_encode(self._payload)
if 'Content-Transfer-Encoding' not in self:
cte = charset.get_body_encoding()
try:
cte(self)
except TypeError:
self._payload = charset.body_encode(self._payload)
self.add_header('Content-Transfer-Encoding', cte)
def get_charset(self):
"""Return the Charset instance associated with the message's payload.
"""
return self._charset
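# Editor's illustration (hedged sketch, not part of the original module):
# setting a charset on a fresh text message adds the three MIME headers
# described above and CTE-encodes the payload:
#
#   msg = Message()
#   msg.set_payload('hello')
#   msg.set_charset('utf-8')
#   # msg now carries MIME-Version, Content-Type (with charset=utf-8) and
#   # Content-Transfer-Encoding: base64 headers, with a base64 payload.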
#
# MAPPING INTERFACE (partial)
#
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __getitem__(self, name):
"""Get a header value.
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def __setitem__(self, name, val):
"""Set the value of a header.
Note: this does not overwrite an existing header with the same field
name. Use __delitem__() first to delete any existing headers.
"""
max_count = self.policy.header_max_count(name)
if max_count:
lname = name.lower()
found = 0
for k, v in self._headers:
if k.lower() == lname:
found += 1
if found >= max_count:
raise ValueError("There may be at most {} {} headers "
"in a message".format(max_count, name))
self._headers.append(self.policy.header_store_parse(name, val))
def __delitem__(self, name):
"""Delete all occurrences of a header, if present.
Does not raise an exception if the header is missing.
"""
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheaders
def __contains__(self, name):
return name.lower() in [k.lower() for k, v in self._headers]
def __iter__(self):
for field, value in self._headers:
yield field
def keys(self):
"""Return a list of all the message's header field names.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers]
def items(self):
"""Get all the message's header fields and values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [(k, self.policy.header_fetch_parse(k, v))
for k, v in self._headers]
def get(self, name, failobj=None):
"""Get a header value.
Like __getitem__() but return failobj instead of None when the field
is missing.
"""
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
return self.policy.header_fetch_parse(k, v)
return failobj
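# Editor's illustration (hedged sketch, not part of the original module):
# __setitem__ appends rather than replaces, so duplicates accumulate:
#
#   msg = Message()
#   msg['Received'] = 'from a'
#   msg['Received'] = 'from b'
#   len(msg)                   # -> 2
#   msg.get_all('Received')    # -> ['from a', 'from b']
#   del msg['Received']        # removes every occurrence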
#
# "Internal" methods (public API, but only intended for use by a parser
# or generator, not normal application code.)
#
def set_raw(self, name, value):
"""Store name and value in the model without modification.
This is an "internal" API, intended only for use by a parser.
"""
self._headers.append((name, value))
def raw_items(self):
"""Return the (name, value) header pairs without modification.
This is an "internal" API, intended only for use by a generator.
"""
return iter(self._headers.copy())
#
# Additional useful stuff
#
def get_all(self, name, failobj=None):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original
message, and may contain duplicates. Any fields deleted and
re-inserted are always appended to the header list.
If no such fields exist, failobj is returned (defaults to None).
"""
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(self.policy.header_fetch_parse(k, v))
if not values:
return failobj
return values
def add_header(self, _name, _value, **_params):
"""Extended header setting.
name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added. If a
parameter value contains non-ASCII characters it can be specified as a
three-tuple of (charset, language, value), in which case it will be
encoded according to RFC2231 rules. Otherwise it will be encoded using
the utf-8 charset and a language of ''.
Examples:
msg.add_header('content-disposition', 'attachment', filename='bud.gif')
msg.add_header('content-disposition', 'attachment',
filename=('utf-8', '', 'Fußballer.ppt'))
msg.add_header('content-disposition', 'attachment',
filename='Fußballer.ppt')
"""
parts = []
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
if _value is not None:
parts.insert(0, _value)
self[_name] = SEMISPACE.join(parts)
def replace_header(self, _name, _value):
"""Replace a header.
Replace the first matching header found in the message, retaining
header order and case. If no matching header was found, a KeyError is
raised.
"""
_name = _name.lower()
for i, (k, v) in enumerate(self._headers):
if k.lower() == _name:
self._headers[i] = self.policy.header_store_parse(k, _value)
break
else:
raise KeyError(_name)
#
# Use these three methods instead of the three above.
#
def get_content_type(self):
"""Return the message's content type.
The returned string is coerced to lower case of the form
`maintype/subtype'. If there was no Content-Type header in the
message, the default type as given by get_default_type() will be
returned. Since according to RFC 2045, messages always have a default
type this will always return a value.
RFC 2045 defines a message's default type to be text/plain unless it
appears inside a multipart/digest container, in which case it would be
message/rfc822.
"""
missing = object()
value = self.get('content-type', missing)
if value is missing:
# This should have no parameters
return self.get_default_type()
ctype = _splitparam(value)[0].lower()
# RFC 2045, section 5.2 says if it's invalid, use text/plain
if ctype.count('/') != 1:
return 'text/plain'
return ctype
def get_content_maintype(self):
"""Return the message's main content type.
This is the `maintype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[0]
def get_content_subtype(self):
"""Returns the message's sub-content type.
This is the `subtype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[1]
def get_default_type(self):
"""Return the `default' content type.
Most messages have a default content type of text/plain, except for
messages that are subparts of multipart/digest containers. Such
subparts have a default content type of message/rfc822.
"""
return self._default_type
def set_default_type(self, ctype):
"""Set the `default' content type.
ctype should be either "text/plain" or "message/rfc822", although this
is not enforced. The default content type is not stored in the
Content-Type header.
"""
self._default_type = ctype
def _get_params_preserve(self, failobj, header):
# Like get_params() but preserves the quoting of values. BAW:
# should this be part of the public interface?
missing = object()
value = self.get(header, missing)
if value is missing:
return failobj
params = []
for p in _parseparam(value):
try:
name, val = p.split('=', 1)
name = name.strip()
val = val.strip()
except ValueError:
# Must have been a bare attribute
name = p.strip()
val = ''
params.append((name, val))
params = utils.decode_params(params)
return params
def get_params(self, failobj=None, header='content-type', unquote=True):
"""Return the message's Content-Type parameters, as a list.
The elements of the returned list are 2-tuples of key/value pairs, as
split on the `=' sign. The left hand side of the `=' is the key,
while the right hand side is the value. If there is no `=' sign in
the parameter the value is the empty string. The value is as
described in the get_param() method.
Optional failobj is the object to return if there is no Content-Type
header. Optional header is the header to search instead of
Content-Type. If unquote is True, the value is unquoted.
"""
missing = object()
params = self._get_params_preserve(missing, header)
if params is missing:
return failobj
if unquote:
return [(k, _unquotevalue(v)) for k, v in params]
else:
return params
def get_param(self, param, failobj=None, header='content-type',
unquote=True):
"""Return the parameter value if found in the Content-Type header.
Optional failobj is the object to return if there is no Content-Type
header, or the Content-Type header has no such parameter. Optional
header is the header to search instead of Content-Type.
Parameter keys are always compared case insensitively. The return
value can either be a string, or a 3-tuple if the parameter was RFC
2231 encoded. When it's a 3-tuple, the elements of the value are of
the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
LANGUAGE can be None, in which case you should consider VALUE to be
encoded in the us-ascii charset. You can usually ignore LANGUAGE.
The parameter value (either the returned string, or the VALUE item in
the 3-tuple) is always unquoted, unless unquote is set to False.
If your application doesn't care whether the parameter was RFC 2231
encoded, it can turn the return value into a string as follows:
rawparam = msg.get_param('foo')
param = email.utils.collapse_rfc2231_value(rawparam)
"""
if header not in self:
return failobj
for k, v in self._get_params_preserve(failobj, header):
if k.lower() == param.lower():
if unquote:
return _unquotevalue(v)
else:
return v
return failobj
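# Editor's illustration (hedged sketch, not part of the original module):
# ordinary parameters come back as plain strings; RFC 2231 encoded ones as
# (charset, language, value) 3-tuples that collapse_rfc2231_value() flattens:
#
#   msg = Message()
#   msg['Content-Type'] = 'text/plain; charset="utf-8"'
#   msg.get_param('charset')   # -> 'utf-8'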
def set_param(self, param, value, header='Content-Type', requote=True,
charset=None, language=''):
"""Set a parameter in the Content-Type header.
If the parameter already exists in the header, its value will be
replaced with the new value.
If header is Content-Type and has not yet been defined for this
message, it will be set to "text/plain" and the new parameter and
value will be appended as per RFC 2045.
An alternate header can be specified in the header argument, and all
parameters will be quoted as necessary unless requote is False.
If charset is specified, the parameter will be encoded according to RFC
2231. Optional language specifies the RFC 2231 language, defaulting
to the empty string. Both charset and language should be strings.
"""
if not isinstance(value, tuple) and charset:
value = (charset, language, value)
if header not in self and header.lower() == 'content-type':
ctype = 'text/plain'
else:
ctype = self.get(header)
if not self.get_param(param, header=header):
if not ctype:
ctype = _formatparam(param, value, requote)
else:
ctype = SEMISPACE.join(
[ctype, _formatparam(param, value, requote)])
else:
ctype = ''
for old_param, old_value in self.get_params(header=header,
unquote=requote):
append_param = ''
if old_param.lower() == param.lower():
append_param = _formatparam(param, value, requote)
else:
append_param = _formatparam(old_param, old_value, requote)
if not ctype:
ctype = append_param
else:
ctype = SEMISPACE.join([ctype, append_param])
if ctype != self.get(header):
del self[header]
self[header] = ctype
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype
def set_type(self, type, header='Content-Type', requote=True):
"""Set the main type and subtype for the Content-Type header.
type must be a string in the form "maintype/subtype", otherwise a
ValueError is raised.
This method replaces the Content-Type header, keeping all the
parameters in place. If requote is False, this leaves the existing
header's quoting as is. Otherwise, the parameters will be quoted (the
default).
An alternative header can be specified in the header argument. When
the Content-Type header is set, we'll always also add a MIME-Version
header.
"""
# BAW: should we be strict?
if type.count('/') != 1:
raise ValueError
# Set the Content-Type, you get a MIME-Version
if header.lower() == 'content-type':
del self['mime-version']
self['MIME-Version'] = '1.0'
if header not in self:
self[header] = type
return
params = self.get_params(header=header, unquote=requote)
del self[header]
self[header] = type
# Skip the first param; it's the old type.
for p, v in params[1:]:
self.set_param(p, v, header, requote)
def get_filename(self, failobj=None):
"""Return the filename associated with the payload if present.
The filename is extracted from the Content-Disposition header's
`filename' parameter, and it is unquoted. If that header is missing
the `filename' parameter, this method falls back to looking for the
`name' parameter.
"""
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.collapse_rfc2231_value(filename).strip()
def get_boundary(self, failobj=None):
"""Return the boundary associated with the payload if present.
The boundary is extracted from the Content-Type header's `boundary'
parameter, and it is unquoted.
"""
missing = object()
boundary = self.get_param('boundary', missing)
if boundary is missing:
return failobj
# RFC 2046 says that boundaries may begin with, but not end in, whitespace
return utils.collapse_rfc2231_value(boundary).rstrip()
def set_boundary(self, boundary):
"""Set the boundary parameter in Content-Type to 'boundary'.
This is subtly different than deleting the Content-Type header and
adding a new one with a new boundary parameter via add_header(). The
main difference is that using the set_boundary() method preserves the
order of the Content-Type header in the original message.
HeaderParseError is raised if the message has no Content-Type header.
"""
missing = object()
params = self._get_params_preserve(missing, 'content-type')
if params is missing:
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
raise errors.HeaderParseError('No Content-Type header found')
newparams = []
foundp = False
for pk, pv in params:
if pk.lower() == 'boundary':
newparams.append(('boundary', '"%s"' % boundary))
foundp = True
else:
newparams.append((pk, pv))
if not foundp:
# The original Content-Type header had no boundary attribute.
# Tack one on the end. BAW: should we raise an exception
# instead???
newparams.append(('boundary', '"%s"' % boundary))
# Replace the existing Content-Type header with the new value
newheaders = []
for h, v in self._headers:
if h.lower() == 'content-type':
parts = []
for k, v in newparams:
if v == '':
parts.append(k)
else:
parts.append('%s=%s' % (k, v))
val = SEMISPACE.join(parts)
newheaders.append(self.policy.header_store_parse(h, val))
else:
newheaders.append((h, v))
self._headers = newheaders
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower()
def get_charsets(self, failobj=None):
"""Return a list containing the charset(s) used in this message.
The returned list of items describes the Content-Type headers'
charset parameter for this message and all the subparts in its
payload.
Each item will either be a string (the value of the charset parameter
in the Content-Type header of that part) or the value of the
'failobj' parameter (defaults to None), if the part does not have a
main MIME type of "text", or the charset is not defined.
The list will contain one string for each part of the message, plus
one for the container message (i.e. self), so that a non-multipart
message will still return a list of length 1.
"""
return [part.get_content_charset(failobj) for part in self.walk()]
# I.e. def walk(self): ...
from email.iterators import walk
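# ---------------------------------------------------------------------------
# Editor-added smoke test: a hedged sketch exercising the public API defined
# above, not part of the original module. Run the file directly to try it.
if __name__ == '__main__':
    msg = Message()
    msg['Subject'] = 'demo'
    msg.set_payload('hello world')
    msg.set_type('text/plain')
    msg.set_param('charset', 'us-ascii')
    assert msg.get_content_type() == 'text/plain'
    assert msg.get_param('charset') == 'us-ascii'
    print(msg.as_string())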
|
{
"content_hash": "d60450cf9e3b20e436eee54eab8ac8e6",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 96,
"avg_line_length": 39.62285714285714,
"alnum_prop": 0.5888952985289876,
"repo_name": "MalloyPower/parsing-python",
"id": "3feab52799a43c52e54d9592f8fb9d1f06fc9758",
"size": "34781",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.3.0/Lib/email/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_monitor_radius
short_description: BIG-IP ltm monitor radius module
description:
- Configures a Remote Access Dial-in User Service (RADIUS) monitor.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
app_service:
description:
- Specifies the name of the application service to which the monitor belongs.
debug:
description:
- Specifies whether the monitor sends error messages and additional information to a log file created and
labeled specifically for this monitor.
default: 'no'
choices: ['no', 'yes']
defaults_from:
description:
- Specifies the name of the monitor from which you want your custom monitor to inherit settings.
default: radius
description:
description:
- User defined description.
destination:
description:
- Specifies the IP address and service port of the resource that is the destination of this monitor.
interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource
is down or the status of the resource is unknown.
default: 10
manual_resume:
description:
- Specifies whether the system automatically changes the status of a resource to up at the next successful
monitor check.
default: disabled
choices: ['disabled', 'enabled']
name:
description:
- Specifies a unique name for the component.
required: true
nas_ip_address:
description:
- Specifies the network access server IP address that the system uses to identify itself to the RADIUS
server.
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
password:
description:
- Specifies the password if the monitored target requires authentication.
secret:
description:
- Specifies the secret the monitor must use when contacting the resource.
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
time_until_up:
description:
- Specifies the amount of time, in seconds, after the first successful response before a node is marked up.
default: 0
timeout:
description:
- Specifies the number of seconds the target has in which to respond to the monitor request.
default: 31
up_interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor check when the resource is up.
default: 0
username:
description:
- Specifies the username if the monitored target requires authentication.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Monitor RADIUS
f5bigip_ltm_monitor_radius:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_radius_monitor
partition: Common
description: My radius monitor
state: present
delegate_to: localhost
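# A second task (editor-added sketch; the parameters mirror the documented
# options above, and state: absent is one of the documented choices)
- name: Delete LTM Monitor RADIUS
  f5bigip_ltm_monitor_radius:
    f5_hostname: 172.16.227.35
    f5_username: admin
    f5_password: admin
    f5_port: 443
    name: my_radius_monitor
    partition: Common
    state: absent
  delegate_to: localhost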
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_POLAR_CHOICES
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service=dict(type='str'),
debug=dict(type='str', choices=F5_POLAR_CHOICES),
defaults_from=dict(type='str'),
description=dict(type='str'),
destination=dict(type='str'),
interval=dict(type='int'),
manual_resume=dict(type='str', choices=F5_ACTIVATION_CHOICES),
nas_ip_address=dict(type='str'),
password=dict(type='str', no_log=True),
secret=dict(type='str'),
time_until_up=dict(type='int'),
timeout=dict(type='int'),
up_interval=dict(type='int'),
username=dict(type='str')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmMonitorRadius(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.monitor.radius_s.radius.create,
'read': self._api.tm.ltm.monitor.radius_s.radius.load,
'update': self._api.tm.ltm.monitor.radius_s.radius.update,
'delete': self._api.tm.ltm.monitor.radius_s.radius.delete,
'exists': self._api.tm.ltm.monitor.radius_s.radius.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmMonitorRadius(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
|
{
"content_hash": "27514e2140f28cc31a22d96a652a0152",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 120,
"avg_line_length": 34.23952095808383,
"alnum_prop": 0.6376355369010144,
"repo_name": "erjac77/ansible-module-f5bigip",
"id": "9aaa54de82a96a289010ed66bde654e017f64c35",
"size": "6363",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "library/f5bigip_ltm_monitor_radius.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1183958"
}
],
"symlink_target": ""
}
|
"""
Tests of neo.io.blackrockio
"""
# needed for python 3 compatibility
from __future__ import absolute_import
import unittest
import warnings
from numpy.testing import assert_equal
import numpy as np
import quantities as pq
from neo.io.blackrockio import BlackrockIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.iotest.tools import get_test_file_full_path
from neo.test.tools import assert_neo_object_is_compliant
# check scipy
try:
from distutils import version
import scipy.io
import scipy.version
except ImportError as err:
HAVE_SCIPY = False
SCIPY_ERR = err
else:
if version.LooseVersion(scipy.version.version) < '0.8':
HAVE_SCIPY = False
SCIPY_ERR = ImportError("your scipy version is too old to support " +
"MatlabIO, you need at least 0.8. " +
"You have %s" % scipy.version.version)
else:
HAVE_SCIPY = True
SCIPY_ERR = None
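# Editor's note (added comment): the HAVE_SCIPY/SCIPY_ERR pair computed above
# feeds the @unittest.skipUnless decorator on the Matlab comparison test below,
# so that test is skipped rather than failed when scipy is missing or too old.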
class CommonTests(BaseTestIO, unittest.TestCase):
ioclass = BlackrockIO
files_to_test = ['FileSpec2.3001',
'blackrock_2_1/l101210-001']
files_to_download = [
'FileSpec2.3001.nev',
'FileSpec2.3001.ns5',
'FileSpec2.3001.ccf',
'FileSpec2.3001.mat',
'blackrock_2_1/l101210-001.mat',
'blackrock_2_1/l101210-001_nev-02_ns5.mat',
'blackrock_2_1/l101210-001.ns2',
'blackrock_2_1/l101210-001.ns5',
'blackrock_2_1/l101210-001.nev',
'blackrock_2_1/l101210-001-02.nev',
'segment/PauseCorrect/pause_correct.nev',
'segment/PauseCorrect/pause_correct.ns2',
'segment/PauseSpikesOutside/pause_spikes_outside_seg.nev',
'segment/ResetCorrect/reset.nev',
'segment/ResetCorrect/reset.ns2',
'segment/ResetFail/reset_fail.nev']
def test_load_waveforms(self):
filename = self.get_filename_path('FileSpec2.3001')
reader = BlackrockIO(filename=filename, verbose=False)
bl = reader.read_block(load_waveforms=True)
assert_neo_object_is_compliant(bl)
def test_inputs_V23(self):
"""
Test various inputs to BlackrockIO.read_block with version 2.3 file
to check for parsing errors.
"""
filename = self.get_filename_path('FileSpec2.3001')
reader = BlackrockIO(filename=filename, verbose=False, nsx_to_load=5)
# Assert IOError is raised when no Blackrock files are available
with self.assertRaises(IOError):
reader2 = BlackrockIO(filename='nonexistent')
# Load data to maximum extent, one None is not given as list
block = reader.read_block(load_waveforms=False)
lena = len(block.segments[0].analogsignals[0])
numspa = len(block.segments[0].spiketrains[0])
# Load data using a negative time and a time exceeding the end of the
# recording
too_large_tstop = block.segments[0].analogsignals[0].t_stop + 1 * pq.s
buggy_slice = (-100 * pq.ms, too_large_tstop)
# this is valid in read_segment
seg = reader.read_segment(seg_index=0, time_slice=buggy_slice, strict_slicing=False)
# this raises an error
with self.assertRaises(AssertionError):
seg = reader.read_segment(seg_index=0, time_slice=buggy_slice, strict_slicing=True)
lenb = len(seg.analogsignals[0])
numspb = len(seg.spiketrains[0])
# Same length of analog signal?
# Both should have read the complete data set!
self.assertEqual(lena, lenb)
# Same length of spike train?
# Both should have read the complete data set!
self.assertEqual(numspa, numspb)
# test 4 Units
block = reader.read_block(load_waveforms=True,
signal_group_mode='split-all',
units_group_mode='all-in-one')
self.assertEqual(len(block.segments[0].analogsignals), 10)
self.assertEqual(len(block.channel_indexes[-1].units), 4)
self.assertEqual(len(block.channel_indexes[-1].units),
len(block.segments[0].spiketrains))
anasig = block.segments[0].analogsignals[0]
self.assertIsNotNone(anasig.file_origin)
def test_inputs_V21(self):
"""
Test various inputs to BlackrockIO.read_block with a version 2.1 file
to check for parsing errors.
"""
filename = self.get_filename_path('blackrock_2_1/l101210-001')
reader = BlackrockIO(filename=filename, verbose=False, nsx_to_load=5)
# Assert IOError is raised when no Blackrock files are available
with self.assertRaises(IOError):
reader2 = BlackrockIO(filename='nonexistent')
# with self.assertRaises(IOError):
# reader2 = BlackrockIO(filename=filename, nev_override='nonexistent')
# Load data to maximum extent, one None is not given as list
block = reader.read_block(load_waveforms=False, signal_group_mode='split-all')
lena = len(block.segments[0].analogsignals[0])
numspa = len(block.segments[0].spiketrains[0])
# Load data using a negative time and a time exceeding the end of the
# recording
too_large_tstop = block.segments[0].analogsignals[0].t_stop + 1 * pq.s
buggy_slice = (-100 * pq.ms, too_large_tstop)
# This is valid in read_segment
seg = reader.read_segment(seg_index=0, time_slice=buggy_slice, strict_slicing=False)
# this raises an error
with self.assertRaises(AssertionError):
seg = reader.read_segment(seg_index=0, time_slice=buggy_slice, strict_slicing=True)
lenb = len(seg.analogsignals[0])
numspb = len(seg.spiketrains[0])
# Same length of analog signal?
# Both should have read the complete data set!
self.assertEqual(lena, lenb)
# Same length of spike train?
# Both should have read the complete data set!
self.assertEqual(numspa, numspb)
# test 4 Units
block = reader.read_block(load_waveforms=True,
signal_group_mode='split-all',
units_group_mode='all-in-one')
self.assertEqual(len(block.segments[0].analogsignals), 96)
self.assertEqual(len(block.channel_indexes[-1].units), 218)
self.assertEqual(len(block.channel_indexes[-1].units),
len(block.segments[0].spiketrains))
anasig = block.segments[0].analogsignals[0]
self.assertIsNotNone(anasig.file_origin)
def test_load_multiple_nsx(self):
"""
Test if multiple nsx signals can be loaded at the same time.
"""
filename = self.get_filename_path('blackrock_2_1/l101210-001')
reader = BlackrockIO(filename=filename, verbose=False, nsx_to_load='all')
# The number of different sampling rates corresponds to the number of nsx
# signals, because a single nsx contains only signals of identical sampling rate
block = reader.read_block(load_waveforms=False)
sampling_rates = np.unique(
[a.sampling_rate.rescale('Hz') for a in block.filter(objects='AnalogSignal')])
self.assertEqual(len(sampling_rates), 2)
segment = reader.read_segment()
sampling_rates = np.unique(
[a.sampling_rate.rescale('Hz') for a in segment.filter(objects='AnalogSignal')])
self.assertEqual(len(sampling_rates), 2)
# load only ns5
reader = BlackrockIO(filename=filename, nsx_to_load=5)
seg = reader.read_segment()
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape, (109224, 96))
# load only ns2
reader = BlackrockIO(filename=filename, nsx_to_load=2)
seg = reader.read_segment()
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape, (3640, 6))
# load only ns2
reader = BlackrockIO(filename=filename, nsx_to_load=[2])
seg = reader.read_segment()
self.assertEqual(len(seg.analogsignals), 1)
# load ns2 + ns5
reader = BlackrockIO(filename=filename, nsx_to_load=[2, 5])
seg = reader.read_segment()
self.assertEqual(len(seg.analogsignals), 2)
self.assertEqual(seg.analogsignals[0].shape, (3640, 6))
self.assertEqual(seg.analogsignals[1].shape, (109224, 96))
# 'max' loads only the highest-resolution nsx, here ns5
reader = BlackrockIO(filename=filename, nsx_to_load='max')
seg = reader.read_segment()
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape, (109224, 96))
@unittest.skipUnless(HAVE_SCIPY, "requires scipy")
def test_compare_blackrockio_with_matlabloader_v21(self):
"""
This test compares the output of BlackrockIO.read_block() with the
output generated by a Matlab implementation of a Blackrock file reader
provided by the company. The output for comparison is provided in a
.mat file created by the script create_data_matlab_blackrock.m.
The function tests LFPs, spike times, and digital events.
"""
dirname = get_test_file_full_path(ioclass=BlackrockIO,
filename='blackrock_2_1/l101210-001',
directory=self.local_test_dir, clean=False)
# First run with parameters for ns5, then run with correct parameters for ns2
parameters = [('blackrock_2_1/l101210-001_nev-02_ns5.mat',
{'nsx_to_load': 5, 'nev_override': '-'.join([dirname, '02'])}),
('blackrock_2_1/l101210-001.mat', {'nsx_to_load': 2})]
for index, param in enumerate(parameters):
# Load data from matlab generated files
ml = scipy.io.loadmat(
get_test_file_full_path(
ioclass=BlackrockIO,
filename=param[0],
directory=self.local_test_dir, clean=False))
lfp_ml = ml['lfp'] # (channel x time) LFP matrix
ts_ml = ml['ts'] # spike time stamps
elec_ml = ml['el'] # spike electrodes
unit_ml = ml['un'] # spike unit IDs
wf_ml = ml['wf'] # waveforms
mts_ml = ml['mts'] # marker time stamps
mid_ml = ml['mid'] # marker IDs
# Load data from original data files using the Neo BlackrockIO
session = BlackrockIO(
dirname,
verbose=False, **param[1])
block = session.read_block(load_waveforms=True, signal_group_mode='split-all')
# Check if analog data are equal
self.assertGreater(len(block.channel_indexes), 0)
for i, chidx in enumerate(block.channel_indexes):
# Break for ChannelIndexes for Units that don't contain any Analogsignals
if len(chidx.analogsignals) == 0 and len(chidx.units) >= 1:
break
# Should only have one AnalogSignal per ChannelIndex
self.assertEqual(len(chidx.analogsignals), 1)
# Find out channel_id in order to compare correctly
idx = chidx.analogsignals[0].annotations['channel_id']
# Get data of AnalogSignal without pq.units
anasig = np.squeeze(chidx.analogsignals[0].base[:].magnitude)
# Test for equality of first nonzero values of AnalogSignal
# and matlab file contents
# If not equal test if hardcoded gain is responsible for this
# See BlackrockRawIO ll. 1420 commit 77a645655605ae39eca2de3ee511f3b522f11bd7
j = 0
while anasig[j] == 0:
j += 1
if lfp_ml[i, j] != np.squeeze(chidx.analogsignals[0].base[j].magnitude):
anasig = anasig / 152.592547
anasig = np.round(anasig).astype(int)
# Special case because id 142 is not included in ns2 file
if idx == 143:
idx -= 1
if idx > 128:
idx = idx - 136
assert_equal(anasig, lfp_ml[idx - 1, :])
# Check if spikes are equal
self.assertEqual(len(block.segments), 1)
for st_i in block.segments[0].spiketrains:
channelid = st_i.annotations['channel_id']
unitid = st_i.annotations['unit_id']
# Compare waveforms
matlab_wf = wf_ml[np.nonzero(
np.logical_and(elec_ml == channelid, unit_ml == unitid)), :][0]
# np.atleast_2d corrects for waveforms that are stored
# one-dimensionally in the SpikeTrain when only a single
# waveform is available
assert_equal(np.atleast_2d(np.squeeze(st_i.waveforms).magnitude), matlab_wf)
# Compare spike timestamps
matlab_spikes = ts_ml[np.nonzero(
np.logical_and(elec_ml == channelid, unit_ml == unitid))]
# Make sure the unit is really seconds and not 1/30000 seconds
if (not st_i.units == pq.CompoundUnit("1.0/{} * s".format(30000))) and \
st_i.units == pq.s:
st_i = np.round(st_i.base * 30000).astype(int)
assert_equal(st_i, matlab_spikes)
# Check if digital input port events are equal
self.assertGreater(len(block.segments[0].events), 0)
for ea_i in block.segments[0].events:
if ea_i.name == 'digital_input_port':
# Get all digital event IDs in this recording
marker_ids = set(ea_i.labels)
for marker_id in marker_ids:
python_digievents = np.round(
ea_i.times.base[ea_i.labels == marker_id] * 30000).astype(int)
matlab_digievents = mts_ml[
np.nonzero(mid_ml == int(marker_id))]
assert_equal(python_digievents, matlab_digievents)
# Note: analog input events are not yet supported
def test_segment_detection_reset(self):
"""
This test makes sure segments are detected correctly when reset was used during recording.
"""
# Path to nev that will fail
filename_nev_fail = self.get_filename_path('segment/ResetFail/reset_fail')
# Path to nsX and nev that will NOT fail
filename = self.get_filename_path('segment/ResetCorrect/reset')
# Warning filter needs to be set to always before first occurrence of this warning
warnings.simplefilter("always", UserWarning)
# This fails, because in the nev there is no way to separate two segments
with self.assertRaises(AssertionError):
reader = BlackrockIO(filename=filename, nsx_to_load=2, nev_override=filename_nev_fail)
# The correct file will issue a warning because a reset has occurred
# and could be detected, but was not explicitly documented in the file
with warnings.catch_warnings(record=True) as w:
reader = BlackrockIO(filename=filename, nsx_to_load=2)
self.assertGreaterEqual(len(w), 1)
messages = [str(warning.message) for warning in w if warning.category == UserWarning]
self.assertIn("Detected 1 undocumented segments within nev data after "
"timestamps [5451].", messages)
# Manually reset warning filter in order to not show too many warnings afterwards
warnings.simplefilter("default")
block = reader.read_block(load_waveforms=False, signal_group_mode="split-all")
# 1 Segment at the beginning and 1 after reset
self.assertEqual(len(block.segments), 2)
# Checking all times are correct as read from file itself
# (taking neo calculations into account)
self.assertEqual(block.segments[0].t_start, 0.0)
self.assertEqual(block.segments[0].t_stop, 4.02)
# Clock is reset to 0
self.assertEqual(block.segments[1].t_start, 0.0032)
self.assertEqual(block.segments[1].t_stop, 3.9842)
self.assertEqual(block.segments[0].analogsignals[0].t_start, 0.0)
self.assertEqual(block.segments[0].analogsignals[0].t_stop, 4.02)
self.assertEqual(block.segments[1].analogsignals[0].t_start, 0.0032)
self.assertEqual(block.segments[1].analogsignals[0].t_stop, 3.9842)
self.assertEqual(block.segments[0].spiketrains[0].t_start, 0.0)
self.assertEqual(block.segments[0].spiketrains[0].t_stop, 4.02)
self.assertEqual(block.segments[1].spiketrains[0].t_start, 0.0032)
self.assertEqual(block.segments[1].spiketrains[0].t_stop, 3.9842)
# Each segment must have the same number of analogsignals
self.assertEqual(len(block.segments[0].analogsignals),
len(block.segments[1].analogsignals))
# Length of analogsignals as created
self.assertEqual(len(block.segments[0].analogsignals[0][:]), 4020)
self.assertEqual(len(block.segments[1].analogsignals[0][:]), 3981)
def test_segment_detection_pause(self):
"""
This test makes sure segments are detected correctly when pause was used during recording.
"""
# Path to nev that has spikes that don't fit nsX segment
filename_nev_outside_seg = self.get_filename_path(
'segment/PauseSpikesOutside/pause_spikes_outside_seg')
# Path to nsX and nev that are correct
filename = self.get_filename_path('segment/PauseCorrect/pause_correct')
# This issues one warning because there are spikes long after the last
# segment, and another because there are spikes between segments
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
reader = BlackrockIO(filename=filename, nsx_to_load=2,
nev_override=filename_nev_outside_seg)
self.assertGreaterEqual(len(w), 2)
# Check that warnings are correct
messages = [str(warning.message) for warning in w if warning.category == UserWarning]
self.assertIn('Spikes outside any segment. Detected on segment #1', messages)
self.assertIn('Spikes 0.0776s after last segment.', messages)
block = reader.read_block(load_waveforms=False, signal_group_mode="split-all")
# 2 segments
self.assertEqual(len(block.segments), 2)
# Checking all times are correct as read from file itself
# (taking neo calculations into account)
self.assertEqual(block.segments[0].t_start, 0.0)
# This value is so high, because a spike occurred right before the second segment
# And thus is added to the first segment
# This is not normal behavior and occurs because of the way the files were cut
# into test files
self.assertAlmostEqual(block.segments[0].t_stop.magnitude, 15.83916667)
# Clock is not reset
self.assertEqual(block.segments[1].t_start.magnitude, 31.0087)
# Segment time is longer here as well because of spikes after second segment
self.assertEqual(block.segments[1].t_stop.magnitude, 35.0863)
self.assertEqual(block.segments[0].analogsignals[0].t_start, 0.0)
# The AnalogSignal is only 4 seconds long, as opposed to the segment
# whose length is caused by the additional spike
self.assertEqual(block.segments[0].analogsignals[0].t_stop, 4.0)
self.assertEqual(block.segments[1].analogsignals[0].t_start, 31.0087)
self.assertAlmostEqual(block.segments[1].analogsignals[0].t_stop.magnitude, 35.0087,
places=6)
self.assertEqual(block.segments[0].spiketrains[0].t_start, 0.0)
self.assertAlmostEqual(block.segments[0].spiketrains[0].t_stop.magnitude, 15.83916667,
places=8)
self.assertEqual(block.segments[1].spiketrains[0].t_start, 31.0087)
self.assertEqual(block.segments[1].spiketrains[0].t_stop, 35.0863)
# Each segment has same number of analogsignals
self.assertEqual(len(block.segments[0].analogsignals),
len(block.segments[1].analogsignals))
# Analogsignals have exactly 4000 samples
self.assertEqual(len(block.segments[0].analogsignals[0][:]), 4000)
self.assertEqual(len(block.segments[1].analogsignals[0][:]), 4000)
# This case is correct, no spikes outside segment or anything
reader = BlackrockIO(filename=filename, nsx_to_load=2)
block = reader.read_block(load_waveforms=False, signal_group_mode="split-all")
# 2 segments
self.assertEqual(len(block.segments), 2)
# Checking all times are correct as read from file itself
# (taking neo calculations into account)
self.assertEqual(block.segments[0].t_start, 0.0)
# Now segment time is only 4 seconds, because there were no additional spikes
self.assertEqual(block.segments[0].t_stop, 4.0)
self.assertEqual(block.segments[1].t_start, 31.0087)
self.assertAlmostEqual(block.segments[1].t_stop.magnitude, 35.0087, places=6)
self.assertEqual(block.segments[0].analogsignals[0].t_start, 0.0)
self.assertEqual(block.segments[0].analogsignals[0].t_stop, 4.0)
self.assertEqual(block.segments[1].analogsignals[0].t_start, 31.0087)
self.assertAlmostEqual(block.segments[1].analogsignals[0].t_stop.magnitude, 35.0087,
places=6)
self.assertEqual(block.segments[0].spiketrains[0].t_start, 0.0)
self.assertEqual(block.segments[0].spiketrains[0].t_stop, 4.0)
self.assertEqual(block.segments[1].spiketrains[0].t_start, 31.0087)
self.assertAlmostEqual(block.segments[1].spiketrains[0].t_stop.magnitude, 35.0087,
places=6)
# Each segment has same number of analogsignals
self.assertEqual(len(block.segments[0].analogsignals),
len(block.segments[1].analogsignals))
# ns2 was created in such a way that all analogsignals have 4000 samples
self.assertEqual(len(block.segments[0].analogsignals[0][:]), 4000)
self.assertEqual(len(block.segments[1].analogsignals[0][:]), 4000)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "b65df7455a948b2be42b8cffe6f10e7b",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 98,
"avg_line_length": 46.46734693877551,
"alnum_prop": 0.6195704686196144,
"repo_name": "rgerkin/python-neo",
"id": "f102d280dc4a7e6b5ed7167c767ec89ee644dd64",
"size": "22793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/test/iotest/test_blackrockio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2486594"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
InAdvancePagedList,
int_or_none,
NO_DEFAULT,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
try_get,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
parse_filesize,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
(username, password) = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
self.report_login()
webpage = self._download_webpage(self._LOGIN_URL, None, False)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
})
login_request = sanitized_Request(self._LOGIN_URL, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_request.add_header('Referer', self._LOGIN_URL)
self._set_vimeo_cookie('vuid', vuid)
self._download_webpage(login_request, None, False, 'Wrong login info')
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
})
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
password_request = sanitized_Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
self._set_vimeo_cookie('vuid', vuid)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
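# Editor's illustration (hypothetical page snippet, not taken from Vimeo):
# given webpage text containing
#     "xsrft":"token123" ... "vuid":"vuid456"
# the two regexes above would return ('token123', 'vuid456').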
def _set_vimeo_cookie(self, name, value):
self._set_cookie('vimeo.com', name, value)
def _vimeo_sort_formats(self, formats):
# Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
# at the same time without actual units specified. This leads to wrong sorting.
self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))
def _parse_config(self, config, video_id):
video_data = config['video']
# Extract title
video_title = video_data['title']
# Extract uploader, uploader_url and uploader_id
video_uploader = video_data.get('owner', {}).get('name')
video_uploader_url = video_data.get('owner', {}).get('url')
video_uploader_id = video_uploader_url.split('/')[-1] if video_uploader_url else None
# Extract video thumbnail
video_thumbnail = video_data.get('thumbnail')
if video_thumbnail is None:
video_thumbs = video_data.get('thumbs')
if video_thumbs and isinstance(video_thumbs, dict):
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
# Extract video duration
video_duration = int_or_none(video_data.get('duration'))
formats = []
config_files = video_data.get('files') or config['request'].get('files', {})
for f in config_files.get('progressive', []):
video_url = f.get('url')
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': 'http-%s' % f.get('quality'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'fps': int_or_none(f.get('fps')),
'tbr': int_or_none(f.get('bitrate')),
})
for files_type in ('hls', 'dash'):
for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
manifest_url = cdn_data.get('url')
if not manifest_url:
continue
format_id = '%s-%s' % (files_type, cdn_name)
if files_type == 'hls':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % cdn_name,
fatal=False))
elif files_type == 'dash':
mpd_pattern = r'/%s/(?:sep/)?video/' % video_id
mpd_manifest_urls = []
if re.search(mpd_pattern, manifest_url):
for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
mpd_manifest_urls.append((format_id + suffix, re.sub(
mpd_pattern, '/%s/%s/' % (video_id, repl), manifest_url)))
else:
mpd_manifest_urls = [(format_id, manifest_url)]
for f_id, m_url in mpd_manifest_urls:
formats.extend(self._extract_mpd_formats(
m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
'Downloading %s MPD information' % cdn_name,
fatal=False))
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if text_tracks:
for tt in text_tracks:
subtitles[tt['lang']] = [{
'ext': 'vtt',
'url': 'https://vimeo.com' + tt['url'],
}]
return {
'title': video_title,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'thumbnail': video_thumbnail,
'duration': video_duration,
'formats': formats,
'subtitles': subtitles,
}
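# Editor's note (hedged sketch): _parse_config above expects a config dict of
# roughly this shape; it is inferred from the accesses in the method, not taken
# from Vimeo documentation, and the CDN name is hypothetical:
#
#   config = {
#       'video': {
#           'title': '...', 'owner': {'name': '...', 'url': '...'},
#           'duration': 123, 'thumbs': {'640': '...'},
#           'files': {'progressive': [{'url': '...', 'quality': '720p'}],
#                     'hls': {'cdns': {'some_cdn': {'url': '...'}}},
#                     'dash': {'cdns': {'some_cdn': {'url': '...'}}}},
#       },
#       'request': {'files': {}, 'text_tracks': [{'lang': 'en', 'url': '...'}]},
#   }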
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:
(?:
www|
(?P<player>player)
)
\.
)?
vimeo(?P<pro>pro)?\.com/
(?!(?:channels|album)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
(?:.*?/)?
(?:
(?:
play_redirect_hls|
moogaloop\.swf)\?clip_id=
)?
(?:videos?/)?
(?P<id>[0-9]+)
(?:/[\da-f]+)?
/?(?:[?&].*)?(?:[#].*)?$
'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'timestamp': 1355990239,
'upload_date': '20121220',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434',
'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
'license': 'by-sa',
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30',
'duration': 1595,
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'timestamp': 1371200155,
'upload_date': '20130614',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'md5:dca3ea23adb29ee387127bc4ddfce63f',
},
'params': {
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/atencio',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'timestamp': 1380339469,
'upload_date': '20130928',
'duration': 187,
},
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'timestamp': 1381846109,
'upload_date': '20131015',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/staff',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user28849593',
'uploader_id': 'user28849593',
},
},
{
# contains original format
'url': 'https://vimeo.com/33951933',
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
'info_dict': {
'id': '33951933',
'ext': 'mp4',
'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
'uploader': 'The DMCI',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/dmci',
'uploader_id': 'dmci',
'timestamp': 1324343742,
'upload_date': '20111220',
'description': 'md5:ae23671e82d05415868f7ad1aec21147',
},
},
{
# only available via https://vimeo.com/channels/tributes/6213729 and
# not via https://vimeo.com/6213729
'url': 'https://vimeo.com/channels/tributes/6213729',
'info_dict': {
'id': '6213729',
'ext': 'mov',
'title': 'Vimeo Tribute: The Shining',
'uploader': 'Casey Donahue',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/caseydonahue',
'uploader_id': 'caseydonahue',
'timestamp': 1250886430,
'upload_date': '20090821',
'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
# redirects to ondemand extractor and should be passed through it
# for successful extraction
'url': 'https://vimeo.com/73445910',
'info_dict': {
'id': '73445910',
'ext': 'mp4',
'title': 'The Reluctant Revolutionary',
'uploader': '10Ft Films',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/tenfootfilms',
'uploader_id': 'tenfootfilms',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
'only_matching': True,
},
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'only_matching': True,
},
{
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
{
'url': 'https://vimeo.com/album/2632481/video/79010983',
'only_matching': True,
},
{
# source file returns 403: Forbidden
'url': 'https://vimeo.com/7809605',
'only_matching': True,
},
{
'url': 'https://vimeo.com/160743502/abd0e13fb4',
'only_matching': True,
}
]
@staticmethod
def _smuggle_referrer(url, referrer_url):
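        # Carry the embedding page's URL in the smuggled data so it can be
        # replayed later as the Referer header; some embed-only videos refuse
        # to play without it.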
return smuggle_url(url, {'http_headers': {'Referer': referrer_url}})
@staticmethod
def _extract_urls(url, webpage):
urls = []
# Look for embedded (iframe) Vimeo player
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1',
webpage):
urls.append(VimeoIE._smuggle_referrer(unescapeHTML(mobj.group('url')), url))
PLAIN_EMBED_RE = (
# Look for embedded (swf embed) Vimeo player
r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1',
            # Also look for non-standard embedded Vimeo players
r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1',
)
for embed_re in PLAIN_EMBED_RE:
for mobj in re.finditer(embed_re, webpage):
urls.append(mobj.group('url'))
return urls
@staticmethod
def _extract_url(url, webpage):
urls = VimeoIE._extract_urls(url, webpage)
return urls[0] if urls else None
def _verify_player_video_password(self, url, video_id):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option')
data = urlencode_postdata({'password': password})
pass_url = url + '/check-password'
password_request = sanitized_Request(pass_url, data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
return self._download_json(
password_request, video_id,
'Verifying the password', 'Wrong password')
def _real_initialize(self):
self._login()
def _real_extract(self, url):
url, data = unsmuggle_url(url, {})
headers = std_headers.copy()
if 'http_headers' in data:
headers.update(data['http_headers'])
if 'Referer' not in headers:
headers['Referer'] = url
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
orig_url = url
if mobj.group('pro') or mobj.group('player'):
url = 'https://player.vimeo.com/video/' + video_id
elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
request = sanitized_Request(url, headers=headers)
try:
webpage, urlh = self._download_webpage_handle(request, video_id)
            # Some URLs redirect to ondemand and can't be extracted with
            # this extractor right away, so they should be passed through
            # the ondemand extractor (e.g. https://vimeo.com/73445910)
if VimeoOndemandIE.suitable(urlh.geturl()):
return self.url_result(urlh.geturl(), VimeoOndemandIE.ie_key())
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
errmsg = ee.cause.read()
if b'Because of its privacy settings, this video cannot be played here' in errmsg:
raise ExtractorError(
'Cannot download embed-only video without embedding '
'URL. Please call youtube-dl with the URL of the page '
'that embeds this video.',
expected=True)
raise
        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract the parts that are Vimeo specific.
self.report_extraction(video_id)
vimeo_config = self._search_regex(
r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage,
'vimeo config', default=None)
if vimeo_config:
seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {})
if seed_status.get('state') == 'failed':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, seed_status['title']),
expected=True)
cc_license = None
timestamp = None
# Extract the config JSON
try:
try:
config_url = self._html_search_regex(
r' data-config-url="(.+?)"', webpage,
'config URL', default=None)
if not config_url:
                    # Sometimes a new react-based page is served instead of the
                    # old one, which requires a different config URL extraction
                    # approach (see https://github.com/rg3/youtube-dl/pull/7209)
vimeo_clip_page_config = self._search_regex(
r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
'vimeo clip page config')
page_config = self._parse_json(vimeo_clip_page_config, video_id)
config_url = page_config['player']['config_url']
cc_license = page_config.get('cc_license')
timestamp = try_get(
page_config, lambda x: x['clip']['uploaded_on'],
compat_str)
config_json = self._download_webpage(config_url, video_id)
config = json.loads(config_json)
except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # we try to find out which variable the config dict is assigned to
m_variable_name = re.search(r'(\w)\.video\.id', webpage)
if m_variable_name is not None:
config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
else:
config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
config = self._search_regex(config_re, webpage, 'info section',
flags=re.DOTALL)
config = json.loads(config)
except Exception as e:
if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted access to this video; try again with the "--referer" option')
if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
if '_video_password_verified' in data:
raise ExtractorError('video password verification failed!')
self._verify_video_password(url, video_id, webpage)
return self._real_extract(
smuggle_url(url, {'_video_password_verified': 'verified'}))
else:
raise ExtractorError('Unable to extract info section',
cause=e)
else:
if config.get('view') == 4:
config = self._verify_player_video_password(url, video_id)
def is_rented():
if '>You rented this title.<' in webpage:
return True
if config.get('user', {}).get('purchased'):
return True
label = try_get(
config, lambda x: x['video']['vod']['purchase_options'][0]['label_string'], compat_str)
if label and label.startswith('You rented this'):
return True
return False
if is_rented():
feature_id = config.get('video', {}).get('vod', {}).get('feature_id')
if feature_id and not data.get('force_feature_id', False):
return self.url_result(smuggle_url(
'https://player.vimeo.com/player/%s' % feature_id,
{'force_feature_id': True}), 'Vimeo')
# Extract video description
video_description = self._html_search_regex(
r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
webpage, 'description', default=None)
if not video_description:
video_description = self._html_search_meta(
'description', webpage, default=None)
if not video_description and mobj.group('pro'):
orig_webpage = self._download_webpage(
orig_url, video_id,
note='Downloading webpage for description',
fatal=False)
if orig_webpage:
video_description = self._html_search_meta(
'description', orig_webpage, default=None)
if not video_description and not mobj.group('player'):
self._downloader.report_warning('Cannot find video description')
# Extract upload date
if not timestamp:
timestamp = self._search_regex(
r'<time[^>]+datetime="([^"]+)"', webpage,
'timestamp', default=None)
try:
view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
except RegexNotFoundError:
# This info is only available in vimeo.com/{id} urls
view_count = None
like_count = None
comment_count = None
formats = []
download_request = sanitized_Request('https://vimeo.com/%s?action=load_download_config' % video_id, headers={
'X-Requested-With': 'XMLHttpRequest'})
download_data = self._download_json(download_request, video_id, fatal=False)
if download_data:
source_file = download_data.get('source_file')
if isinstance(source_file, dict):
download_url = source_file.get('download_url')
if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
source_name = source_file.get('public_name', 'Original')
if self._is_valid_url(download_url, video_id, '%s video' % source_name):
ext = source_file.get('extension', determine_ext(download_url)).lower()
formats.append({
'url': download_url,
'ext': ext,
'width': int_or_none(source_file.get('width')),
'height': int_or_none(source_file.get('height')),
'filesize': parse_filesize(source_file.get('size')),
'format_id': source_name,
'preference': 1,
})
info_dict = self._parse_config(config, video_id)
formats.extend(info_dict['formats'])
self._vimeo_sort_formats(formats)
if not cc_license:
cc_license = self._search_regex(
r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
webpage, 'license', default=None, group='license')
info_dict.update({
'id': video_id,
'formats': formats,
'timestamp': unified_timestamp(timestamp),
'description': video_description,
'webpage_url': url,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'license': cc_license,
})
return info_dict
class VimeoOndemandIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:ondemand'
_VALID_URL = r'https?://(?:www\.)?vimeo\.com/ondemand/(?P<id>[^/?#&]+)'
_TESTS = [{
# ondemand video not available via https://vimeo.com/id
'url': 'https://vimeo.com/ondemand/20704',
'md5': 'c424deda8c7f73c1dfb3edd7630e2f35',
'info_dict': {
'id': '105442900',
'ext': 'mp4',
'title': 'המעבדה - במאי יותם פלדמן',
'uploader': 'גם סרטים',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/gumfilms',
'uploader_id': 'gumfilms',
},
'params': {
'format': 'best[protocol=https]',
},
}, {
# requires Referer to be passed along with og:video:url
'url': 'https://vimeo.com/ondemand/36938/126682985',
'info_dict': {
'id': '126682985',
'ext': 'mp4',
'title': 'Rävlock, rätt läte på rätt plats',
'uploader': 'Lindroth & Norin',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user14430847',
'uploader_id': 'user14430847',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://vimeo.com/ondemand/nazmaalik',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/141692381',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/thelastcolony/150274832',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
return self.url_result(
# Some videos require Referer to be passed along with og:video:url
# similarly to generic vimeo embeds (e.g.
# https://vimeo.com/ondemand/36938/126682985).
VimeoIE._smuggle_referrer(self._og_search_video_url(webpage), url),
VimeoIE.ie_key())
class VimeoChannelIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:channel'
_VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
_TITLE = None
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
_TESTS = [{
'url': 'https://vimeo.com/channels/tributes',
'info_dict': {
'id': 'tributes',
'title': 'Vimeo Tributes',
},
'playlist_mincount': 25,
}]
def _page_url(self, base_url, pagenum):
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')
def _login_list_password(self, page_url, list_id, webpage):
login_form = self._search_regex(
r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
webpage, 'login form', default=None)
if not login_form:
return webpage
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
fields = self._hidden_inputs(login_form)
token, vuid = self._extract_xsrft_and_vuid(webpage)
fields['token'] = token
fields['password'] = password
post = urlencode_postdata(fields)
password_path = self._search_regex(
r'action="([^"]+)"', login_form, 'password URL')
password_url = compat_urlparse.urljoin(page_url, password_path)
password_request = sanitized_Request(password_url, post)
password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
self._set_vimeo_cookie('vuid', vuid)
self._set_vimeo_cookie('xsrft', token)
return self._download_webpage(
password_request, list_id,
'Verifying the password', 'Wrong password')
def _title_and_entries(self, list_id, base_url):
for pagenum in itertools.count(1):
page_url = self._page_url(base_url, pagenum)
webpage = self._download_webpage(
page_url, list_id,
'Downloading page %s' % pagenum)
if pagenum == 1:
webpage = self._login_list_password(page_url, list_id, webpage)
yield self._extract_list_title(webpage)
# Try extracting href first since not all videos are available via
# short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
clips = re.findall(
r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)(?:[^>]+\btitle="([^"]+)")?', webpage)
if clips:
for video_id, video_url, video_title in clips:
yield self.url_result(
compat_urlparse.urljoin(base_url, video_url),
VimeoIE.ie_key(), video_id=video_id, video_title=video_title)
# More relaxed fallback
else:
for video_id in re.findall(r'id=["\']clip_(\d+)', webpage):
yield self.url_result(
'https://vimeo.com/%s' % video_id,
VimeoIE.ie_key(), video_id=video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
break
def _extract_videos(self, list_id, base_url):
title_and_entries = self._title_and_entries(list_id, base_url)
list_title = next(title_and_entries)
return self.playlist_result(title_and_entries, list_id, list_title)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
return self._extract_videos(channel_id, 'https://vimeo.com/channels/%s' % channel_id)
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
_VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
_TESTS = [{
'url': 'https://vimeo.com/nkistudio/videos',
'info_dict': {
'title': 'Nki',
'id': 'nkistudio',
},
'playlist_mincount': 66,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
return self._extract_videos(name, 'https://vimeo.com/%s' % name)
class VimeoAlbumIE(VimeoChannelIE):
IE_NAME = 'vimeo:album'
_VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)(?:$|[?#]|/(?!video))'
_TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
_TESTS = [{
'url': 'https://vimeo.com/album/2632481',
'info_dict': {
'id': '2632481',
'title': 'Staff Favorites: November 2013',
},
'playlist_mincount': 13,
}, {
'note': 'Password-protected album',
'url': 'https://vimeo.com/album/3253534',
'info_dict': {
'title': 'test',
'id': '3253534',
},
'playlist_count': 1,
'params': {
'videopassword': 'youtube-dl',
}
}, {
'url': 'https://vimeo.com/album/2632481/sort:plays/format:thumbnail',
'only_matching': True,
}, {
# TODO: respect page number
'url': 'https://vimeo.com/album/2632481/page:2/sort:plays/format:thumbnail',
'only_matching': True,
}]
def _page_url(self, base_url, pagenum):
return '%s/page:%d/' % (base_url, pagenum)
def _real_extract(self, url):
album_id = self._match_id(url)
return self._extract_videos(album_id, 'https://vimeo.com/album/%s' % album_id)
class VimeoGroupsIE(VimeoAlbumIE):
IE_NAME = 'vimeo:group'
_VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)(?:/(?!videos?/\d+)|$)'
_TESTS = [{
'url': 'https://vimeo.com/groups/rolexawards',
'info_dict': {
'id': 'rolexawards',
'title': 'Rolex Awards for Enterprise',
},
'playlist_mincount': 73,
}]
def _extract_list_title(self, webpage):
return self._og_search_title(webpage)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
return self._extract_videos(name, 'https://vimeo.com/groups/%s' % name)
class VimeoReviewIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:review'
IE_DESC = 'Review pages on vimeo'
_VALID_URL = r'https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
_TESTS = [{
'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
'md5': 'c507a72f780cacc12b2248bb4006d253',
'info_dict': {
'id': '75524534',
'ext': 'mp4',
'title': "DICK HARDWICK 'Comedian'",
'uploader': 'Richard Hardwick',
'uploader_id': 'user21297594',
}
}, {
'note': 'video player needs Referer',
'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
'md5': '6295fdab8f4bf6a002d058b2c6dce276',
'info_dict': {
'id': '91613211',
'ext': 'mp4',
'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
'uploader': 'DevWeek Events',
'duration': 2773,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader_id': 'user22258446',
}
}, {
'note': 'Password protected',
'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
'info_dict': {
'id': '138823582',
'ext': 'mp4',
'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
'uploader': 'TMB',
'uploader_id': 'user37284429',
},
'params': {
'videopassword': 'holygrail',
},
'skip': 'video gone',
}]
def _real_initialize(self):
self._login()
def _get_config_url(self, webpage_url, video_id, video_password_verified=False):
webpage = self._download_webpage(webpage_url, video_id)
config_url = self._html_search_regex(
r'data-config-url=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
'config URL', default=None, group='url')
if not config_url:
data = self._parse_json(self._search_regex(
r'window\s*=\s*_extend\(window,\s*({.+?})\);', webpage, 'data',
default=NO_DEFAULT if video_password_verified else '{}'), video_id)
config_url = data.get('vimeo_esi', {}).get('config', {}).get('configUrl')
if config_url is None:
self._verify_video_password(webpage_url, video_id, webpage)
config_url = self._get_config_url(
webpage_url, video_id, video_password_verified=True)
return config_url
def _real_extract(self, url):
video_id = self._match_id(url)
config_url = self._get_config_url(url, video_id)
config = self._download_json(config_url, video_id)
info_dict = self._parse_config(config, video_id)
self._vimeo_sort_formats(info_dict['formats'])
info_dict['id'] = video_id
return info_dict
class VimeoWatchLaterIE(VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
_VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
_TITLE = 'Watch Later'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': 'https://vimeo.com/watchlater',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _page_url(self, base_url, pagenum):
url = '%s/page:%d/' % (base_url, pagenum)
request = sanitized_Request(url)
        # Set the header to get a partial html page with the ids;
        # the normal page doesn't contain them.
request.add_header('X-Requested-With', 'XMLHttpRequest')
return request
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(InfoExtractor):
_VALID_URL = r'https://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
IE_NAME = 'vimeo:likes'
IE_DESC = 'Vimeo user likes'
_TEST = {
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
'info_dict': {
'id': 'user755559_likes',
'description': 'See all the videos urza likes',
'title': 'Videos urza likes',
},
}
def _real_extract(self, url):
user_id = self._match_id(url)
webpage = self._download_webpage(url, user_id)
page_count = self._int(
self._search_regex(
r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
.*?</a></li>\s*<li\s+class="pagination_next">
''', webpage, 'page count'),
'page count', fatal=True)
PAGE_SIZE = 12
title = self._html_search_regex(
r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
description = self._html_search_meta('description', webpage)
def _get_page(idx):
page_url = 'https://vimeo.com/user%s/likes/page:%d/sort:date' % (
user_id, idx + 1)
webpage = self._download_webpage(
page_url, user_id,
note='Downloading page %d/%d' % (idx + 1, page_count))
video_list = self._search_regex(
r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
webpage, 'video content')
paths = re.findall(
r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
for path in paths:
yield {
'_type': 'url',
'url': compat_urlparse.urljoin(page_url, path),
}
pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
return {
'_type': 'playlist',
'id': 'user%s_likes' % user_id,
'title': title,
'description': description,
'entries': pl,
}
|
{
"content_hash": "a75f435da07aeb422bd2d22d27a97dfb",
"timestamp": "",
"source": "github",
"line_count": 1019,
"max_line_length": 137,
"avg_line_length": 41.2855740922473,
"alnum_prop": 0.5135250772521988,
"repo_name": "bosstb/HaberPush",
"id": "61cc469bf27b58bfc70eb8bd036737ec0a4cb66c",
"size": "42120",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/vimeo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53"
},
{
"name": "HTML",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "4295385"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import Axes3D
FONT = FontProperties(fname=("LaserQt_Font/wqy-microhei.ttc"), size=10)
dataframe = pd.read_csv('data.txt', header=None)
dataframe = dataframe.dropna()  # dropna() returns a new frame; keep the result
matrix = dataframe.values  # .as_matrix() is deprecated; .values yields the same ndarray
print(type(matrix))
fig = plt.figure()
fig.set_facecolor("white")
fig.set_edgecolor("black")
axes = Axes3D(fig)
axes.set_xlim([np.min(matrix[:, 0]), np.max(matrix[:, 0])])
axes.set_ylim([np.min(matrix[:, 1]), np.max(matrix[:, 1])])
axes.set_zlim([np.min(matrix[:, 2]), np.max(matrix[:, 2])])
axes.set_xticks([])
axes.set_yticks([])
axes.set_zticks([])
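# The axis labels below are Chinese for "machined plate X/Y/Z direction".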
axes.set_xlabel("加工板X方向", fontproperties=FONT, fontsize=9)
axes.set_ylabel("加工板Y方向", fontproperties=FONT, fontsize=9)
axes.set_zlabel("加工板Z方向", fontproperties=FONT, fontsize=9)
axes.grid(True, which="both")
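# Plot only every 10th point so huge point clouds stay cheap to render.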
axes.scatter(matrix[::10, 0], matrix[::10, 1], matrix[::10, 2], c='red')
plt.show()
|
{
"content_hash": "0ecfddfb03505b21d2f76412fe203588",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 33.758620689655174,
"alnum_prop": 0.710929519918284,
"repo_name": "IamLJT/LaserQt",
"id": "6ff6711ec4c8da3b78ed61b24a642451e8d11293",
"size": "1033",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code/plot_huge_amount_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "653"
},
{
"name": "C++",
"bytes": "127839"
},
{
"name": "Python",
"bytes": "87322"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
import re
from unittest import skip
from social_auth.utils import setting
from social_auth.tests.base import SocialAuthTestsCase, FormParserByID
from django.contrib.sites.models import Site
class FacebookTestCase(SocialAuthTestsCase):
SERVER_NAME = 'myapp.com'
SERVER_PORT = '8000'
def __init__(self, methodName='runTest'):
        self.SERVER_NAME = Site.objects.get_current().domain
super(FacebookTestCase, self).__init__(methodName)
name = 'facebook'
def setUp(self, *args, **kwargs):
super(FacebookTestCase, self).setUp(*args, **kwargs)
self.user = setting('TEST_FACEBOOK_USER')
self.passwd = setting('TEST_FACEBOOK_PASSWORD')
        # check that user and password are set up properly
# Ugh, these fail too.
#self.assertTrue(self.user)
#self.assertTrue(self.passwd)
REDIRECT_RE = re.compile(r'window\.location\.replace\("(.*)"\);')
class FacebookTestLogin(FacebookTestCase):
@skip("FacebookTestCase.setUp() is broken")
    def test_login_successful(self):
        """Walk through the Facebook login flow and check the final redirect."""
response = self.client.get('http://%s%s' % (self.SERVER_NAME, self.reverse('socialauth_begin', 'facebook')))
# social_auth must redirect to service page
self.assertEqual(response.status_code, 302)
        # Open the first redirect page; it contains the user login form because
        # we don't have a cookie to send to facebook
parser = FormParserByID('login_form')
content = self.get_content(response['Location'], use_cookies=True)
parser.feed(content)
auth = {'email': self.user,
'pass': self.passwd}
# Check that action and values were loaded properly
self.assertTrue(parser.action)
self.assertTrue(parser.values)
# Post login form, will return authorization or redirect page
parser.values.update(auth)
redirect = self.get_redirect(parser.action, parser.values,
use_cookies=True)
        # If the page contains a form#login_form, then we are on the app
        # authorization page because the app is not authorized yet;
        # otherwise the app already gained permission and facebook sends
        # a page that redirects to redirect_url
if 'login_form' in content:
# authorization form post, returns redirect_page
parser = FormParserByID('login_form')
parser.feed(content)
self.assertTrue(parser.action)
self.assertTrue(parser.values)
parser.values.update(auth)
redirect = self.get_redirect(parser.action, parser.values,
use_cookies=True)
redirect_page = redirect.read()
else:
redirect = self.get_redirect(redirect.headers['Location'],
use_cookies=True)
redirect_page = redirect.read()
if 'uiserver_form' in redirect_page:
# authorization form post, returns redirect_page
parser = FormParserByID('uiserver_form')
parser.feed(redirect_page)
self.assertTrue(parser.action)
self.assertTrue(parser.values)
parser.values.update(auth)
redirect = self.get_redirect(parser.action, parser.values,
use_cookies=True)
self.assertTrue(setting('LOGIN_REDIRECT_URL') in self.make_relative(redirect.headers['Location']))
|
{
"content_hash": "e96abd2a0297d3877c030fd589318880",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 116,
"avg_line_length": 39.04545454545455,
"alnum_prop": 0.6292200232828871,
"repo_name": "sk7/django-social-auth",
"id": "092d7e95aeb8247558aba09433f1166705d7332b",
"size": "3436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "social_auth/tests/facebook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "359558"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import json
import os
import stat
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.interfaces import WorkerTooOldError
from buildbot.process import remotecommand
from buildbot.process import remotetransfer
from buildbot.process.buildstep import FAILURE
from buildbot.process.buildstep import SKIPPED
from buildbot.process.buildstep import SUCCESS
from buildbot.process.buildstep import BuildStep
from buildbot.steps.worker import CompositeStepMixin
from buildbot.util import flatten
from buildbot.util.eventual import eventually
from buildbot.worker_transition import WorkerAPICompatMixin
from buildbot.worker_transition import reportDeprecatedWorkerNameUsage
def makeStatusRemoteCommand(step, remote_command, args):
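    # A missing return code (None) is treated like 0: both decode to SUCCESS.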
self = remotecommand.RemoteCommand(
remote_command, args, decodeRC={None: SUCCESS, 0: SUCCESS})
self.useLogDelayed('stdio', lambda arg: step.step_status.addLog('stdio'), True)
return self
class _TransferBuildStep(BuildStep):
"""
Base class for FileUpload and FileDownload to factor out common
functionality.
"""
renderables = ['workdir']
haltOnFailure = True
flunkOnFailure = True
def __init__(self, workdir=None, **buildstep_kwargs):
BuildStep.__init__(self, **buildstep_kwargs)
self.workdir = workdir
def runTransferCommand(self, cmd, writer=None):
# Run a transfer step, add a callback to extract the command status,
# add an error handler that cancels the writer.
self.cmd = cmd
d = self.runCommand(cmd)
@d.addCallback
def checkResult(_):
if writer and cmd.didFail():
writer.cancel()
return FAILURE if cmd.didFail() else SUCCESS
@d.addErrback
def cancel(res):
if writer:
writer.cancel()
return res
return d
def interrupt(self, reason):
self.addCompleteLog('interrupt', str(reason))
if self.cmd:
d = self.cmd.interrupt(reason)
return d
class FileUpload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'upload'
renderables = ['workersrc', 'masterdest', 'url']
def __init__(self, workersrc=None, masterdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, mode=None,
keepstamp=False, url=None, urlText=None,
slavesrc=None, # deprecated, use `workersrc` instead
**buildstep_kwargs):
# Deprecated API support.
if slavesrc is not None:
reportDeprecatedWorkerNameUsage(
"'slavesrc' keyword argument is deprecated, "
"use 'workersrc' instead")
assert workersrc is None
workersrc = slavesrc
        # Emulate that the first two arguments are positional.
if workersrc is None or masterdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.workersrc = workersrc
self._registerOldWorkerAttr("workersrc")
self.masterdest = masterdest
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
'mode must be an integer or None')
self.mode = mode
self.keepstamp = keepstamp
self.url = url
self.urlText = urlText
def finished(self, results):
log.msg("File '{}' upload finished with results {}".format(
os.path.basename(self.workersrc), str(results)))
self.step_status.setText(self.descriptionDone)
_TransferBuildStep.finished(self, results)
def start(self):
self.checkWorkerHasCommand("uploadFile")
source = self.workersrc
masterdest = self.masterdest
# we rely upon the fact that the buildmaster runs chdir'ed into its
# basedir to make sure that relative paths in masterdest are expanded
# properly. TODO: maybe pass the master's basedir all the way down
# into the BuildStep so we can do this better.
masterdest = os.path.expanduser(masterdest)
log.msg("FileUpload started, from worker %r to master %r"
% (source, masterdest))
if self.description is None:
self.description = ['uploading %s' % (os.path.basename(source))]
if self.descriptionDone is None:
self.descriptionDone = self.description
if self.url is not None:
urlText = self.urlText
if urlText is None:
urlText = os.path.basename(masterdest)
self.addURL(urlText, self.url)
self.step_status.setText(self.description)
# we use maxsize to limit the amount of data on both sides
fileWriter = remotetransfer.FileWriter(
masterdest, self.maxsize, self.mode)
if self.keepstamp and self.workerVersionIsOlderThan("uploadFile", "2.13"):
m = ("This worker (%s) does not support preserving timestamps. "
"Please upgrade the worker." % self.build.workername)
raise WorkerTooOldError(m)
# default arguments
args = {
'workdir': self.workdir,
'writer': fileWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'keepstamp': self.keepstamp,
}
if self.workerVersionIsOlderThan('uploadFile', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadFile', args)
d = self.runTransferCommand(cmd, fileWriter)
d.addCallback(self.finished).addErrback(self.failed)
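# A minimal usage sketch (hypothetical master.cfg values, not part of this
# module): upload one artifact from the worker to the master and publish a
# link to it.
#
#   from buildbot.steps.transfer import FileUpload
#   factory.addStep(FileUpload(workersrc="dist/app.tar.gz",
#                              masterdest="/srv/artifacts/app.tar.gz",
#                              url="https://builds.example.com/app.tar.gz"))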
class DirectoryUpload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'upload'
renderables = ['workersrc', 'masterdest', 'url']
def __init__(self, workersrc=None, masterdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024,
compress=None, url=None,
slavesrc=None, # deprecated, use `workersrc` instead
**buildstep_kwargs
):
# Deprecated API support.
if slavesrc is not None:
reportDeprecatedWorkerNameUsage(
"'slavesrc' keyword argument is deprecated, "
"use 'workersrc' instead")
assert workersrc is None
workersrc = slavesrc
        # Emulate that the first two arguments are positional.
if workersrc is None or masterdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.workersrc = workersrc
self._registerOldWorkerAttr("workersrc")
self.masterdest = masterdest
self.maxsize = maxsize
self.blocksize = blocksize
if compress not in (None, 'gz', 'bz2'):
config.error(
"'compress' must be one of None, 'gz', or 'bz2'")
self.compress = compress
self.url = url
def start(self):
self.checkWorkerHasCommand("uploadDirectory")
source = self.workersrc
masterdest = self.masterdest
# we rely upon the fact that the buildmaster runs chdir'ed into its
# basedir to make sure that relative paths in masterdest are expanded
# properly. TODO: maybe pass the master's basedir all the way down
# into the BuildStep so we can do this better.
masterdest = os.path.expanduser(masterdest)
log.msg("DirectoryUpload started, from worker %r to master %r"
% (source, masterdest))
self.descriptionDone = "uploading %s" % os.path.basename(source)
if self.url is not None:
self.addURL(
os.path.basename(os.path.normpath(masterdest)), self.url)
# we use maxsize to limit the amount of data on both sides
dirWriter = remotetransfer.DirectoryWriter(
masterdest, self.maxsize, self.compress, 0o600)
# default arguments
args = {
'workdir': self.workdir,
'writer': dirWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'compress': self.compress
}
if self.workerVersionIsOlderThan('uploadDirectory', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadDirectory', args)
d = self.runTransferCommand(cmd, dirWriter)
d.addCallback(self.finished).addErrback(self.failed)
class MultipleFileUpload(_TransferBuildStep, WorkerAPICompatMixin,
CompositeStepMixin):
name = 'upload'
logEnviron = False
renderables = ['workersrcs', 'masterdest', 'url']
def __init__(self, workersrcs=None, masterdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, glob=False,
mode=None, compress=None, keepstamp=False, url=None,
slavesrcs=None, # deprecated, use `workersrcs` instead
**buildstep_kwargs):
# Deprecated API support.
if slavesrcs is not None:
reportDeprecatedWorkerNameUsage(
"'slavesrcs' keyword argument is deprecated, "
"use 'workersrcs' instead")
assert workersrcs is None
workersrcs = slavesrcs
        # Emulate that the first two arguments are positional.
if workersrcs is None or masterdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.workersrcs = workersrcs
self._registerOldWorkerAttr("workersrcs")
self.masterdest = masterdest
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
'mode must be an integer or None')
self.mode = mode
if compress not in (None, 'gz', 'bz2'):
config.error(
"'compress' must be one of None, 'gz', or 'bz2'")
self.compress = compress
self.glob = glob
self.keepstamp = keepstamp
self.url = url
def uploadFile(self, source, masterdest):
fileWriter = remotetransfer.FileWriter(
masterdest, self.maxsize, self.mode)
args = {
'workdir': self.workdir,
'writer': fileWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'keepstamp': self.keepstamp,
}
if self.workerVersionIsOlderThan('uploadFile', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadFile', args)
return self.runTransferCommand(cmd, fileWriter)
def uploadDirectory(self, source, masterdest):
dirWriter = remotetransfer.DirectoryWriter(
masterdest, self.maxsize, self.compress, 0o600)
args = {
'workdir': self.workdir,
'writer': dirWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'compress': self.compress
}
if self.workerVersionIsOlderThan('uploadDirectory', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadDirectory', args)
return self.runTransferCommand(cmd, dirWriter)
def startUpload(self, source, destdir):
masterdest = os.path.join(destdir, os.path.basename(source))
args = {
'file': source,
'workdir': self.workdir
}
cmd = makeStatusRemoteCommand(self, 'stat', args)
d = self.runCommand(cmd)
@d.addCallback
def checkStat(_):
s = cmd.updates['stat'][-1]
if stat.S_ISDIR(s[stat.ST_MODE]):
return self.uploadDirectory(source, masterdest)
elif stat.S_ISREG(s[stat.ST_MODE]):
return self.uploadFile(source, masterdest)
            return defer.fail(ValueError(
                '%r is neither a regular file nor a directory' % source))
@d.addCallback
def uploadDone(result):
d = defer.maybeDeferred(
self.uploadDone, result, source, masterdest)
d.addCallback(lambda _: result)
return d
return d
def uploadDone(self, result, source, masterdest):
pass
def allUploadsDone(self, result, sources, masterdest):
if self.url is not None:
self.addURL(
os.path.basename(os.path.normpath(masterdest)), self.url)
def start(self):
self.checkWorkerHasCommand("uploadDirectory")
self.checkWorkerHasCommand("uploadFile")
self.checkWorkerHasCommand("stat")
masterdest = os.path.expanduser(self.masterdest)
sources = self.workersrcs if isinstance(self.workersrcs, list) else [self.workersrcs]
if self.keepstamp and self.workerVersionIsOlderThan("uploadFile", "2.13"):
m = ("This worker (%s) does not support preserving timestamps. "
"Please upgrade the worker." % self.build.workername)
raise WorkerTooOldError(m)
if not sources:
return self.finished(SKIPPED)
@defer.inlineCallbacks
def globSources(sources):
dl = defer.DeferredList([
self.runGlob(
os.path.join(self.workdir, source), abandonOnFailure=False) for source in sources
])
results = yield dl
results = [
result[1]
for result in filter(lambda result: result[0], results)
]
results = flatten(results)
defer.returnValue(results)
@defer.inlineCallbacks
def uploadSources(sources):
if not sources:
defer.returnValue(SKIPPED)
else:
for source in sources:
result = yield self.startUpload(source, masterdest)
if result == FAILURE:
defer.returnValue(FAILURE)
defer.returnValue(SUCCESS)
def logUpload(sources):
log.msg("MultipleFileUpload started, from worker %r to master %r" %
(sources, masterdest))
nsrcs = len(sources)
self.descriptionDone = 'uploading %d %s' % (nsrcs, 'file'
if nsrcs == 1 else
'files')
return sources
if self.glob:
s = globSources(sources)
else:
s = defer.succeed(sources)
s.addCallback(logUpload)
d = s.addCallback(uploadSources)
@d.addCallback
def allUploadsDone(result):
d = defer.maybeDeferred(
self.allUploadsDone, result, sources, masterdest)
d.addCallback(lambda _: result)
return d
d.addCallback(self.finished).addErrback(self.failed)
def finished(self, result):
return BuildStep.finished(self, result)
class FileDownload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'download'
renderables = ['mastersrc', 'workerdest']
def __init__(self, mastersrc, workerdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, mode=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
        # Emulate that the first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.mastersrc = mastersrc
self.workerdest = workerdest
self._registerOldWorkerAttr("workerdest")
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
'mode must be an integer or None')
self.mode = mode
def start(self):
self.checkWorkerHasCommand("downloadFile")
# we are currently in the buildmaster's basedir, so any non-absolute
# paths will be interpreted relative to that
source = os.path.expanduser(self.mastersrc)
workerdest = self.workerdest
log.msg("FileDownload started, from master %r to worker %r" %
(source, workerdest))
self.descriptionDone = "downloading to %s" % os.path.basename(
workerdest)
        # set up structures for reading the file
try:
fp = open(source, 'rb')
except IOError:
# if file does not exist, bail out with an error
self.addCompleteLog('stderr',
'File %r not available at master' % source)
# TODO: once BuildStep.start() gets rewritten to use
# maybeDeferred, just re-raise the exception here.
eventually(BuildStep.finished, self, FAILURE)
return
fileReader = remotetransfer.FileReader(fp)
# default arguments
args = {
'maxsize': self.maxsize,
'reader': fileReader,
'blocksize': self.blocksize,
'workdir': self.workdir,
'mode': self.mode,
}
if self.workerVersionIsOlderThan('downloadFile', '3.0'):
args['slavedest'] = workerdest
else:
args['workerdest'] = workerdest
cmd = makeStatusRemoteCommand(self, 'downloadFile', args)
d = self.runTransferCommand(cmd)
d.addCallback(self.finished).addErrback(self.failed)
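# A minimal usage sketch (hypothetical values): push a file that lives on the
# master into the worker's build directory.
#
#   factory.addStep(FileDownload(mastersrc="/srv/config/settings.cfg",
#                                workerdest="settings.cfg"))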
class StringDownload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'string_download'
renderables = ['workerdest', 's']
def __init__(self, s, workerdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, mode=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
        # Emulate that the first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.s = s
self.workerdest = workerdest
self._registerOldWorkerAttr("workerdest")
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
"StringDownload step's mode must be an integer or None,"
" got '%s'" % mode)
self.mode = mode
def start(self):
# we use 'downloadFile' remote command on the worker
self.checkWorkerHasCommand("downloadFile")
# we are currently in the buildmaster's basedir, so any non-absolute
# paths will be interpreted relative to that
workerdest = self.workerdest
log.msg("StringDownload started, from master to worker %r" %
workerdest)
self.descriptionDone = "downloading to %s" % os.path.basename(
workerdest)
        # set up structures for reading the file
fileReader = remotetransfer.StringFileReader(self.s)
# default arguments
args = {
'maxsize': self.maxsize,
'reader': fileReader,
'blocksize': self.blocksize,
'workdir': self.workdir,
'mode': self.mode,
}
if self.workerVersionIsOlderThan('downloadFile', '3.0'):
args['slavedest'] = workerdest
else:
args['workerdest'] = workerdest
cmd = makeStatusRemoteCommand(self, 'downloadFile', args)
d = self.runTransferCommand(cmd)
d.addCallback(self.finished).addErrback(self.failed)
class JSONStringDownload(StringDownload, WorkerAPICompatMixin):
name = "json_download"
def __init__(self, o, workerdest=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
        # Emulate that the first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 3 arguments")
if 's' in buildstep_kwargs:
del buildstep_kwargs['s']
s = json.dumps(o)
StringDownload.__init__(
self, s=s, workerdest=workerdest, **buildstep_kwargs)
class JSONPropertiesDownload(StringDownload, WorkerAPICompatMixin):
name = "json_properties_download"
def __init__(self, workerdest=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
        # Emulate that the first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 2 arguments")
self.super_class = StringDownload
if 's' in buildstep_kwargs:
del buildstep_kwargs['s']
StringDownload.__init__(
self, s=None, workerdest=workerdest, **buildstep_kwargs)
def start(self):
properties = self.build.getProperties()
props = {}
for key, value, source in properties.asList():
props[key] = value
self.s = json.dumps(dict(
properties=props,
sourcestamps=[ss.asDict()
for ss in self.build.getAllSourceStamps()],
),
)
return self.super_class.start(self)
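# A minimal usage sketch (hypothetical values): drop the build's properties
# and sourcestamps onto the worker as JSON for later inspection.
#
#   factory.addStep(JSONPropertiesDownload(workerdest="props.json"))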
|
{
"content_hash": "52a88c5af998416cd83c68503f42a505",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 101,
"avg_line_length": 35.20820668693009,
"alnum_prop": 0.5970561574653602,
"repo_name": "Alecto3-D/testable-greeter",
"id": "8c8cf4fd1df6b9dd7c8ff6983772959a6086a8a9",
"size": "23873",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bb-master/sandbox/lib/python3.5/site-packages/buildbot/steps/transfer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1340"
},
{
"name": "JavaScript",
"bytes": "6003191"
},
{
"name": "Makefile",
"bytes": "7521"
},
{
"name": "Python",
"bytes": "4833445"
},
{
"name": "RAML",
"bytes": "62192"
},
{
"name": "Shell",
"bytes": "3682"
}
],
"symlink_target": ""
}
|
from PyObjCTools.TestSupport import *
from Foundation import *
try:
unicode
except NameError:
unicode = str
try:
bytes
except NameError:
bytes = str
class Behaviour (NSObject):
def exceptionDuringOperation_error_leftOperand_rightOperand_(self, exc, err, l, r):
pass
class TestNSDecimalNumber (TestCase):
def testConstants(self):
self.assertIsInstance(NSDecimalNumberExactnessException, unicode)
self.assertIsInstance(NSDecimalNumberOverflowException, unicode)
self.assertIsInstance(NSDecimalNumberUnderflowException, unicode)
self.assertIsInstance(NSDecimalNumberDivideByZeroException, unicode)
def testNSDecimal(self):
dec = NSDecimal('55.0')
v = NSDecimalNumber.alloc().initWithDecimal_(dec)
self.assertIsInstance(v, NSDecimalNumber)
self.assertEqual(v.description(), '55')
v = NSDecimalNumber.decimalNumberWithDecimal_(dec)
self.assertIsInstance(v, NSDecimalNumber)
self.assertEqual(v.description(), '55')
o = v.decimalValue()
self.assertIsInstance(o, NSDecimal)
o = v.objCType()
self.assertIsInstance(o, bytes)
def testNSNumberAsNSDecimal(self):
v = NSNumber.numberWithFloat_(33.5)
o = v.decimalValue()
self.assertIsInstance(o, NSDecimal)
def testNSScannerWithDecimal(self):
v = NSScanner.alloc().initWithString_("55.23")
dec = NSDecimal()
o = v.scanDecimal_(dec)
self.assertIs(o, True)
self.assertEqual(dec.description(), '55.23')
def testMethods(self):
self.assertArgIsBOOL(NSDecimalNumber.initWithMantissa_exponent_isNegative_, 2)
self.assertArgIsBOOL(NSDecimalNumber.decimalNumberWithMantissa_exponent_isNegative_, 2)
self.assertArgIsBOOL(NSDecimalNumberHandler.initWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 2)
self.assertArgIsBOOL(NSDecimalNumberHandler.initWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 3)
self.assertArgIsBOOL(NSDecimalNumberHandler.initWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 4)
self.assertArgIsBOOL(NSDecimalNumberHandler.initWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 5)
self.assertArgIsBOOL(NSDecimalNumberHandler.decimalNumberHandlerWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 2)
self.assertArgIsBOOL(NSDecimalNumberHandler.decimalNumberHandlerWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 3)
self.assertArgIsBOOL(NSDecimalNumberHandler.decimalNumberHandlerWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 4)
self.assertArgIsBOOL(NSDecimalNumberHandler.decimalNumberHandlerWithRoundingMode_scale_raiseOnExactness_raiseOnOverflow_raiseOnUnderflow_raiseOnDivideByZero_, 5)
def testProtocols(self):
self.assertArgHasType(Behaviour.exceptionDuringOperation_error_leftOperand_rightOperand_, 0, objc._C_SEL)
self.assertArgHasType(Behaviour.exceptionDuringOperation_error_leftOperand_rightOperand_, 1, objc._C_NSUInteger)
if __name__ == "__main__":
main()
|
{
"content_hash": "a16bb81d0a393eb16be3902ca77b0baf",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 169,
"avg_line_length": 44.05194805194805,
"alnum_prop": 0.7632665094339622,
"repo_name": "Khan/pyobjc-framework-Cocoa",
"id": "00fbb865b5fb8216b06aff5e3abca8cf3035a8e7",
"size": "3392",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PyObjCTest/test_nsdecimalnumber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "5481"
},
{
"name": "Objective-C",
"bytes": "213902"
},
{
"name": "Python",
"bytes": "2450939"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_secret_projection import V1SecretProjection
class TestV1SecretProjection(unittest.TestCase):
""" V1SecretProjection unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SecretProjection(self):
"""
Test V1SecretProjection
"""
model = kubernetes.client.models.v1_secret_projection.V1SecretProjection()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3ee6df26ed80097ef148489f29cdf632",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 21.65,
"alnum_prop": 0.6997690531177829,
"repo_name": "skuda/client-python",
"id": "cf4bec523598d8e6d7fab653f4477bbcc8e9b46e",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1_secret_projection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
}
|
"""irc2 general utilities"""
import asyncio
import collections.abc
import time
class IStr(str):
"""
IStr is a string which follows RFC1459 casing rules, and allows for
case-insensitive equality testing.
>>> IStr("Hello World") == "HELLO world"
True
>>> IStr("Hello[] World~") == "hello{] WORLD^"
True
>>> IStr("Hello World") == "this is a completely different string"
False
"""
case_map = list(zip("[]\\~", "{}|^"))
def lower(self):
s = str.lower(self)
for lo, up in IStr.case_map:
s = s.replace(up, lo)
return IStr(s)
def upper(self):
s = str.upper(self)
for lo, up in IStr.case_map:
s = s.replace(lo, up)
return IStr(s)
def __hash__(self):
return hash(str(self.lower()))
def __eq__(self, other):
if not isinstance(other, IStr):
other = IStr(other)
return str(self.lower()) == str(other.lower())
class IDict(collections.abc.MutableMapping):
"""
IDict is a dict-like object with case-insensitive keys.
>>> d = IDict(A=1, b=2, c=3)
>>> "B" in d and d["B"] == d["b"]
True
>>> "a" in d and d["A"] == d["a"]
True
"""
def __init__(self, data={}, **more_data):
self._data = dict()
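        # Maps lowered key -> (original key, value): lookups are
        # case-insensitive while the original key casing is preserved.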
self.update(data)
self.update(more_data)
def __getitem__(self, key):
key, value = self._data[IStr(key).lower()]
return value
def __setitem__(self, key, value):
self._data[IStr(key).lower()] = key, value
def __delitem__(self, key):
del self._data[IStr(key).lower()]
def __iter__(self):
        return (key for key, value in self._data.values())
def __len__(self):
return len(self._data)
def __repr__(self):
return "IDict({" + ", ".join(repr(key) + ": " + repr(value) for key, value in self._data.values()) + "})"
class IDefaultDict(IDict):
"""
IDefaultDict is IDict but with collections.defaultdict functionality:
>>> d = IDefaultDict(int)
>>> d["A"] += 1
>>> d["A"]
1
>>> d["a"]
1
"""
def __init__(self, default, data={}, **more_data):
self.default = default
super().__init__(data, **more_data)
def __getitem__(self, key):
if IStr(key).lower() not in self._data:
self._data[IStr(key).lower()] = key, self.default()
return super().__getitem__(key)
def join_max_length(l, sep, maxlen=400):
"""
Join the items in l with sep such that the result is not longer than
maxlen. Returns a tuple (result, remaining items).
>>> join_max_length(["lorem", "ipsum", "dolor", "sit", "amet"], ":", 15)
('lorem:ipsum', ['dolor', 'sit', 'amet'])
>>> join_max_length(["dolor", "sit", "amet"], ":", 15)
('dolor:sit:amet', [])
"""
result = ""
l = list(l)
while l and len(result) + len(l[0]) < maxlen:
result += (l.pop(0) + sep)
return result[:-len(sep)], l
class TokenBucket(object):
"""
Implements token-bucket rate limiting with the given bucket size ("fill")
and replenishing time (t).
"""
def __init__(self, fill, t):
self._amount = fill
self.last = time.time()
self.fill = fill
self.time = t
def amount(self):
"""
Get the current number of tokens in the bucket. Does not decrement the
number of tokens (don't use this for rate-limiting).
"""
old = self._amount
self._amount = min(self._amount + ((time.time() - self.last) // self.time), self.fill)
if old != self._amount:
self.last = time.time()
return self._amount
def take(self):
"""
Take a token from the bucket, if available. Returns True if successful,
and False if not.
"""
if self.amount():
self._amount -= 1
return True
return False
async def wait(self):
"""
Asynchronously wait for a token to be available in the bucket, then
take it. Will complete immediately if a token is already available.
"""
if self.take():
return True
await asyncio.sleep(self.time - (time.time() - self.last))
return self.take()
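# A minimal sketch of the bucket in use (hypothetical names): allow bursts of
# 5 sends, then at most one send every 2 seconds.
#
#   bucket = TokenBucket(5, 2)
#   async def rate_limited_send(conn, line):
#       await bucket.wait()   # completes immediately while tokens remain
#       conn.send(line)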
class AttrGetFollower(object):
"""
AttrGetFollower takes getattr requests, keeps track of the path they
create, and calls a callback when called. Best explained with an example:
>>> def callback(path):
... print("callback: {}".format(", ".join(path)))
>>> follower = AttrGetFollower([], callback)
>>> abcdef = follower.a.b.c.d.e.f
>>> print(abcdef)
AttrGetFollower(['a', 'b', 'c', 'd', 'e', 'f'])
>>> abcdef()
callback: a, b, c, d, e, f
"""
def __init__(self, path, callback):
self.path = path
self.callback = callback
def __repr__(self):
return "AttrGetFollower({})".format(self.path)
def __getattr__(self, attr):
return AttrGetFollower(self.path + [attr], self.callback)
def __call__(self, *args, **kwargs):
self.callback(self.path, *args, **kwargs)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
{
"content_hash": "7b6e354c0ccf53bfd6c76adcdf01e038",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 113,
"avg_line_length": 27.0625,
"alnum_prop": 0.5452270977675134,
"repo_name": "fwilson42/irc2",
"id": "b1912c4f7b4f1af5599006ea91552319ff9c5854",
"size": "5196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irc2/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38818"
}
],
"symlink_target": ""
}
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
def inverse_image(model):
"""Inverse current selected image by multiplying with -1."""
# get data and name from current selected image
current_row = model.currentIndex().row()
source_vol = model.data(model.index(current_row), Qt.UserRole + 6)
source_name = model.data(model.index(current_row), Qt.DisplayRole)
# inverse process
inversed_vol = imtool.inverse_transformation(source_vol)
inversed_vol_name = 'inverse_' + source_name
# save result as a new image
model.addItem(inversed_vol, None, inversed_vol_name,
model._data[0].get_header())
return
def edge_detection(model):
"""Image edge detection."""
# get data and name from the currently selected image
current_row = model.currentIndex().row()
source_vol = model.data(model.index(current_row), Qt.UserRole + 6)
source_name = model.data(model.index(current_row), Qt.DisplayRole)
# detect the edge
new_vol = imtool.multi_label_edge_detection(source_vol)
new_vol_name = 'edge_' + source_name
# save result as a new image
model.addItem(new_vol, None, new_vol_name,
model._data[0].get_header(),
None, None, 255, 'green')
return
def gen_label_color(color):
icon_image = QImage(QSize(32, 32), QImage.Format_RGB888)
icon_image.fill(color.rgb())
icon_image = icon_image.rgbSwapped()
icon_pixmap = QPixmap.fromImage(icon_image)
return QIcon(icon_pixmap)
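# A minimal usage sketch (illustrative, not part of the original module):
# gen_label_color builds a 32x32 solid-colour icon, e.g. for a list item.
# A QApplication must exist before QPixmap/QIcon objects can be created.
#
#     icon = gen_label_color(QColor(0, 128, 0))
#     item.setIcon(icon)  # `item` is a hypothetical QListWidgetItem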
|
{
"content_hash": "425ebb9ecd2aea0a738b3cb44a71ddba",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 70,
"avg_line_length": 35.27272727272727,
"alnum_prop": 0.6675257731958762,
"repo_name": "liuzhaoguo/FreeROI-1",
"id": "9f6e924559ccdfdbaab47644a9200b40a0dbb7ac",
"size": "1667",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "froi/gui/component/no_gui_tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "507742"
},
{
"name": "Shell",
"bytes": "508"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.append(os.path.join(os.getcwd(), 'tools'))
|
{
"content_hash": "25486d28a4ee44d16ef4dece39ff8a45",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 51,
"avg_line_length": 18.5,
"alnum_prop": 0.7162162162162162,
"repo_name": "savi-dev/quantum",
"id": "8e233bcf4234451fcab777204729d542c64b2d16",
"size": "788",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "bin/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "18263"
},
{
"name": "Python",
"bytes": "1519204"
},
{
"name": "Shell",
"bytes": "7766"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import random
def count_the_rands(target=1, floor=1, ceiling=1000):
count = 0 # how many random numbers generated
numbers = {} # keeping track of the numbers generated
number = None # the number we generate each cycle
while number != target:
number = random.randint(floor, ceiling)
count += 1 # increment counter
numbers[number] = numbers.get(number, 0) + 1 # add to number dictionary
print('To find the target {}, we looped {} times.'.format(target, count))
most_common = sorted(numbers.items(), key=lambda r: r[1], reverse=True)[0][0]
least_common = sorted(numbers.items(), key=lambda r: r[1])[0][0]
print('Most common number: {}'.format(most_common))
print('Least common number: {}'.format(least_common))
print('')
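# Note: draws are i.i.d. uniform over [floor, ceiling], so the number of
# draws until the target appears is geometric with p = 1/(ceiling-floor+1);
# the expected loop count is therefore ceiling - floor + 1 (1000 by default).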
def main():
random.seed(1)
tests = [
{'target': 15, 'floor': 1, 'ceiling': 100},
{'target': 20, 'floor': 1, 'ceiling': 1000},
{'target': 25, 'floor': 1, 'ceiling': 10000},
{'target': 50, 'floor': 1, 'ceiling': 100000},
]
for test in tests:
count_the_rands(**test)
if __name__ == '__main__':
main()
|
{
"content_hash": "44193719264ef738654779d38cac750d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 38.06666666666667,
"alnum_prop": 0.5980735551663747,
"repo_name": "bandarji/lekhan",
"id": "cd309353df0f460785b7e06412df7cf983ebc517",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/reddit/count_rand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66166"
}
],
"symlink_target": ""
}
|
import json
from django.test import SimpleTestCase, TestCase
from django.urls import reverse
from django.utils import translation
from wagtail.contrib.table_block.blocks import DEFAULT_TABLE_OPTIONS, TableBlock
from wagtail.core.models import Page
from wagtail.tests.testapp.models import TableBlockStreamPage
from wagtail.tests.utils import WagtailTestUtils
class TestTableBlock(TestCase):
def setUp(self):
self.default_table_options = {
'minSpareRows': 0,
'startRows': 3,
'startCols': 3,
'colHeaders': False,
'rowHeaders': False,
'contextMenu': True,
'editor': 'text',
'stretchH': 'all',
'height': 108,
'language': 'en',
'renderer': 'text',
'autoColumnSize': False,
}
def test_table_block_render(self):
"""
Test a generic render.
"""
value = {'first_row_is_table_header': False, 'first_col_is_header': False,
'data': [['Test 1', 'Test 2', 'Test 3'], [None, None, None],
[None, None, None]]}
block = TableBlock()
result = block.render(value)
expected = """
<table>
<tbody>
<tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
"""
self.assertHTMLEqual(result, expected)
self.assertIn('Test 2', result)
def test_table_block_alignment_render(self):
"""
Test a generic render with some cells aligned.
"""
value = {'first_row_is_table_header': True, 'first_col_is_header': False,
'cell': [{'row': 0, 'col': 1, 'className': 'htLeft'},
{'row': 1, 'col': 1, 'className': 'htRight'}],
'data': [['Test 1', 'Test 2', 'Test 3'], [None, None, None],
[None, None, None]]}
block = TableBlock()
result = block.render(value)
expected = """
<table>
<thead>
<tr><th>Test 1</th><th class="htLeft">Test 2</th><th>Test 3</th></tr>
</thead>
<tbody>
<tr><td></td><td class="htRight"></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
"""
self.assertHTMLEqual(result, expected)
self.assertIn('Test 2', result)
def test_render_empty_table(self):
"""
An empty table should render okay.
"""
block = TableBlock()
result = block.render({
'first_row_is_table_header': False,
'first_col_is_header': False,
'data': [
[None, None, None],
[None, None, None],
[None, None, None]
]
})
expected = """
<table>
<tbody>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
"""
self.assertHTMLEqual(result, expected)
def test_do_not_render_html(self):
"""
Ensure that raw html doesn't render
by default.
"""
value = {'first_row_is_table_header': False, 'first_col_is_header': False,
'data': [['<p><strong>Test</strong></p>', None, None], [None, None, None],
[None, None, None]]}
expected = """
<table>
<tbody>
<tr><td><p><strong>Test</strong></p></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
"""
block = TableBlock()
result = block.render(value)
self.assertHTMLEqual(result, expected)
def test_row_headers(self):
"""
Ensure that row headers are properly rendered.
"""
value = {'first_row_is_table_header': True, 'first_col_is_header': False,
'data': [['Foo', 'Bar', 'Baz'], [None, None, None], [None, None, None]]}
expected = """
<table>
<thead>
<tr><th>Foo</th><th>Bar</th><th>Baz</th></tr>
</thead>
<tbody>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
"""
block = TableBlock()
result = block.render(value)
self.assertHTMLEqual(result, expected)
def test_column_headers(self):
"""
Ensure that column headers are properly rendered.
"""
value = {'first_row_is_table_header': False, 'first_col_is_header': True,
'data': [['Foo', 'Bar', 'Baz'], ['one', 'two', 'three'], ['four', 'five', 'six']]}
expected = """
<table>
<tbody>
<tr><th>Foo</th><td>Bar</td><td>Baz</td></tr>
<tr><th>one</th><td>two</td><td>three</td></tr>
<tr><th>four</th><td>five</td><td>six</td></tr>
</tbody>
</table>
"""
block = TableBlock()
result = block.render(value)
self.assertHTMLEqual(result, expected)
def test_row_and_column_headers(self):
"""
Test row and column headers at the same time.
"""
value = {'first_row_is_table_header': True, 'first_col_is_header': True,
'data': [['Foo', 'Bar', 'Baz'], ['one', 'two', 'three'], ['four', 'five', 'six']]}
expected = """
<table>
<thead>
<tr><th>Foo</th><th>Bar</th><th>Baz</th></tr>
</thead>
<tbody>
<tr><th>one</th><td>two</td><td>three</td></tr>
<tr><th>four</th><td>five</td><td>six</td></tr>
</tbody>
</table>
"""
block = TableBlock()
result = block.render(value)
self.assertHTMLEqual(result, expected)
def test_value_for_and_from_form(self):
"""
Make sure we get back good JSON and that it
translates back to Python.
"""
value = {'first_row_is_table_header': False, 'first_col_is_header': False,
'data': [['Foo', 1, None], [3.5, 'Bar', 'Baz']]}
block = TableBlock()
expected_json = '{"first_row_is_table_header": false, "first_col_is_header": false, "data": [["Foo", 1, null], [3.5, "Bar", "Baz"]]}'
returned_json = block.value_for_form(value)
self.assertJSONEqual(expected_json, returned_json)
self.assertEqual(block.value_from_form(returned_json), value)
def test_is_html_renderer(self):
"""
Test that settings flow through correctly to
the is_html_renderer method.
"""
# TableBlock with default table_options
block1 = TableBlock()
self.assertEqual(block1.is_html_renderer(), False)
# TableBlock with altered table_options
new_options = self.default_table_options.copy()
new_options['renderer'] = 'html'
block2 = TableBlock(table_options=new_options)
self.assertEqual(block2.is_html_renderer(), True)
def test_searchable_content(self):
value = {'first_row_is_table_header': False, 'first_col_is_header': False,
'data': [['Test 1', 'Test 2', 'Test 3'], [None, 'Bar', None],
[None, 'Foo', None]]}
block = TableBlock()
content = block.get_searchable_content(value)
self.assertEqual(content, ['Test 1', 'Test 2', 'Test 3', 'Bar', 'Foo', ])
def test_render_with_extra_context(self):
"""
Test that extra context variables passed in block.render are passed through
to the template.
"""
block = TableBlock(template="tests/blocks/table_block_with_caption.html")
value = {'first_row_is_table_header': False, 'first_col_is_header': False,
'data': [['Test 1', 'Test 2', 'Test 3'], [None, None, None],
[None, None, None]]}
result = block.render(value, context={
'caption': "A fascinating table."
})
self.assertIn("Test 1", result)
self.assertIn("<div>A fascinating table.</div>", result)
def test_table_block_caption_render(self):
"""
Test a generic render with caption.
"""
value = {'table_caption': 'caption', 'first_row_is_table_header': False,
'first_col_is_header': False,
'data': [['Test 1', 'Test 2', 'Test 3'], [None, None, None],
[None, None, None]]}
block = TableBlock()
result = block.render(value)
expected = """
<table>
<caption>caption</caption>
<tbody>
<tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
"""
self.assertHTMLEqual(result, expected)
self.assertIn('Test 2', result)
class TestTableBlockForm(WagtailTestUtils, SimpleTestCase):
def setUp(self):
# test value for table data
self.value = {
'first_row_is_table_header': True,
'first_col_is_header': True,
'data': [
['Ship', 'Type', 'Status'],
['Galactica', 'Battlestar', 'Active'],
['Valkyrie', 'Battlestar', 'Destroyed'],
['Cylon Basestar', 'Basestar', 'Active'],
['Brenik', 'Small Military Vessel', 'Destroyed'],
]
}
# set language from testing environment
language = translation.get_language()
self.default_table_options = DEFAULT_TABLE_OPTIONS.copy()
self.default_table_options['language'] = language
def test_default_table_options(self):
"""
Test options without any custom table_options provided.
"""
block = TableBlock()
# check that default_table_options created correctly
self.assertEqual(block.table_options, block.get_table_options())
# check that default_table_options used on self
self.assertEqual(self.default_table_options, block.table_options)
# check a few individual keys from DEFAULT_TABLE_OPTIONS
self.assertEqual(DEFAULT_TABLE_OPTIONS['startRows'], block.table_options['startRows'])
self.assertEqual(DEFAULT_TABLE_OPTIONS['colHeaders'], block.table_options['colHeaders'])
self.assertEqual(DEFAULT_TABLE_OPTIONS['contextMenu'], block.table_options['contextMenu'])
self.assertEqual(DEFAULT_TABLE_OPTIONS['editor'], block.table_options['editor'])
self.assertEqual(DEFAULT_TABLE_OPTIONS['stretchH'], block.table_options['stretchH'])
def test_table_options_language(self):
"""
Test that the environment's language is used if no language is provided.
"""
# default must always contain a language value
block = TableBlock()
self.assertIn('language', block.table_options)
# French
translation.activate('fr-fr')
block_fr = TableBlock()
self.assertEqual('fr-fr', block_fr.table_options['language'])
translation.activate('it')
# Italian
block_it = TableBlock()
self.assertEqual('it', block_it.table_options['language'])
# table_options with language provided, different from the environment
block_with_lang = TableBlock(table_options={'language': 'ja'})
self.assertNotEqual('it', block_with_lang.table_options['language'])
self.assertEqual('ja', block_with_lang.table_options['language'])
translation.activate('en')
def test_table_options_context_menu(self):
"""
Test how contextMenu is set to default.
"""
default_context_menu = list(DEFAULT_TABLE_OPTIONS['contextMenu']) # create copy
# confirm the default is correct
table_options = TableBlock().table_options
self.assertEqual(table_options['contextMenu'], default_context_menu)
# confirm that when custom option is True, default is still used
table_options_menu_true = TableBlock(table_options={'contextMenu': True}).table_options
self.assertEqual(table_options_menu_true['contextMenu'], default_context_menu)
# confirm menu is removed if False is passed in
table_options_menu_false = TableBlock(table_options={'contextMenu': False}).table_options
self.assertEqual(table_options_menu_false['contextMenu'], False)
# confirm if list passed in, it is used
table_options_menu_list = TableBlock(table_options={'contextMenu': ['undo', 'redo']}).table_options
self.assertEqual(table_options_menu_list['contextMenu'], ['undo', 'redo'])
# test if empty array passed in
table_options_menu_list = TableBlock(table_options={'contextMenu': []}).table_options
self.assertEqual(table_options_menu_list['contextMenu'], [])
def test_table_options_others(self):
"""
Test simple options overrides get passed correctly.
"""
block_1_opts = TableBlock(table_options={'startRows': 5, 'startCols': 2}).table_options
self.assertEqual(block_1_opts['startRows'], 5)
self.assertEqual(block_1_opts['startCols'], 2)
block_2_opts = TableBlock(table_options={'stretchH': 'none'}).table_options
self.assertEqual(block_2_opts['stretchH'], 'none')
# check value that is not part of the defaults
block_3_opts = TableBlock(table_options={'allowEmpty': False}).table_options
self.assertEqual(block_3_opts['allowEmpty'], False)
def test_tableblock_render_form(self):
"""
Test the rendered form field generated by TableBlock.
"""
block = TableBlock()
html = block.render_form(value=self.value)
self.assertIn('<script>initTable', html)
self.assertIn('<div class="field char_field widget-table_input">', html)
# check that options render in the init function
self.assertIn('"editor": "text"', html)
self.assertIn('"autoColumnSize": false', html)
self.assertIn('"stretchH": "all"', html)
def test_searchable_content(self):
"""
Test searchable content is created correctly.
"""
block = TableBlock()
search_content = block.get_searchable_content(value=self.value)
self.assertIn('Galactica', search_content)
self.assertIn('Brenik', search_content)
class TestTableBlockPageEdit(TestCase, WagtailTestUtils):
def setUp(self):
self.value = {
'first_row_is_table_header': True,
'first_col_is_header': True,
'data': [
['Ship', 'Type', 'Status'],
['Galactica', 'Battlestar', 'Active'],
['Valkyrie', 'Battlestar', 'Destroyed'],
['Cylon Basestar', 'Basestar', 'Active'],
['Brenik', 'Small Military Vessel', 'Destroyed'],
]
}
self.root_page = Page.objects.get(id=2)
table_block_page_instance = TableBlockStreamPage(
title='Ships',
table=json.dumps([{'type': 'table', 'value': self.value}])
)
self.table_block_page = self.root_page.add_child(instance=table_block_page_instance)
self.user = self.login()
def test_page_edit_page_view(self):
"""
Test that edit page loads with saved table data and correct init function.
"""
response = self.client.get(reverse('wagtailadmin_pages:edit', args=(self.table_block_page.id,)))
# check page + field renders
self.assertContains(response, '<div class="field char_field widget-table_input fieldname-table">')
# check data
self.assertContains(response, 'Battlestar')
self.assertContains(response, 'Galactica')
# check init
self.assertContains(response, 'initTable("table\\u002D0\\u002Dvalue"')
self.assertContains(response, 'minSpareRows')
self.assertContains(response, 'startRows')
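# For reference, a minimal sketch of how TableBlock is typically declared on
# a page model (illustrative; `DemoPage` and the field name are assumptions):
#
#     from wagtail.core.fields import StreamField
#     from wagtail.contrib.table_block.blocks import TableBlock
#
#     class DemoPage(Page):
#         body = StreamField([
#             ('table', TableBlock(table_options={'startRows': 4})),
#         ])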
|
{
"content_hash": "f8272d673ba13e1431ecd7ab8c81e3a1",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 141,
"avg_line_length": 39.930952380952384,
"alnum_prop": 0.5395027130165166,
"repo_name": "mikedingjan/wagtail",
"id": "63967d32b180f15d2c4ae4ec92155248e385755c",
"size": "16771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/contrib/table_block/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183841"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "373400"
},
{
"name": "JavaScript",
"bytes": "266257"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3607707"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
}
|
__all__ = [
'Response',
'ResponseParser',
]
from kitsu.http.errors import *
from kitsu.http.headers import Headers
from kitsu.http.parsers import LineParser
class Response(object):
def __init__(self, version=(1,1), code=200, phrase='OK', headers=(), body=None):
self.version = version
self.code = code
self.phrase = phrase
self.headers = Headers(headers)
self.body = body
self.__parserState = 'STATUS'
def toLines(self, lines=None):
if lines is None:
lines = []
lines.append("HTTP/%d.%d %d %s\r\n" % (self.version[0], self.version[1], self.code, self.phrase))
self.headers.toLines(lines)
lines.append("\r\n")
return lines
def toString(self):
return ''.join(self.toLines())
def __str__(self):
return self.toString()
def __parseStatus(self, line):
parts = line.split(None, 2)
if len(parts) not in (2, 3):
raise HTTPDataError("response must be in 'HTTP/n.n status message' format: %r" % (line,))
version = parts[0]
code = parts[1]
phrase = len(parts) >= 3 and parts[2] or ""
if not version.startswith('HTTP/'):
raise HTTPDataError("protocol must be HTTP: %r" % (line,))
version = version[5:].split('.')
if len(version) != 2:
raise HTTPDataError("invalid version: %r" % (line,))
try:
version = (int(version[0]), int(version[1]))
except ValueError:
raise HTTPDataError("invalid version: %r" % (line,))
try:
code = int(code)
except ValueError:
raise HTTPDataError("status code must be a number: %r" % (line,))
self.version = version
self.code = code
self.phrase = phrase
def parseLine(self, line):
if self.__parserState == 'STATUS':
if not line:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html
# Just ignore all empty lines for maximum compatibility
return True
self.__parseStatus(line)
self.__parserState = 'HEADERS'
return True
elif self.__parserState == 'HEADERS':
if not self.headers.parseLine(line):
self.__parserState = 'DONE'
return False
return True
return False
class ResponseParser(LineParser):
"""Response parser"""
def __init__(self):
self.response = Response()
def parseLine(self, line):
if not self.response.parseLine(line):
self.done = True
return [self.response]
return []
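# A minimal parsing sketch (illustrative, not part of the original module):
#
#     parser = ResponseParser()
#     for line in ("HTTP/1.1 404 Not Found", "Content-Length: 0", ""):
#         result = parser.parseLine(line)
#     response = result[0]  # parseLine returns [response] once parsing is done
#     assert (response.code, response.phrase) == (404, 'Not Found')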
|
{
"content_hash": "21b104926b65ab67ed0c8f22d0aa4722",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 105,
"avg_line_length": 32.734939759036145,
"alnum_prop": 0.548398969451601,
"repo_name": "snaury/kitsu.http",
"id": "f076d48fcef201428b13082093cc9ceaa08a40f4",
"size": "2717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitsu/http/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63479"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
import sys, os, warnings
warnings.simplefilter("ignore", DeprecationWarning)
from ncclient import manager
def demo(host, user):
with manager.connect(host=host, port=22, username=user, hostkey_verify=False) as m:
c = m.get_config(source='running').data_xml
with open("%s.xml" % host, 'w') as f:
f.write(c)
if __name__ == '__main__':
demo(sys.argv[1], os.getenv("USER"))
|
{
"content_hash": "e39fffe696dcb350a10c06c55c8324d6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 33.916666666666664,
"alnum_prop": 0.6388206388206388,
"repo_name": "sysbot/ncclient",
"id": "5f3ba47c9faf36e45985a9c9a0951c0e7fab60a9",
"size": "598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/nc02.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Recover the toy dataset generated by example/generate_toy/bnmf/generate_bnmtf.py
using ICM, and plot the MSE against timestamps.
We can plot the MSE, R2 and Rp as it converges, on the entire dataset.
We have I=100, J=80, K=5, L=5, and no test data.
We give flatter priors (1/10) than what was used to generate the data (1).
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BNMTF.code.models.nmtf_icm import nmtf_icm
import numpy, random, scipy, matplotlib.pyplot as plt
##########
input_folder = project_location+"BNMTF/data_toy/bnmtf/"
repeats = 10
iterations = 5000
init_FG = 'kmeans'
init_S = 'random'
I, J, K, L = 100, 80, 5, 5
minimum_TN = 0.01
alpha, beta = 1., 1.
lambdaF = numpy.ones((I,K))/10.
lambdaS = numpy.ones((K,L))/10.
lambdaG = numpy.ones((J,L))/10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
# Load in data
R = numpy.loadtxt(input_folder+"R.txt")
M = numpy.ones((I,J))
# Run the ICM algorithm, <repeats> times
times_repeats = []
performances_repeats = []
for i in range(0,repeats):
# Set all the seeds
numpy.random.seed(3)
random.seed(4)
scipy.random.seed(5)
# Run the model
NMTF = nmtf_icm(R,M,K,L,priors)
NMTF.initialise(init_S=init_S,init_FG=init_FG)
NMTF.run(iterations,minimum_TN=minimum_TN)
# Extract the performances and timestamps across all iterations
times_repeats.append(NMTF.all_times)
performances_repeats.append(NMTF.all_performances)
# Check whether seed worked: all performances should be the same
assert all([numpy.array_equal(performances, performances_repeats[0]) for performances in performances_repeats]), \
"Seed went wrong - performances not the same across repeats!"
# Print out the performances, and the average times
icm_all_times_average = list(numpy.average(times_repeats, axis=0))
icm_all_performances = performances_repeats[0]
print "icm_all_times_average = %s" % icm_all_times_average
print "icm_all_performances = %s" % icm_all_performances
# Print all time plots, the average, and performance vs iterations
plt.figure()
plt.title("Performance against time")
plt.ylim(0,10)
for times in times_repeats:
plt.plot(times, icm_all_performances['MSE'])
plt.figure()
plt.title("Performance against average time")
plt.plot(icm_all_times_average, icm_all_performances['MSE'])
plt.ylim(0,10)
plt.figure()
plt.title("Performance against iteration")
plt.plot(icm_all_performances['MSE'])
plt.ylim(0,10)
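# Show the figures (assumes an interactive matplotlib backend; without this
# call a non-interactive run exits before anything is rendered).
plt.show()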
|
{
"content_hash": "1886938b2087de3b9ce91f56609cc8ab",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 114,
"avg_line_length": 28.065934065934066,
"alnum_prop": 0.7106499608457322,
"repo_name": "ThomasBrouwer/BNMTF",
"id": "3006225bb46b02cf622f97b54d35234cb19a5f35",
"size": "2554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/experiments_toy/time/nmtf_icm_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "29924"
},
{
"name": "Python",
"bytes": "2751547"
},
{
"name": "R",
"bytes": "41465"
}
],
"symlink_target": ""
}
|
"""
Abstraction around a package index.
"""
from abc import ABCMeta, abstractmethod
class Index(object):
"""
Abstract interface for managing PyPI data.
"""
__metaclass__ = ABCMeta
@abstractmethod
def get_projects(self):
"""
Get the list of hosted projects.
:returns: an iterable of project names
"""
pass
@abstractmethod
def get_versions(self, name):
"""
Get the list of versions for a project.
:param name: the project name
:returns: a dictionary mapping versions to paths
"""
pass
@abstractmethod
def get_metadata(self, name, version):
"""
Get metadata for a project version.
:param name: the project name
:param version: the project version
:returns: a dictionary of metadata
"""
pass
@abstractmethod
def get_distribution(self, location, **kwargs):
"""
Get distribution for a project version.
:param location: location of the distribution content
:returns: a pair of content data and content type for a distribution
"""
pass
@abstractmethod
def remove_version(self, name, version):
"""
Remove all data for a project version.
:param name: the project name
:param version: the project version
"""
pass
@abstractmethod
def validate_metadata(self, metadata):
"""
Validate a distribution's metadata.
At a minimum, this function must verify that the project name and version
are present in the dictionary. Other entries may be required as well.
:param metadata: a dictionary of key-value metadata
:returns: whether the metadata was valid
"""
pass
@abstractmethod
def upload_distribution(self, upload_file):
"""
Upload a distribution for a project version.
The project name and version are determined from the metadata
in the upload_file contents.
:param upload_file: an instance of `werkzeug.datastructures.FileStorage`
"""
pass
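# A minimal concrete subclass sketch (illustrative; `InMemoryIndex` and its
# storage layout are assumptions, not part of this module):
#
#     class InMemoryIndex(Index):
#         def __init__(self):
#             self._projects = {}  # name -> {version: metadata}
#         def get_projects(self):
#             return self._projects.keys()
#         def get_versions(self, name):
#             return dict((v, "/%s/%s" % (name, v))
#                         for v in self._projects.get(name, {}))
#         # ... remaining abstract methods elided for brevity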
|
{
"content_hash": "99fdd153ca5555cd7ebf2d976423c909",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 81,
"avg_line_length": 25.25581395348837,
"alnum_prop": 0.6109576427255985,
"repo_name": "jessemyers/cheddar",
"id": "80f9086a4c308613448eddb2ab341bd8a3570750",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cheddar/index/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4032"
},
{
"name": "Python",
"bytes": "74930"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ContactsConfig(AppConfig):
name = 'contacts'
|
{
"content_hash": "25923e001b3039cc7c2641b066119872",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 18.857142857142858,
"alnum_prop": 0.7575757575757576,
"repo_name": "Zombusters/zchat-lambdas",
"id": "65098afd914362501df7e310a69b5267f61cd396",
"size": "156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zchat/contacts/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19824"
}
],
"symlink_target": ""
}
|
from asyncio import wait_for
from aiojobs import create_scheduler
from aioredis import Redis
from virtool.dispatcher.client import DispatcherClient
async def test_client(loop, redis: Redis):
"""
Test that the client can successfully publish a Pub/Sub message to the dispatch Redis channel.
"""
(channel,) = await redis.subscribe("channel:dispatch")
interface = DispatcherClient(redis)
scheduler = await create_scheduler()
await scheduler.spawn(interface.run())
interface.enqueue_change("samples", "update", [1, 3, 4])
change = await wait_for(channel.get_json(), timeout=3)
assert change == {
"interface": "samples",
"operation": "update",
"id_list": [1, 3, 4],
}
await scheduler.close()
|
{
"content_hash": "1a3c8bb3abd4c5bf3e32a791774b2cbf",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 98,
"avg_line_length": 25.633333333333333,
"alnum_prop": 0.6736020806241872,
"repo_name": "virtool/virtool",
"id": "037e202ca1a30ba7f3f11b74271cd8d03e36810b",
"size": "769",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/dispatcher/test_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "961"
},
{
"name": "HTML",
"bytes": "44858"
},
{
"name": "Python",
"bytes": "1316464"
}
],
"symlink_target": ""
}
|
""" Test functions for rbf module """
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (1D)"""
olderr = np.seterr(all="ignore")
try:
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
finally:
np.seterr(**olderr)
def check_rbf2d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (2D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
finally:
np.seterr(**olderr)
def check_rbf3d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (3D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = random.rand(50,1)*4-2
d = x*exp(-x**2-y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
finally:
np.seterr(**olderr)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
"""Check that the Rbf function approximates a smooth function well away
from the nodes."""
olderr = np.seterr(all="ignore")
try:
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
#import matplotlib.pyplot as plt
#plt.figure()
#plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
#plt.title(function)
#plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
finally:
np.seterr(**olderr)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.05,
'inverse multiquadric': 0.02,
'gaussian': 0.01,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def test_default_construction():
"""Check that the Rbf class can be constructed with the default
multiquadric basis function. Regression test for ticket #1228."""
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
"""Check that the Rbf class can be constructed with function=callable."""
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
"""Check that the Rbf class can be constructed with a two argument
function=callable."""
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
|
{
"content_hash": "3217f29cff6bc4974bb8c0866f8721ff",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 78,
"avg_line_length": 30.892307692307693,
"alnum_prop": 0.588894422310757,
"repo_name": "teoliphant/scipy",
"id": "a4045dc708bb82b0037a743813d6cc1e399e9760",
"size": "4086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/interpolate/tests/test_rbf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11530901"
},
{
"name": "C++",
"bytes": "7695320"
},
{
"name": "FORTRAN",
"bytes": "5898903"
},
{
"name": "Matlab",
"bytes": "1861"
},
{
"name": "Objective-C",
"bytes": "137083"
},
{
"name": "Python",
"bytes": "5863600"
},
{
"name": "Shell",
"bytes": "1793"
}
],
"symlink_target": ""
}
|
"""MySQL Connector/Python version information
The file version.py gets installed and is available after installation
as mysql.connector.version.
"""
VERSION = (2, 0, 3, '', 0)
if VERSION[3] and VERSION[4]:
VERSION_TEXT = '{0}.{1}.{2}{3}{4}'.format(*VERSION)
else:
VERSION_TEXT = '{0}.{1}.{2}'.format(*VERSION[0:3])
LICENSE = 'GPLv2 with FOSS License Exception'
EDITION = '' # Added in package names, after the version
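# For example, a prerelease tuple such as VERSION = (2, 0, 3, 'a', 1) yields
# VERSION_TEXT == '2.0.3a1', while the final (2, 0, 3, '', 0) above collapses
# to plain '2.0.3'.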
|
{
"content_hash": "e2226d07f14279bbeb56ffceeb59fea4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 28.733333333333334,
"alnum_prop": 0.6774941995359629,
"repo_name": "lowitty/selenium",
"id": "c2a8150029d23950a1129b4d65dd172f1082baa3",
"size": "1560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/mysql/connector/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2173161"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_delete, post_save
from django.dispatch import receiver
from cloudinary import api
from cloudinary.models import CloudinaryField
from rest_framework.authtoken.models import Token
from taggit.managers import TaggableManager
from uuslug import uuslug
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
for user in User.objects.all():
Token.objects.get_or_create(user=user)
class ArtworkManager(models.Manager):
def get_artwork(self, slug):
key = "artwork:%s" % slug
artwork = cache.get(key)
if not artwork:
artwork = Artwork.objects.select_related().get(slug=slug)
if artwork:
cache.add(key, artwork)
return artwork
class Artwork(models.Model):
title = models.CharField("Title", max_length=200, blank=False, db_index=True)
slug = models.SlugField("Slug", max_length=50, blank=False, unique=True, db_index=True)
artist = models.CharField("Artist", max_length=200, blank=True, db_index=True)
description = models.TextField("Description", blank=True, null=True)
publisher = models.ForeignKey(settings.AUTH_USER_MODEL, editable=False, db_index=True)
published = models.DateTimeField("Published", auto_now_add=True)
tags = TaggableManager("Tags", blank=True)
objects = ArtworkManager()
def save(self, *args, **kwargs):
self.slug = uuslug(self.title, instance=self, max_length=50, word_boundary=True)
super(Artwork, self).save(*args, **kwargs)
key = "artwork:%s" % self.slug
cache.set(key, self)
def get_absolute_url(self):
return reverse('core:artwork_detail', kwargs={'slug': self.slug})
def get_photo(self):
return Photo.objects.get_photo(self)
def dehydrate_tags(self):
return self.tags.names()
def __unicode__(self):
return "%s by %s" % (self.title, self.artist)
@receiver(pre_delete, sender=Artwork, dispatch_uid='artwork_pre_delete')
def artwork_pre_delete(sender, instance, using, **kwargs):
key = "artwork:%s" % instance.slug
cache.delete(key)
class PhotoManager(models.Manager):
def get_photo(self, artwork):
key = "photo:%s" % artwork.slug
photo = cache.get(key)
if not photo:
photo = Photo.objects.get(artwork=artwork)
if photo:
cache.add(key, photo)
return photo
class Photo(models.Model):
caption = models.CharField("Caption", max_length=200, blank=True)
image = CloudinaryField("Image", blank=True)
artwork = models.ForeignKey(Artwork, on_delete=models.CASCADE, related_name='photos')
objects = PhotoManager()
def save(self, *args, **kwargs):
super(Photo, self).save(*args, **kwargs)
key = "photo:%s" % self.artwork.slug
cache.set(key, self)
def get_absolute_url(self):
return self.image.url
def __unicode__(self):
try:
result = self.image.public_id
except AttributeError:
result = ''
return result
@receiver(pre_delete, sender=Photo, dispatch_uid='photo_pre_delete')
def photo_pre_delete(sender, instance, using, **kwargs):
api.delete_resources([instance.image.public_id])
key = "photo:%s" % instance.artwork.slug
cache.delete(key)
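# A minimal usage sketch of the cache-aside managers above (illustrative; the
# slug value is an assumption):
#
#     artwork = Artwork.objects.get_artwork('starry-night')  # miss -> DB, then cached
#     photo = artwork.get_photo()  # cached under "photo:<slug>"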
|
{
"content_hash": "6cd1f378b743c3248b63e70736a6503b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 91,
"avg_line_length": 31.05128205128205,
"alnum_prop": 0.6694192127718139,
"repo_name": "plankter/augeo-cloud",
"id": "b0462d4d0eebafdf597e1342c16e835bd208661f",
"size": "3633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20320"
},
{
"name": "JavaScript",
"bytes": "17544"
},
{
"name": "Python",
"bytes": "46624"
}
],
"symlink_target": ""
}
|
import optparse, os, re, sys
import math
import glob
import inspect
import m5
from m5.objects import *
from m5.util import addToPath
addToPath('../')
from ruby import Ruby
from common import Options
from common import Simulation
from common import GPUTLBOptions, GPUTLBConfig
########################## Script Options ########################
def setOption(parser, opt_str, value = 1):
# check to make sure the option actually exists
if not parser.has_option(opt_str):
raise Exception("cannot find %s in list of possible options" % opt_str)
opt = parser.get_option(opt_str)
# set the value
exec("parser.values.%s = %s" % (opt.dest, value))
def getOption(parser, opt_str):
# check to make sure the option actually exists
if not parser.has_option(opt_str):
raise Exception("cannot find %s in list of possible options" % opt_str)
opt = parser.get_option(opt_str)
# get the value
exec("return_value = parser.values.%s" % opt.dest)
return return_value
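# For example (illustrative), after parse_args() one can force an option and
# read it back:
#
#     setOption(parser, "--num-compute-units", 4)
#     n_cu = getOption(parser, "--num-compute-units")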
# Adding script options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
parser.add_option("--cpu-only-mode", action="store_true", default=False,
help="APU mode. Used to take care of problems in "\
"Ruby.py while running APU protocols")
parser.add_option("-k", "--kernel-files",
help="file(s) containing GPU kernel code (colon separated)")
parser.add_option("-u", "--num-compute-units", type="int", default=1,
help="number of GPU compute units"),
parser.add_option("--num-cp", type="int", default=0,
help="Number of GPU Command Processors (CP)")
parser.add_option("--benchmark-root", help="Root of benchmark directory tree")
# not super important now, but to avoid putting the number 4 everywhere, make
# it an option/knob
parser.add_option("--cu-per-sqc", type="int", default=4, help="number of CUs" \
"sharing an SQC (icache, and thus icache TLB)")
parser.add_option("--simds-per-cu", type="int", default=4, help="SIMD units" \
"per CU")
parser.add_option("--wf-size", type="int", default=64,
help="Wavefront size(in workitems)")
parser.add_option("--sp-bypass-path-length", type="int", default=4, \
help="Number of stages of bypass path in vector ALU for Single Precision ops")
parser.add_option("--dp-bypass-path-length", type="int", default=4, \
help="Number of stages of bypass path in vector ALU for Double Precision ops")
# issue period per SIMD unit: number of cycles before issuing another vector
parser.add_option("--issue-period", type="int", default=4, \
help="Number of cycles per vector instruction issue period")
parser.add_option("--glbmem-wr-bus-width", type="int", default=32, \
help="VGPR to Coalescer (Global Memory) data bus width in bytes")
parser.add_option("--glbmem-rd-bus-width", type="int", default=32, \
help="Coalescer to VGPR (Global Memory) data bus width in bytes")
# Currently we only support 1 local memory pipe
parser.add_option("--shr-mem-pipes-per-cu", type="int", default=1, \
help="Number of Shared Memory pipelines per CU")
# Currently we only support 1 global memory pipe
parser.add_option("--glb-mem-pipes-per-cu", type="int", default=1, \
help="Number of Global Memory pipelines per CU")
parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
"WF slots per SIMD")
parser.add_option("--vreg-file-size", type="int", default=2048,
help="number of physical vector registers per SIMD")
parser.add_option("--bw-scalor", type="int", default=0,
help="bandwidth scalor for scalability analysis")
parser.add_option("--CPUClock", type="string", default="2GHz",
help="CPU clock")
parser.add_option("--GPUClock", type="string", default="1GHz",
help="GPU clock")
parser.add_option("--cpu-voltage", action="store", type="string",
default='1.0V',
help = """CPU voltage domain""")
parser.add_option("--gpu-voltage", action="store", type="string",
default='1.0V',
help = """CPU voltage domain""")
parser.add_option("--CUExecPolicy", type="string", default="OLDEST-FIRST",
help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)")
parser.add_option("--xact-cas-mode", action="store_true",
help="enable load_compare mode (transactional CAS)")
parser.add_option("--SegFaultDebug",action="store_true",
help="checks for GPU seg fault before TLB access")
parser.add_option("--FunctionalTLB",action="store_true",
help="Assumes TLB has no latency")
parser.add_option("--LocalMemBarrier",action="store_true",
help="Barrier does not wait for writethroughs to complete")
parser.add_option("--countPages", action="store_true",
help="Count Page Accesses and output in per-CU output files")
parser.add_option("--TLB-prefetch", type="int", help = "prefetch depth for"\
"TLBs")
parser.add_option("--pf-type", type="string", help="type of prefetch: "\
"PF_CU, PF_WF, PF_PHASE, PF_STRIDE")
parser.add_option("--pf-stride", type="int", help="set prefetch stride")
parser.add_option("--numLdsBanks", type="int", default=32,
help="number of physical banks per LDS module")
parser.add_option("--ldsBankConflictPenalty", type="int", default=1,
help="number of cycles per LDS bank conflict")
parser.add_option('--fast-forward-pseudo-op', action='store_true',
help = 'fast forward using kvm until the m5_switchcpu'
' pseudo-op is encountered, then switch cpus. subsequent'
' m5_switchcpu pseudo-ops will toggle back and forth')
parser.add_option('--outOfOrderDataDelivery', action='store_true',
default=False, help='enable OoO data delivery in the GM'
' pipeline')
Ruby.define_options(parser)
#add TLB options to the parser
GPUTLBOptions.tlb_options(parser)
(options, args) = parser.parse_args()
# The GPU cache coherence protocols only work with the backing store
setOption(parser, "--access-backing-store")
# if benchmark root is specified explicitly, that overrides the search path
if options.benchmark_root:
benchmark_path = [options.benchmark_root]
else:
# Set default benchmark search path to current dir
benchmark_path = ['.']
########################## Sanity Check ########################
# Currently the gpu model requires ruby
if buildEnv['PROTOCOL'] == 'None':
fatal("GPU model requires ruby")
# Currently the gpu model requires only timing or detailed CPU
if not (options.cpu_type == "timing" or
options.cpu_type == "detailed"):
fatal("GPU model requires timing or detailed CPU")
# This file can support multiple compute units
assert(options.num_compute_units >= 1)
# Currently, the sqc (I-Cache of GPU) is shared by
# multiple compute units(CUs). The protocol works just fine
# even if sqc is not shared. Overriding this option here
# so that the user need not explicitly set this (assuming
# sharing sqc is the common usage)
n_cu = options.num_compute_units
num_sqc = int(math.ceil(float(n_cu) / options.cu_per_sqc))
options.num_sqc = num_sqc # pass this to Ruby
########################## Creating the GPU system ########################
# shader is the GPU
shader = Shader(n_wf = options.wfs_per_simd,
clk_domain = SrcClockDomain(
clock = options.GPUClock,
voltage_domain = VoltageDomain(
voltage = options.gpu_voltage)))
# GPU_RfO (Read For Ownership) implements an SC/TSO memory model.
# Other GPU protocols implement release consistency on the GPU side.
# So, all GPU protocols other than GPU_RfO should make their writes
# visible to global memory, and read from global memory, at kernel
# boundaries. The pipeline initiates (or does not initiate) the
# acquire/release operation depending on this impl_kern_boundary_sync
# flag: when the flag is true, the pipeline initiates an acquire/release
# operation at each kernel boundary.
if buildEnv['PROTOCOL'] == 'GPU_RfO':
shader.impl_kern_boundary_sync = False
else:
shader.impl_kern_boundary_sync = True
# Switching off per-lane TLB by default
per_lane = False
if options.TLB_config == "perLane":
per_lane = True
# List of compute units; one GPU can have multiple compute units
compute_units = []
for i in xrange(n_cu):
compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
num_SIMDs = options.simds_per_cu,
wfSize = options.wf_size,
spbypass_pipe_length = options.sp_bypass_path_length,
dpbypass_pipe_length = options.dp_bypass_path_length,
issue_period = options.issue_period,
coalescer_to_vrf_bus_width = \
options.glbmem_rd_bus_width,
vrf_to_coalescer_bus_width = \
options.glbmem_wr_bus_width,
num_global_mem_pipes = \
options.glb_mem_pipes_per_cu,
num_shared_mem_pipes = \
options.shr_mem_pipes_per_cu,
n_wf = options.wfs_per_simd,
execPolicy = options.CUExecPolicy,
xactCasMode = options.xact_cas_mode,
debugSegFault = options.SegFaultDebug,
functionalTLB = options.FunctionalTLB,
localMemBarrier = options.LocalMemBarrier,
countPages = options.countPages,
localDataStore = \
LdsState(banks = options.numLdsBanks,
bankConflictPenalty = \
options.ldsBankConflictPenalty),
out_of_order_data_delivery =
options.outOfOrderDataDelivery))
wavefronts = []
vrfs = []
for j in xrange(options.simds_per_cu):
for k in xrange(shader.n_wf):
wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
wfSize = options.wf_size))
vrfs.append(VectorRegisterFile(simd_id=j,
num_regs_per_simd=options.vreg_file_size))
compute_units[-1].wavefronts = wavefronts
compute_units[-1].vector_register_file = vrfs
if options.TLB_prefetch:
compute_units[-1].prefetch_depth = options.TLB_prefetch
compute_units[-1].prefetch_prev_type = options.pf_type
# attach the LDS and the CU to the bus (actually a Bridge)
compute_units[-1].ldsPort = compute_units[-1].ldsBus.slave
compute_units[-1].ldsBus.master = compute_units[-1].localDataStore.cuPort
# Attach compute units to GPU
shader.CUs = compute_units
########################## Creating the CPU system ########################
options.num_cpus = options.num_cpus
# The shader core will be whatever is after the CPU cores are accounted for
shader_idx = options.num_cpus
# The command processor will be whatever is after the shader is accounted for
cp_idx = shader_idx + 1
cp_list = []
# List of CPUs
cpu_list = []
CpuClass, mem_mode = Simulation.getCPUClass(options.cpu_type)
if CpuClass == AtomicSimpleCPU:
fatal("AtomicSimpleCPU is not supported")
if mem_mode != 'timing':
fatal("Only the timing memory mode is supported")
shader.timing = True
if options.fast_forward and options.fast_forward_pseudo_op:
fatal("Cannot fast-forward based both on the number of instructions and"
" on pseudo-ops")
fast_forward = options.fast_forward or options.fast_forward_pseudo_op
if fast_forward:
FutureCpuClass, future_mem_mode = CpuClass, mem_mode
CpuClass = X86KvmCPU
mem_mode = 'atomic_noncaching'
# Leave shader.timing untouched, because its value only matters at the
# start of the simulation and because we require switching cpus
# *before* the first kernel launch.
future_cpu_list = []
# Initial CPUs to be used during fast-forwarding.
for i in xrange(options.num_cpus):
cpu = CpuClass(cpu_id = i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
voltage_domain = VoltageDomain(
voltage = options.cpu_voltage)))
cpu_list.append(cpu)
if options.fast_forward:
cpu.max_insts_any_thread = int(options.fast_forward)
if fast_forward:
MainCpuClass = FutureCpuClass
else:
MainCpuClass = CpuClass
# CPs to be used throughout the simulation.
for i in xrange(options.num_cp):
cp = MainCpuClass(cpu_id = options.num_cpus + i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
voltage_domain = VoltageDomain(
voltage = options.cpu_voltage)))
cp_list.append(cp)
# Main CPUs (to be used after fast-forwarding if fast-forwarding is specified).
for i in xrange(options.num_cpus):
cpu = MainCpuClass(cpu_id = i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
voltage_domain = VoltageDomain(
voltage = options.cpu_voltage)))
if fast_forward:
cpu.switched_out = True
future_cpu_list.append(cpu)
else:
cpu_list.append(cpu)
########################## Creating the GPU dispatcher ########################
# Dispatcher dispatches work from host CPU to GPU
host_cpu = cpu_list[0]
dispatcher = GpuDispatcher()
########################## Create and assign the workload ########################
# Check for rel_path in elements of base_list using test, returning
# the first full path that satisfies test
def find_path(base_list, rel_path, test):
for base in base_list:
if not base:
# base could be None if environment var not set
continue
full_path = os.path.join(base, rel_path)
if test(full_path):
return full_path
fatal("%s not found in %s" % (rel_path, base_list))
def find_file(base_list, rel_path):
return find_path(base_list, rel_path, os.path.isfile)
executable = find_path(benchmark_path, options.cmd, os.path.exists)
# it's common for a benchmark to be in a directory with the same
# name as the executable, so we handle that automatically
if os.path.isdir(executable):
benchmark_path = [executable]
executable = find_file(benchmark_path, options.cmd)
if options.kernel_files:
kernel_files = [find_file(benchmark_path, f)
for f in options.kernel_files.split(':')]
else:
# if kernel_files is not set, see if there's a unique .asm file
# in the same directory as the executable
kernel_path = os.path.dirname(executable)
kernel_files = glob.glob(os.path.join(kernel_path, '*.asm'))
if kernel_files:
print "Using GPU kernel code file(s)", ",".join(kernel_files)
else:
fatal("Can't locate kernel code (.asm) in " + kernel_path)
# OpenCL driver
driver = ClDriver(filename="hsa", codefile=kernel_files)
for cpu in cpu_list:
cpu.workload = LiveProcess(executable = executable,
cmd = [options.cmd] + options.options.split(),
drivers = [driver])
for cp in cp_list:
cp.workload = host_cpu.workload
if fast_forward:
for i in xrange(len(future_cpu_list)):
future_cpu_list[i].workload = cpu_list[i].workload
########################## Create the overall system ########################
# List of CPUs that must be switched when moving between KVM and simulation
if fast_forward:
switch_cpu_list = \
[(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)]
# Full list of processing cores in the system. Note that
# dispatcher is also added to cpu_list although it is
# not a processing element
cpu_list = cpu_list + [shader] + cp_list + [dispatcher]
# creating the overall system
# notice the cpu list is explicitly added as a parameter to System
system = System(cpu = cpu_list,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size,
mem_mode = mem_mode)
if fast_forward:
system.future_cpu = future_cpu_list
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
if fast_forward:
have_kvm_support = 'BaseKvmCPU' in globals()
if have_kvm_support and buildEnv['TARGET_ISA'] == "x86":
system.vm = KvmVM()
for i in xrange(len(host_cpu.workload)):
host_cpu.workload[i].useArchPT = True
host_cpu.workload[i].kvmInSE = True
else:
fatal("KvmCPU can only be used in SE mode with x86")
# configure the TLB hierarchy
GPUTLBConfig.config_tlb_hierarchy(options, system, shader_idx)
# create Ruby system
system.piobus = IOXBar(width=32, response_latency=0,
frontend_latency=0, forward_latency=0)
Ruby.create_system(options, None, system)
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
# attach the CPU ports to Ruby
for i in range(options.num_cpus):
ruby_port = system.ruby._cpu_ports[i]
# Create interrupt controller
system.cpu[i].createInterruptController()
# Connect cache port's to ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
ruby_port.mem_master_port = system.piobus.slave
if buildEnv['TARGET_ISA'] == "x86":
system.cpu[i].interrupts[0].pio = system.piobus.master
system.cpu[i].interrupts[0].int_master = system.piobus.slave
system.cpu[i].interrupts[0].int_slave = system.piobus.master
if fast_forward:
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
# attach CU ports to Ruby
# Because of the peculiarities of the CP core, you may have 1 CPU but 2
# sequencers and thus 2 _cpu_ports created. Your GPUs shouldn't be
# hooked up until after the CP. To make this script generic, figure out
# the index as below, but note that this assumes there is one sequencer
# per compute unit and one sequencer per SQC for the math to work out
# correctly.
gpu_port_idx = len(system.ruby._cpu_ports) \
- options.num_compute_units - options.num_sqc
gpu_port_idx = gpu_port_idx - options.num_cp * 2
wavefront_size = options.wf_size
for i in xrange(n_cu):
# The pipeline issues wavefront_size number of uncoalesced requests
# in one GPU issue cycle. Hence wavefront_size mem ports.
for j in xrange(wavefront_size):
system.cpu[shader_idx].CUs[i].memory_port[j] = \
system.ruby._cpu_ports[gpu_port_idx].slave[j]
gpu_port_idx += 1
for i in xrange(n_cu):
if i > 0 and not i % options.cu_per_sqc:
print "incrementing idx on ", i
gpu_port_idx += 1
system.cpu[shader_idx].CUs[i].sqc_port = \
system.ruby._cpu_ports[gpu_port_idx].slave
gpu_port_idx = gpu_port_idx + 1
# attach CP ports to Ruby
for i in xrange(options.num_cp):
system.cpu[cp_idx].createInterruptController()
system.cpu[cp_idx].dcache_port = \
system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
system.cpu[cp_idx].icache_port = \
system.ruby._cpu_ports[gpu_port_idx + i * 2 + 1].slave
system.cpu[cp_idx].interrupts[0].pio = system.piobus.master
system.cpu[cp_idx].interrupts[0].int_master = system.piobus.slave
system.cpu[cp_idx].interrupts[0].int_slave = system.piobus.master
cp_idx = cp_idx + 1
# connect dispatcher to the system.piobus
dispatcher.pio = system.piobus.master
dispatcher.dma = system.piobus.slave
################# Connect the CPU and GPU via GPU Dispatcher ###################
# The CPU rings the GPU doorbell to notify it of a pending task using this
# interface, and the GPU uses the same interface to notify the CPU of task
# completion. The communication happens through the emulated driver.
# Note this implicit setting of the cpu_pointer, shader_pointer and tlb array
# parameters must be after the explicit setting of the System cpu list
if fast_forward:
shader.cpu_pointer = future_cpu_list[0]
dispatcher.cpu = future_cpu_list[0]
else:
shader.cpu_pointer = host_cpu
dispatcher.cpu = host_cpu
dispatcher.shader_pointer = shader
dispatcher.cl_driver = driver
########################## Start simulation ########################
root = Root(system=system, full_system=False)
m5.ticks.setGlobalFrequency('1THz')
if options.abs_max_tick:
maxtick = options.abs_max_tick
else:
maxtick = m5.MaxTick
# Benchmarks support work item annotations
Simulation.setWorkCountOptions(system, options)
# Checkpointing is not supported by APU model
if (options.checkpoint_dir != None or
options.checkpoint_restore != None):
fatal("Checkpointing not supported by apu model")
checkpoint_dir = None
m5.instantiate(checkpoint_dir)
# Map workload to this address space
host_cpu.workload[0].map(0x10000000, 0x200000000, 4096)
if options.fast_forward:
print "Switch at instruction count: %d" % \
cpu_list[0].max_insts_any_thread
exit_event = m5.simulate(maxtick)
if options.fast_forward:
if exit_event.getCause() == "a thread reached the max instruction count":
m5.switchCpus(system, switch_cpu_list)
print "Switched CPUS @ tick %s" % (m5.curTick())
m5.stats.reset()
exit_event = m5.simulate(maxtick - m5.curTick())
elif options.fast_forward_pseudo_op:
while exit_event.getCause() == "switchcpu":
# If we are switching *to* kvm, then the current stats are meaningful
# Note that we don't do any warmup by default
if type(switch_cpu_list[0][0]) == FutureCpuClass:
print "Dumping stats..."
m5.stats.dump()
m5.switchCpus(system, switch_cpu_list)
print "Switched CPUS @ tick %s" % (m5.curTick())
m5.stats.reset()
# This lets us switch back and forth without keeping a counter
switch_cpu_list = [(x[1], x[0]) for x in switch_cpu_list]
exit_event = m5.simulate(maxtick - m5.curTick())
print "Ticks:", m5.curTick()
print 'Exiting because ', exit_event.getCause()
sys.exit(exit_event.getCode())
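# Hedged usage sketch (flag spellings assumed, and the build target
# depends on how gem5 was compiled; neither is taken from this file):
#   build/<apu-enabled-ISA>/gem5.opt configs/example/apu_se.py \
#       -c <gpu_binary> --num-compute-units 4 --cu-per-sqc 4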
|
{
"content_hash": "eb4333ca493783886868bc1a1ae95b56",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 96,
"avg_line_length": 42.61834862385321,
"alnum_prop": 0.62569423515736,
"repo_name": "SanchayanMaity/gem5",
"id": "5ec3289d25f7fe69d6e319e791dab2fad03422f4",
"size": "24883",
"binary": false,
"copies": "3",
"ref": "refs/heads/CS570",
"path": "configs/example/apu_se.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "235643"
},
{
"name": "C",
"bytes": "2802978"
},
{
"name": "C++",
"bytes": "16550087"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "49620"
},
{
"name": "Makefile",
"bytes": "49110"
},
{
"name": "Objective-C",
"bytes": "1505"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "11148"
},
{
"name": "Python",
"bytes": "4547714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "Shell",
"bytes": "57032"
},
{
"name": "Vim script",
"bytes": "4335"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
"""
FileType.py - Base class for all file types
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
This abstract class defines the interface that must be used to extend acq4 to support
more file types. The classes are not meant to be instantiated, but instead are just used
to encapsulate a set of static methods.
"""
import os
class FileType:
extensions = [] ## list of extensions handled by this class
dataTypes = [] ## list of python types handled by this class
priority = 0 ## priority for this class when multiple classes support the same file types
@classmethod
def typeName(cls):
"""Return a string representing the file type for this class.
The default implementation just returns the name of the class."""
return cls.__name__
@classmethod
def write(cls, data, dirHandle, fileName, **args):
"""Write data to fileName.
Return the file name written (this allows the function to modify the requested file name)
"""
raise Exception("Function must be implemented in subclass")
@classmethod
def read(cls, fileHandle):
"""Read a file, return a data object"""
raise Exception("Function must be implemented in subclass")
@classmethod
def acceptsFile(cls, fileHandle):
"""Return priority value if the file can be read by this class.
Otherwise return False.
The default implementation just checks for the correct name extensions."""
name = fileHandle.name()
for ext in cls.extensions:
if name[-len(ext):].lower() == ext.lower():
return cls.priority
return False
@classmethod
def acceptsData(cls, data, fileName):
"""Return priority value if the data can be written by this class.
Otherwise return False."""
for typ in cls.dataTypes:
if isinstance(data, typ):
return cls.priority
return False
@classmethod
def addExtension(cls, fileName):
"""Return a file name with extension added if it did not already have one."""
for ext in cls.extensions:
if fileName[-len(ext):].lower() == ext.lower():
return fileName
return fileName + cls.extensions[0]
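## Hedged example (not part of acq4): a minimal sketch of a FileType
## subclass for plain text files. It assumes dirHandle.name() returns the
## directory path, mirroring the fileHandle.name() call used above.
class TextFile(FileType):
    extensions = ['.txt']
    dataTypes = [str]
    priority = 10
    @classmethod
    def write(cls, data, dirHandle, fileName, **args):
        fileName = cls.addExtension(fileName)
        with open(os.path.join(dirHandle.name(), fileName), 'w') as fh:
            fh.write(data)
        return fileName
    @classmethod
    def read(cls, fileHandle):
        with open(fileHandle.name(), 'r') as fh:
            return fh.read()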
|
{
"content_hash": "7a6a380f9fb7d2556e4327e3a35b2d29",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 98,
"avg_line_length": 37.796875,
"alnum_prop": 0.6440677966101694,
"repo_name": "acq4/acq4",
"id": "54375c75948b208fb25594971df610ded016fc8d",
"size": "2443",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "acq4/filetypes/FileType.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "335"
},
{
"name": "C",
"bytes": "1301111"
},
{
"name": "C++",
"bytes": "340035"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "3503085"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
import setuptools
import versioneer
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="PcbDraw",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
python_requires=">=3.7",
author="Jan Mrázek",
author_email="email@honzamrazek.cz",
description="Utility to produce nice looking drawings of KiCAD boards",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yaqwsx/PcbDraw",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"numpy",
"lxml",
"mistune>=2.0.2",
"pybars3",
"pyyaml",
"svgpathtools==1.4.1",
"pcbnewTransition>=0.2",
"LnkParse3; platform_system=='Windows'",
"pyVirtualDisplay~=3.0; platform_system!='Windows'",
"Pillow~=9.0",
"click>=7.1"
],
setup_requires=[
"versioneer"
],
extras_require={
"dev": ["pytest", "types-pillow", "types-click", "types-PyYAML"],
},
zip_safe=False,
include_package_data=True,
entry_points = {
"console_scripts": [
"pcbdraw=pcbdraw.ui:run"
],
}
)
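# Hedged usage sketch (assumed standard setuptools workflow, not taken
# from this repository's docs): install in editable mode with the dev
# extras and invoke the console script defined in entry_points above:
#   pip install -e .[dev]
#   pcbdraw --help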
|
{
"content_hash": "2ee4accbfa729717a228760f0a7d8300",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 27.78,
"alnum_prop": 0.5867530597552196,
"repo_name": "yaqwsx/PcbDraw",
"id": "061452f317c0d835271208adba55877f42a66ffe",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Handlebars",
"bytes": "1508"
},
{
"name": "Makefile",
"bytes": "481"
},
{
"name": "Python",
"bytes": "207214"
},
{
"name": "Shell",
"bytes": "1842"
}
],
"symlink_target": ""
}
|
"""irclib -- Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol at a quite
low level. It provides an event-driven IRC client framework. It has
fairly thorough support for the basic IRC protocol, CTCP, and DCC
chat; DCC file transfers are not yet supported.
In order to understand how to make an IRC client, I'm afraid you more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server trigger events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* The IRC protocol shines through the abstraction a bit too much.
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
* There is no support for DCC file transfers.
* The author hasn't even read RFC 2810, 2811, 2812 and 2813.
* Like most projects, documentation is lacking...
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import string
import time
import types
VERSION = 0, 4, 8
DEBUG = 0
# TODO
# ----
# (maybe) thread safety
# (maybe) color parser convenience functions
# documentation (including all event types)
# (maybe) add awareness of different types of ircds
# send data asynchronously to the server (and DCC connections)
# (maybe) automatically close unused, passive DCC connections after a while
# NOTES
# -----
# connection.quit() only sends QUIT to the server.
# ERROR from the server triggers the error event and the disconnect event.
# dropping of the connection triggers the disconnect event.
class IRCError(Exception):
"""Represents an IRC exception."""
pass
class IRC:
"""Class that handles one or several IRC server connections.
When an IRC object has been instantiated, it can be used to create
Connection objects that represent the IRC connections. The
responsibility of the IRC object is to provide an event-driven
framework for the connections and to keep the connections alive.
It runs a select loop to poll each connection's TCP socket and
hands over the sockets with incoming data for processing by the
corresponding connection.
The methods of most interest for an IRC client writer are server,
add_global_handler, remove_global_handler, execute_at,
execute_delayed, process_once and process_forever.
Here is an example:
irc = irclib.IRC()
server = irc.server()
server.connect(\"irc.some.where\", 6667, \"my_nickname\")
server.privmsg(\"a_nickname\", \"Hi there!\")
irc.process_forever()
This will connect to the IRC server irc.some.where on port 6667
using the nickname my_nickname and send the message \"Hi there!\"
to the nickname a_nickname.
"""
def __init__(self, fn_to_add_socket=None,
fn_to_remove_socket=None,
fn_to_add_timeout=None):
"""Constructor for IRC objects.
Optional arguments are fn_to_add_socket, fn_to_remove_socket
and fn_to_add_timeout. The first two specify functions that
will be called with a socket object as argument when the IRC
object wants to be notified (or stop being notified) of data
coming on a new socket. When new data arrives, the method
process_data should be called. Similarly, fn_to_add_timeout
is called with a number of seconds (a floating point number)
as first argument when the IRC object wants to receive a
notification (by calling the process_timeout method). So, if
e.g. the argument is 42.17, the object wants the
process_timeout method to be called after 42 seconds and 170
milliseconds.
The three arguments mainly exist to be able to use an external
main loop (for example Tkinter's or PyGTK's main app loop)
instead of calling the process_forever method.
An alternative is to just call ServerConnection.process_once()
once in a while.
"""
if fn_to_add_socket and fn_to_remove_socket:
self.fn_to_add_socket = fn_to_add_socket
self.fn_to_remove_socket = fn_to_remove_socket
else:
self.fn_to_add_socket = None
self.fn_to_remove_socket = None
self.fn_to_add_timeout = fn_to_add_timeout
self.connections = []
self.handlers = {}
# list of tuples in the format (time, function, arguments)
self.delayed_commands = []
self.add_global_handler("ping", _ping_ponger, -42)
def server(self):
"""Creates and returns a ServerConnection object."""
c = ServerConnection(self)
self.connections.append(c)
return c
def process_data(self, sockets):
"""Called when there is more data to read on connection sockets.
Arguments:
sockets -- A list of socket objects.
See documentation for IRC.__init__.
"""
for s in sockets:
for c in self.connections:
if s == c._get_socket():
c.process_data()
def process_timeout(self):
"""Called when a timeout notification is due.
See documentation for IRC.__init__.
"""
t = time.time()
while self.delayed_commands:
if t >= self.delayed_commands[0][0]:
self.delayed_commands[0][1](*self.delayed_commands[0][2])
del self.delayed_commands[0]
else:
break
def process_once(self, timeout=0):
"""Process data from connections once.
Arguments:
timeout -- How long the select() call should wait if no
data is available.
This method should be called periodically to check and process
incoming data, if there are any. If that seems boring, look
at the process_forever method.
"""
sockets = map(lambda x: x._get_socket(), self.connections)
sockets = filter(lambda x: x != None, sockets)
if sockets:
(i, o, e) = select.select(sockets, [], [], timeout)
self.process_data(i)
else:
time.sleep(timeout)
self.process_timeout()
def process_forever(self, timeout=0.2):
"""Run an infinite loop, processing data from connections.
This method repeatedly calls process_once.
Arguments:
timeout -- Parameter to pass to process_once.
"""
while 1:
self.process_once(timeout)
def disconnect_all(self, message=""):
"""Disconnects all connections."""
for c in self.connections:
c.disconnect(message)
def add_global_handler(self, event, handler, priority=0):
"""Adds a global handler function for a specific event type.
Arguments:
event -- Event type (a string). Check the values of the
numeric_events dictionary in irclib.py for possible event
types.
handler -- Callback function.
priority -- A number (the lower number, the higher priority).
The handler function is called whenever the specified event is
triggered in any of the connections. See documentation for
the Event class.
The handler functions are called in priority order (lowest
number is highest priority). If a handler function returns
\"NO MORE\", no more handlers will be called.
"""
if not event in self.handlers:
self.handlers[event] = []
bisect.insort(self.handlers[event], ((priority, handler)))
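# Hedged example (hypothetical handler, not part of irclib): log every
# public message; returning "NO MORE" from the handler would stop any
# lower-priority handlers from running:
#   def log_pubmsg(connection, event):
#       print event.source(), event.target(), event.arguments()
#   irc.add_global_handler("pubmsg", log_pubmsg, 0)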
def remove_global_handler(self, event, handler):
"""Removes a global handler function.
Arguments:
event -- Event type (a string).
handler -- Callback function.
Returns 1 on success, otherwise 0.
"""
if not event in self.handlers:
return 0
for h in self.handlers[event]:
if handler == h[1]:
self.handlers[event].remove(h)
return 1
def execute_at(self, at, function, arguments=()):
"""Execute a function at a specified time.
Arguments:
at -- Execute at this time (standard \"time_t\" time).
function -- Function to call.
arguments -- Arguments to give the function.
"""
self.execute_delayed(at - time.time(), function, arguments)
def execute_delayed(self, delay, function, arguments=()):
"""Execute a function after a specified time.
Arguments:
delay -- How many seconds to wait.
function -- Function to call.
arguments -- Arguments to give the function.
"""
bisect.insort(self.delayed_commands,
(delay + time.time(), function, arguments))
if self.fn_to_add_timeout:
self.fn_to_add_timeout(delay)
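# Hedged example (using the irc/server names from the IRC class
# docstring example): send a message 30 seconds from now:
#   irc.execute_delayed(30, server.privmsg, ("#a_channel", "30s later"))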
def dcc(self, dcctype="chat"):
"""Creates and returns a DCCConnection object.
Arguments:
dcctype -- "chat" for DCC CHAT connections or "raw" for
DCC SEND (or other DCC types). If "chat",
incoming data will be split in newline-separated
chunks. If "raw", incoming data is not touched.
"""
c = DCCConnection(self, dcctype)
self.connections.append(c)
return c
def _handle_event(self, connection, event):
"""[Internal]"""
h = self.handlers
for handler in h.get("all_events", []) + h.get(event.eventtype(), []):
if handler[1](connection, event) == "NO MORE":
return
def _remove_connection(self, connection):
"""[Internal]"""
self.connections.remove(connection)
if self.fn_to_remove_socket:
self.fn_to_remove_socket(connection._get_socket())
_rfc_1459_command_regexp = re.compile(
"^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
class Connection:
"""Base class for IRC connections.
Must be overridden.
"""
def __init__(self, irclibobj):
self.irclibobj = irclibobj
def _get_socket(self):
raise IRCError("Not overridden")
##############################
### Convenience wrappers.
def execute_at(self, at, function, arguments=()):
self.irclibobj.execute_at(at, function, arguments)
def execute_delayed(self, delay, function, arguments=()):
self.irclibobj.execute_delayed(delay, function, arguments)
class ServerConnectionError(IRCError):
pass
class ServerNotConnectedError(ServerConnectionError):
pass
# Huh!? Crrrrazy EFNet doesn't follow the RFC: their ircd seems to
# use \n as message separator! :P
_linesep_regexp = re.compile("\r?\n")
class ServerConnection(Connection):
"""This class represents an IRC server connection.
ServerConnection objects are instantiated by calling the server
method on an IRC object.
"""
def __init__(self, irclibobj):
Connection.__init__(self, irclibobj)
self.connected = 0 # Not connected yet.
self.socket = None
self.ssl = None
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name ("realname").
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
Returns the ServerConnection object.
"""
if self.connected:
self.disconnect("Changing servers")
self.previous_buffer = ""
self.handlers = {}
self.real_server_name = ""
self.real_nickname = nickname
self.server = server
self.port = port
self.nickname = nickname
self.username = username or nickname
self.ircname = ircname or nickname
self.password = password
self.localaddress = localaddress
self.localport = localport
self.localhost = socket.gethostname()
if ipv6:
self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.bind((self.localaddress, self.localport))
self.socket.connect((self.server, self.port))
if ssl:
# Note: the import below rebinds the local name 'ssl' from the boolean
# flag to the ssl module; the flag has already been consumed by the
# test above, so the rebinding is harmless (if confusing).
try:
import ssl
ssl_wrap = ssl.wrap_socket
except ImportError:
ssl_wrap = socket.ssl
self.ssl = ssl_wrap(self.socket)
except socket.error, x:
self.socket.close()
self.socket = None
raise ServerConnectionError("Couldn't connect to socket: %s" % x)
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
# Log on...
if self.password:
self.pass_(self.password)
self.nick(self.nickname)
self.user(self.username, self.ircname)
return self
def close(self):
"""Close the connection.
This method closes the connection permanently; after it has
been called, the object is unusable.
"""
self.disconnect("Closing object")
self.irclibobj._remove_connection(self)
def _get_socket(self):
"""[Internal]"""
return self.socket
def get_server_name(self):
"""Get the (real) server name.
This method returns the (real) server name, or, more
specifically, what the server calls itself.
"""
if self.real_server_name:
return self.real_server_name
else:
return ""
def get_nickname(self):
"""Get the (real) nick name.
This method returns the (real) nickname. The library keeps
track of nick changes, so it might not be the nick name that
was passed to the connect() method. """
return self.real_nickname
def process_data(self):
"""[Internal]"""
try:
if self.ssl:
new_data = self.ssl.read(2 ** 14)
else:
new_data = self.socket.recv(2 ** 14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
lines = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = lines.pop()
for line in lines:
if DEBUG:
print "FROM SERVER:", line
if not line:
continue
prefix = None
command = None
arguments = None
self._handle_event(Event("all_raw_messages",
self.get_server_name(),
None,
[line]))
m = _rfc_1459_command_regexp.match(line)
if m.group("prefix"):
prefix = m.group("prefix")
if not self.real_server_name:
self.real_server_name = prefix
if m.group("command"):
command = m.group("command").lower()
if m.group("argument"):
a = m.group("argument").split(" :", 1)
arguments = a[0].split()
if len(a) == 2:
arguments.append(a[1])
# Translate numerics into more readable strings.
if command in numeric_events:
command = numeric_events[command]
if command == "nick":
if nm_to_n(prefix) == self.real_nickname:
self.real_nickname = arguments[0]
elif command == "welcome":
# Record the nickname in case the client changed nick
# in a nicknameinuse callback.
self.real_nickname = arguments[0]
if command in ["privmsg", "notice"]:
target, message = arguments[0], arguments[1]
messages = _ctcp_dequote(message)
if command == "privmsg":
if is_channel(target):
command = "pubmsg"
else:
if is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
for m in messages:
if type(m) is types.TupleType:
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
m = list(m)
if DEBUG:
print "command: %s, source: %s, target: %s, " \
"arguments: %s" % (command, prefix, target, m)
self._handle_event(Event(command, prefix, target, m))
if command == "ctcp" and m[0] == "ACTION":
self._handle_event(
Event("action", prefix, target, m[1:]))
else:
if DEBUG:
print "command: %s, source: %s, target: %s, " \
"arguments: %s" % (
command, prefix, target, [m])
self._handle_event(Event(command, prefix, target, [m]))
else:
target = None
if command == "quit":
arguments = [arguments[0]]
elif command == "ping":
target = arguments[0]
else:
target = arguments[0]
arguments = arguments[1:]
if command == "mode":
if not is_channel(target):
command = "umode"
if DEBUG:
print "command: %s, source: %s, target: %s, " \
"arguments: %s" % (command, prefix, target, arguments)
self._handle_event(Event(command, prefix, target, arguments))
def _handle_event(self, event):
"""[Internal]"""
self.irclibobj._handle_event(self, event)
if event.eventtype() in self.handlers:
for fn in self.handlers[event.eventtype()]:
fn(self, event)
def is_connected(self):
"""Return connection status.
Returns true if connected, otherwise false.
"""
return self.connected
def add_global_handler(self, *args):
"""Add global handler.
See documentation for IRC.add_global_handler.
"""
self.irclibobj.add_global_handler(*args)
def remove_global_handler(self, *args):
"""Remove global handler.
See documentation for IRC.remove_global_handler.
"""
self.irclibobj.remove_global_handler(*args)
def action(self, target, action):
"""Send a CTCP ACTION command."""
self.ctcp("ACTION", target, action)
def admin(self, server=""):
"""Send an ADMIN command."""
self.send_raw(" ".join(["ADMIN", server]).strip())
def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
self.privmsg(target, "\001%s%s\001" % (
ctcptype, parameter and (" " + parameter) or ""))
def ctcp_reply(self, target, parameter):
"""Send a CTCP REPLY command."""
self.notice(target, "\001%s\001" % parameter)
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
self.quit(message)
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self._handle_event(Event("disconnect", self.server, "", [message]))
def globops(self, text):
"""Send a GLOBOPS command."""
self.send_raw("GLOBOPS :" + text)
def info(self, server=""):
"""Send an INFO command."""
self.send_raw(" ".join(["INFO", server]).strip())
def invite(self, nick, channel):
"""Send an INVITE command."""
self.send_raw(" ".join(["INVITE", nick, channel]).strip())
def ison(self, nicks):
"""Send an ISON command.
Arguments:
nicks -- List of nicks.
"""
self.send_raw("ISON " + " ".join(nicks))
def join(self, channel, key=""):
"""Send a JOIN command."""
self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))
def kick(self, channel, nick, comment=""):
"""Send a KICK command."""
self.send_raw("KICK %s %s%s" % (
channel, nick, (comment and (" :" + comment))))
def links(self, remote_server="", server_mask=""):
"""Send a LINKS command."""
command = "LINKS"
if remote_server:
command = command + " " + remote_server
if server_mask:
command = command + " " + server_mask
self.send_raw(command)
def list(self, channels=None, server=""):
"""Send a LIST command."""
command = "LIST"
if channels:
command = command + " " + ",".join(channels)
if server:
command = command + " " + server
self.send_raw(command)
def lusers(self, server=""):
"""Send a LUSERS command."""
self.send_raw("LUSERS" + (server and (" " + server)))
def mode(self, target, command):
"""Send a MODE command."""
self.send_raw("MODE %s %s" % (target, command))
def motd(self, server=""):
"""Send an MOTD command."""
self.send_raw("MOTD" + (server and (" " + server)))
def names(self, channels=None):
"""Send a NAMES command."""
self.send_raw("NAMES" + (channels and
(" " + ",".join(channels)) or ""))
def nick(self, newnick):
"""Send a NICK command."""
self.send_raw("NICK " + newnick)
def notice(self, target, text):
"""Send a NOTICE command."""
# Should limit len(text) here!
self.send_raw("NOTICE %s :%s" % (target, text))
def oper(self, nick, password):
"""Send an OPER command."""
self.send_raw("OPER %s %s" % (nick, password))
def part(self, channels, message=""):
"""Send a PART command."""
if type(channels) == types.StringType:
self.send_raw("PART " + channels + (message and (" " + message)))
else:
self.send_raw("PART " + ",".join(channels) +
(message and (" " + message)))
def pass_(self, password):
"""Send a PASS command."""
self.send_raw("PASS " + password)
def ping(self, target, target2=""):
"""Send a PING command."""
self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))
def pong(self, target, target2=""):
"""Send a PONG command."""
self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))
def privmsg(self, target, text):
"""Send a PRIVMSG command."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (target, text))
def privmsg_many(self, targets, text):
"""Send a PRIVMSG command to multiple targets."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (",".join(targets), text))
def quit(self, message=""):
"""Send a QUIT command."""
# Note that many IRC servers don't use your QUIT message
# unless you've been connected for at least 5 minutes!
self.send_raw("QUIT" + (message and (" :" + message)))
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
"""
if self.socket is None:
raise ServerNotConnectedError("Not connected.")
try:
if self.ssl:
self.ssl.write(string + "\r\n")
else:
self.socket.send(string + "\r\n")
if DEBUG:
print "TO SERVER:", string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
def squit(self, server, comment=""):
"""Send an SQUIT command."""
self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))
def stats(self, statstype, server=""):
"""Send a STATS command."""
self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))
def time(self, server=""):
"""Send a TIME command."""
self.send_raw("TIME" + (server and (" " + server)))
def topic(self, channel, new_topic=None):
"""Send a TOPIC command."""
if new_topic is None:
self.send_raw("TOPIC " + channel)
else:
self.send_raw("TOPIC %s :%s" % (channel, new_topic))
def trace(self, target=""):
"""Send a TRACE command."""
self.send_raw("TRACE" + (target and (" " + target)))
def user(self, username, realname):
"""Send a USER command."""
self.send_raw("USER %s 0 * :%s" % (username, realname))
def userhost(self, nicks):
"""Send a USERHOST command."""
self.send_raw("USERHOST " + ",".join(nicks))
def users(self, server=""):
"""Send a USERS command."""
self.send_raw("USERS" + (server and (" " + server)))
def version(self, server=""):
"""Send a VERSION command."""
self.send_raw("VERSION" + (server and (" " + server)))
def wallops(self, text):
"""Send a WALLOPS command."""
self.send_raw("WALLOPS :" + text)
def who(self, target="", op=""):
"""Send a WHO command."""
self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))
def whois(self, targets):
"""Send a WHOIS command."""
self.send_raw("WHOIS " + ",".join(targets))
def whowas(self, nick, max="", server=""):
"""Send a WHOWAS command."""
self.send_raw("WHOWAS %s%s%s" % (nick,
max and (" " + max),
server and (" " + server)))
class DCCConnectionError(IRCError):
pass
class DCCConnection(Connection):
"""This class represents a DCC connection.
DCCConnection objects are instantiated by calling the dcc
method on an IRC object.
"""
def __init__(self, irclibobj, dcctype):
Connection.__init__(self, irclibobj)
self.connected = 0
self.passive = 0
self.dcctype = dcctype
self.peeraddress = None
self.peerport = None
def connect(self, address, port):
"""Connect/reconnect to a DCC peer.
Arguments:
address -- Host/IP address of the peer.
port -- The port number to connect to.
Returns the DCCConnection object.
"""
self.peeraddress = socket.gethostbyname(address)
self.peerport = port
self.socket = None
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 0
try:
self.socket.connect((self.peeraddress, self.peerport))
except socket.error, x:
raise DCCConnectionError("Couldn't connect to socket: %s" % x)
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
return self
def listen(self):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 1
try:
self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error, x:
raise DCCConnectionError("Couldn't bind socket: %s" % x)
return self
def disconnect(self, message=""):
"""Hang up the connection and close the object.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self.irclibobj._handle_event(
self,
Event("dcc_disconnect", self.peeraddress, "", [message]))
self.irclibobj._remove_connection(self)
def process_data(self):
"""[Internal]"""
if self.passive and not self.connected:
conn, (self.peeraddress, self.peerport) = self.socket.accept()
self.socket.close()
self.socket = conn
self.connected = 1
if DEBUG:
print "DCC connection from %s:%d" % (
self.peeraddress, self.peerport)
self.irclibobj._handle_event(
self,
Event("dcc_connect", self.peeraddress, None, None))
return
try:
new_data = self.socket.recv(2 ** 14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
if self.dcctype == "chat":
# The specification says lines are terminated with LF, but
# it seems safer to handle CR LF terminations too.
chunks = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = chunks[-1]
if len(self.previous_buffer) > 2 ** 14:
# Bad peer! Naughty peer!
self.disconnect()
return
chunks = chunks[:-1]
else:
chunks = [new_data]
command = "dccmsg"
prefix = self.peeraddress
target = None
for chunk in chunks:
if DEBUG:
print "FROM PEER:", chunk
arguments = [chunk]
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self.irclibobj._handle_event(
self,
Event(command, prefix, target, arguments))
def _get_socket(self):
"""[Internal]"""
return self.socket
def privmsg(self, string):
"""Send data to DCC peer.
The string will be padded with appropriate LF if it's a DCC
CHAT session.
"""
try:
self.socket.send(string)
if self.dcctype == "chat":
self.socket.send("\n")
if DEBUG:
print "TO PEER: %s\n" % string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
"""A simple single-server IRC client class.
This is an example of an object-oriented wrapper of the IRC
framework. A real IRC client can be made by subclassing this
class and adding appropriate methods.
The method on_join will be called when a "join" event is created
(which is done when the server sends a JOIN message/command),
on_privmsg will be called for "privmsg" events, and so on. The
handler methods get two arguments: the connection object (same as
self.connection) and the event object.
Instance attributes that can be used by subclasses:
ircobj -- The IRC instance.
connection -- The ServerConnection instance.
dcc_connections -- A list of DCCConnection instances.
"""
def __init__(self):
self.ircobj = IRC()
self.connection = self.ircobj.server()
self.dcc_connections = []
self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
self.ircobj.add_global_handler(
"dcc_disconnect", self._dcc_disconnect, -10)
def _dispatcher(self, c, e):
"""[Internal]"""
m = "on_" + e.eventtype()
if hasattr(self, m):
getattr(self, m)(c, e)
def _dcc_disconnect(self, c, e):
self.dcc_connections.remove(c)
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False,
ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name.
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
"""
self.connection.connect(server, port, nickname,
password, username, ircname,
localaddress, localport, ssl, ipv6)
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.connect(address, port)
return dcc
def dcc_listen(self, dcctype="chat"):
"""Listen for connections from a DCC peer.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.listen()
return dcc
def start(self):
"""Start the IRC client."""
self.ircobj.process_forever()
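# Hedged example (hypothetical bot, not part of irclib): subclass
# SimpleIRCClient and define on_<eventtype> methods to receive events:
#   class EchoBot(SimpleIRCClient):
#       def on_welcome(self, connection, event):
#           connection.join("#test")
#       def on_pubmsg(self, connection, event):
#           connection.privmsg(event.target(), event.arguments()[0])
#   bot = EchoBot()
#   bot.connect("irc.example.org", 6667, "echobot")
#   bot.start()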
class Event:
"""Class representing an IRC event."""
def __init__(self, eventtype, source, target, arguments=None):
"""Constructor of Event objects.
Arguments:
eventtype -- A string describing the event.
source -- The originator of the event (a nick mask or a server).
target -- The target of the event (a nick or a channel).
arguments -- Any event specific arguments.
"""
self._eventtype = eventtype
self._source = source
self._target = target
if arguments:
self._arguments = arguments
else:
self._arguments = []
def eventtype(self):
"""Get the event type."""
return self._eventtype
def source(self):
"""Get the event source."""
return self._source
def target(self):
"""Get the event target."""
return self._target
def arguments(self):
"""Get the event arguments."""
return self._arguments
_LOW_LEVEL_QUOTE = "\020"
_CTCP_LEVEL_QUOTE = "\134"
_CTCP_DELIMITER = "\001"
_low_level_mapping = {
"0": "\000",
"n": "\n",
"r": "\r",
_LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE}
_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
def mask_matches(nick, mask):
"""Check if a nick matches a mask.
Returns true if the nick matches, otherwise false.
"""
nick = irc_lower(nick)
mask = irc_lower(mask)
mask = mask.replace("\\", "\\\\")
for ch in ".$|[](){}+":
mask = mask.replace(ch, "\\" + ch)
mask = mask.replace("?", ".")
mask = mask.replace("*", ".*")
r = re.compile(mask, re.IGNORECASE)
return r.match(nick)
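# Hedged example: mask_matches("Nick", "n*k") returns a truthy match
# object (comparison is case-insensitive per irc_lower), while
# mask_matches("Nick", "x*") returns None.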
_special = "-[]\\`^{}"
nick_characters = string.ascii_letters + string.digits + _special
_ircstring_translation = string.maketrans(string.ascii_uppercase + "[]\\^",
string.ascii_lowercase + "{}|~")
def irc_lower(s):
"""Returns a lowercased string.
The definition of lowercased comes from the IRC specification (RFC
1459).
"""
return s.translate(_ircstring_translation)
def _ctcp_dequote(message):
"""[Internal] Dequote a message according to CTCP specifications.
The function returns a list where each element can be either a
string (normal message) or a tuple of one or two strings (tagged
messages). If a tuple has only one element (ie is a singleton),
that element is the tag; otherwise the tuple has two elements: the
tag and the data.
Arguments:
message -- The message to be decoded.
"""
def _low_level_replace(match_obj):
ch = match_obj.group(1)
# If low_level_mapping doesn't have the character as key, we
# should just return the character.
return _low_level_mapping.get(ch, ch)
if _LOW_LEVEL_QUOTE in message:
# Yup, there was a quote. Release the dequoter, man!
message = _low_level_regexp.sub(_low_level_replace, message)
if _CTCP_DELIMITER not in message:
return [message]
else:
# Split it into parts. (Does any IRC client actually *use*
# CTCP stacking like this?)
chunks = message.split(_CTCP_DELIMITER)
messages = []
i = 0
while i < len(chunks) - 1:
# Add message if it's non-empty.
if len(chunks[i]) > 0:
messages.append(chunks[i])
if i < len(chunks) - 2:
# Aye! CTCP tagged data ahead!
messages.append(tuple(chunks[i + 1].split(" ", 1)))
i = i + 2
if len(chunks) % 2 == 0:
# Hey, a lonely _CTCP_DELIMITER at the end! This means
# that the last chunk, including the delimiter, is a
# normal message! (This is according to the CTCP
# specification.)
messages.append(_CTCP_DELIMITER + chunks[-1])
return messages
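# Hedged example: _ctcp_dequote("\001ACTION waves\001") returns
# [("ACTION", "waves")], while a plain string with no CTCP delimiter is
# returned unchanged as a one-element list.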
def is_channel(string):
"""Check if a string is a channel name.
Returns true if the argument is a channel name, otherwise false.
"""
return string and string[0] in "#&+!"
def ip_numstr_to_quad(num):
"""Convert an IP number as an integer given in ASCII
representation (e.g. '3232235521') to an IP address string
(e.g. '192.168.0.1')."""
n = long(num)
p = map(str, map(int, [n >> 24 & 0xFF, n >> 16 & 0xFF,
n >> 8 & 0xFF, n & 0xFF]))
return ".".join(p)
def ip_quad_to_numstr(quad):
"""Convert an IP address string (e.g. '192.168.0.1') to an IP
number as an integer given in ASCII representation
(e.g. '3232235521')."""
p = map(long, quad.split("."))
s = str((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
if s[-1] == "L":
s = s[:-1]
return s
def nm_to_n(s):
"""Get the nick part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[0]
def nm_to_uh(s):
"""Get the userhost part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[1]
def nm_to_h(s):
"""Get the host part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("@")[1]
def nm_to_u(s):
"""Get the user part of a nickmask.
(The source of an Event is a nickmask.)
"""
s = s.split("!")[1]
return s.split("@")[0]
def parse_nick_modes(mode_string):
"""Parse a nick mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
always None.
Example:
>>> irclib.parse_nick_modes(\"+ab-c\")
[['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
"""Parse a channel mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
None if mode isn't one of \"b\", \"k\", \"l\", \"v\" or \"o\".
Example:
>>> irclib.parse_channel_modes(\"+ab-c foo\")
[['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "bklvo")
def _parse_modes(mode_string, unary_modes=""):
"""[Internal]"""
modes = []
arg_count = 0
# State variable.
sign = ""
a = mode_string.split()
if len(a) == 0:
return []
else:
mode_part, args = a[0], a[1:]
if mode_part[0] not in "+-":
return []
for ch in mode_part:
if ch in "+-":
sign = ch
elif ch == " ":
collecting_arguments = 1
elif ch in unary_modes:
if len(args) >= arg_count + 1:
modes.append([sign, ch, args[arg_count]])
arg_count = arg_count + 1
else:
modes.append([sign, ch, None])
else:
modes.append([sign, ch, None])
return modes
def _ping_ponger(connection, event):
"""[Internal]"""
connection.pong(event.target())
# Numeric table mostly stolen from the Perl IRC module (Net::IRC).
numeric_events = {
"001": "welcome",
"002": "yourhost",
"003": "created",
"004": "myinfo",
"005": "featurelist", # XXX
"200": "tracelink",
"201": "traceconnecting",
"202": "tracehandshake",
"203": "traceunknown",
"204": "traceoperator",
"205": "traceuser",
"206": "traceserver",
"207": "traceservice",
"208": "tracenewtype",
"209": "traceclass",
"210": "tracereconnect",
"211": "statslinkinfo",
"212": "statscommands",
"213": "statscline",
"214": "statsnline",
"215": "statsiline",
"216": "statskline",
"217": "statsqline",
"218": "statsyline",
"219": "endofstats",
"221": "umodeis",
"231": "serviceinfo",
"232": "endofservices",
"233": "service",
"234": "servlist",
"235": "servlistend",
"241": "statslline",
"242": "statsuptime",
"243": "statsoline",
"244": "statshline",
"250": "luserconns",
"251": "luserclient",
"252": "luserop",
"253": "luserunknown",
"254": "luserchannels",
"255": "luserme",
"256": "adminme",
"257": "adminloc1",
"258": "adminloc2",
"259": "adminemail",
"261": "tracelog",
"262": "endoftrace",
"263": "tryagain",
"265": "n_local",
"266": "n_global",
"300": "none",
"301": "away",
"302": "userhost",
"303": "ison",
"305": "unaway",
"306": "nowaway",
"311": "whoisuser",
"312": "whoisserver",
"313": "whoisoperator",
"314": "whowasuser",
"315": "endofwho",
"316": "whoischanop",
"317": "whoisidle",
"318": "endofwhois",
"319": "whoischannels",
"321": "liststart",
"322": "list",
"323": "listend",
"324": "channelmodeis",
"329": "channelcreate",
"331": "notopic",
"332": "currenttopic",
"333": "topicinfo",
"341": "inviting",
"342": "summoning",
"346": "invitelist",
"347": "endofinvitelist",
"348": "exceptlist",
"349": "endofexceptlist",
"351": "version",
"352": "whoreply",
"353": "namreply",
"361": "killdone",
"362": "closing",
"363": "closeend",
"364": "links",
"365": "endoflinks",
"366": "endofnames",
"367": "banlist",
"368": "endofbanlist",
"369": "endofwhowas",
"371": "info",
"372": "motd",
"373": "infostart",
"374": "endofinfo",
"375": "motdstart",
"376": "endofmotd",
"377": "motd2", # 1997-10-16 -- tkil
"381": "youreoper",
"382": "rehashing",
"384": "myportis",
"391": "time",
"392": "usersstart",
"393": "users",
"394": "endofusers",
"395": "nousers",
"401": "nosuchnick",
"402": "nosuchserver",
"403": "nosuchchannel",
"404": "cannotsendtochan",
"405": "toomanychannels",
"406": "wasnosuchnick",
"407": "toomanytargets",
"409": "noorigin",
"411": "norecipient",
"412": "notexttosend",
"413": "notoplevel",
"414": "wildtoplevel",
"421": "unknowncommand",
"422": "nomotd",
"423": "noadmininfo",
"424": "fileerror",
"431": "nonicknamegiven",
"432": "erroneusnickname", # Thiss iz how its speld in thee RFC.
"433": "nicknameinuse",
"436": "nickcollision",
"437": "unavailresource", # "Nick temporally unavailable"
"441": "usernotinchannel",
"442": "notonchannel",
"443": "useronchannel",
"444": "nologin",
"445": "summondisabled",
"446": "usersdisabled",
"451": "notregistered",
"461": "needmoreparams",
"462": "alreadyregistered",
"463": "nopermforhost",
"464": "passwdmismatch",
"465": "yourebannedcreep", # I love this one...
"466": "youwillbebanned",
"467": "keyset",
"471": "channelisfull",
"472": "unknownmode",
"473": "inviteonlychan",
"474": "bannedfromchan",
"475": "badchannelkey",
"476": "badchanmask",
"477": "nochanmodes", # "Channel doesn't support modes"
"478": "banlistfull",
"481": "noprivileges",
"482": "chanoprivsneeded",
"483": "cantkillserver",
"484": "restricted", # Connection is restricted
"485": "uniqopprivsneeded",
"491": "nooperhost",
"492": "noservicehost",
"501": "umodeunknownflag",
"502": "usersdontmatch",
}
generated_events = [
# Generated events
"dcc_connect",
"dcc_disconnect",
"dccmsg",
"disconnect",
"ctcp",
"ctcpreply",
]
protocol_events = [
# IRC protocol events
"error",
"join",
"kick",
"mode",
"part",
"ping",
"privmsg",
"privnotice",
"pubmsg",
"pubnotice",
"quit",
"invite",
"pong",
]
all_events = generated_events + protocol_events + numeric_events.values()
|
{
"content_hash": "4711eeb964b9d01ae1c59c86af303f9f",
"timestamp": "",
"source": "github",
"line_count": 1578,
"max_line_length": 79,
"avg_line_length": 30.55576679340938,
"alnum_prop": 0.5576871227990128,
"repo_name": "rconradharris/pyhole",
"id": "4165e42915ef9214b0211de81dd2e131759fcfba",
"size": "49084",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyhole/irclib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="borderwidth", parent_name="parcats.line.colorbar", **kwargs
):
super(BorderwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
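# Hedged usage sketch (hypothetical figure, not from this repo's docs):
# this generated validator is exercised indirectly when the property is
# set, e.g.
#   fig.update_traces(line_colorbar_borderwidth=2,
#                     selector=dict(type="parcats"))
# and values below min=0 are rejected with a validation error.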
|
{
"content_hash": "2fe8f0ad4987f65f415980c06cbbbf3b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.6083333333333333,
"repo_name": "plotly/plotly.py",
"id": "f46671833c0daaf0aff102152fbd3277675635f6",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/line/colorbar/_borderwidth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import sys
import os
import csv
import json
import re
import math
from decimal import Decimal
from django.core.management.base import BaseCommand, CommandError
field_types = {
'date_ymd': 'DateField',
'number': 'FloatField',
'integer': 'IntegerField',
'email': 'EmailField',
'text': 'CharField',
'textarea': 'TextField',
'calc': 'FloatField',
'radio': 'CharField',
'select': 'CharField',
'checkbox': 'CharField',
'yesno': 'BooleanField',
'truefalse': 'BooleanField',
}
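# Hedged example: a REDCap field whose validation type is 'date_ymd' maps
# to a Django DateField; a 'checkbox' field with no validation type falls
# back to CharField via the field-type lookup in get_field_type below.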
__project_name__ = ''
requires_model_validation = False
db_module = 'django.db'
args = 'file', 'jsonfile'
def csv_2_fixture(self, file, reader, json_models, p_name, fout):
global __project_name__
__project_name__ = p_name
base_form_name = ''
new_form_name = ''
form_name = ''
pk_num_list = []
fixtures = []
form_dict = {}
file.seek(0)
reader.next()
pk_num = 0
for form in open(json_models, 'r'):
form = json.loads(form)
form_name = form['form name']
file.seek(0)
reader.next()
pk_num = 0
num_repeats = ''
primary_key_counter = 0
additional_forms = 0
if form_name.find('~') != -1:
form_name, fk_name = form['form name'].split('~')
form_dict[form_name] = fk_name
num_repeats = form_name.split(' ')[1]
else:
base_form_name = form['form name']
form_dict = {}
for line in reader:
pk_num += 1
fixture_dict = {}
if num_repeats:
if num_repeats.isdigit() is False:
"""
Handles the special case where the number of repeats depends
on another field, usually labeled as formName
[relativeForm].
This field will not be in the form[fields] list.
form_name is changed to avoid errors regarding the number
of repeats as the code continues; specifically, an error
might pop up in find_related_forms if the repeat count is
not changed to a number.
"""
new_num_repeats = line[num_repeats.replace('[', '')
.replace(']', '')]
if not new_num_repeats:
new_num_repeats = 0
#form_name is formatted like formName 5~otherForm 5 before
#this change
new_form_name = form_name.split('~')[0] \
.split(' ')[0] + ' ' + str(new_num_repeats)
form_dict[new_form_name] = fk_name
if new_form_name:
foreign_forms_list = find_related_forms(self,
new_form_name,
form_dict)
new_form_name = ''
else:
foreign_forms_list = find_related_forms(self,
form_name,
form_dict)
full_form_list = foreign_forms_list[:]
full_form_list.append(base_form_name)
full_form_list = full_form_list[::-1]
primary_key_counter, additional_forms = \
generate_repeating_fixtures(self, line, form,
full_form_list, fixtures,
pk_num, pk_num_list,
primary_key_counter,
additional_forms)
else:
#code for generating a fixture without foreign keys
#not used currently with the addition of record form
pk_num_list.append(pk_num)
for field in form['fields']:
"""
Determines whether the field has choices, what they are, and gets
the value for each of the data columns generated for those choices.
"""
if field['choices']:
field_names = get_field_names(self, field, form_dict,
field['field name'])
checked_line = ''
answered = ''
for name in field_names:
try:
if len(field_names) > 1:
if line[name] == '1':
checked_line = name[-1]
answered = True
elif line[name] == '0':
answered = True
else:
if line[name]:
checked_line = line[name]
except KeyError:
#print 'ERROR: NOT FOUND ' + name
#print field
#print field_names
pass
if checked_line:
fixture_dict[field['field name']] = [field,
checked_line]
elif answered is True:
fixture_dict[field['field name']] = [field, '0']
else:
fixture_dict[field['field name']] = [field, '']
else:
try:
fixture_dict[field['field name']] = [
field,
line[field['field name']]
]
except KeyError:
#print 'ERROR: NOT FOUND '+field['field name']
#print field
pass
fixtures.append([form_name, fixture_dict])
print_fixtures(self, fixtures, pk_num_list, fout)
def generate_repeating_fixtures(self, line, form,
form_list, fixtures, pk_num,
pk_num_list, primary_key_counter,
additional_forms):
"""
This function generates the fixture dictionaries for a repeating form.
"""
num_repeats_all = 1
num_repeats_list = []
current_repeat_list = []
counter = 0
#populates current_repeat_list with 1s, amount equal to length of
#form_list - 1
current_repeat_list = [1] * len(form_list[1:])
#determines number of repeats from items in form_list
for item in form_list:
if len(item.split(' ')) > 1:
num_repeats_form = item.split(' ')[1]
num_repeats_list.insert(0, num_repeats_form)
num_repeats_all = int(num_repeats_all) * int(num_repeats_form)
for i in range(num_repeats_all):
primary_key_counter += 1
fixture_dict = {}
form_num_list = []
fk_index = len(form_list)-2
foreign_key = form_list[fk_index].lower().split(' ')[0].replace('_',
'')
if fk_index == 0:
fixture_dict[foreign_key] = ['', pk_num]
else:
fk_num = int(math.ceil(primary_key_counter /
float(num_repeats_list[0])))
fixture_dict[foreign_key] = ['', fk_num]
pk_num_list.append(primary_key_counter+additional_forms)
checkboxform = False
try:
if form['fields'][0]['field name'] == 'label' and \
form['fields'][1]['field name'] == 'value':
checkboxform = True
except IndexError:
pass
if checkboxform:
#clean_field_name = re.sub('\${d\}', '', form['form name'])
cb_field_name = form['form name'].split('~')[0].split(' ')[0]
cb_field_name = re.sub('\${d\}', '', cb_field_name)
if len(form_list[2:]) == 0:
base_field_name = cb_field_name
else:
base_field_name = get_field_name(self, form['fields'][1],
form_list[2:],
current_repeat_list,
cb_field_name).lower()
base_field_name = base_field_name[:-1]
field_names = get_field_names(self, form['fields'][1],
form_list[2:], base_field_name)
checked_lines = []
answered = False
for name in field_names:
try:
if line[name] == '1':
checked_lines.append(name[-1])
answered = True
elif line[name] == '0':
answered = True
except KeyError:
pass
checked_fixtures = []
for checked_line in checked_lines:
choices = form['fields'][1]['choices']
choice = choices.split('|')
"""
The number assigned to each choice by redcap might not start
at 1. This subtracts the starting num from the index so
an out of bounds error doesn't occur
"""
starts_at = choice[0].split(',')[0]
choice = choice[int(checked_line)-int(starts_at)]
choice = choice.split(',')
fixture_dict['label'] = [form['fields'][0], choice[1]]
fixture_dict['value'] = [form['fields'][1],
choice[0].strip(' ')]
checked_fixtures.append(dict(fixture_dict))
temp_primary = primary_key_counter
for i, fixture in enumerate(checked_fixtures):
if i < len(checked_fixtures)-1:
additional_forms = additional_forms + 1
clean_form_name = form['form name'].split(' ')[0] \
.replace('$', '')
fixtures.append([clean_form_name, fixture])
pk_num_list.append(temp_primary+additional_forms)
else:
for field in form['fields']:
clean_field_name = re.sub('\${d\}', '', field['field name'])
#form_list[0] and form_list[1] are both 'base forms'
#form_list[0] is record, form_list[1] is the form name
#given for each field without repeating
if len(form_list[2:]) == 0:
base_field_name = field['field name']
else:
base_field_name = get_field_name(self, field,
form_list[2:],
current_repeat_list) \
.lower()
if field['choices']:
field_names = get_field_names(self, field,
form_list[2:],
base_field_name)
checked_line = ''
answered = ''
for name in field_names:
try:
if len(field_names) > 1:
if line[name] == '1':
checked_line = name[-1]
answered = True
elif line[name] == '0':
answered = True
else:
if line[name]:
checked_line = line[name]
except KeyError:
#print 'ERROR: FIELD NOT FOUND ' + name
#print field
#print field_names
pass
#if the line is checked, the number of the option is the answer
if checked_line:
fixture_dict[clean_field_name] = [field, checked_line]
elif answered is True:
fixture_dict[clean_field_name] = [field, '0']
else:
fixture_dict[clean_field_name] = [field, '']
elif '_summary' in field['field name']:
field_names = get_field_names_summary(self, field,
form_list[2:],
base_field_name[:-8])
checked_lines = []
answered = False
for name in field_names:
try:
if line[name] == '1':
checked_lines.append(name[-1])
answered = True
elif line[name] == '0':
answered = True
except KeyError:
pass
choices_str = ''
for checked_line in checked_lines:
choices = field['field note']
choice = choices.split('|')
"""
The number assigned to each choice might not start
at 1. This subtracts the starting num from the index
we check
"""
starts_at = choice[0].split(',')[0]
choice = choice[int(checked_line)-int(starts_at)]
choices_str = choices_str + ' ' + choice
fixture_dict[clean_field_name] = [field, choices_str]
else:
try:
fixture_dict[clean_field_name] = [field,
line[base_field_name]
]
except KeyError:
#print 'ERROR: NOT FOUND ' + base_field_name
#print field
#print base_field_name
pass
clean_form_name = form['form name'].split(' ')[0].replace('$', '')
fixtures.append([clean_form_name, fixture_dict])
cur_ind = len(current_repeat_list) - 1
update_current_repeats(self, num_repeats_list[::-1],
current_repeat_list, cur_ind)
return primary_key_counter, additional_forms
def get_field_name(self, field, form_list,
repeat_num_list, alt_field_name=None):
"""
Loops through a list of forms. All forms are prefix forms except for the
last form in form_list.
"""
prefix = ''
if alt_field_name:
field_name = alt_field_name
else:
field_name = field['field name']
for i in range(len(form_list)):
if i != len(form_list)-1:
str_split = form_list[i].split(' ')
name = str_split[0]
name = re.sub('\d$', '', name)
num_repeats = repeat_num_list[i]
prefix = prefix + name + str(num_repeats) + '_'
elif field_name.find('${d}') != -1:
new_field_name = re.sub('\$\{d\}',
str(repeat_num_list[-1]),
field_name)
else:
new_field_name = field_name + '' + str(repeat_num_list[-1])
new_field_name = prefix + new_field_name
return new_field_name
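# Hedged trace (hypothetical forms): with form_list = ['Micro 2',
# 'Probe 3'] and repeat_num_list = [2, 3], a field named 'probe_id${d}'
# resolves to 'Micro2_probe_id3' (callers lowercase the result).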
def find_related_forms(self, form_name, form_dict, foreign_forms=None):
"""
Finds the form_name value in the form_dict. If it is found, the function
will call itself using form_dict[form_name]. The form_dict is a dictionary
with the keys being a form name and the value being the name of the form
they have a foreign key relation with.
Ex: form_dict['Microarray 1'] = 'Prior Gen Testing'
This function will continue until no more related forms are found, and will
return a list of them, in order from highest to deepest form relation
"""
if foreign_forms is None:
foreign_forms = []
if form_name in form_dict and not form_name in foreign_forms:
foreign_forms.append(form_name)
find_related_forms(self, form_dict[form_name],
form_dict, foreign_forms)
return foreign_forms
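# Hedged trace: with form_dict = {'Microarray 1': 'Prior Gen Testing',
# 'Prior Gen Testing': 'Record'}, calling find_related_forms(self,
# 'Microarray 1', form_dict) returns ['Microarray 1', 'Prior Gen Testing'].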
def get_field_names(self, field, form_dict, field_name):
"""
Checkboxes and radio_other fields have multiple parts in the data csv,
usually something like name1 name2 name3 for each checkbox/radio button
that is pushable, but the info must be put into one field.
This method finds the fields in the data file that are related to the field
parameter. If it is a checkbox, it splits the possible choices and uses
that to find the fields.
If another special case for field names needs to be added, all that
needs to be done is add an elif statement with the field type or variable
it depends on.
"""
choices_field_names = []
if field['field type'] == 'checkbox' or \
field['field type'] == 'checkbox_other' or \
field['field type'] == 'checkbox_details':
choices = field['choices'].split('|')
for choice in choices:
choices_field_names.append(field_name.lower() + '___' +
choice.split(',')[0].strip(' '))
else:
choices_field_names.append(field_name.lower())
return choices_field_names
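# For example (hypothetical field), a checkbox whose 'choices' value is
# '1, Yes | 2, No' and whose field_name is 'Smoker' expands to
# ['smoker___1', 'smoker___2'], matching the column names in the data csv.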
def get_field_names_summary(self, field, form_dict, field_name):
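    """
    Like get_field_names, but for summary fields, whose choice list is
    stored in 'field note' rather than 'choices'.
    """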
choices = field['field note'].split('|')
choices_field_names = []
for choice in choices:
choices_field_names.append(field_name.lower() + '___' +
choice.split(',')[0].strip(' '))
return choices_field_names
def update_current_repeats(self, form_list, current_repeats_list, cur_index):
"""
Updates the current_repeats_list depending on form_list([5,5,5] which
is a list of numbers indicating the max number of repeats needed) and
current_repeats_list([1,1,1] which is a list of numbers indicating what
iteration the repeating is on). When function is first called, cur_index
will be 0. Iterates the current_repeats_list like
[1,1,1][1,1,2][1,1,3][1,2,1][1,2,2][1,2,3]
if the element at cur_index in current_repeats_list is greater than or
equal to the element in form_list at cur_index (Both of these are ints),
then 'reset' the element in current_repeats_list.
if the cur_index - 1 is not negative (still in bounds + cur_index is
not first index) then recursively call update_current_repeats on
cur_index - 1
else add 1 to current_repeats_list[cur_index]
"""
if int(current_repeats_list[cur_index]) >= int(form_list[cur_index]):
current_repeats_list[cur_index] = 1
if cur_index - 1 >= 0:
cur_index -= 1
update_current_repeats(self, form_list,
current_repeats_list, cur_index)
else:
current_repeats_list[cur_index] += 1
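# Worked example (values are illustrative): with form_list = [3, 3, 3],
# update_current_repeats(self, [3, 3, 3], [1, 1, 3], 2) resets index 2
# and carries into index 1, leaving [1, 2, 1]; called again on [1, 2, 1]
# it simply increments the last slot, leaving [1, 2, 2].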
def print_fixtures(self, fixtures_list, pk_list, fout):
"""
    fixtures_list is a list of lists. Each element is a list of
    [form name, fixture_dict]. Each element in a fixture_dict
    is [field, field_val].
    The function loops through each element in fixtures_list, then through
    each key (element) in fixtures_list[i][1] (a fixture_dict), and
    determines whether its fields are blank. Fields that are not blank are
    added to field_dict, and field_dict is printed once all fields in each
    fixture_dict have been checked.
"""
all_json = []
for i in range(len(fixtures_list)):
field_dict = {}
#if field has a value, print it
for key in fixtures_list[i][1]:
if fixtures_list[i][1][key]:
field = fixtures_list[i][1][key][0]
field_val = fixtures_list[i][1][key][1]
if field:
field_dict[key] = cast_field(self, field, field_val)
else:
#if it is just a foreign key
field_dict[key] = field_val
        all_json.append({'model': __project_name__ + '.' +
                         fixtures_list[i][0].replace('_', ''),
                         'pk': pk_list[i],
                         'fields': field_dict
                         })
fout.write(json.dumps(all_json, indent=4, separators=(',', ': ')))
def get_field_type(self, field):
"""
    Given a field dict from the data dictionary, return the Django field
    type name for it, widening non-required booleans to NullBooleanField
    and treating integer choice lists as IntegerField.
"""
required = field['required']
validation_type = field['validation type']
field_type = field['field type']
try:
field_type = field_types.get(validation_type, field_types[field_type])
except KeyError:
field_type = 'TextField'
if not required:
        if field_type == 'BooleanField':
field_type = 'NullBooleanField'
choices = None
    # If every choice key parses as an int, treat the field as an
    # IntegerField regardless of its declared type.
    if field['choices']:
try:
choices = [(int(v.strip()),
k.strip()) for v, k in [choice.split(',')
for choice in field['choices'].split('|')]]
field_type = 'IntegerField'
except (ValueError, TypeError):
pass
return field_type
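# Behaviour sketch (field dicts are hypothetical): a non-required field
# mapped to 'BooleanField' comes back as 'NullBooleanField', and a field
# whose 'choices' value is '1, Male | 2, Female' comes back as
# 'IntegerField' because every choice key parses as an int.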
def cast_field(self, field, field_val):
"""
Casts line[name] depending on the field_type
"""
field_type = get_field_type(self, field)
if field_type == 'CharField' or field_type == 'TextField':
return field_val
elif field_type == 'IntegerField':
if field_val and field_val.isdigit():
return int(field_val)
elif field_type == 'FloatField':
try:
return float(field_val)
        except (TypeError, ValueError):
pass
elif field_type == 'NullBooleanField':
if field_val == '':
return None
elif field_val == '0':
return False
elif field_val == '1':
return True
else:
return field_val
elif field_type == 'BooleanField':
if field_val:
if field_val == '1':
return True
elif field_val == '0':
return False
else:
return field_val
elif field_type == 'DateField':
        return field_val
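# A few expected casts (inputs are illustrative): an 'IntegerField' value
# of '42' becomes 42, a 'NullBooleanField' value of '' becomes None and
# '1' becomes True, while date and text values pass through unchanged.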
|
{
"content_hash": "1cad5d73b339dfe1115f0d79b62144f5",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 79,
"avg_line_length": 42.93884892086331,
"alnum_prop": 0.45522325542431097,
"repo_name": "swanijam/django-redcap",
"id": "a9a7c2dfde259c282dcf86e8f4a3900fab7f72a5",
"size": "23874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djredcap/management/subcommands/djfixture.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "78763"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
try:
import sphinxcontrib.gist # NOQA
extensions = ['sphinxcontrib.gist']
except ImportError:
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Nikola'
copyright = '2012-2015, The Nikola Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.7.0'
# The full version, including alpha/beta/rc tags.
release = '7.7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nikoladoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'Nikola.tex', 'Nikola Documentation',
'The Nikola Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nikola', 'Nikola Documentation',
['The Nikola Contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Nikola', 'Nikola Documentation',
'The Nikola Contributors', 'Nikola', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
primary_domain = None
|
{
"content_hash": "e618913c706200e16d186969605dc56d",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 79,
"avg_line_length": 31.435146443514643,
"alnum_prop": 0.7005191002262745,
"repo_name": "berezovskyi/nikola",
"id": "7d9db7a7b3546c8c9a9e86247e9dbf51a736db82",
"size": "8386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/sphinx/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18265"
},
{
"name": "JavaScript",
"bytes": "18717"
},
{
"name": "Python",
"bytes": "992410"
},
{
"name": "Shell",
"bytes": "3057"
},
{
"name": "XSLT",
"bytes": "3527"
}
],
"symlink_target": ""
}
|
import logging
import os
import time
from tempfile import gettempdir
from typing import Iterable
from sqlalchemy import Table, exc, func, inspect, or_, text
from airflow import settings
from airflow.configuration import conf
from airflow.jobs.base_job import BaseJob # noqa: F401
from airflow.models import ( # noqa: F401
DAG,
XCOM_RETURN_KEY,
BaseOperator,
BaseOperatorLink,
Connection,
DagBag,
DagModel,
DagPickle,
DagRun,
DagTag,
Log,
Pool,
SkipMixin,
SlaMiss,
TaskFail,
TaskInstance,
TaskReschedule,
Variable,
XCom,
)
# We need to add this model manually to get reset working well
from airflow.models.serialized_dag import SerializedDagModel # noqa: F401
# TODO: remove create_session once we decide to break backward compatibility
from airflow.utils.session import create_global_lock, create_session, provide_session # noqa: F401
log = logging.getLogger(__name__)
def _format_airflow_moved_table_name(source_table, version):
return "__".join([settings.AIRFLOW_MOVED_TABLE_PREFIX, version.replace(".", "_"), source_table])
@provide_session
def merge_conn(conn, session=None):
"""Add new Connection."""
if not session.query(Connection).filter(Connection.conn_id == conn.conn_id).first():
session.add(conn)
session.commit()
@provide_session
def add_default_pool_if_not_exists(session=None):
"""Add default pool if it does not exist."""
if not Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session):
default_pool = Pool(
pool=Pool.DEFAULT_POOL_NAME,
slots=conf.getint(section='core', key='default_pool_task_slot_count'),
description="Default pool",
)
session.add(default_pool)
session.commit()
@provide_session
def create_default_connections(session=None):
"""Create default Airflow connections."""
merge_conn(
Connection(
conn_id="airflow_db",
conn_type="mysql",
host="mysql",
login="root",
password="",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="aws_default",
conn_type="aws",
),
session,
)
merge_conn(
Connection(
conn_id="azure_batch_default",
conn_type="azure_batch",
login="<ACCOUNT_NAME>",
password="",
extra='''{"account_url": "<ACCOUNT_URL>"}''',
)
)
merge_conn(
Connection(
conn_id="azure_cosmos_default",
conn_type="azure_cosmos",
extra='{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>" }',
),
session,
)
merge_conn(
Connection(
conn_id='azure_data_explorer_default',
conn_type='azure_data_explorer',
host='https://<CLUSTER>.kusto.windows.net',
extra='''{"auth_method": "<AAD_APP | AAD_APP_CERT | AAD_CREDS | AAD_DEVICE>",
"tenant": "<TENANT ID>", "certificate": "<APPLICATION PEM CERTIFICATE>",
"thumbprint": "<APPLICATION CERTIFICATE THUMBPRINT>"}''',
),
session,
)
merge_conn(
Connection(
conn_id="azure_data_lake_default",
conn_type="azure_data_lake",
extra='{"tenant": "<TENANT>", "account_name": "<ACCOUNTNAME>" }',
),
session,
)
merge_conn(
Connection(
conn_id="azure_default",
conn_type="azure",
),
session,
)
merge_conn(
Connection(
conn_id="cassandra_default",
conn_type="cassandra",
host="cassandra",
port=9042,
),
session,
)
merge_conn(
Connection(
conn_id="databricks_default",
conn_type="databricks",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="dingding_default",
conn_type="http",
host="",
password="",
),
session,
)
merge_conn(
Connection(
conn_id="drill_default",
conn_type="drill",
host="localhost",
port=8047,
extra='{"dialect_driver": "drill+sadrill", "storage_plugin": "dfs"}',
),
session,
)
merge_conn(
Connection(
conn_id="druid_broker_default",
conn_type="druid",
host="druid-broker",
port=8082,
extra='{"endpoint": "druid/v2/sql"}',
),
session,
)
merge_conn(
Connection(
conn_id="druid_ingest_default",
conn_type="druid",
host="druid-overlord",
port=8081,
extra='{"endpoint": "druid/indexer/v1/task"}',
),
session,
)
merge_conn(
Connection(
conn_id="elasticsearch_default",
conn_type="elasticsearch",
host="localhost",
schema="http",
port=9200,
),
session,
)
merge_conn(
Connection(
conn_id="emr_default",
conn_type="emr",
extra="""
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"Ec2KeyName": "mykey",
"Ec2SubnetId": "somesubnet",
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Core nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
],
"TerminationProtected": false,
"KeepJobFlowAliveWhenNoSteps": false
},
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
""",
),
session,
)
merge_conn(
Connection(
conn_id="facebook_default",
conn_type="facebook_social",
extra="""
{ "account_id": "<AD_ACCOUNT_ID>",
"app_id": "<FACEBOOK_APP_ID>",
"app_secret": "<FACEBOOK_APP_SECRET>",
"access_token": "<FACEBOOK_AD_ACCESS_TOKEN>"
}
""",
),
session,
)
merge_conn(
Connection(
conn_id="fs_default",
conn_type="fs",
extra='{"path": "/"}',
),
session,
)
merge_conn(
Connection(
conn_id="google_cloud_default",
conn_type="google_cloud_platform",
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="hive_cli_default",
conn_type="hive_cli",
port=10000,
host="localhost",
extra='{"use_beeline": true, "auth": ""}',
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="hiveserver2_default",
conn_type="hiveserver2",
host="localhost",
schema="default",
port=10000,
),
session,
)
merge_conn(
Connection(
conn_id="http_default",
conn_type="http",
host="https://www.httpbin.org/",
),
session,
)
merge_conn(
Connection(
conn_id='kubernetes_default',
conn_type='kubernetes',
),
session,
)
merge_conn(
Connection(
conn_id='kylin_default',
conn_type='kylin',
host='localhost',
port=7070,
login="ADMIN",
password="KYLIN",
),
session,
)
merge_conn(
Connection(
conn_id="leveldb_default",
conn_type="leveldb",
host="localhost",
),
session,
)
merge_conn(Connection(conn_id="livy_default", conn_type="livy", host="livy", port=8998), session)
merge_conn(
Connection(
conn_id="local_mysql",
conn_type="mysql",
host="localhost",
login="airflow",
password="airflow",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="metastore_default",
conn_type="hive_metastore",
host="localhost",
extra='{"authMechanism": "PLAIN"}',
port=9083,
),
session,
)
merge_conn(Connection(conn_id="mongo_default", conn_type="mongo", host="mongo", port=27017), session)
merge_conn(
Connection(
conn_id="mssql_default",
conn_type="mssql",
host="localhost",
port=1433,
),
session,
)
merge_conn(
Connection(
conn_id="mysql_default",
conn_type="mysql",
login="root",
schema="airflow",
host="mysql",
),
session,
)
merge_conn(
Connection(
conn_id="opsgenie_default",
conn_type="http",
host="",
password="",
),
session,
)
merge_conn(
Connection(
conn_id="oss_default",
conn_type="oss",
extra='''{
"auth_type": "AK",
"access_key_id": "<ACCESS_KEY_ID>",
"access_key_secret": "<ACCESS_KEY_SECRET>"}
''',
),
session,
)
merge_conn(
Connection(
conn_id="pig_cli_default",
conn_type="pig_cli",
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="pinot_admin_default",
conn_type="pinot",
host="localhost",
port=9000,
),
session,
)
merge_conn(
Connection(
conn_id="pinot_broker_default",
conn_type="pinot",
host="localhost",
port=9000,
extra='{"endpoint": "/query", "schema": "http"}',
),
session,
)
merge_conn(
Connection(
conn_id="postgres_default",
conn_type="postgres",
login="postgres",
password="airflow",
schema="airflow",
host="postgres",
),
session,
)
merge_conn(
Connection(
conn_id="presto_default",
conn_type="presto",
host="localhost",
schema="hive",
port=3400,
),
session,
)
merge_conn(
Connection(
conn_id="qubole_default",
conn_type="qubole",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="redis_default",
conn_type="redis",
host="redis",
port=6379,
extra='{"db": 0}',
),
session,
)
merge_conn(
Connection(
conn_id="segment_default",
conn_type="segment",
extra='{"write_key": "my-segment-write-key"}',
),
session,
)
merge_conn(
Connection(
conn_id="sftp_default",
conn_type="sftp",
host="localhost",
port=22,
login="airflow",
extra='{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}',
),
session,
)
merge_conn(
Connection(
conn_id="spark_default",
conn_type="spark",
host="yarn",
extra='{"queue": "root.default"}',
),
session,
)
merge_conn(
Connection(
conn_id="sqlite_default",
conn_type="sqlite",
host=os.path.join(gettempdir(), "sqlite_default.db"),
),
session,
)
merge_conn(
Connection(
conn_id="sqoop_default",
conn_type="sqoop",
host="rdbms",
),
session,
)
merge_conn(
Connection(
conn_id="ssh_default",
conn_type="ssh",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="tableau_default",
conn_type="tableau",
host="https://tableau.server.url",
login="user",
password="password",
extra='{"site_id": "my_site"}',
),
session,
)
merge_conn(
Connection(
conn_id="trino_default",
conn_type="trino",
host="localhost",
schema="hive",
port=3400,
),
session,
)
merge_conn(
Connection(
conn_id="vertica_default",
conn_type="vertica",
host="localhost",
port=5433,
),
session,
)
merge_conn(
Connection(
conn_id="wasb_default",
conn_type="wasb",
extra='{"sas_token": null}',
),
session,
)
merge_conn(
Connection(
conn_id="webhdfs_default",
conn_type="hdfs",
host="localhost",
port=50070,
),
session,
)
merge_conn(
Connection(
conn_id='yandexcloud_default',
conn_type='yandexcloud',
schema='default',
),
session,
)
@provide_session
def initdb(session=None):
"""Initialize Airflow database."""
upgradedb(session=session)
if conf.getboolean('core', 'LOAD_DEFAULT_CONNECTIONS'):
create_default_connections(session=session)
with create_global_lock(session=session):
dagbag = DagBag()
# Save DAGs in the ORM
dagbag.sync_to_db(session=session)
# Deactivate the unknown ones
DAG.deactivate_unknown_dags(dagbag.dags.keys(), session=session)
from flask_appbuilder.models.sqla import Base
Base.metadata.create_all(settings.engine)
def _get_alembic_config():
from alembic.config import Config
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(current_dir, '..'))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory.replace('%', '%%'))
config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN.replace('%', '%%'))
return config
def check_migrations(timeout):
"""
Function to wait for all airflow migrations to complete.
:param timeout: Timeout for the migration in seconds
:return: None
"""
from alembic.runtime.migration import MigrationContext
from alembic.script import ScriptDirectory
config = _get_alembic_config()
script_ = ScriptDirectory.from_config(config)
with settings.engine.connect() as connection:
context = MigrationContext.configure(connection)
ticker = 0
while True:
source_heads = set(script_.get_heads())
db_heads = set(context.get_current_heads())
if source_heads == db_heads:
break
if ticker >= timeout:
raise TimeoutError(
f"There are still unapplied migrations after {ticker} seconds. "
f"Migration Head(s) in DB: {db_heads} | Migration Head(s) in Source Code: {source_heads}"
)
ticker += 1
time.sleep(1)
log.info('Waiting for migrations... %s second(s)', ticker)
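# A typical call site (hypothetical): check_migrations(timeout=60) polls
# once per second and raises TimeoutError if the alembic heads in the
# database still differ from those in the source tree after ~60 seconds.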
def check_conn_id_duplicates(session=None) -> Iterable[str]:
"""
    Check for duplicate conn_id values in the connection table
    :param session: sqlalchemy session
    :rtype: Iterable[str]
"""
dups = []
try:
dups = session.query(Connection.conn_id).group_by(Connection.conn_id).having(func.count() > 1).all()
except (exc.OperationalError, exc.ProgrammingError):
        # fall back if the table hasn't been created yet
        session.rollback()
if dups:
yield (
'Seems you have non unique conn_id in connection table.\n'
'You have to manage those duplicate connections '
'before upgrading the database.\n'
f'Duplicated conn_id: {[dup.conn_id for dup in dups]}'
)
def check_conn_type_null(session=None) -> Iterable[str]:
"""
    Check for null conn_type values in the Connection table
    :param session: sqlalchemy session
    :rtype: Iterable[str]
"""
n_nulls = []
try:
n_nulls = session.query(Connection.conn_id).filter(Connection.conn_type.is_(None)).all()
except (exc.OperationalError, exc.ProgrammingError, exc.InternalError):
        # fall back if the table hasn't been created yet
        session.rollback()
if n_nulls:
yield (
'The conn_type column in the connection '
'table must contain content.\n'
'Make sure you don\'t have null '
'in the conn_type column.\n'
f'Null conn_type conn_id: {list(n_nulls)}'
)
def _format_dangling_error(source_table, target_table, invalid_count, reason):
noun = "row" if invalid_count == 1 else "rows"
return (
f"The {source_table} table has {invalid_count} {noun} {reason}, which "
f"is invalid. We could not move them out of the way because the "
f"{target_table} table already exists in your database. Please either "
f"drop the {target_table} table, or manually delete the invalid rows "
f"from the {source_table} table."
)
def _move_dangling_run_data_to_new_table(session, source_table, target_table):
where_clause = "where dag_id is null or run_id is null or execution_date is null"
session.execute(text(f"create table {target_table} as select * from {source_table} {where_clause}"))
session.execute(text(f"delete from {source_table} {where_clause}"))
def check_run_id_null(session) -> Iterable[str]:
import sqlalchemy.schema
metadata = sqlalchemy.schema.MetaData(session.bind)
try:
metadata.reflect(only=[DagRun.__tablename__])
except exc.InvalidRequestError:
# Table doesn't exist -- empty db
return
    # We can't use the model here since it may differ from the db state,
    # because this function is run prior to migration. Use the reflected
    # table instead.
dagrun_table = metadata.tables[DagRun.__tablename__]
invalid_dagrun_filter = or_(
dagrun_table.c.dag_id.is_(None),
dagrun_table.c.run_id.is_(None),
dagrun_table.c.execution_date.is_(None),
)
invalid_dagrun_count = session.query(dagrun_table.c.id).filter(invalid_dagrun_filter).count()
if invalid_dagrun_count > 0:
dagrun_dangling_table_name = _format_airflow_moved_table_name(dagrun_table.name, "2.2")
if dagrun_dangling_table_name in inspect(session.get_bind()).get_table_names():
yield _format_dangling_error(
source_table=dagrun_table.name,
target_table=dagrun_dangling_table_name,
invalid_count=invalid_dagrun_count,
reason="with a NULL dag_id, run_id, or execution_date",
)
return
_move_dangling_run_data_to_new_table(session, dagrun_table.name, dagrun_dangling_table_name)
def _move_dangling_task_data_to_new_table(session, source_table, target_table):
where_clause = f"""
where (task_id, dag_id, execution_date) IN (
select source.task_id, source.dag_id, source.execution_date
from {source_table} as source
left join dag_run as dr
on (source.dag_id = dr.dag_id and source.execution_date = dr.execution_date)
where dr.id is null
)
"""
session.execute(text(f"create table {target_table} as select * from {source_table} {where_clause}"))
session.execute(text(f"delete from {source_table} {where_clause}"))
def check_task_tables_without_matching_dagruns(session) -> Iterable[str]:
import sqlalchemy.schema
from sqlalchemy import and_, outerjoin
metadata = sqlalchemy.schema.MetaData(session.bind)
models_to_dagrun = [TaskInstance, TaskReschedule]
for model in models_to_dagrun + [DagRun]:
try:
metadata.reflect(only=[model.__tablename__])
except exc.InvalidRequestError:
# Table doesn't exist, but try the other ones in case the user is upgrading from an _old_ DB
# version
pass
# Key table doesn't exist -- likely empty DB.
if DagRun.__tablename__ not in metadata or TaskInstance.__tablename__ not in metadata:
return
    # We can't use the model here since it may differ from the db state,
    # because this function is run prior to migration. Use the reflected
    # table instead.
dagrun_table = metadata.tables[DagRun.__tablename__]
existing_table_names = set(inspect(session.get_bind()).get_table_names())
errored = False
for model in models_to_dagrun:
        # We can't use the model here since it may differ from the db state,
        # because this function is run prior to migration. Use the reflected
        # table instead.
source_table = metadata.tables.get(model.__tablename__)
if source_table is None:
continue
# Migration already applied, don't check again.
if "run_id" in source_table.columns:
continue
source_to_dag_run_join_cond = and_(
source_table.c.dag_id == dagrun_table.c.dag_id,
source_table.c.execution_date == dagrun_table.c.execution_date,
)
invalid_row_count = (
session.query(source_table.c.dag_id, source_table.c.task_id, source_table.c.execution_date)
.select_from(outerjoin(source_table, dagrun_table, source_to_dag_run_join_cond))
.filter(dagrun_table.c.dag_id.is_(None))
.count()
)
if invalid_row_count <= 0:
continue
dangling_table_name = _format_airflow_moved_table_name(source_table.name, "2.2")
if dangling_table_name in existing_table_names:
yield _format_dangling_error(
source_table=source_table.name,
target_table=dangling_table_name,
invalid_count=invalid_row_count,
reason=f"without a corresponding {dagrun_table.name} row",
)
errored = True
continue
_move_dangling_task_data_to_new_table(session, source_table.name, dangling_table_name)
if errored:
session.rollback()
else:
session.commit()
@provide_session
def _check_migration_errors(session=None) -> Iterable[str]:
"""
    :param session: sqlalchemy session
    :rtype: Iterable[str]
"""
for check_fn in (
check_conn_id_duplicates,
check_conn_type_null,
check_run_id_null,
check_task_tables_without_matching_dagruns,
):
yield from check_fn(session)
@provide_session
def upgradedb(session=None):
"""Upgrade the database."""
# alembic adds significant import time, so we import it lazily
from alembic import command
config = _get_alembic_config()
config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN.replace('%', '%%'))
errors_seen = False
for err in _check_migration_errors(session=session):
if not errors_seen:
log.error("Automatic migration is not available")
errors_seen = True
log.error("%s", err)
if errors_seen:
exit(1)
with create_global_lock(session=session, pg_lock_id=2, lock_name="upgrade"):
log.info("Creating tables")
command.upgrade(config, 'heads')
add_default_pool_if_not_exists()
@provide_session
def resetdb(session=None):
"""Clear out the database"""
log.info("Dropping tables that exist")
connection = settings.engine.connect()
with create_global_lock(session=session, pg_lock_id=4, lock_name="reset"):
drop_airflow_models(connection)
drop_flask_models(connection)
initdb(session=session)
def drop_airflow_models(connection):
"""
Drops all airflow models.
:param connection: SQLAlchemy Connection
:return: None
"""
from airflow.models.base import Base
    # Drop the legacy chart, user, users and dag_stats tables - their
    # models have been deleted, and resetdb would otherwise fail on a
    # schema that still contains them.
chart = Table('chart', Base.metadata)
chart.drop(settings.engine, checkfirst=True)
user = Table('user', Base.metadata)
user.drop(settings.engine, checkfirst=True)
users = Table('users', Base.metadata)
users.drop(settings.engine, checkfirst=True)
dag_stats = Table('dag_stats', Base.metadata)
dag_stats.drop(settings.engine, checkfirst=True)
Base.metadata.drop_all(connection)
# we remove the Tables here so that if resetdb is run metadata does not keep the old tables.
Base.metadata.remove(dag_stats)
Base.metadata.remove(users)
Base.metadata.remove(user)
Base.metadata.remove(chart)
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
migration_ctx = MigrationContext.configure(connection)
version = migration_ctx._version
if version.exists(connection):
version.drop(connection)
def drop_flask_models(connection):
"""
Drops all Flask models.
:param connection: SQLAlchemy Connection
:return: None
"""
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(connection)
@provide_session
def check(session=None):
"""
Checks if the database works.
:param session: session of the sqlalchemy
"""
session.execute('select 1 as is_alive;')
log.info("Connection successful.")
|
{
"content_hash": "8cfadd162ef270963762ecc9fe32572e",
"timestamp": "",
"source": "github",
"line_count": 925,
"max_line_length": 109,
"avg_line_length": 29.635675675675675,
"alnum_prop": 0.536096012840623,
"repo_name": "apache/incubator-airflow",
"id": "16247e0965ed4a99ee4612e63e9e9b8353dca0f2",
"size": "28200",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/utils/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
}
|
import os
import pytest
import compas
from compas.datastructures import Mesh
from compas.files import STL
compas.PRECISION = "12f"
BASE_FOLDER = os.path.dirname(__file__)
@pytest.fixture
def binary_stl_with_ascii_header():
return os.path.join(BASE_FOLDER, "fixtures", "stl", "binary-1.stl")
@pytest.fixture
def binary_stl():
return os.path.join(BASE_FOLDER, "fixtures", "stl", "binary-2.stl")
@pytest.fixture
def ascii_stl():
return os.path.join(BASE_FOLDER, "fixtures", "stl", "ascii.stl")
def test_binary_detection(ascii_stl, binary_stl, binary_stl_with_ascii_header):
stl = STL(ascii_stl)
assert len(stl.parser.vertices) > 0
stl = STL(binary_stl)
assert len(stl.parser.vertices) > 0
stl = STL(binary_stl_with_ascii_header)
assert len(stl.parser.vertices) > 0
def test_binary_read_write_fidelity():
mesh = Mesh.from_stl(compas.get("cube_binary.stl"))
fp = compas.get("cube_binary_2.stl")
mesh.to_stl(fp, binary=True)
mesh_2 = Mesh.from_stl(fp)
assert mesh.adjacency == mesh_2.adjacency
assert mesh.vertex == mesh_2.vertex
|
{
"content_hash": "a4000e844607548cd77683332ab69de8",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 23.91304347826087,
"alnum_prop": 0.6881818181818182,
"repo_name": "compas-dev/compas",
"id": "ae4a87aaf89ec25e334cee1c0484a98d28d04c9d",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/compas/files/test_stl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
}
|
from functools import wraps
from flask import session, request
from .exception import AuthorticationException
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
user_id = request.cookies.get('user_id') or session.get('user_id')
if user_id:
session['user_id'] = user_id
return func(*args, **kwargs)
raise AuthorticationException('user not login')
return wrapper
def user_login(user):
session['user_id'] = user.id
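# Minimal usage sketch (app and view names are hypothetical, not part of
# this module):
#     from flask import Flask
#     app = Flask(__name__)
#     @app.route('/me')
#     @login_required
#     def me():
#         return 'ok'
# Unauthenticated requests raise AuthorticationException; after
# user_login(user), the session carries user_id and the view runs.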
|
{
"content_hash": "a34f660d70d6a3931743d9db95be717d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 25,
"alnum_prop": 0.7244444444444444,
"repo_name": "flreey/private-navigation",
"id": "4c70ec8898e3004fc6d94b8298398309de836db1",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1253"
},
{
"name": "JavaScript",
"bytes": "7783"
},
{
"name": "Python",
"bytes": "17727"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import argparse
MAJOR = 1
MINOR = 9
MICRO = 0
ISRELEASED = False
IS_RELEASE_BRANCH = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
def get_version_info(source_root):
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of scipy.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists(os.path.join(source_root, '.git')):
GIT_REVISION, COMMIT_COUNT = git_version(source_root)
elif os.path.exists('scipy/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load scipy/__init__.py
import runpy
ns = runpy.run_path('scipy/version.py')
GIT_REVISION = ns['git_revision']
        COMMIT_COUNT = ns['commit_count']
else:
GIT_REVISION = "Unknown"
COMMIT_COUNT = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + COMMIT_COUNT + '.' + GIT_REVISION
return FULLVERSION, GIT_REVISION, COMMIT_COUNT
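# For an unreleased build the resulting full version looks like
# '1.9.0.dev0+1234.abc1234' (the commit count and revision here are
# illustrative, not from the source).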
def write_version_py(source_root, filename='scipy/version.py'):
cnt = """\
# THIS FILE IS GENERATED DURING THE SCIPY BUILD
# See tools/version_utils.py for details
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
commit_count = '%(commit_count)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION, COMMIT_COUNT = get_version_info(source_root)
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'commit_count': COMMIT_COUNT,
                       'isrelease': str(ISRELEASED)})
# Return the git revision as a string
def git_version(cwd):
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env, cwd=cwd).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')[:7]
# We need a version number that's regularly incrementing for newer commits,
# so the sort order in a wheelhouse of nightly builds is correct (see
# https://github.com/MacPython/scipy-wheels/issues/114). It should also be
# a reproducible version number, so don't rely on date/time but base it on
# commit history. This gives the commit count since the previous branch
# point from the current branch (assuming a full `git clone`, it may be
# less if `--depth` was used - commonly the default in CI):
prev_version_tag = '^v{}.{}.0'.format(MAJOR, MINOR - 2)
out = _minimal_ext_cmd(['git', 'rev-list', 'HEAD', prev_version_tag,
'--count'])
COMMIT_COUNT = out.strip().decode('ascii')
COMMIT_COUNT = '0' if not COMMIT_COUNT else COMMIT_COUNT
except OSError:
GIT_REVISION = "Unknown"
COMMIT_COUNT = "Unknown"
return GIT_REVISION, COMMIT_COUNT
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--source-root", type=str, default='.',
help="Relative path to the root of the source directory")
args = parser.parse_args()
write_version_py(args.source_root)
|
{
"content_hash": "9268a1b59ec064c557c4da159a68f261",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 83,
"avg_line_length": 33.973214285714285,
"alnum_prop": 0.5992115637319316,
"repo_name": "vigna/scipy",
"id": "c5c5cbe7754949f1c0618e48a7c0bb2121ff8fef",
"size": "3805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/version_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4491892"
},
{
"name": "C++",
"bytes": "960140"
},
{
"name": "Cython",
"bytes": "1050681"
},
{
"name": "Dockerfile",
"bytes": "9839"
},
{
"name": "Fortran",
"bytes": "5299482"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "141627"
},
{
"name": "Python",
"bytes": "14969167"
},
{
"name": "Shell",
"bytes": "3533"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtaildocs", "0009_document_verbose_name_plural"),
]
operations = [
migrations.AddField(
model_name="document",
name="file_hash",
field=models.CharField(blank=True, editable=False, max_length=40),
),
]
|
{
"content_hash": "a8ee32cdacf9f7cb8118e7e8a0e49b85",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 23.875,
"alnum_prop": 0.599476439790576,
"repo_name": "zerolab/wagtail",
"id": "bc7fc9b2971a7a00b3bdc9e3227dd94f95017c3e",
"size": "431",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/documents/migrations/0010_document_file_hash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593037"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6560334"
},
{
"name": "SCSS",
"bytes": "219204"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288102"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Column',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=128)),
('column_type', models.CharField(max_length=3, choices=[('ITV', 'Интервал'), ('DT', 'Дата'), ('PCN', 'Количество пусков'), ('OCN', 'Количество остановов')])),
('from_event', models.CharField(max_length=3, choices=[('FVZ', 'ввод/замена'), ('FKR', 'капремонт'), ('FSR', 'средний ремонт'), ('FRC', 'реконструкция')])),
('element_name_filter', models.CharField(blank=True, max_length=50)),
('weight', models.IntegerField(default=0)),
],
options={
'default_permissions': [],
'verbose_name': 'столбец',
'verbose_name_plural': 'столбцы',
'db_table': 'columns',
'ordering': ['weight'],
},
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('department', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Equipment',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('name', models.CharField(max_length=50)),
('plant', models.ForeignKey(blank=True, null=True, to='uptime.Equipment', related_name='parts')),
],
options={
'verbose_name': 'оборудование',
'verbose_name_plural': 'оборудование',
'db_table': 'equipment',
'ordering': ['plant_id', 'name'],
},
),
migrations.CreateModel(
name='EventItem',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('date', models.DateField()),
('event_code', models.CharField(max_length=3, choices=[('vkr', 'Ввод из капремонта'), ('zmn', 'Ввод после замены'), ('vsr', 'Ввод из ср. ремонта'), ('vrc', 'Ввод из реконструкции'), ('vvd', 'Ввод'), ('sps', 'Списание')])),
],
options={
'default_permissions': [],
'db_table': 'event_items',
},
),
migrations.CreateModel(
name='IntervalItem',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('state_code', models.CharField(db_index=True, choices=[('wrk', 'Работа'), ('hrs', 'Горячий резерв'), ('rsv', 'Резерв'), ('trm', 'Тек. ремонт'), ('arm', 'Ав. ремонт'), ('krm', 'Кап. ремонт'), ('srm', 'Сред. ремонт'), ('rcd', 'Реконструкция')], max_length=3, default='wrk')),
('time_in_state', models.DurationField()),
],
options={
'default_permissions': [],
'db_table': 'intervals',
},
),
migrations.CreateModel(
name='Journal',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('stat_by_parent', models.BooleanField(default=False)),
('hot_rzv_stat', models.BooleanField(default=False)),
('downtime_stat', models.BooleanField(default=False)),
('description', models.TextField(blank=True)),
('equipment', models.OneToOneField(to='uptime.Equipment', related_name='journal')),
],
options={
'verbose_name_plural': 'журналы',
'permissions': (('view_journal_details', 'View journal details'), ('view_journal_list', 'View journal list'), ('create_journal_record', 'Create record'), ('edit_journal_record', 'Edit record'), ('delete_journal_record', 'Delete record'), ('create_journal_event', 'Create journal event'), ('delete_journal_event', 'Delete journal event')),
'default_permissions': [],
'verbose_name': 'журнал',
'db_table': 'journals',
'ordering': ['equipment__name'],
},
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('rdate', models.DateField()),
('up_cnt', models.IntegerField(default=0)),
('down_cnt', models.IntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('journal', models.ForeignKey(to='uptime.Journal', related_name='records')),
],
options={
'db_table': 'records',
},
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=255)),
('is_generalizing', models.BooleanField(default=False)),
('weight', models.IntegerField(default=0)),
('equipment', models.OneToOneField(to='uptime.Equipment', related_name='report')),
],
options={
'default_permissions': [],
'verbose_name': 'отчет',
'db_table': 'reports',
'verbose_name_plural': 'отчеты',
},
),
migrations.AddField(
model_name='intervalitem',
name='record',
field=models.ForeignKey(to='uptime.Record', related_name='intervals'),
),
migrations.AddField(
model_name='eventitem',
name='journal',
field=models.ForeignKey(to='uptime.Journal', related_name='events'),
),
migrations.AddField(
model_name='employee',
name='equipment',
field=models.ForeignKey(to='uptime.Equipment', related_name='profile'),
),
migrations.AddField(
model_name='employee',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='column',
name='report',
field=models.ForeignKey(to='uptime.Report', related_name='columns'),
),
]
|
{
"content_hash": "52ac321482ab9a4e17bacea1250c9f5f",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 354,
"avg_line_length": 46.73026315789474,
"alnum_prop": 0.5220329438265522,
"repo_name": "Igelinmist/etools",
"id": "48918844a88c4616ca2ca5f999be7e2be0714454",
"size": "7432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etools/apps/uptime/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "30541"
},
{
"name": "Python",
"bytes": "111337"
}
],
"symlink_target": ""
}
|
def nested_item(depth, value):
if depth <= 1:
return [value]
else:
return [nested_item(depth - 1, value)]
def nested_list(n):
"""Generate a nested list where the i'th item is at depth i."""
lis = []
for i in range(n):
if i == 0:
lis.append(i)
else:
lis.append(nested_item(i, i))
return lis
def flatten(lis):
"""Given a list, possibly nested to any level, return it flattened."""
new_lis = []
for item in lis:
        if isinstance(item, list):
new_lis.extend(flatten(item))
else:
new_lis.append(item)
return new_lis
for n in range(7):
    print(n, end=' ')
    lis = nested_list(n)
    print("original:", lis)
    new_lis = flatten(lis)
    print("flattened:", new_lis)
    print()
for i in range(6):
    lis = list(range(i))
    print("orig:", lis)
    flat_lis = flatten(lis)
    print("flat:", flat_lis)
|
{
"content_hash": "e720fe023837b7470ca52ce54f11f142",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 23.53846153846154,
"alnum_prop": 0.5446623093681917,
"repo_name": "ActiveState/code",
"id": "f26367656516c0e17bef784cb463e9c2dfb10031",
"size": "968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578948_Flattening_arbitrarily_nested_list/recipe-578948.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, patterns, include
from django.contrib import admin
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='index.html')),
# Administrative components
url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "addabacc9e574393fadd4d59ee9e44d4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 23.928571428571427,
"alnum_prop": 0.7223880597014926,
"repo_name": "bruth/wicked-django-template",
"id": "8ba0d35ffc844f5105bc238e3e342977288156a2",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/conf/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "179928"
},
{
"name": "CoffeeScript",
"bytes": "11209"
},
{
"name": "HTML",
"bytes": "6666"
},
{
"name": "JavaScript",
"bytes": "776389"
},
{
"name": "Python",
"bytes": "24812"
}
],
"symlink_target": ""
}
|
"""Spike sorting and ephys data analysis for 1000 channels and beyond."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import os.path as op
import sys
from six import StringIO
from .io.datasets import download_file
from .utils.config import load_master_config
from .utils._misc import _git_version
from .utils.plugin import IPlugin, get_plugin, discover_plugins
from .utils.testing import _enable_profiler
#------------------------------------------------------------------------------
# Global variables and functions
#------------------------------------------------------------------------------
__author__ = 'Kwik team'
__email__ = 'cyrille.rossant at gmail.com'
__version__ = '1.0.9'
__version_git__ = __version__ + _git_version()
# Set a null handler on the root logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
_logger_fmt = '%(asctime)s [%(levelname)s] %(caller)s %(message)s'
_logger_date_fmt = '%H:%M:%S'
class _Formatter(logging.Formatter):
def format(self, record):
# Only keep the first character in the level name.
record.levelname = record.levelname[0]
filename = op.splitext(op.basename(record.pathname))[0]
record.caller = '{:s}:{:d}'.format(filename, record.lineno).ljust(20)
return super(_Formatter, self).format(record)
def add_default_handler(level='INFO'):
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = _Formatter(fmt=_logger_fmt,
datefmt=_logger_date_fmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
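# e.g. add_default_handler('DEBUG') attaches a stderr handler whose lines
# look roughly like '12:34:56 [D] module:42            message'
# (timestamp, first letter of the level, caller left-justified to 20
# characters; the exact spacing here is illustrative).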
DEBUG = False
if '--debug' in sys.argv: # pragma: no cover
DEBUG = True
sys.argv.remove('--debug')
PDB = False
if '--pdb' in sys.argv: # pragma: no cover
PDB = True
sys.argv.remove('--pdb')
IPYTHON = False
if '--ipython' in sys.argv: # pragma: no cover
IPYTHON = True
sys.argv.remove('--ipython')
# Add `profile` in the builtins.
if '--lprof' in sys.argv or '--prof' in sys.argv: # pragma: no cover
_enable_profiler('--lprof' in sys.argv)
if '--prof' in sys.argv:
sys.argv.remove('--prof')
if '--lprof' in sys.argv:
sys.argv.remove('--lprof')
def test(): # pragma: no cover
"""Run the full testing suite of phy."""
import pytest
pytest.main()
|
{
"content_hash": "c6f96f0e130c98c9b80fe76ba6afd8ad",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 27.384615384615383,
"alnum_prop": 0.5758426966292135,
"repo_name": "rossant/phy",
"id": "0890d54630703d76e6034502ac0c3b33fe6ba87c",
"size": "2532",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "phy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "32"
},
{
"name": "CSS",
"bytes": "1171"
},
{
"name": "GLSL",
"bytes": "6782"
},
{
"name": "JavaScript",
"bytes": "9051"
},
{
"name": "Makefile",
"bytes": "499"
},
{
"name": "Python",
"bytes": "562738"
},
{
"name": "Shell",
"bytes": "218"
}
],
"symlink_target": ""
}
|
"""The tests for the demo weather component."""
from homeassistant.components import weather
from homeassistant.components.weather import (
ATTR_FORECAST,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_OZONE,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
)
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
async def test_attributes(hass):
"""Test weather attributes."""
assert await async_setup_component(
hass, weather.DOMAIN, {"weather": {"platform": "demo"}}
)
hass.config.units = METRIC_SYSTEM
await hass.async_block_till_done()
state = hass.states.get("weather.demo_weather_south")
assert state is not None
assert state.state == "sunny"
data = state.attributes
assert data.get(ATTR_WEATHER_TEMPERATURE) == 21.6
assert data.get(ATTR_WEATHER_HUMIDITY) == 92
assert data.get(ATTR_WEATHER_PRESSURE) == 1099
assert data.get(ATTR_WEATHER_WIND_SPEED) == 1.8 # 0.5 m/s -> km/h
assert data.get(ATTR_WEATHER_WIND_BEARING) is None
assert data.get(ATTR_WEATHER_OZONE) is None
assert data.get(ATTR_ATTRIBUTION) == "Powered by Home Assistant"
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_CONDITION) == "rainy"
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_PRECIPITATION) == 1
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_PRECIPITATION_PROBABILITY) == 60
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_TEMP) == 22
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_TEMP_LOW) == 15
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_CONDITION) == "fog"
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_PRECIPITATION) == 0.2
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_TEMP) == 21
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_TEMP_LOW) == 12
assert (
data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_PRECIPITATION_PROBABILITY) == 100
)
assert len(data.get(ATTR_FORECAST)) == 7
|
{
"content_hash": "e7afa43a01c46cf8f7e2846bf45980b1",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 88,
"avg_line_length": 41.49090909090909,
"alnum_prop": 0.71034180543383,
"repo_name": "w1ll1am23/home-assistant",
"id": "8c93219f8e69ad16af1a6845e63ebfef03c99a03",
"size": "2282",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/demo/test_weather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
Presence analyzer unit tests.
"""
import os.path
import json
import datetime
import unittest
from presence_analyzer import main, views, utils
TEST_DATA_CSV = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'
)
# pylint: disable=maybe-no-member, too-many-public-methods
class PresenceAnalyzerViewsTestCase(unittest.TestCase):
"""
Views tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
self.client = main.app.test_client()
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_mainpage(self):
"""
Test main page redirect.
"""
resp = self.client.get('/')
self.assertEqual(resp.status_code, 302)
assert resp.headers['Location'].endswith('/presence_weekday/')
def test_api_users(self):
"""
Test users listing.
"""
resp = self.client.get('/api/v1/users')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 2)
self.assertDictEqual(data[0], {u'user_id': 10, u'name': u'User 10'})
def test_api_mean_time_weekday_404(self):
"""
Test response of mean presence time for a nonexistent user.
"""
resp = self.client.get('/api/v1/mean_time_weekday/0')
self.assertEqual(resp.status_code, 404)
def test_api_mean_time_weekday(self):
"""
Test mean presence time of given user grouped by weekday.
"""
resp = self.client.get('/api/v1/mean_time_weekday/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 7)
self.assertEqual(
data,
[
[u'Mon', 0],
[u'Tue', 30047.0],
[u'Wed', 24465.0],
[u'Thu', 23705.0],
[u'Fri', 0],
[u'Sat', 0],
[u'Sun', 0]
]
)
def test_api_presence_weekday_404(self):
"""
Test response of total presence time for a nonexistent user.
"""
resp = self.client.get('/api/v1/presence_weekday/0')
self.assertEqual(resp.status_code, 404)
def test_api_presence_weekday(self):
"""
Test total presence time of given user grouped by weekday.
"""
resp = self.client.get('/api/v1/presence_weekday/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 8)
self.assertEqual(
data,
[
[u'Weekday', u'Presence (s)'],
[u'Mon', 0],
[u'Tue', 30047],
[u'Wed', 24465],
[u'Thu', 23705],
[u'Fri', 0],
[u'Sat', 0],
[u'Sun', 0]
]
)
def test_api_presence_start_end_404(self):
"""
Test timespans for nonexistent employee.
"""
resp = self.client.get('/api/v1/presence_start_end/0')
self.assertEqual(resp.status_code, 404)
def test_api_presence_start_end(self):
"""
Test timespans when the employee is most often present in the office.
"""
resp = self.client.get('/api/v1/presence_start_end/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(len(data), 5)
self.assertItemsEqual(
data,
[
{u'start': 0.0, u'end': 0.0},
{u'start': 34745000.0, u'end': 64792000.0},
{u'start': 33592000.0, u'end': 58057000.0},
{u'start': 38926000.0, u'end': 62631000.0},
{u'start': 0.0, u'end': 0.0}
]
)
class PresenceAnalyzerUtilsTestCase(unittest.TestCase):
"""
Utility functions tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_get_data(self):
"""
Test parsing of CSV file.
"""
data = utils.get_data()
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys(), [10, 11])
sample_date = datetime.date(2013, 9, 10)
self.assertIn(sample_date, data[10])
self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])
self.assertEqual(
data[10][sample_date]['start'],
datetime.time(9, 39, 5)
)
def test_seconds_since_midnight(self):
"""
Test calculating amount of seconds since midnight.
"""
test_time = datetime.datetime.strptime('01:06:06', '%H:%M:%S').time()
self.assertEqual(utils.seconds_since_midnight(test_time), 3966)
def test_interval(self):
"""
        Test calculating interval in seconds between two datetime.time objects.
"""
start_time = datetime.datetime.strptime('01:06:00', '%H:%M:%S').time()
end_time = datetime.datetime.strptime('01:07:06', '%H:%M:%S').time()
self.assertEqual(utils.interval(start_time, end_time), 66)
def test_mean(self):
"""
Tests calculating arithmetic mean. Returns zero for empty lists.
"""
self.assertEqual(utils.mean([1, 2, 3, 3]), 2.25)
self.assertEqual(utils.mean([]), 0.0)
def test_group_by_weekday(self):
"""
Test grouping presence entries by weekday.
"""
sample_data = {
datetime.date(2015, 4, 7): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2015, 4, 8): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(9, 0, 0),
},
}
grouping = utils.group_by_weekday(sample_data)
self.assertEqual(grouping, [[], [30600], [1800], [], [], [], []])
def test_get_mean_by_weekday(self):
"""
Test grouping mean seconds since midnight
(either start or end) by weekday.
"""
sample_data = {
datetime.date(2015, 4, 7): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2015, 4, 8): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(9, 0, 0),
},
}
res = utils.get_mean_by_weekday(sample_data, 'start')
self.assertEqual(res, [0, 32400.0, 30600.0, 0, 0, 0, 0])
res = utils.get_mean_by_weekday(sample_data, 'end')
self.assertEqual(res, [0, 63000.0, 32400.0, 0, 0, 0, 0])
def suite():
"""
Default test suite.
"""
base_suite = unittest.TestSuite()
base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))
base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))
return base_suite
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "ba203a205486d658c0b42a47affe5e02",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 31.15918367346939,
"alnum_prop": 0.5331412103746398,
"repo_name": "stxnext-kindergarten/presence-analyzer-rbast",
"id": "f6e2c5910d1ac0de323a08856264c19ea0fa8d77",
"size": "7658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/presence_analyzer/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "921"
},
{
"name": "HTML",
"bytes": "6693"
},
{
"name": "Python",
"bytes": "23819"
}
],
"symlink_target": ""
}
|
"""
User commands for multi commands example.
"""
from argparseinator import class_args, arg, ArgParseInated
@class_args
class User(ArgParseInated):
"""
User commands.
"""
__cmd_name__ = "user"
@arg()
def files(self):
"""
List files.
"""
self.writeln("Listing files...")
# listing files code.
return 0, "Files listed\n"
@arg('name', help="Name to greet")
def greet(self, name):
"""
Greeting command.
"""
self.writeln('Greeting person...')
self.writeln('Ciao', name)
return 0, "person greeted\n"
|
{
"content_hash": "10bddeb1ded35a81e66cab8aa3d59147",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 58,
"avg_line_length": 21.033333333333335,
"alnum_prop": 0.5388272583201268,
"repo_name": "ellethee/argparseinator",
"id": "60f58335a3cccd0103c526f5981e27b0b516eb06",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/commands/user.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52165"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, abort
import json
from ReverseProxied import ReverseProxied
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
with open("intervals.json") as b:
data = json.load(b)
def intersects(interval, min_age, max_age):
    """Return True if [min_age, max_age] overlaps the interval's [lag, eag] span."""
    # either query endpoint falls inside the interval...
    if interval["lag"] <= min_age <= interval["eag"]:
        return True
    if interval["lag"] <= max_age <= interval["eag"]:
        return True
    # ...or the query range fully contains the interval
    return interval["lag"] >= min_age and max_age >= interval["eag"]
def resolve_geologic_time_intersects(min_age, max_age):
    z = [interval for interval in data["records"] if intersects(interval, min_age, max_age)]
    # two stable sorts: order primarily by "lvl", breaking ties by "lag"
    z.sort(key=lambda x: x["lag"])
    z.sort(key=lambda x: x["lvl"])
    return json.dumps(z)
def within(interval, min_age, max_age):
return interval["lag"] <= min_age <= max_age <= interval["eag"]
def resolve_geologic_time_within(min_age, max_age):
z = [interval for interval in data["records"] if within(interval, min_age, max_age)]
    if not z:
        # no interval fully contains the range; serialize the empty list so
        # the Flask view returns a valid JSON response instead of a bare list
        return json.dumps(z)
z.sort(key=lambda x: x["lvl"], reverse=True)
return json.dumps(z[0])
@app.route("/")
def hello():
return json.dumps(data)
def process_inputs():
min_age = request.args.get('min', None, type=float)
max_age = request.args.get('max', None, type=float)
if min_age is None or max_age is None:
abort(400)
return min_age, max_age
@app.route("/resolve-within", methods=['GET'])
def resolve_within():
min_age, max_age = process_inputs()
return resolve_geologic_time_within(min_age, max_age)
@app.route("/resolve-intersects", methods=['GET'])
def resolve_intersects():
min_age, max_age = process_inputs()
return resolve_geologic_time_intersects(min_age, max_age)
if __name__ == "__main__":
app.run()
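# Example requests (a sketch; assumes the app is served locally):
#
#   GET /resolve-intersects?min=10&max=20
#     -> JSON array of all intervals overlapping [10, 20], ordered by "lvl"
#   GET /resolve-within?min=10&max=20
#     -> the highest-"lvl" interval that fully contains [10, 20]
#   GET /resolve-within          (missing params) -> 400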
|
{
"content_hash": "a09b8f4f5d2f326bb76103cc8d5a4712",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 92,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6425438596491229,
"repo_name": "tetherless-world/dtdi-geologic-time-resolver",
"id": "f3bbedea60f7d9d40338c8346619c48d0116fb68",
"size": "1824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1705"
}
],
"symlink_target": ""
}
|
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
## PyQt and PySide differ in their bindings of functions using the
# QVariant type. PySide doesn't expose QVariant and instead uses
# the standard python types, whereas PyQt binds and uses the QVariant type.
# This class provides functions to help with writing code which works
# with either set of bindings.
class _Variant( object ) :
## Returns value converted to a form which can be passed to a function
# expecting a QVariant.
@staticmethod
def toVariant( value ) :
# PyQt uses QVariant
if hasattr( QtCore, "QVariant" ) :
if value is not None :
return QtCore.QVariant( value )
else :
return QtCore.QVariant()
# whereas PySide just uses python values
return value
## Converts variant to a standard python object.
@staticmethod
def fromVariant( variant ) :
if hasattr( QtCore, "QVariant" ) and isinstance( variant, QtCore.QVariant ) :
t = variant.type()
if t == QtCore.QVariant.String :
return str( variant.toString() )
elif t == QtCore.QVariant.Double :
return variant.toDouble()[0]
elif t == QtCore.QVariant.Int :
return variant.toInt()[0]
elif t == QtCore.QVariant.Bool :
return variant.toBool()
elif t == QtCore.QVariant.Invalid :
return None
else :
raise ValueError( "Unsupported QVariant type \"%s\"" % variant.typeName() )
else :
return variant
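# Example round-trip (a minimal sketch; the QVariant branch is only taken
# under PyQt, while PySide passes plain python values through unchanged) :
#
# v = _Variant.toVariant( 10 )
# assert _Variant.fromVariant( v ) == 10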
|
{
"content_hash": "666bd585c61f31b9b9c97bdd1c06914e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 30.391304347826086,
"alnum_prop": 0.7031473533619457,
"repo_name": "chippey/gaffer",
"id": "c7acea0628264998bef47580f2f2f77478c11fcf",
"size": "3258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/_Variant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2258"
},
{
"name": "C++",
"bytes": "5420141"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Objective-C",
"bytes": "2228"
},
{
"name": "Python",
"bytes": "5348174"
},
{
"name": "Shell",
"bytes": "8370"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
}
|
import curses
from curses import panel
def run_menu(menu_items,x=0,y=0,name=None,border=True):
"""
Display menu at x,y on a newly created window
params:
x(int) - x coordinate to create window
y(int) - y coordinate to create window
name(str) - optional title for menu
border(bool) - display border around the menu
"""
length = max(len(s) for s in menu_items) + 4
height = len(menu_items)
if name:
height += 2
current_opt = 0
selected_opt = -1
w = curses.newwin(height+2,length,y,x)
w.keypad(1)
w.refresh()
while selected_opt == -1:
#reverse and bold the selected option
display_attr=[curses.A_NORMAL]*height
display_attr[current_opt]=curses.A_REVERSE+curses.A_BOLD
line = 1
if name:
w.addstr(line,2,name,curses.A_BOLD+curses.A_UNDERLINE)
line += 2
for i,v in enumerate(menu_items):
w.addstr(line,2,str(v),display_attr[i])
w.clrtoeol()
line += 1
if border:
w.border()
w.refresh()
x = w.getch()
if x == curses.KEY_DOWN:
current_opt += 1
elif x == curses.KEY_UP:
current_opt -= 1
#validation can be done by CR or space bar
elif x == ord('\n') or x == 32 :
selected_opt = current_opt
        #in case key pressed is a number (limit to valid option indexes)
        elif x in range(ord('0'), ord('0') + len(menu_items)):
            current_opt = x - ord('0')
            selected_opt = current_opt
if current_opt > len(menu_items) - 1:
current_opt = len(menu_items) -1
elif current_opt < 0:
current_opt = 0
return selected_opt
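# Example usage (a sketch; curses.wrapper performs the screen setup that
# run_menu assumes has already happened):
#
# def main(stdscr):
#     return run_menu(["start", "stop", "quit"], x=2, y=2, name="statsquid")
#
# selected = curses.wrapper(main)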
|
{
"content_hash": "dc8f42cb22fba49d8035a81ee3fcd1e3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 66,
"avg_line_length": 26.21212121212121,
"alnum_prop": 0.545664739884393,
"repo_name": "bcicen/statsquid",
"id": "7fa595b8ea371b7d6b3c2bb95d05f79a03da4bdc",
"size": "1730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsquid/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25463"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from user_profile.forms import *
from django.template import RequestContext
from user_profile.models import UserProfile
from django.http import HttpResponse
from django.contrib.auth.models import User
@login_required
def view_profile(request, username=None):
if username is not None:
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
return HttpResponseRedirect('/home/')
else:
user = request.user
if user == request.user:
selfProfile = True
else:
selfProfile = False
try:
user_profile = user.profile
    except UserProfile.DoesNotExist:
return create_profile(request)
variables = RequestContext(request, {'userprofile' : user_profile, 'selfProfile' : selfProfile})
return render_to_response('user_profile/profile.html', variables, )
@csrf_protect
@login_required
def create_profile(request):
try:
profile = request.user.profile
    except UserProfile.DoesNotExist:
profile = UserProfile(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST)
if form.is_valid():
user_profile = profile
user_profile.user = request.user
user_profile.fullName = form.cleaned_data['fullName']
user_profile.phoneNumber = form.cleaned_data['phoneNumber']
user_profile.website = form.cleaned_data['website']
user_profile.save()
return HttpResponseRedirect('../view')
else:
form = ProfileForm(initial={'fullName':profile.fullName, 'phoneNumber':profile.phoneNumber, 'website':profile.website})
variables = RequestContext(request, {'form' : form})
return render_to_response('user_profile/create_profile.html', variables, )
|
{
"content_hash": "d6693f6d7a8710c1cc97f7a5b51af867",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 127,
"avg_line_length": 38.092592592592595,
"alnum_prop": 0.6937287311618863,
"repo_name": "bitsnbytes7c8/django-site",
"id": "bd622610c7ca4df4d3182257d4aa6389768fe2be",
"size": "2057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_profile/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "13058"
}
],
"symlink_target": ""
}
|
import pytest
from indico.modules.events.payment.models.transactions import PaymentTransaction, TransactionStatus
@pytest.fixture
def create_transaction():
"""Return a callable which lets you create transactions."""
def _create_transaction(status, **params):
params.setdefault('amount', 10)
params.setdefault('currency', 'USD')
params.setdefault('provider', '_manual')
params.setdefault('data', {})
return PaymentTransaction(status=status, **params)
return _create_transaction
@pytest.fixture
def dummy_transaction(create_transaction):
"""Return a dummy successful transaction."""
return create_transaction(status=TransactionStatus.successful)
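# Example test consuming the fixtures above (a sketch; the assertions mirror
# the defaults set in create_transaction):
#
# def test_dummy_transaction_defaults(dummy_transaction):
#     assert dummy_transaction.status == TransactionStatus.successful
#     assert dummy_transaction.amount == 10
#     assert dummy_transaction.currency == 'USD'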
|
{
"content_hash": "7630cae2a501c010efb3897b837f76ce",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 99,
"avg_line_length": 30.91304347826087,
"alnum_prop": 0.7243319268635724,
"repo_name": "DirkHoffmann/indico",
"id": "04c80f6cb698af265b54fd3876b8240b966cadc8",
"size": "925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/payment/testing/fixtures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from .stochastic_gradient import (SGDRegressor, ElasticNet, Lasso, Ridge,
LinearRegression, L1Regression,
SGDClassifier, LogisticRegression,
LinearSVC, SGDMultiClassifier,
SoftmaxRegression)
__all__ = ['SGDRegressor',
'ElasticNet',
'Lasso',
'Ridge',
'LinearRegression',
'L1Regression',
'SGDClassifier',
'LogisticRegression',
'LinearSVC',
'SGDMultiClassifier',
'SoftmaxRegression']
|
{
"content_hash": "0dbc904504d118e3c4e1ef1eaae324fc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 36.94117647058823,
"alnum_prop": 0.47929936305732485,
"repo_name": "hduongtrong/hyperemble",
"id": "167601a85072432882b75c950da47cb75a8effe8",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperemble/linear_model/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "41885"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.project_type'
db.add_column(u'ide_project', 'project_type',
self.gf('django.db.models.fields.CharField')(default='native', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.project_type'
db.delete_column(u'ide_project', 'project_type')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ide.buildresult': {
'Meta': {'object_name': 'BuildResult'},
'binary_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['ide.Project']"}),
'resource_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'total_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'b84e091c-1712-4e5d-973b-e37323897fd8'", 'max_length': '36'})
},
'ide.project': {
'Meta': {'unique_together': "(('owner', 'name'),)", 'object_name': 'Project'},
'app_capabilities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'app_company_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_is_watchface': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'app_jshint': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'app_keys': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'app_long_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_uuid': ('django.db.models.fields.CharField', [], {'default': "'4b4975dc-0fca-43d1-b212-dc2f4209c8b2'", 'max_length': '36', 'null': 'True', 'blank': 'True'}),
'app_version_code': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'app_version_label': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_branch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'github_hook_build': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'github_hook_uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'github_last_commit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'optimisation': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'project_type': ('django.db.models.fields.CharField', [], {'default': "'native'", 'max_length': '10'}),
'sdk_version': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '10'}),
'version_def_name': ('django.db.models.fields.CharField', [], {'default': "'APP_RESOURCES'", 'max_length': '50'})
},
'ide.resourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_menu_icon': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['ide.Project']"})
},
'ide.resourceidentifier': {
'Meta': {'unique_together': "(('resource_file', 'resource_id'),)", 'object_name': 'ResourceIdentifier'},
'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': "orm['ide.ResourceFile']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tracking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ide.sourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'SourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': "orm['ide.Project']"})
},
'ide.templateproject': {
'Meta': {'object_name': 'TemplateProject', '_ormbases': ['ide.Project']},
u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'ide.usergithub': {
'Meta': {'object_name': 'UserGithub'},
'avatar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'github'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'ide.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'autocomplete': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'keybinds': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'tab_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'monokai'", 'max_length': '50'}),
'use_spaces': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['ide']
|
{
"content_hash": "387620550d9e0442cbba158cb15ee8ac",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 195,
"avg_line_length": 81.31724137931035,
"alnum_prop": 0.5507590535153931,
"repo_name": "math-foo/cloudpebble",
"id": "1fc7e66250250f57ff40abe54857a439cd14942e",
"size": "11815",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ide/migrations/0018_auto__add_field_project_project_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62120"
},
{
"name": "HTML",
"bytes": "94707"
},
{
"name": "JavaScript",
"bytes": "380016"
},
{
"name": "Python",
"bytes": "573889"
},
{
"name": "Shell",
"bytes": "7204"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from itertools import chain, cycle
from django.shortcuts import render_to_response
from django.utils.translation import ugettext, ugettext_lazy as _
from django.http import HttpResponseRedirect, Http404
from django import forms
from django.views.decorators.http import require_POST
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from django.template import RequestContext
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.contrib.formtools.wizard import FormWizard
from django.views.decorators.csrf import csrf_protect
from ella.core.cache import get_cached_object_or_404
from ella.core.views import get_templates_from_publishable
from ella_polls.models import Poll, Contestant, Survey
from ella_polls.conf import polls_settings
def get_next_url(request):
"""
Return URL for redirection
Try to get it from:
* POST param 'next'
* HTTP_REFERER
"""
if 'next' in request.POST: # and request.POST['next'].startswith('/'):
return request.POST['next']
else:
return request.META.get('HTTP_REFERER', '/')
def poll_check_vote(request, poll):
"""
To avoid multiple poll votes of the same user.
Uses sessions (authenticatedd users) or cookies (annonymous users) at first.
Then it looks it up in Votes.
Return choices:
* User not yet voted
* User just voted
* User allready voted
* User try to vote with no choice (usefull to display a message in a Poll box)
"""
sess_jv = request.session.get(polls_settings.POLL_JUST_VOTED_COOKIE_NAME, [])
# removing just voted info from session
if poll.id in sess_jv:
del request.session[polls_settings.POLL_JUST_VOTED_COOKIE_NAME]
        # TODO - delete only this poll's entry, not the entire list!
return polls_settings.USER_JUST_VOTED
# removing no vote info from session
sess_nv = request.session.get(polls_settings.POLL_NO_CHOICE_COOKIE_NAME, [])
if poll.id in sess_nv:
del request.session[polls_settings.POLL_NO_CHOICE_COOKIE_NAME]
        # TODO - delete only this poll's entry, not the entire list!
return polls_settings.USER_NO_CHOICE
# authenticated user - check session
if request.user.is_authenticated():
sess = request.session.get(polls_settings.POLL_COOKIE_NAME, [])
if poll.id in sess:
return polls_settings.USER_ALLREADY_VOTED
# otherwise check Vote object - just for sure
if poll.check_vote_by_user(request.user):
return polls_settings.USER_ALLREADY_VOTED
return polls_settings.USER_NOT_YET_VOTED
# anonymous - check cookie
else:
cook = request.COOKIES.get(polls_settings.POLL_COOKIE_NAME, '').split(',')
if str(poll.id) in cook:
return polls_settings.USER_ALLREADY_VOTED
ip_address = request.META['REMOTE_ADDR']
# otherwise check Vote object - just for sure
if poll.check_vote_by_ip_address(ip_address):
return polls_settings.USER_ALLREADY_VOTED
return polls_settings.USER_NOT_YET_VOTED
def survey_check_vote(request, survey):
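    """Same multiple-vote check as poll_check_vote, but for surveys."""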
sess_jv = request.session.get(polls_settings.SURVEY_JUST_VOTED_COOKIE_NAME, [])
# removing just voted info from session
if survey.id in sess_jv:
del request.session[polls_settings.SURVEY_JUST_VOTED_COOKIE_NAME]
        # TODO - delete only this survey's entry, not the entire list!
return polls_settings.USER_JUST_VOTED
# removing no vote info from session
sess_nv = request.session.get(polls_settings.SURVEY_NO_CHOICE_COOKIE_NAME, [])
if survey.id in sess_nv:
del request.session[polls_settings.SURVEY_NO_CHOICE_COOKIE_NAME]
        # TODO - delete only this survey's entry, not the entire list!
return polls_settings.USER_NO_CHOICE
# authenticated user - check session
if request.user.is_authenticated():
sess = request.session.get(polls_settings.SURVEY_COOKIE_NAME, [])
if survey.id in sess:
return polls_settings.USER_ALLREADY_VOTED
# otherwise check Vote object - just for sure
if survey.check_vote_by_user(request.user):
return polls_settings.USER_ALLREADY_VOTED
return polls_settings.USER_NOT_YET_VOTED
# anonymous - check cookie
else:
cook = request.COOKIES.get(polls_settings.SURVEY_COOKIE_NAME, '').split(',')
if str(survey.id) in cook:
return polls_settings.USER_ALLREADY_VOTED
ip_address = request.META['REMOTE_ADDR']
# otherwise check Vote object - just for sure
if survey.check_vote_by_ip_address(ip_address):
return polls_settings.USER_ALLREADY_VOTED
return polls_settings.USER_NOT_YET_VOTED
@csrf_protect
@require_POST
@transaction.commit_on_success
def poll_vote(request, poll_id):
poll_ct = ContentType.objects.get_for_model(Poll)
poll = get_cached_object_or_404(poll_ct, pk=poll_id)
url = get_next_url(request)
# activity check
if not poll.is_active():
return HttpResponseRedirect(url)
# vote check
if poll_check_vote(request, poll) != polls_settings.USER_NOT_YET_VOTED:
return HttpResponseRedirect(url)
form = QuestionForm(poll.question)(request.POST)
# invalid input
if not form.is_valid():
# no choice selected error - via session
sess_nv = request.session.get(polls_settings.POLL_NO_CHOICE_COOKIE_NAME, [])
sess_nv.append(poll.id)
request.session[polls_settings.POLL_NO_CHOICE_COOKIE_NAME] = sess_nv
return HttpResponseRedirect(url)
# vote save
kwa = {}
if request.user.is_authenticated():
kwa['user'] = request.user
kwa['ip_address'] = request.META['REMOTE_ADDR']
poll.vote(form.cleaned_data['choice'], **kwa)
# just voted info session update
sess_jv = request.session.get(polls_settings.POLL_JUST_VOTED_COOKIE_NAME, [])
sess_jv.append(poll.id)
request.session[polls_settings.POLL_JUST_VOTED_COOKIE_NAME] = sess_jv
response = HttpResponseRedirect(url)
# authenticated user vote - session update
if request.user.is_authenticated():
sess = request.session.get(polls_settings.POLL_COOKIE_NAME, [])
sess.append(poll.id)
request.session[polls_settings.POLL_COOKIE_NAME] = sess
    # anonymous user vote - cookies update
else:
cook = request.COOKIES.get(polls_settings.POLL_COOKIE_NAME, '').split(',')
if len(cook) > polls_settings.POLL_MAX_COOKIE_LENGTH:
cook = cook[1:]
cook.append(str(poll.id))
expires = datetime.strftime(datetime.utcnow() + \
timedelta(seconds=polls_settings.POLL_MAX_COOKIE_AGE),
"%a, %d-%b-%Y %H:%M:%S GMT")
response.set_cookie(
polls_settings.POLL_COOKIE_NAME,
value=','.join(cook),
max_age=polls_settings.POLL_MAX_COOKIE_AGE,
expires=expires,
path='/',
domain=Site.objects.get_current().domain,
secure=None
)
return response
@csrf_protect
@require_POST
@transaction.commit_on_success
def survey_vote(request, survey_id):
survey_ct = ContentType.objects.get_for_model(Survey)
survey = get_cached_object_or_404(survey_ct, pk=survey_id)
url = get_next_url(request)
# activity check
if not survey.current_activity_state == polls_settings.ACTIVITY_ACTIVE:
return HttpResponseRedirect(url)
# vote check
if survey_check_vote(request, survey) != polls_settings.USER_NOT_YET_VOTED:
return HttpResponseRedirect(url)
form = QuestionForm(survey)(request.POST)
# invalid input
if not form.is_valid():
# no choice selected error - via session
sess_nv = request.session.get(polls_settings.SURVEY_NO_CHOICE_COOKIE_NAME, [])
sess_nv.append(survey.id)
request.session[polls_settings.SURVEY_NO_CHOICE_COOKIE_NAME] = sess_nv
return HttpResponseRedirect(url)
# vote save
kwa = {}
if request.user.is_authenticated():
kwa['user'] = request.user
kwa['ip_address'] = request.META['REMOTE_ADDR']
survey.vote(form.cleaned_data['choice'], **kwa)
# just voted info session update
sess_jv = request.session.get(polls_settings.SURVEY_JUST_VOTED_COOKIE_NAME, [])
sess_jv.append(survey.id)
request.session[polls_settings.SURVEY_JUST_VOTED_COOKIE_NAME] = sess_jv
response = HttpResponseRedirect(url)
# authenticated user vote - session update
if request.user.is_authenticated():
sess = request.session.get(polls_settings.SURVEY_COOKIE_NAME, [])
sess.append(survey.id)
request.session[polls_settings.SURVEY_COOKIE_NAME] = sess
    # anonymous user vote - cookies update
else:
cook = request.COOKIES.get(polls_settings.SURVEY_COOKIE_NAME, '').split(',')
if len(cook) > polls_settings.SURVEY_MAX_COOKIE_LENGTH:
cook = cook[1:]
cook.append(str(survey.id))
expires = datetime.strftime(datetime.utcnow() + timedelta(seconds=polls_settings.SURVEY_MAX_COOKIE_AGE), "%a, %d-%b-%Y %H:%M:%S GMT")
response.set_cookie(
polls_settings.SURVEY_COOKIE_NAME,
value=','.join(cook),
max_age=polls_settings.SURVEY_MAX_COOKIE_AGE,
expires=expires,
path='/',
domain=Site.objects.get_current().domain,
secure=None
)
return response
@csrf_protect
@transaction.commit_on_success
def contest_vote(request, context):
contest = context['object']
forms = []
forms_are_valid = True
# question forms
for question in contest.questions:
form = QuestionForm(question)(request.POST or None, prefix=str(question.id))
if not form.is_valid():
forms_are_valid = False
forms.append((question, form))
# contestant form
initial = {}
if request.user.is_authenticated():
initial['name'] = request.user.first_name
initial['surname'] = request.user.last_name
initial['email'] = request.user.email
contestant_form = ContestantForm(request.POST or None, initial=initial)
if not contestant_form.is_valid():
forms_are_valid = False
# saving contestant
if forms_are_valid and contest.is_active():
return contest_finish(request, context, forms, contestant_form)
context.update({
'forms' : forms,
'contestant_form' : contestant_form,
'activity_not_yet_active' : polls_settings.ACTIVITY_NOT_YET_ACTIVE,
'activity_active' : polls_settings.ACTIVITY_ACTIVE,
'activity_closed' : polls_settings.ACTIVITY_CLOSED
})
return render_to_response(
get_templates_from_publishable('form.html', context['object']),
context,
context_instance=RequestContext(request)
)
class MyCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
str_values = set([force_unicode(v) for v in value]) # Normalize to strings.
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_unicode(option_value)
yield mark_safe(u'<label>%s %s</label>' % (cb.render(name, option_value), force_unicode(option_label)))
class MyRadioSelect(forms.RadioSelect):
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices)
def fudge_choice_percentages(choices):
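    """Adjust rounded per-choice percentages so they sum to exactly 100,
    cycling over the choices and nudging each by one point until the
    rounding error is absorbed."""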
percent_sum = 0
choice_list = list(choices)
for choice in choice_list:
choice.percentage = choice.get_percentage()
percent_sum += choice.percentage
choice_iter = cycle(choice_list)
step = cmp(100, percent_sum)
while percent_sum != 100:
choice = choice_iter.next()
choice.percentage += step
percent_sum += step
return choice_list
def QuestionForm(question):
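    """Build and return a Form class for a single question; callers bind it
    to request data themselves, e.g. QuestionForm(poll.question)(request.POST)."""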
if question.allow_multiple:
choice_field = forms.ModelMultipleChoiceField(
queryset=question.choices,
widget=MyCheckboxSelectMultiple,
required=not question.allow_no_choice
)
else:
choice_field = forms.ModelChoiceField(
queryset=question.choices,
widget=MyRadioSelect,
empty_label=None,
required=not question.allow_no_choice
)
class _QuestionForm(forms.Form):
"""
Question form with all its choices
"""
choice = choice_field
def choices(self):
field = self['choice']
# TODO: move choice percentage to question and use it here!!
choice_list = fudge_choice_percentages(field.field.queryset)
for choice, input in zip(choice_list, field.as_widget(field.field.widget)):
yield choice, input
return _QuestionForm
class ContestantForm(forms.Form):
name = Contestant._meta.get_field('name').formfield()
surname = Contestant._meta.get_field('surname').formfield()
email = Contestant._meta.get_field('email').formfield()
phonenumber = Contestant._meta.get_field('phonenumber').formfield()
address = Contestant._meta.get_field('address').formfield()
count_guess = Contestant._meta.get_field('count_guess').formfield()
def clean(self):
# TODO - antispam
return self.cleaned_data
@transaction.commit_on_success
def contest_finish(request, context, qforms, contestant_form):
contest = context['object']
email = contestant_form.cleaned_data['email']
if Contestant.objects.filter(email=email, contest=contest).count() > 0:
context.update({
'duplicate' : True,
'forms' : qforms,
'contestant_form' : contestant_form,
})
return render_to_response(
get_templates_from_publishable('form.html', context['object']),
context,
context_instance=RequestContext(request)
)
choices = '|'.join(
'%d:%s' % (
question.id,
question.allow_multiple and ','.join(str(c.id) for c in sorted(f.cleaned_data['choice'], key=lambda ch: ch.id)) or f.cleaned_data['choice'].id)
for question, f in sorted(qforms, key=lambda q: q[0].id)
)
c = Contestant(
contest=contest,
choices=choices,
**contestant_form.cleaned_data
)
if request.user.is_authenticated():
c.user = request.user
c.save()
return HttpResponseRedirect(contest.get_absolute_url() + slugify(ugettext('result')) + u'/')
def contest_result(request, context):
return render_to_response(
get_templates_from_publishable('result.html', context['object']),
context,
context_instance=RequestContext(request)
)
def contest_conditions(request, context):
return render_to_response(
get_templates_from_publishable('conditions.html', context['object']),
context,
context_instance=RequestContext(request)
)
RESULT_FIELD = 'results'
class QuizWizard(FormWizard):
def __init__(self, quiz):
form_list = [ QuestionForm(q) for q in quiz.questions ]
super(QuizWizard, self).__init__(form_list)
self.quiz = quiz
self.extra_context = {'object' : quiz, 'question' : quiz.questions[0], 'category' : quiz.category, }
def get_template(self, step):
return get_templates_from_publishable('step.html', self.extra_context['object'])
def process_step(self, request, form, step):
if (step + 1) < len(self.form_list):
self.extra_context['question'] = self.quiz.questions[step + 1]
def done(self, request, form_list):
points = 0
results = []
for question, f in zip(self.quiz.questions, form_list):
if not question.allow_no_choice:
if question.allow_multiple:
points += sum(c.points for c in f.cleaned_data['choice'])
results.append('%d:%s' % (question.id, ','.join(str(c.id) for c in f.cleaned_data['choice'])))
else:
points += f.cleaned_data['choice'].points
results.append('%d:%s' % (question.id, f.cleaned_data['choice'].id))
results = '|'.join(results)
result = self.quiz.get_result(points)
result.count += 1
result.save()
self.extra_context.update(
{
'result' : result,
'points' : points,
'results' : results,
'result_field': RESULT_FIELD,
'result_action' : self.quiz.get_absolute_url() + slugify(_('results')) + '/'
}
)
return render_to_response(
get_templates_from_publishable('result.html', self.extra_context['object']),
self.extra_context,
context_instance=RequestContext(request)
)
def result_details(request, context):
quiz = context['object']
if not quiz.has_correct_answers:
raise Http404
results = request.GET.get(RESULT_FIELD, '').split('|')
if len(results) != len(quiz.questions):
raise Http404
questions = []
for question, q_res in zip(quiz.questions, results):
q_id, id_list = q_res.split(':')
choices = question.choices
if question.allow_multiple:
cl = set(id_list.split(','))
for ch in choices:
if str(ch.id) in cl:
ch.chosen = True
else:
for ch in choices:
if str(ch.id) == id_list:
ch.chosen = True
break
questions.append((question, choices))
context['questions'] = questions
return render_to_response(
get_templates_from_publishable('result_detail.html', context['object']),
context,
context_instance=RequestContext(request)
)
@csrf_protect
def contest(request, context):
return contest_vote(request, context)
@csrf_protect
def quiz(request, context):
quiz = context['object']
return QuizWizard(quiz)(request, extra_context=context)
|
{
"content_hash": "a57c4d5f958245271c90e982f3eade03",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 163,
"avg_line_length": 37.429980276134124,
"alnum_prop": 0.6337671918638351,
"repo_name": "ella/ella-polls",
"id": "b5dddd20b0cb97e7c43ab440a2f286ccc367ed02",
"size": "18977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ella_polls/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "120115"
}
],
"symlink_target": ""
}
|
import hail as hl
from .resources import *
from .utils import benchmark
import gzip
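# Sentinel benchmarks run fixed workloads (gunzip decompression, pure-Python
# hashing) three times each, presumably to gauge benchmark-host noise
# independently of Hail itself.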
def read_gunzip(path):
with gzip.open(path) as f:
for line in f:
pass
@benchmark(args=many_ints_table.handle('tsv'))
def sentinel_read_gunzip_1(path):
read_gunzip(path)
@benchmark(args=many_ints_table.handle('tsv'))
def sentinel_read_gunzip_2(path):
read_gunzip(path)
@benchmark(args=many_ints_table.handle('tsv'))
def sentinel_read_gunzip_3(path):
read_gunzip(path)
def iter_hash(m, n):
x = 0
for i in range(m):
y = 0
for j in range(n):
y = hash(y + j)
x += y
@benchmark()
def sentinel_cpu_hash_1():
iter_hash(10000, 25000)
@benchmark()
def sentinel_cpu_hash_2():
iter_hash(10000, 25000)
@benchmark()
def sentinel_cpu_hash_3():
iter_hash(10000, 25000)
|
{
"content_hash": "d218453d30f00597e4b0279dcfbd9d59",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 46,
"avg_line_length": 16.92,
"alnum_prop": 0.6264775413711584,
"repo_name": "hail-is/hail",
"id": "296ec4e61045ba2df88b05c0d22f7370d2c1e7f1",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "benchmark/python/benchmark_hail/run/sentinel_benchmarks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
"""
Range of durations
"""
import datetime
import pscheduler
class DurationRange():
"Range of durations"
def __init__(self, drange):
"""Construct a range from a JSON DurationRange."""
# TODO: Would be nice if this class could treat missing
# lower/upper as zero or infinity.
# TODO: Figure out why this can't just point to a DurationRange
valid, message = pscheduler.json_validate(drange, {
"type": "object",
"properties": {
"lower": {"$ref": "#/pScheduler/Duration"},
"upper": {"$ref": "#/pScheduler/Duration"}
},
"additionalProperties": False,
"required": ["lower", "upper"]
})
if not valid:
raise ValueError("Invalid duration range: %s" % message)
self.lower_str = drange['lower']
self.lower = pscheduler.iso8601_as_timedelta(self.lower_str)
self.upper_str = drange['upper']
self.upper = pscheduler.iso8601_as_timedelta(self.upper_str)
def __contains__(self, duration):
"""See if the range contains the specified duration, which can be a
timedelta or ISO8601 string."""
if type(duration) == datetime.timedelta:
test_value = duration
elif type(duration) in [str, unicode]:
test_value = pscheduler.iso8601_as_timedelta(duration)
else:
raise ValueError(
"Invalid duration; must be ISO8601 string or timedelta.")
return self.lower <= test_value <= self.upper
def contains(self, duration, invert=False):
"""Like __contains__, but can do inversion and returns a message stub
Return value is (contains, stub), where 'contains' is a boolean
and 'stub' describes why the check failed (e.g., "is not in PT1M..PT1H")
"""
in_range = duration in self
if (in_range and invert) or (not in_range and not invert):
return False, ("not %s %s..%s" %
("outside" if invert else "in",
self.lower_str, self.upper_str))
return True, None
# Test program
if __name__ == "__main__":
drange = DurationRange({
"lower": "PT10S",
"upper": "PT1M"
})
for value in ["PT1S",
datetime.timedelta(seconds=3),
"PT30S",
datetime.timedelta(seconds=45),
"PT1M",
"PT5M",
datetime.timedelta(minutes=10)
]:
result = value in drange
print value, result
for invert in [False, True]:
print "%s Invert=%s %s" % (value, invert,
drange.contains(value, invert=invert))
print
|
{
"content_hash": "d5828470a20ed154e36cc51eafb39336",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.5404829545454546,
"repo_name": "mfeit-internet2/pscheduler-dev",
"id": "9b537189051d39d540ac5b41c0746244bcd8ad5b",
"size": "2816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-pscheduler/pscheduler/pscheduler/durationrange.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "2305"
},
{
"name": "M4",
"bytes": "3960"
},
{
"name": "Makefile",
"bytes": "24167"
},
{
"name": "PLpgSQL",
"bytes": "67056"
},
{
"name": "Python",
"bytes": "416726"
},
{
"name": "Shell",
"bytes": "19957"
}
],
"symlink_target": ""
}
|
''' test_avltree.py
This Python script tests methods in avltree.py that the authors did not
leave clues for.
- insert()
- adjustBalances()
- case1()
- case2()
- case3()
'''
import unittest
import avltree
import stack
class AVLTreeTestsFromStarter(unittest.TestCase):
def setUp(self):
a = avltree.AVLNode(8, -1)
b = avltree.AVLNode(18, -1)
c = avltree.AVLNode(3)
d = avltree.AVLNode(20)
self.t = avltree.AVLTree()
self.t.root = b
b.left = a
a.left = c
b.right = d
self.t.count = 4
def test_insert_found(self):
expected = str(self.t)
self.t.insert(18)
self.assertEqual(str(self.t), expected)
def test_insert_case2(self):
# Test insert function for adjust only
self.t.insert(22)
self.assertEqual(self.t.root.balance, 0)
self.assertEqual(self.t.count, 5)
def test_insert_case1(self):
# First balance tree:
self.t.insert(22)
self.t.insert(17)
self.t.insert(19)
# Check to make sure tree is balanced
(pivot, theStack, parent, found) = self.t.search(14)
while not theStack.isEmpty():
node = theStack.pop()
self.assertEqual(node.balance, 0)
# Then test for inserting into balanced tree
self.t.insert(14)
(pivot, theStack, parent, found) = self.t.search(14)
theStack.pop()
while not theStack.isEmpty():
node = theStack.pop()
self.assertNotEqual(node.balance, 0)
def test_insert_case3_subcaseA(self):
(pivot, theStack, parent, found) = self.t.search(1)
self.t.insert(1)
self.assertEqual(pivot.balance, 0)
self.assertEqual(self.t.root.balance, -1)
def test_insert_case3_subcaseB(self):
self.t.insert(10)
self.t.insert(12)
self.assertEqual(self.t.root.balance, 0)
self.assertEqual(self.t.root.right.item, 18)
self.assertEqual(self.t.root.right.balance, 0)
self.assertEqual(self.t.root.left.balance, -1)
def test_adjustBalances_negative(self):
(pivot, theStack, parent, found) = self.t.search(1)
newNode = avltree.AVLNode(1)
self.t.adjustBalances_add(theStack, pivot, newNode)
self.assertEqual(pivot.balance, -2)
def test_height(self):
height = self.t.height(self.t.root)
self.assertEqual(height, 3)
def test_print_astree(self):
self.t.print_astree()
class AVLTreeTestsFromBook(unittest.TestCase):
def setUp(self):
# Original tree
rootitem = avltree.AVLNode(10.0)
self.t = avltree.AVLTree(root=rootitem)
items = [3.0, 18.0, 2.0, 4.0, 13.0, 40.0]
for newItem in items:
self.t.insert(newItem)
def test_case1(self):
self.t.insert(39)
self.assertEqual(self.t.root.balance, 1)
node = self.t.root.right
self.assertEqual(node.balance, 1)
node = node.right
self.assertEqual(node.balance, -1)
def test_case2(self):
self.t.insert(39)
self.t.insert(12)
self.assertEqual(self.t.root.right.balance, 0)
self.assertEqual(self.t.root.right.left.balance, -1)
def test_case3_subcaseA(self):
self.t.insert(39.0)
self.t.insert(12.0)
self.t.insert(38)
self.assertEqual(self.t.root.balance, 1)
node = self.t.root.right
self.assertEqual(node.balance, 0)
node = node.right
self.assertEqual(node.balance, 0)
self.assertEqual(node.item, 39)
self.assertEqual(node.left.item, 38)
self.assertEqual(node.right.item, 40)
def test_case3_subcaseB(self):
self.t.insert(39.0)
self.t.insert(12.0)
self.t.insert(38.0)
self.t.insert(14.0)
self.t.insert(11)
self.assertEqual(self.t.root.item, 13.0)
self.assertEqual(self.t.root.balance, 0)
self.assertEqual(self.t.root.left.item, 10.0)
self.assertEqual(self.t.root.left.balance, 0)
self.assertEqual(self.t.root.right.item, 18.0)
self.assertEqual(self.t.root.right.balance, 1)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c5396b9d788077f5d2c6a4476bc386c3",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 76,
"avg_line_length": 30.30496453900709,
"alnum_prop": 0.5965363912941727,
"repo_name": "kyspencer/3D_Empirical_Attainment_Function",
"id": "7d7ca821ef7c1c88180e065329cbbadd476e4233",
"size": "4273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_avltree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "180800"
},
{
"name": "Python",
"bytes": "76666"
}
],
"symlink_target": ""
}
|
import unittest
from unittest import mock
import subprocess
from chaser import pacman
class TestPacman(unittest.TestCase):
def test_exists_true(self):
subprocess.check_output = lambda x, **_: "test"
self.assertEquals("test", pacman.exists("test"))
def test_exists_false(self):
subprocess.check_output = mock.Mock(
side_effect=subprocess.CalledProcessError(1, None))
self.assertEquals(False, pacman.exists("test"))
def test_is_installed(self):
pacman.exists = lambda x: "test"
subprocess.call = lambda x, **_: 0
self.assertEquals(True, pacman.is_installed("test"))
pacman.exists = lambda x: False
self.assertEquals(True, pacman.is_installed("test"))
def test_not_installed(self):
subprocess.call = lambda x, **_: 1
self.assertEquals(False, pacman.is_installed("test"))
|
{
"content_hash": "dd1088bf63f2786ef04adb969f29e944",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 67,
"avg_line_length": 31.964285714285715,
"alnum_prop": 0.6536312849162011,
"repo_name": "rshipp/chaser",
"id": "e4d5906738d2f4a5cce82ae013f0bb703c576fdd",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_pacman.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25281"
},
{
"name": "Shell",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
DEBUG = True
# DEBUG = False
# TEMPLATE_DEBUG = True
TEMPLATE_DEBUG = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SESSION_COOKIE_AGE = 365*24*60*60  # one year, in seconds
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# Values below are imported from secret_settings.py
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
SITE_ID = 7
ADMINS = (
(), #imported from secret_settings.py
)
ADMINURL = '/admin/'
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
ALLOWED_HOSTS = ['*']
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_I18N = False
USE_TZ = False
# Make this unique, and don't share it with anybody.
SECRET_KEY = '' # imported from secret_settings.py
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.messages.context_processors.messages',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'ap.context_processors.media',
'ap.context_processors.static',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'ap.urls'
WSGI_APPLICATION = 'ap.wsgi.application'
TEMPLATE_DIRS = (
"/home/lee/ap/templates",
)
TINYMCE_DEFAULT_CONFIG = {'theme': 'advanced', 'relative_urls': False}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# 'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.sites',
# 'django.contrib.admin.apps.SimpleAdminConfig',
'ap.apdirposts',
'ap.boarddocs',
'todo',
)
ALLOWED_INCLUDE_ROOTS = ('/home/lee/www/arlplanet',)
AUTH_PROFILE_MODULE = 'apdirposts.Director'
STATIC_ROOT = '/home/lee/ap/static'
STATIC_URL = '/static/'
MEDIA_ROOT = '/home/lee/ap/media/'
MEDIA_URL = '/media/'
from secret_settings import *
# secret import gets "token", used with EventBrite
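# Editor's note (illustrative, not part of the original settings): the star
# import above can only rebind module-level names, so a secret_settings.py
# for this layout would typically redefine the blanked values wholesale. A
# hypothetical template -- every value below is a placeholder:
#
#     # secret_settings.py (kept out of version control)
#     SECRET_KEY = 'replace-with-a-long-random-string'
#     ADMINS = (('Site Admin', 'admin@example.com'),)
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql_psycopg2',
#             'NAME': 'dbname',
#             'USER': 'dbuser',
#             'PASSWORD': 'dbpassword',
#             'HOST': '',
#             'PORT': '',
#         }
#     }
#     token = 'eventbrite-api-token'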
|
{
"content_hash": "5ca1878de8c9c9d0242319a7a9628a95",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 100,
"avg_line_length": 30.31896551724138,
"alnum_prop": 0.6849587716804094,
"repo_name": "leephillips/FOTPweb",
"id": "cbca69b7d6f800b820ff7fab58537770e23652c1",
"size": "3552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "133097"
},
{
"name": "HTML",
"bytes": "80843"
},
{
"name": "JavaScript",
"bytes": "80851"
},
{
"name": "Python",
"bytes": "84791"
},
{
"name": "Shell",
"bytes": "2059"
},
{
"name": "TeX",
"bytes": "7234"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('team_name', models.CharField(choices=[('Management', 'Management'), ('Marketing', 'Marketing'), ('Technology', 'Technology'), ('Media & Design', 'Media & Design'), ('Consulting', 'Consulting'), ('Public Services', 'Public Services')], default='Management', max_length=15)),  # 'Public Services' is 15 characters
('team_member_role', models.CharField(max_length=120)),
('team_member_first_name', models.CharField(max_length=120)),
('team_member_last_name', models.CharField(max_length=120)),
('team_member_location', models.CharField(max_length=120)),
('team_member_joined_at', models.DateField()),
('team_member_availability', models.PositiveIntegerField(verbose_name='Available hrs per week')),
('team_member_expertise', models.CharField(max_length=120)),
],
options={
'ordering': ['-team_member_joined_at', 'team_member_role'],
},
),
]
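# Editor's note (illustrative, not part of the original migration): for a
# CharField with choices, max_length must be at least the length of the
# longest stored value; deriving it from the choices keeps the two in sync:
#
#     TEAM_CHOICES = [('Management', 'Management'), ('Public Services', 'Public Services')]
#     max_length = max(len(value) for value, label in TEAM_CHOICES)  # 15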
|
{
"content_hash": "94d69a480abc45c0df01c8306362a30e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 291,
"avg_line_length": 44.45161290322581,
"alnum_prop": 0.5849056603773585,
"repo_name": "neldom/qessera",
"id": "147dbb30f7c8eaaf76df813eb330c7422fa04093",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "team/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3609764"
},
{
"name": "HTML",
"bytes": "739418"
},
{
"name": "JavaScript",
"bytes": "1337580"
},
{
"name": "Python",
"bytes": "93433"
}
],
"symlink_target": ""
}
|
"""
component_control.py
Class instance for control variables shared between components.
"""
# Load the needed packages
from functools import partial
from ..core import Variable, Component, QtGui, QtCore, common, componentsList
class LinkPlugins(Component):
'''
Class instance for control variables shared between components.
The user may select two components from a list. A radio menu is
added for every common sharable variable. Each variable may be unlinked
    from the similar instance in the other component.
    This is a powerful Component; multiple instances may conflict.
'''
    @classmethod
    def guiStart(cls, parent=None):
        kwargs, independent = \
            common._SimplePluginStart("LinkPlugins").startDisplay()
        kwargs['parent'] = parent
        return cls(**kwargs), independent
def __init__(self, components=None, name="LinkPlugins", parent=None):
'''Initialize the class to create the interface.
Parameters
----------
[Optional]
components : list of :py:class:`~artview.core.core.Component` instance
Components to control. If None will use the global list present in
artview.core.core.componentsList
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to this class.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(LinkPlugins, self).__init__(name=name, parent=parent)
self.central_widget = QtGui.QWidget()
self.setCentralWidget(self.central_widget)
self.layout = QtGui.QGridLayout(self.central_widget)
if components is None:
self.components = componentsList
QtCore.QObject.connect(
self.components, QtCore.SIGNAL("ComponentAppended"),
self._updateComponentList)
QtCore.QObject.connect(
self.components, QtCore.SIGNAL("ComponentRemoved"),
self._updateComponentList)
else:
self.components = components
self.comp0 = None
self.comp1 = None
self.setupUi()
self.show()
def _setVariables(self):
'''Determine common variables to both components.'''
self.variables = []
for var in self.comp0.sharedVariables.keys():
if var in self.comp1.sharedVariables.keys():
self.variables.append(var)
########################
# Button methods #
########################
def setupUi(self):
'''Build main layout.'''
if len(self.components) == 0:
return
if self.comp0 not in self.components:
self.comp0 = self.components[0]
if self.comp1 not in self.components:
self.comp1 = self.components[0]
# Select Components buttons
self.combo0 = QtGui.QComboBox()
self.combo0.activated[int].connect(self._comp0Action)
self.combo1 = QtGui.QComboBox()
self.combo1.activated[int].connect(self._comp1Action)
self.layout.addWidget(self.combo0, 0, 0)
self.layout.addWidget(self.combo1, 1, 0)
# Fill buttons
for component in self.components:
self.combo0.addItem(component.name)
self.combo1.addItem(component.name)
self.combo0.setCurrentIndex(self.components.index(self.comp0))
self.combo1.setCurrentIndex(self.components.index(self.comp1))
self._setVariables()
self._setRadioButtons()
def _setRadioButtons(self):
'''Add radio buttons for control over the variables.'''
# Radio Buttons
self.radioLayout = QtGui.QGridLayout()
self.layout.addLayout(self.radioLayout, 2, 0)
self.radioLayout.addWidget(QtGui.QLabel("Link"), 0, 1)
self.radioLayout.addWidget(QtGui.QLabel("Unlink"), 0, 2)
self.radioBoxes = []
for idx, var in enumerate(self.variables):
self._addRadioButton(var, idx)
def _addRadioButton(self, var, idx):
'''Add radio button for variable in the given index.'''
radioBox = QtGui.QButtonGroup()
self.radioBoxes.append(radioBox) # avoid garbage collector
link = QtGui.QRadioButton()
unlink = QtGui.QRadioButton()
QtCore.QObject.connect(link, QtCore.SIGNAL("clicked()"),
partial(self.connectVar, var))
QtCore.QObject.connect(unlink, QtCore.SIGNAL("clicked()"),
partial(self.disconnectVar, var))
radioBox.addButton(link)
radioBox.addButton(unlink)
if getattr(self.comp0, var) is getattr(self.comp1, var):
link.setChecked(True)
else:
unlink.setChecked(True)
if self.comp0 is self.comp1:
unlink.setDisabled(True)
self.radioLayout.addWidget(QtGui.QLabel(var[1::]), idx+1, 0)
self.radioLayout.addWidget(link, idx+1, 1)
self.radioLayout.addWidget(unlink, idx+1, 2)
def _comp0Action(self, idx):
'''Update Component 0.'''
self.comp0 = self.components[idx]
self._setVariables()
self._clearLayout(self.radioLayout)
self.layout.removeItem(self.radioLayout)
self._setRadioButtons()
def _comp1Action(self, idx):
'''Update Component 1.'''
self.comp1 = self.components[idx]
self._setVariables()
self._clearLayout(self.radioLayout)
self.layout.removeItem(self.radioLayout)
self._setRadioButtons()
def connectVar(self, var):
'''Assign variable in component 0 to component 1.'''
        # Disconnect old Variable
self.comp1.disconnectSharedVariable(var)
# comp1.var = comp0.var
setattr(self.comp1, var, getattr(self.comp0, var))
# Connect new Variable
self.comp1.connectSharedVariable(var)
# emit signal
getattr(self.comp1, var).update()
print("connect var %s of %s from %s" % (
var, self.comp1.name, self.comp0.name))
def disconnectVar(self, var):
        '''Make the variable in component 1 independent of component 0.'''
        # Disconnect old Variable
self.comp1.disconnectSharedVariable(var)
# comp1.var = Variable()
setattr(self.comp1, var, Variable())
# Connect new Variable
self.comp1.connectSharedVariable(var)
# emit signal
getattr(self.comp1, var).update()
print("disconnect var %s of %s from %s" % (
var, self.comp1.name, self.comp0.name))
def _clearLayout(self, layout):
'''Recursively remove items from layout.'''
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self._clearLayout(item.layout())
def _updateComponentList(self, item):
'''Rebuild main layout.'''
self._clearLayout(self.layout)
self.setupUi()
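# Editor's note (illustrative, not part of the original module): connectVar
# and disconnectVar reduce to aliasing versus replacing a shared object; a
# standalone sketch of that pattern, with no artview dependency:
#
#     class SharedValue(object):
#         pass
#
#     class Comp(object):
#         def __init__(self):
#             self.Vfield = SharedValue()
#
#     a, b = Comp(), Comp()
#     b.Vfield = a.Vfield            # "link": both see the same instance
#     assert a.Vfield is b.Vfield
#     b.Vfield = SharedValue()       # "unlink": b gets its own instance
#     assert a.Vfield is not b.Vfield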
|
{
"content_hash": "a9f22bc7111de648d40f2c44d00339f3",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 35.34825870646766,
"alnum_prop": 0.611118930330753,
"repo_name": "jjhelmus/artview",
"id": "908d4e8e1648220160cacce7687b354a07318497",
"size": "7105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artview/components/component_control.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "447864"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table(
'meter', meta,
Column('id', Integer, primary_key=True, index=True),
Column('counter_name', String(255)),
Column('user_id', String(255), index=True),
Column('project_id', String(255), index=True),
Column('resource_id', String(255)),
Column('resource_metadata', String(5000)),
Column('counter_type', String(255)),
Column('counter_volume', Integer),
Column('counter_duration', Integer),
Column('timestamp', DateTime(timezone=False), index=True),
Column('message_signature', String(1000)),
Column('message_id', String(1000)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
resource = Table(
'resource', meta,
Column('id', String(255), primary_key=True, index=True),
Column('resource_metadata', String(5000)),
Column('project_id', String(255), index=True),
Column('received_timestamp', DateTime(timezone=False)),
Column('timestamp', DateTime(timezone=False), index=True),
Column('user_id', String(255), index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
user = Table(
'user', meta,
Column('id', String(255), primary_key=True, index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
project = Table(
'project', meta,
Column('id', String(255), primary_key=True, index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
sourceassoc = Table(
'sourceassoc', meta,
Column('source_id', String(255), index=True),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('resource_id', String(255)),
Column('meter_id', Integer),
Index('idx_su', 'source_id', 'user_id'),
Index('idx_sp', 'source_id', 'project_id'),
Index('idx_sr', 'source_id', 'resource_id'),
Index('idx_sm', 'source_id', 'meter_id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
source = Table(
'source', meta,
Column('id', String(255), primary_key=True, index=True),
UniqueConstraint('id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
tables = [meter, project, resource, user, source, sourceassoc]
for i in sorted(tables, key=lambda table: table.fullname):
i.create()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
for name in ['source', 'sourceassoc', 'project',
'user', 'resource', 'meter']:
t = Table(name, meta, autoload=True)
t.drop()
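# Editor's note (illustrative, not part of the original migration): the
# mysql_* keyword arguments are dialect-specific and ignored by other
# backends, so the upgrade/downgrade pair can be smoke-tested against a
# throwaway in-memory database:
#
#     from sqlalchemy import create_engine
#     engine = create_engine('sqlite://')
#     upgrade(engine)    # creates the six tables
#     downgrade(engine)  # drops them again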
|
{
"content_hash": "a61d2ac897c5b4b60017f96f5a23dbbd",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 66,
"avg_line_length": 32.956043956043956,
"alnum_prop": 0.5985328442814272,
"repo_name": "pkilambi/ceilometer",
"id": "4f8a4c8af8e822ffdc457de9996002cd3a1c2e89",
"size": "3545",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2894515"
},
{
"name": "Shell",
"bytes": "6294"
}
],
"symlink_target": ""
}
|
from flask_testing import TestCase  # flask.ext.* import paths were removed in Flask 1.0
class TestUser(TestCase):
def create_app(self):
from core import app
return app
def test_user_password(self):
from identity.models.user import User
email = 'bob@gmail.com'
password = '123'
not_password = '321'
user = User(email, password)
assert user.check_password(password) is True
assert user.check_password(not_password) is False
|
{
"content_hash": "061d96532521f3a4fd4a71a75b21edfb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 57,
"avg_line_length": 24.157894736842106,
"alnum_prop": 0.6339869281045751,
"repo_name": "vernikov/distribution",
"id": "3c48bf88ea554272789a467608aca3456a1b8bf2",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/identity/models/test_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2228"
},
{
"name": "Cucumber",
"bytes": "1160"
},
{
"name": "HTML",
"bytes": "19085"
},
{
"name": "JavaScript",
"bytes": "8930"
},
{
"name": "Python",
"bytes": "13890"
}
],
"symlink_target": ""
}
|
''' Internal utility functions and classes for implementing ``bokeh.colors``.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from six import string_types
# Bokeh imports
from ..util.future import with_metaclass
from .rgb import RGB
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ColorGroup',
'NamedColor',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class _ColorGroupMeta(type):
''' This metaclass enables ColorGroup class types to be used like simple
enumerations.
When Python2 support is dropped, this will no longer be necessary.
'''
def __len__(self):
return len(self._colors)
def __getitem__(self, v):
from . import named
if isinstance(v, string_types):
if v in self._colors:
return getattr(named, v.lower())
            # self here is the ColorGroup subclass itself, so report its own name
            raise KeyError("Color group %r has no color %r" % (self.__name__, v))
if isinstance(v, int):
if v >= 0 and v < len(self):
return getattr(named, self._colors[v].lower())
            raise IndexError("Index out of range for color group %r" % self.__name__)
        raise ValueError("Unknown index %r for color group %r" % (v, self.__name__))
def __iter__(self):
from . import named
return (getattr(named, x.lower()) for x in self._colors)
def __getattr__(self, v):
from . import named
if v != "_colors" and v in self._colors:
return getattr(named, v.lower())
return super(_ColorGroupMeta, self).__getattr__(v)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ColorGroup(with_metaclass(_ColorGroupMeta)):
''' Collect a group of named colors into an iterable, indexable group.
'''
class NamedColor(RGB):
''' Represent a CSS named color, provided as RGB values.
Instances of this class also record the name of the created color, which
is used to populate the Bokeh enum for named colors.
'''
__all__ = []
def __init__(self, name, r, g, b):
'''
Args:
name (str) :
The name to associate with the color, e.g. "firebrick"
r (int) :
The value for the red channel in [0, 255]
g (int) :
The value for the green channel in [0, 255]
b (int) :
The value for the blue channel in [0, 255]
'''
if name not in self.__all__:
self.__all__.append(name)
self.name = name
super(NamedColor, self).__init__(r, g, b)
def to_css(self):
'''
'''
return self.name
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
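# Editor's note (illustrative, not part of the original module): a ColorGroup
# subclass behaves like a small enumeration; the group and color names below
# are assumptions for the sketch:
#
#     class Reds(ColorGroup):
#         _colors = ('Crimson', 'Firebrick')
#
#     len(Reds)        # 2, via _ColorGroupMeta.__len__
#     Reds['Crimson']  # lookup by name
#     Reds[0]          # lookup by position
#     Reds.Crimson     # attribute access
#     list(Reds)       # iterate the named colors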
|
{
"content_hash": "386b7c07b77b616399f9282034e988fa",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 95,
"avg_line_length": 30.8,
"alnum_prop": 0.41194805194805195,
"repo_name": "stonebig/bokeh",
"id": "168ae4f83f6c7b5e02e5d5a549e30a6610333d56",
"size": "4181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/colors/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
}
|
from twisted.internet import defer
from twisted.python import failure, util
"""
"""
class Counter(object):
num = 0
let = 'a'
def incrLet(cls):
cls.let = chr(ord(cls.let) + 1)
incrLet = classmethod(incrLet)
def handleFailure(f):
    print("errback")
    print("we got an exception: %s" % (f.getTraceback(),))
    return f
def subCb_B(result):
    print("sub-callback %s" % (Counter.let,))
    Counter.incrLet()
    s = " beautiful!"
    print("\tadding %r to result" % (s,))
    result += s
    return result
def subCb_A(result):
    print("sub-callback %s" % (Counter.let,))
    Counter.incrLet()
    s = " are "
    print("\tadding %r to result" % (s,))
    result += s
    return result
def mainCb_1(result):
    Counter.num += 1
    print("callback %s" % (Counter.num,))
    print("\tgot result: %s" % (result,))
    result += " Deferreds "
    d = defer.Deferred().addCallback(subCb_A
        ).addCallback(subCb_B)
    d.callback(result)
    return d
def mainCb_2(result):
    Counter.num += 1
    print("callback %s" % (Counter.num,))
    print("\tgot result: %s" % (result,))
def deferredExample():
d = defer.Deferred().addCallback(mainCb_1
).addCallback(mainCb_2)
d.callback("I hope you'll agree: ")
if __name__ == '__main__':
deferredExample()
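# Editor's note (illustrative, not part of the original listing): because
# mainCb_1 returns a Deferred, mainCb_2 is not called until the inner
# subCb_A/subCb_B chain has finished, so the result string is assembled in
# order: "I hope you'll agree: " + " Deferreds " + " are " + " beautiful!".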
|
{
"content_hash": "7039c38dd0ac96d0dacc4217931b03e7",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 57,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.5758928571428571,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "68f24ab5fa83351b5b79cfeac7f97806fbc839fd",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/doc/core/howto/listings/deferred/deferred_ex8.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'journal_club'
DESCRIPTION = 'Weighted scheme to choose a person to present in a Journal Club'
URL = 'https://github.com/philastrophist/journal_club'
EMAIL = 'shaun.c.read@gmail.com'
AUTHOR = 'philastrophist'
# What packages are required for this module to be executed?
REQUIRED = ['numpy', 'pandas', 'pyglet', 'gtts', 'pydub']
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine...')
os.system('twine upload dist/*')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=('tests',)),
#If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
entry_points={
'console_scripts': ['jc=journal_club.jc:main'],
},
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
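# Editor's note (illustrative, not part of the original file): the custom
# command registered in cmdclass above is invoked as
#
#     $ python setup.py upload
#
# which removes any previous dist/, rebuilds the sdist and universal wheel,
# and pushes them to PyPI with twine.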
|
{
"content_hash": "5320ddc27b392e97078f00d18dcb71b5",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 86,
"avg_line_length": 30.925233644859812,
"alnum_prop": 0.6273798730734361,
"repo_name": "philastrophist/journal_club",
"id": "f940b4676690221784d533fd3151b694eae211a9",
"size": "3309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13559"
}
],
"symlink_target": ""
}
|
"""
Models use to define the database
The database is not initiated here, but a pointer is created named db. This is
to be passed to the app creator within the Flask blueprint.
"""
__author__ = 'J. Elliott'
__maintainer__ = 'J. Elliott'
__copyright__ = 'ADS Copyright 2015'
__version__ = '1.0'
__email__ = 'ads@cfa.harvard.edu'
__status__ = 'Production'
__license__ = 'MIT'
from flask_sqlalchemy import SQLAlchemy  # flask.ext.* import paths were removed in Flask 1.0
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.ext.mutable import Mutable
db = SQLAlchemy()
class MutableList(Mutable, list):
"""
The PostgreSQL type ARRAY cannot be mutated once it is set. This hack is
written by the author of SQLAlchemy as a solution. For further reading,
see:
https://groups.google.com/forum/#!topic/sqlalchemy/ZiDlGJkVTM0
and
http://kirang.in/2014/08/09/creating-a-mutable-array-data-type-in-sqlalchemy
"""
def append(self, value):
"""
Define an append action
:param value: value to be appended
:return: no return
"""
list.append(self, value)
self.changed()
def remove(self, value):
"""
Define a remove action
:param value: value to be removed
:return: no return
"""
list.remove(self, value)
self.changed()
@classmethod
def coerce(cls, key, value):
"""
Re-define the coerce. Ensures that a class deriving from Mutable is
always returned
:param key:
:param value:
:return:
"""
if not isinstance(value, MutableList):
if isinstance(value, list):
return MutableList(value)
return Mutable.coerce(key, value)
else:
return value
class User(db.Model):
"""
User table
Foreign-key absolute_uid is the primary key of the user in the user
database microservice.
"""
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
absolute_uid = db.Column(db.Integer, unique=True)
permissions = db.relationship('Permissions', backref='user')
def __repr__(self):
return '<User {0}, {1}>'\
.format(self.id, self.absolute_uid)
class Library(db.Model):
"""
Library table
This represents a collection of bibcodes, a biblist, and can be thought of
much like a bibtex file.
"""
__tablename__ = 'library'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
description = db.Column(db.String(50))
public = db.Column(db.Boolean)
bibcode = db.Column(MutableList.as_mutable(ARRAY(db.String(50))))
permissions = db.relationship('Permissions', backref='library')
def __repr__(self):
return '<Library, library_id: {0:d} name: {1}, ' \
'description: {2}, public: {3},' \
'bibcode: {4}>'\
.format(self.id,
self.name,
self.description,
self.public,
self.bibcode)
class Permissions(db.Model):
"""
Permissions table
Logically connects the library and user table. Whereby, a Library belongs
to a user, and the user can give permissions to other users to view their
libraries.
User (1) to Permissions (Many)
Library (1) to Permissions (Many)
"""
__tablename__ = 'permissions'
id = db.Column(db.Integer, primary_key=True)
read = db.Column(db.Boolean)
write = db.Column(db.Boolean)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
library_id = db.Column(db.Integer, db.ForeignKey('library.id'))
def __repr__(self):
return '<Permissions, user_id: {0}, library_id: {1}, read: {2}, '\
               'write: {3}>'\
.format(self.user_id, self.library_id, self.read, self.write)
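# Editor's note (illustrative, not part of the original module): without the
# MutableList wrapper, in-place mutation of an ARRAY column is invisible to
# SQLAlchemy's change tracking, so the second commit below would write
# nothing. A minimal sketch, with placeholder bibcodes:
#
#     lib = Library(name='demo', public=False, bibcode=['bibcode-1'])
#     db.session.add(lib)
#     db.session.commit()
#     lib.bibcode.append('bibcode-2')  # MutableList.append() calls changed()
#     db.session.commit()              # ... so this UPDATE is actually emitted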
|
{
"content_hash": "10fd809bc61b8b7e09b16ec75b567aaa",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 80,
"avg_line_length": 28.607407407407408,
"alnum_prop": 0.6043500776799585,
"repo_name": "jonnybazookatone/gut-service",
"id": "b44b8f2d903386410519f445df74d860fccfbb24",
"size": "3862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biblib/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Puppet",
"bytes": "1251"
},
{
"name": "Python",
"bytes": "61038"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
}
|
import os
from textgrid import TextGrid, IntervalTier
from .textgrid import TextgridParser
from ..types.parsing import OrthographyTier
from polyglotdb.exceptions import TextGridError
from ..helper import find_wav_path, get_n_channels
from .base import DiscourseData
class FaveParser(TextgridParser):
def _is_valid(self, tg):
found_words = {}
found_phones = {}
for ti in tg.tiers:
try:
speaker, type = ti.name.split(' - ')
except ValueError:
continue
if speaker not in found_words:
found_words[speaker] = False
if speaker not in found_phones:
found_phones[speaker] = False
if type.startswith('word'):
found_words[speaker] = True
elif type.startswith('phone'):
found_phones[speaker] = True
if len(list(found_words.keys())) == 0:
return False
found_word = all(found_words.values())
found_phone = all(found_phones.values())
return found_word and found_phone
def parse_discourse(self, path, types_only=False):
'''
Parse a TextGrid file for later importing.
Parameters
----------
path : str
Path to TextGrid file
Returns
-------
:class:`~polyglotdb.io.discoursedata.DiscourseData`
Parsed data from the file
'''
tg = TextGrid()
tg.read(path)
if not self._is_valid(tg):
            raise TextGridError('The file "{}" cannot be parsed by the FAVE parser.'.format(path))
name = os.path.splitext(os.path.split(path)[1])[0]
dummy = self.annotation_types
self.annotation_types = []
wav_path = find_wav_path(path)
speaker_channel_mapping = {}
if wav_path is not None:
n_channels = get_n_channels(wav_path)
if n_channels > 1:
# Figure speaker-channel mapping
n_tiers = 0
for ti in tg.tiers:
try:
speaker, type = ti.name.split(' - ')
except ValueError:
continue
n_tiers += 1
ind = 0
cutoffs = [x / n_channels for x in range(1, n_channels)]
for ti in tg.tiers:
try:
speaker, type = ti.name.split(' - ')
except ValueError:
continue
if speaker in speaker_channel_mapping:
continue
for i, c in enumerate(cutoffs):
if ind / n_channels < c:
speaker_channel_mapping[speaker] = i
break
else:
speaker_channel_mapping[speaker] = i + 1
ind += 1
# Parse the tiers
for ti in tg.tiers:
try:
speaker, type = ti.name.split(' - ')
if type.endswith('s'):
type = type[:-1]
except ValueError:
continue
if len(ti) == 1 and ti[0].mark.strip() == '':
continue
at = OrthographyTier(type, type)
at.speaker = speaker
at.add(((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
self.annotation_types.append(at)
pg_annotations = self._parse_annotations(types_only)
data = DiscourseData(name, pg_annotations, self.hierarchy)
data.speaker_channel_mapping = speaker_channel_mapping
data.wav_path = wav_path
self.annotation_types = dummy
return data
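# Editor's note (illustrative, not part of the original parser): typical use,
# assuming a FAVE-aligned TextGrid whose tiers are named
# "<speaker> - word" and "<speaker> - phone":
#
#     parser = FaveParser(...)  # constructed like any TextgridParser
#     data = parser.parse_discourse('/path/to/recording.TextGrid')
#     data.wav_path                 # matching .wav file, if one was found
#     data.speaker_channel_mapping  # {speaker: channel} for multichannel audio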
|
{
"content_hash": "27ccb9b3396727daecfaa1bad63d8f34",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 100,
"avg_line_length": 33.82142857142857,
"alnum_prop": 0.5013199577613516,
"repo_name": "samihuc/PolyglotDB",
"id": "01053a8f6688c7c8ba9465d2fbca89afb44fa8c3",
"size": "3788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polyglotdb/io/parsers/fave.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "794865"
},
{
"name": "Shell",
"bytes": "2326"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tags', '0002_tag_author'),
]
operations = [
migrations.AddField(
model_name='tag',
name='slug',
field=models.SlugField(default='slug'),
preserve_default=False,
),
]
|
{
"content_hash": "1093041de62af8024828f26704ab7126",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 51,
"avg_line_length": 20.894736842105264,
"alnum_prop": 0.5692695214105793,
"repo_name": "giantryansaul/easy_blog_django",
"id": "3de0acabef1d7ab27e0f5d974997c64487c93fe6",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy_blog_django/tags/migrations/0003_tag_slug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87325"
},
{
"name": "HTML",
"bytes": "247585"
},
{
"name": "JavaScript",
"bytes": "13424"
},
{
"name": "PHP",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "48034"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
def extractProphetswordStarmvsCom(item):
'''
Parser for 'prophetsword.starmvs.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Lonely Loser', 'Lonely Loser, I’ll Become Blonde Frivolous Gyaru’s Favourite', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
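# Editor's note (illustrative, not part of the original function): each tagmap
# row is (feed tag, canonical series name, translation type); the first row
# whose tag appears in item['tags'] produces the release message, and posts
# with no matching tag fall through to False.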
|
{
"content_hash": "10445b6995d297658c46666a27f33832",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 124,
"avg_line_length": 32.476190476190474,
"alnum_prop": 0.624633431085044,
"repo_name": "fake-name/ReadableWebProxy",
"id": "f879d3b843bd386eb3becbb5497d9559523d98c1",
"size": "686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractProphetswordStarmvsCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|