# PyMM - Python MP3 Manager
# Copyright (C) 2000 Pierre Hjalm <pierre.hjalm@dis.uu.se>
#
# Modified by Alexander Kanavin <ak@sensi.org>
# Removed ID tags support and added VBR support
# Used http://home.swipnet.se/grd/mp3info/ for information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.

"""
mp3.py
Reads information from an mp3 file.

This is a python port of code taken from the mpg123 input module of xmms.
"""

import struct


def header(buf):
    return struct.unpack(">I", buf)[0]


def head_check(head):
    if (head & 0xffe00000L) != 0xffe00000L:
        return 0
    if not ((head >> 17) & 3):
        return 0
    if ((head >> 12) & 0xf) == 0xf:
        return 0
    if not ((head >> 12) & 0xf):
        return 0
    if ((head >> 10) & 0x3) == 0x3:
        return 0
    if ((head >> 19) & 1) == 1 and ((head >> 17) & 3) == 3 and \
            ((head >> 16) & 1) == 1:
        return 0
    if (head & 0xffff0000L) == 0xfffe0000L:
        return 0
    return 1


def filesize(file):
    """ Returns the size of file sans any ID3 tag """
    f = open(file, "rb")  # binary mode so seek/read are byte-exact everywhere
    f.seek(0, 2)
    size = f.tell()
    try:
        f.seek(-128, 2)
    except:
        f.close()
        return 0
    buf = f.read(3)
    f.close()
    if buf == "TAG":
        size = size - 128
    if size < 0:
        return 0
    else:
        return size


table = [[[0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448],
          [0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384],
          [0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320]],
         [[0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256],
          [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160],
          [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160]]]


def decode_header(head):
    """ Decode the mp3 header and put the information in a frame structure """
    freqs = [44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000]
    fr = {}
    if head & (1 << 20):
        if head & (1 << 19):
            fr["lsf"] = 0
        else:
            fr["lsf"] = 1
        fr["mpeg25"] = 0
    else:
        fr["lsf"] = 1
        fr["mpeg25"] = 1
    fr["lay"] = 4 - ((head >> 17) & 3)
    if fr["mpeg25"]:
        fr["sampling_frequency"] = freqs[6 + ((head >> 10) & 0x3)]
    else:
        fr["sampling_frequency"] = freqs[((head >> 10) & 0x3) + (fr["lsf"] * 3)]
    fr["error_protection"] = ((head >> 16) & 0x1) ^ 0x1
    fr["bitrate_index"] = (head >> 12) & 0xf
    fr["bitrate"] = table[fr["lsf"]][fr["lay"] - 1][fr["bitrate_index"]]
    fr["padding"] = (head >> 9) & 0x1
    fr["channel_mode"] = (head >> 6) & 0x3
    if fr["lay"] == 1:
        fr["framesize"] = table[fr["lsf"]][0][fr["bitrate_index"]] * 12000
        fr["framesize"] = fr["framesize"] / fr["sampling_frequency"]
        fr["framesize"] = ((fr["framesize"] + fr["padding"]) << 2) - 4
    elif fr["lay"] == 2:
        fr["framesize"] = table[fr["lsf"]][1][fr["bitrate_index"]] * 144000
        fr["framesize"] = fr["framesize"] / fr["sampling_frequency"]
        fr["framesize"] = fr["framesize"] + fr["padding"] - 1
    elif fr["lay"] == 3:
        fr["framesize"] = table[fr["lsf"]][2][fr["bitrate_index"]] * 144000
        fr["framesize"] = fr["framesize"] / fr["sampling_frequency"] << fr["lsf"]
        fr["framesize"] = fr["framesize"] + fr["padding"] - 4
    else:
        return 0
    return fr


def decode_vbr(buf):
    vbr = {}
    if buf[:4] != "Xing":
        return 0
    frames_flag = ord(buf[7]) & 1
    if not frames_flag:
        return 0
    vbr["frames"] = header(buf[8:])
    return vbr


def decode_synch_integer(buf):
    return (ord(buf[0]) << 21) + (ord(buf[1]) << 14) + \
           (ord(buf[2]) << 7) + ord(buf[3])


def detect_mp3(filename):
    """ Determines whether this is an mp3 file and if so reads information
        from it. """
    try:
        f = open(filename, "rb")
    except:
        return 0
    try:
        tmp = f.read(4)
    except:
        f.close()
        return 0
    if tmp[:3] == 'ID3':
        # Skip an ID3v2 tag; its size is stored as a synchsafe integer.
        try:
            tmp = f.read(6)
            f.seek(decode_synch_integer(tmp[2:]) + 10)
            tmp = f.read(4)
        except:
            f.close()
            return 0
    try:
        head = header(tmp)
    except:
        f.close()
        return 0
    while not head_check(head):
        # This is a real time waster, but an mp3 stream can start anywhere
        # in a file so we have to search the entire file which can take a
        # while for large non-mp3 files.
        try:
            buf = f.read(1024)
        except:
            f.close()
            return 0
        if buf == "":
            f.close()
            return 0
        for i in range(0, len(buf) - 1):
            head = long(head) << 8
            head = head | ord(buf[i])
            if head_check(head):
                f.seek(i + 1 - len(buf), 1)
                break
    mhead = decode_header(head)
    if mhead:
        # Decode VBR header if there's any.
        pos = f.tell()
        mhead["vbr"] = 0
        if not mhead["lsf"]:
            if mhead["channel_mode"] == 3:
                vbrpos = 17
            else:
                vbrpos = 32
        else:
            if mhead["channel_mode"] == 3:
                vbrpos = 9
            else:
                vbrpos = 17
        try:
            f.seek(vbrpos, 1)
            vbr = decode_vbr(f.read(12))
            mhead["vbrframes"] = vbr["frames"]
            if mhead["vbrframes"] > 0:
                mhead["vbr"] = 1
        except:
            pass
        # We found something which looks like a MPEG-header.
        # We check the next frame too, to be sure.
        # (Note: in Python 2, file.seek() returns None, so this branch
        # is never taken; it is kept from the original port.)
        if f.seek(pos + mhead["framesize"]):
            f.close()
            return 0
        try:
            tmp = f.read(4)
        except:
            f.close()
            return 0
        if len(tmp) != 4:
            f.close()
            return 0
        htmp = header(tmp)
        if not (head_check(htmp) and decode_header(htmp)):
            f.close()
            return 0
    f.close()

    # If we have found a valid mp3 add some more info to the head data.
    if mhead:
        mhead["filesize"] = filesize(filename)
        if not mhead["vbr"]:
            if mhead["bitrate"] and mhead["filesize"]:
                mhead["time"] = int(float(mhead["filesize"]) /
                                    (mhead["bitrate"] * 1000) * 8)
            else:
                mhead["time"] = 0
        else:
            if mhead["filesize"] and mhead["sampling_frequency"]:
                medframesize = float(mhead["filesize"]) / float(mhead["vbrframes"])
                if mhead["lsf"]:
                    coef = 12
                else:
                    coef = 144
                vbrrate = medframesize * mhead["sampling_frequency"] / (1000 * coef)
                mhead["time"] = int(float(mhead["filesize"]) / (vbrrate * 1000) * 8)
                mhead["vbrrate"] = int(vbrrate)
        return mhead
    else:
        return 0


if __name__ == "__main__":
    import sys
    mp3info = detect_mp3(sys.argv[1])
    if mp3info:
        print mp3info
    else:
        print "Not an mp3 file."
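# A short usage sketch for the module above (Python 2); "song.mp3" is a
# hypothetical path. detect_mp3() returns 0 on failure or a dict of frame
# info whose keys are set in decode_header()/detect_mp3() above:
#
#     import mp3
#     info = mp3.detect_mp3("song.mp3")
#     if info:
#         print info["bitrate"], info["time"], info["vbr"]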
from ..base import BaseShortener
from ..exceptions import ShorteningErrorException


class Shortener(BaseShortener):
    """
    TinyURL.com shortener implementation

    Example:

        >>> import pyshorteners
        >>> s = pyshorteners.Shortener()
        >>> s.tinyurl.short('http://www.google.com')
        'http://tinyurl.com/TEST'
        >>> s.tinyurl.expand('http://tinyurl.com/test')
        'http://www.google.com'
    """

    api_url = "http://tinyurl.com/api-create.php"

    def short(self, url):
        """Short implementation for TinyURL.com

        Args:
            url: the URL you want to shorten

        Returns:
            A string containing the shortened URL

        Raises:
            ShorteningErrorException: If the API returns an error as response
        """
        url = self.clean_url(url)
        response = self._get(self.api_url, params=dict(url=url))
        if response.ok:
            return response.text.strip()
        raise ShorteningErrorException(response.content)
import autocomplete_light.shortcuts as autocomplete_light
from django import VERSION

from .models import *

try:
    import genericm2m
except ImportError:
    genericm2m = None

try:
    import taggit
except ImportError:
    taggit = None


class DjangoCompatMeta:
    if VERSION >= (1, 6):
        fields = '__all__'


class FkModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = FkModel


class OtoModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = OtoModel


class MtmModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = MtmModel


class GfkModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = GfkModel


if genericm2m:
    class GmtmModelForm(autocomplete_light.ModelForm):
        class Meta(DjangoCompatMeta):
            model = GmtmModel


if taggit:
    class TaggitModelForm(autocomplete_light.ModelForm):
        class Meta(DjangoCompatMeta):
            model = TaggitModel
import base64


def toBase64(s):
    return base64.b64encode(str(s))


def fromBase64(s):
    return base64.b64decode(str(s))
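# Round-trip usage sketch for the helpers above (Python 2 byte strings);
# the input value is illustrative.
if __name__ == "__main__":
    encoded = toBase64("hello")            # 'aGVsbG8='
    assert fromBase64(encoded) == "hello"  # decodes back to the original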
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, tools, _


class odisea_representative(models.Model):
    """Representative"""
    _name = 'odisea.representative'
    _description = 'Representative'

    @api.multi
    def _has_image(self):
        return dict((p.id, bool(p.image)) for p in self)

    name = fields.Char(string='Name', required=True)
    cuit = fields.Char(string='CUIT', size=13)
    title = fields.Many2one('res.partner.title', 'Title')
    company = fields.Char(string='Company')
    ref = fields.Char('Contact Reference', select=1)
    website = fields.Char('Website', help="Website of Partner or Company")
    comment = fields.Text('Notes')
    category_id = fields.Many2many('res.partner.category', id1='partner_id',
                                   id2='category_id', string='Tags')
    active = fields.Boolean('Active', default=True)
    street = fields.Char('Street')
    street2 = fields.Char('Street2')
    zip = fields.Char('Zip', size=24, change_default=True)
    city = fields.Char('City')
    state_id = fields.Many2one("res.country.state", 'State',
                               ondelete='restrict')
    country_id = fields.Many2one('res.country', 'Country',
                                 ondelete='restrict')
    email = fields.Char('Email')
    phone = fields.Char('Phone')
    fax = fields.Char('Fax')
    mobile = fields.Char('Mobile')
    birthdate = fields.Char('Birthdate')
    function = fields.Char('Job Position')
    is_company = fields.Boolean(
        'Is a Company',
        help="Check if the contact is a company, otherwise it is a person")
    use_parent_address = fields.Boolean(
        'Use Company Address',
        help="Select this if you want to set company's address information "
             "for this contact")
    # image: all image fields are base64 encoded and PIL-supported
    image = fields.Binary(
        "Image",
        help="This field holds the image used as avatar for this contact, "
             "limited to 1024x1024px")
    image_medium = fields.Binary(
        compute="_get_image", string="Medium-sized image", store=False,
        help="Medium-sized image of this contact. It is automatically "
             "resized as a 128x128px image, with aspect ratio preserved. "
             "Use this field in form views or some kanban views.")
    image_small = fields.Binary(
        compute="_get_image", string="Small-sized image", store=False,
        help="Small-sized image of this contact. It is automatically "
             "resized as a 64x64px image, with aspect ratio preserved. "
             "Use this field anywhere a small image is required.")
    has_image = fields.Boolean(compute=_has_image)
    color = fields.Integer('Color Index')

    @api.multi
    def onchange_state(self, state_id):
        if state_id:
            state = self.env['res.country.state'].browse(state_id)
            return {'value': {'country_id': state.country_id.id}}
        return {}

    @api.multi
    def onchange_type(self, is_company):
        value = {'title': False}
        if is_company:
            value['use_parent_address'] = False
            domain = {'title': [('domain', '=', 'partner')]}
        else:
            domain = {'title': [('domain', '=', 'contact')]}
        return {'value': value, 'domain': domain}

    @api.one
    @api.depends("image")
    def _get_image(self):
        """ calculate the image sizes and set the images to the
            corresponding fields """
        image = self.image
        # check if the context contains the magic `bin_size` key
        if self.env.context.get("bin_size"):
            # refetch the image with a clean context
            image = self.env[self._name].with_context({}).browse(self.id).image
        data = tools.image_get_resized_images(
            image, return_big=True, avoid_resize_big=False)
        # set the computed fields declared above
        self.image_medium = data["image_medium"]
        self.image_small = data["image_small"]
        return True
#!/usr/bin/env python
'''
Created on Jan 5, 2011

@author: mkiyer

chimerascan: chimeric transcript discovery using RNA-seq

Copyright (C) 2011 Matthew Iyer

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import shutil
import subprocess
import sys
from optparse import OptionParser

# local imports
import chimerascan.pysam as pysam
from chimerascan.lib.feature import GeneFeature
from chimerascan.lib.seq import DNA_reverse_complement
from chimerascan.lib.config import JOB_ERROR, JOB_SUCCESS, ALIGN_INDEX, \
    GENE_REF_PREFIX, GENE_FEATURE_FILE
from chimerascan.lib.base import check_executable

BASES_PER_LINE = 50


def split_seq(seq, chars_per_line):
    pos = 0
    newseq = []
    while pos < len(seq):
        if pos + chars_per_line > len(seq):
            endpos = len(seq)
        else:
            endpos = pos + chars_per_line
        newseq.append(seq[pos:endpos])
        pos = endpos
    return '\n'.join(newseq)


def bed12_to_fasta(gene_feature_file, reference_seq_file):
    ref_fa = pysam.Fastafile(reference_seq_file)
    for g in GeneFeature.parse(open(gene_feature_file)):
        exon_seqs = []
        error_occurred = False
        for start, end in g.exons:
            seq = ref_fa.fetch(g.chrom, start, end)
            if not seq:
                logging.warning("gene %s exon %s:%d-%d not found in reference"
                                % (g.tx_name, g.chrom, start, end))
                error_occurred = True
                break
            exon_seqs.append(seq)
        if error_occurred:
            continue
        # make fasta record
        seq = ''.join(exon_seqs)
        if g.strand == '-':
            seq = DNA_reverse_complement(seq)
        # break seq onto multiple lines
        seqlines = split_seq(seq, BASES_PER_LINE)
        # note: start/end here are the coordinates of the last exon parsed
        yield (">%s range=%s:%d-%d gene=%s strand=%s\n%s" %
               (GENE_REF_PREFIX + g.tx_name, g.chrom, start, end,
                g.gene_name, g.strand, seqlines))
    ref_fa.close()


def create_chimerascan_index(output_dir,
                             genome_fasta_file,
                             gene_feature_file,
                             bowtie_build_bin):
    # create output dir if it does not exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        logging.info("Created index directory: %s" % (output_dir))
    # create FASTA index file
    index_fasta_file = os.path.join(output_dir, ALIGN_INDEX + ".fa")
    fh = open(index_fasta_file, "w")
    # copy reference fasta file to output dir
    logging.info("Adding reference genome to index...")
    shutil.copyfileobj(open(genome_fasta_file), fh)
    # extract sequences from gene feature file
    logging.info("Adding gene models to index...")
    for fa_record in bed12_to_fasta(gene_feature_file, genome_fasta_file):
        print >>fh, fa_record
    fh.close()
    # copy gene bed file to index directory
    shutil.copyfile(gene_feature_file,
                    os.path.join(output_dir, GENE_FEATURE_FILE))
    # index the combined fasta file
    logging.info("Indexing FASTA file...")
    fh = pysam.Fastafile(index_fasta_file)
    fh.close()
    # build bowtie index on the combined fasta file
    logging.info("Building bowtie index...")
    bowtie_index_name = os.path.join(output_dir, ALIGN_INDEX)
    args = [bowtie_build_bin, index_fasta_file, bowtie_index_name]
    if subprocess.call(args) != os.EX_OK:
        logging.error("bowtie-build failed to create alignment index")
        return JOB_ERROR
    logging.info("chimerascan index created successfully")
    return JOB_SUCCESS


def main():
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <reference_genome.fa> "
                          "<gene_models.txt> <index_output_dir>")
    parser.add_option("--bowtie-build-bin", dest="bowtie_build_bin",
                      default="bowtie-build",
                      help="Path to 'bowtie-build' program")
    options, args = parser.parse_args()
    # check command line arguments
    if len(args) < 3:
        parser.error("Incorrect number of command line arguments")
    ref_fasta_file = args[0]
    gene_feature_file = args[1]
    output_dir = args[2]
    # check that input files exist
    if not os.path.isfile(ref_fasta_file):
        parser.error("Reference fasta file '%s' not found" % (ref_fasta_file))
    if not os.path.isfile(gene_feature_file):
        parser.error("Gene feature file '%s' not found" % (gene_feature_file))
    # check that output dir is not a regular file
    if os.path.exists(output_dir) and (not os.path.isdir(output_dir)):
        parser.error("Output directory name '%s' exists and is not a valid "
                     "directory" % (output_dir))
    # check that bowtie-build program exists
    if check_executable(options.bowtie_build_bin):
        logging.debug("Checking for 'bowtie-build' binary... found")
    else:
        parser.error("bowtie-build binary not found or not executable")
    # run main index creation function
    retcode = create_chimerascan_index(output_dir, ref_fasta_file,
                                       gene_feature_file,
                                       options.bowtie_build_bin)
    sys.exit(retcode)


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-09 08:07
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('georegion', '0001_initial_squashed_0004_auto_20180307_2026'),
    ]

    operations = [
        migrations.AlterField(
            model_name='georegion',
            name='part_of',
            field=models.ForeignKey(blank=True, null=True,
                                    on_delete=django.db.models.deletion.SET_NULL,
                                    to='georegion.GeoRegion',
                                    verbose_name='Part of'),
        ),
    ]
# Copyright (c) 2013 Alan McIntyre

import httplib
import json
import decimal
import re

decimal.getcontext().rounding = decimal.ROUND_DOWN
exps = [decimal.Decimal("1e-%d" % i) for i in range(16)]

btce_domain = "btc-e.com"

all_currencies = ("btc", "usd", "rur", "ltc", "nmc", "eur", "nvc",
                  "trc", "ppc", "ftc", "xpm")
all_pairs = ("btc_usd", "btc_rur", "btc_eur", "ltc_btc", "ltc_usd",
             "ltc_rur", "ltc_eur", "nmc_btc", "nmc_usd", "nvc_btc",
             "nvc_usd", "usd_rur", "eur_usd", "trc_btc", "ppc_btc",
             "ppc_usd", "ftc_btc", "xpm_btc")

max_digits = {"btc_usd": 3,
              "btc_rur": 5,
              "btc_eur": 5,
              "ltc_btc": 5,
              "ltc_usd": 6,
              "ltc_rur": 5,
              "ltc_eur": 3,
              "nmc_btc": 5,
              "nmc_usd": 3,
              "nvc_btc": 5,
              "nvc_usd": 3,
              "usd_rur": 5,
              "eur_usd": 5,
              "trc_btc": 5,
              "ppc_btc": 5,
              "ppc_usd": 3,
              "ftc_btc": 5,
              "xpm_btc": 5}

min_orders = {"btc_usd": decimal.Decimal("0.01"),
              "btc_rur": decimal.Decimal("0.1"),
              "btc_eur": decimal.Decimal("0.1"),
              "ltc_btc": decimal.Decimal("0.1"),
              "ltc_usd": decimal.Decimal("0.1"),
              "ltc_rur": decimal.Decimal("0.1"),
              "ltc_eur": decimal.Decimal("0.1"),
              "nmc_btc": decimal.Decimal("0.1"),
              "nmc_usd": decimal.Decimal("0.1"),
              "nvc_btc": decimal.Decimal("0.1"),
              "nvc_usd": decimal.Decimal("0.1"),
              "usd_rur": decimal.Decimal("0.1"),
              "eur_usd": decimal.Decimal("0.1"),
              "trc_btc": decimal.Decimal("0.1"),
              "ppc_btc": decimal.Decimal("0.1"),
              "ppc_usd": decimal.Decimal("0.1"),
              "ftc_btc": decimal.Decimal("0.1"),
              "xpm_btc": decimal.Decimal("0.1")}


def parseJSONResponse(response):
    def parse_decimal(var):
        return decimal.Decimal(var)

    try:
        r = json.loads(response, parse_float=parse_decimal,
                       parse_int=parse_decimal)
    except Exception as e:
        msg = "Error while attempting to parse JSON response:" \
              " %s\nResponse:\n%r" % (e, response)
        raise Exception(msg)

    return r


HEADER_COOKIE_RE = re.compile(r'__cfduid=([a-f0-9]{46})')
BODY_COOKIE_RE = re.compile(r'document\.cookie="a=([a-f0-9]{32});path=/;";')


class BTCEConnection:
    def __init__(self, timeout=30):
        self.conn = httplib.HTTPSConnection(btce_domain, timeout=timeout)
        self.cookie = None

    def close(self):
        self.conn.close()

    def getCookie(self):
        self.cookie = ""

        self.conn.request("GET", '/')
        response = self.conn.getresponse()

        setCookieHeader = response.getheader("Set-Cookie")
        match = HEADER_COOKIE_RE.search(setCookieHeader)
        if match:
            self.cookie = "__cfduid=" + match.group(1)

        match = BODY_COOKIE_RE.search(response.read())
        if match:
            if self.cookie != "":
                self.cookie += '; '
            self.cookie += "a=" + match.group(1)

    def makeRequest(self, url, extra_headers=None, params="", with_cookie=False):
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        if extra_headers is not None:
            headers.update(extra_headers)

        if with_cookie:
            if self.cookie is None:
                self.getCookie()
            headers.update({"Cookie": self.cookie})

        self.conn.request("POST", url, params, headers)
        response = self.conn.getresponse().read()

        return response

    def makeJSONRequest(self, url, extra_headers=None, params=""):
        response = self.makeRequest(url, extra_headers, params)
        return parseJSONResponse(response)


def validatePair(pair):
    if pair not in all_pairs:
        if "_" in pair:
            a, b = pair.split("_")
            swapped_pair = "%s_%s" % (b, a)
            if swapped_pair in all_pairs:
                msg = "Unrecognized pair: %r (did you mean %s?)"
                msg = msg % (pair, swapped_pair)
                raise Exception(msg)
        raise Exception("Unrecognized pair: %r" % pair)


def validateOrder(pair, trade_type, rate, amount):
    validatePair(pair)
    if trade_type not in ("buy", "sell"):
        raise Exception("Unrecognized trade type: %r" % trade_type)

    minimum_amount = min_orders[pair]
    formatted_min_amount = formatCurrency(minimum_amount, pair)
    if amount < minimum_amount:
        msg = "Trade amount too small; should be >= %s" % formatted_min_amount
        raise Exception(msg)


def truncateAmountDigits(value, digits):
    quantum = exps[digits]
    return decimal.Decimal(value).quantize(quantum)


def truncateAmount(value, pair):
    return truncateAmountDigits(value, max_digits[pair])


def formatCurrencyDigits(value, digits):
    s = str(truncateAmountDigits(value, digits))
    dot = s.index(".")
    while s[-1] == "0" and len(s) > dot + 2:
        s = s[:-1]

    return s


def formatCurrency(value, pair):
    return formatCurrencyDigits(value, max_digits[pair])
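# Usage sketch for the formatting helpers above; "btc_usd" is a real pair
# from all_pairs, the amount is just an example (Python 2 print statements,
# matching the rest of this module).
if __name__ == "__main__":
    amount = decimal.Decimal("0.12345678")
    validatePair("btc_usd")
    print truncateAmount(amount, "btc_usd")   # 0.123 (max_digits is 3)
    print formatCurrency(amount, "btc_usd")   # "0.123"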
class a(object):
    pass


class b(a):
    pass

print a.__subclasses__()


class c(a):
    pass

print a.__subclasses__()
# Generated by Django 2.2.17 on 2021-01-31 06:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('conversation', '0032_twitterusertimeline'),
    ]

    operations = [
        migrations.AddField(
            model_name='twitterusertimeline',
            name='last_api_call',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, print_function, division
# GUI for pyfdtd using PySide
# Copyright (C) 2012 Patrik Gebhardt
# Contact: grosser.knuff@googlemail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from newLayer import *
from newSimulation import *
                                       dynamic_individual_grad_values)):
      tf.logging.info("Comparing individual gradients iteration %d" % i)
      self.assertAllEqual(a, b)
    for i, (a, b) in enumerate(zip(static_individual_var_grad_values,
                                   dynamic_individual_var_grad_values)):
      tf.logging.info(
          "Comparing individual variable gradients iteration %d" % i)
      self.assertAllEqual(a, b)

  def testNoProjNoShardingSimpleStateSaver(self):
    self._testNoProjNoShardingSimpleStateSaver(use_gpu=False)
    self._testNoProjNoShardingSimpleStateSaver(use_gpu=True)

  def testNoProjNoSharding(self):
    self._testNoProjNoSharding(use_gpu=False)
    self._testNoProjNoSharding(use_gpu=True)

  def testCellClipping(self):
    self._testCellClipping(use_gpu=False)
    self._testCellClipping(use_gpu=True)

  def testProjNoSharding(self):
    self._testProjNoSharding(use_gpu=False)
    self._testProjNoSharding(use_gpu=True)

  def testProjSharding(self):
    self._testProjSharding(use_gpu=False)
    self._testProjSharding(use_gpu=True)

  def testTooManyShards(self):
    self._testTooManyShards(use_gpu=False)
    self._testTooManyShards(use_gpu=True)

  def testShardNoShardEquivalentOutput(self):
    self._testShardNoShardEquivalentOutput(use_gpu=False)
    self._testShardNoShardEquivalentOutput(use_gpu=True)

  def testDoubleInput(self):
    self._testDoubleInput(use_gpu=False)
    self._testDoubleInput(use_gpu=True)

  def testDoubleInputWithDropoutAndDynamicCalculation(self):
    self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=False)
    self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=True)

  def testDynamicEquivalentToStaticRNN(self):
    self._testDynamicEquivalentToStaticRNN(
        use_gpu=False, use_sequence_length=False)
    self._testDynamicEquivalentToStaticRNN(
        use_gpu=True, use_sequence_length=False)
    self._testDynamicEquivalentToStaticRNN(
        use_gpu=False, use_sequence_length=True)
    self._testDynamicEquivalentToStaticRNN(
        use_gpu=True, use_sequence_length=True)


class BidirectionalRNNTest(tf.test.TestCase):

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _createBidirectionalRNN(self, use_gpu, use_shape,
                              use_sequence_length, scope=None):
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8

    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
    sequence_length = tf.placeholder(tf.int64) if use_sequence_length else None
    cell_fw = tf.nn.rnn_cell.LSTMCell(num_units,
                                      input_size,
                                      initializer=initializer,
                                      state_is_tuple=False)
    cell_bw = tf.nn.rnn_cell.LSTMCell(num_units,
                                      input_size,
                                      initializer=initializer,
                                      state_is_tuple=False)
    inputs = max_length * [
        tf.placeholder(
            tf.float32,
            shape=(batch_size, input_size) if use_shape else (None, input_size))
    ]
    outputs, state_fw, state_bw = tf.nn.bidirectional_rnn(
        cell_fw,
        cell_bw,
        inputs,
        dtype=tf.float32,
        sequence_length=sequence_length,
        scope=scope)
    self.assertEqual(len(outputs), len(inputs))
    for out in outputs:
      self.assertEqual(
          out.get_shape().as_list(),
          [batch_size if use_shape else None, 2 * num_units])

    input_value = np.random.randn(batch_size, input_size)
    outputs = tf.pack(outputs)

    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalRNN(self, use_gpu, use_shape):
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalRNN(use_gpu, use_shape, True))
      tf.initialize_all_variables().run()
      # Run with pre-specified sequence length of 2, 3
      out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
                                 feed_dict={inputs[0]: input_value,
                                            sequence_length: [2, 3]})

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      # First sequence in batch is length=2
      # Check that the time=0 forward output is equal to time=1 backward output
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      # Check that the time=1 forward output is equal to time=0 backward output
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])

      # Second sequence in batch is length=3
      # Check that the time=0 forward output is equal to time=2 backward output
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      # Check that the time=1 forward output is equal to time=1 backward output
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      # Check that the time=2 forward output is equal to time=0 backward output
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def _testBidirectionalRNNWithoutSequenceLength(self, use_gpu, use_shape):
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, _ = (
          self._createBidirectionalRNN(use_gpu, use_shape, False))
      tf.initialize_all_variables().run()
      out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
                                 feed_dict={inputs[0]: input_value})

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      # Both sequences in batch are length=8.  Check that the time=i
      # forward output is equal to time=8-1-i backward output
      for i in xrange(8):
        self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
        self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
        self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
      for i in xrange(8):
        self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
        self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
        self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def testBidirectionalRNN(self):
    self._testBidirectionalRNN(use_gpu=False, use_shape=False)
    self._testBidirectionalRNN(use_gpu=True, use_shape=False)
    self._testBidirectionalRNN(use_gpu=False, use_shape=True)
    self._testBidirectionalRNN(use_gpu=True, use_shape=True)

  def testBidirectionalRNNWithoutSequenceLength(self):
    self._testBidirectionalRNNWithoutSequenceLength(use_gpu=False,
er._rank()
        else:
            return NotImplemented

    def __eq__(self, other):
        if isinstance(self, Agent):
            return self._rank() == other._rank()
        else:
            return NotImplemented

    def __hash__(self):
        return hash(self._rank())

    def create_context(self):
        return Context(self)


class _RegionList(Sequence):
    __slots__ = '_all', 'globals', 'readonlys', 'privates', 'groups'

    def __init__(self, lst):
        self._all = tuple(lst)
        self.globals = tuple(x for x in lst if x.kind == 'global')
        self.readonlys = tuple(x for x in lst if x.kind == 'readonly')
        self.privates = tuple(x for x in lst if x.kind == 'private')
        self.groups = tuple(x for x in lst if x.kind == 'group')

    def __len__(self):
        return len(self._all)

    def __contains__(self, item):
        return item in self._all

    def __reversed__(self):
        return reversed(self._all)

    def __getitem__(self, idx):
        return self._all[idx]


class MemPool(HsaWrapper):
    """Abstracts a HSA mem pool.

    This will wrap and provide an OO interface for hsa_amd_memory_pool_t
    C-API elements
    """
    _hsa_info_function = 'hsa_amd_memory_pool_get_info'
    _hsa_properties = {
        'segment': (
            enums_ext.HSA_AMD_MEMORY_POOL_INFO_SEGMENT,
            drvapi.hsa_amd_segment_t
        ),
        '_flags': (
            enums_ext.HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS,
            ctypes.c_uint32
        ),
        'size': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_SIZE, ctypes.c_size_t),
        'alloc_allowed': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
                          ctypes.c_bool),
        'alloc_granule': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE,
                          ctypes.c_size_t),
        'alloc_alignment': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALIGNMENT,
                            ctypes.c_size_t),
        'accessible_by_all': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_ACCESSIBLE_BY_ALL,
                              ctypes.c_bool),
    }

    _segment_name_map = {
        enums_ext.HSA_AMD_SEGMENT_GLOBAL: 'global',
        enums_ext.HSA_AMD_SEGMENT_READONLY: 'readonly',
        enums_ext.HSA_AMD_SEGMENT_PRIVATE: 'private',
        enums_ext.HSA_AMD_SEGMENT_GROUP: 'group',
    }

    def __init__(self, agent, pool):
        """Do not instantiate MemPool objects directly, use the factory class
        method 'instance_for' to ensure MemPool identity"""
        self._id = pool
        self._owner_agent = agent
        self._as_parameter_ = self._id

    @property
    def kind(self):
        return self._segment_name_map[self.segment]

    @property
    def agent(self):
        return self._owner_agent

    def supports(self, check_flag):
        """
        Determines if a given feature is supported by this MemRegion.
        Feature flags are found in "./enums_exp.py" under:

        * hsa_amd_memory_pool_global_flag_t

        Params:
        check_flag: Feature flag to test
        """
        if self.kind == 'global':
            return self._flags & check_flag
        else:
            return False

    def allocate(self, nbytes):
        assert self.alloc_allowed
        assert nbytes >= 0
        buff = ctypes.c_void_p()
        flags = ctypes.c_uint32(0)  # From API docs "Must be 0"!
        hsa.hsa_amd_memory_pool_allocate(self._id, nbytes, flags,
                                         ctypes.byref(buff))
        if buff.value is None:
            raise HsaDriverError("Failed to allocate from {}".format(self))
        return buff

    _instance_dict = {}

    @classmethod
    def instance_for(cls, owner, _id):
        try:
            return cls._instance_dict[_id]
        except KeyError:
            new_instance = cls(owner, _id)
            cls._instance_dict[_id] = new_instance
            return new_instance


class MemRegion(HsaWrapper):
    """Abstracts a HSA memory region.

    This will wrap and provide an OO interface for hsa_region_t C-API elements
    """
    _hsa_info_function = 'hsa_region_get_info'
    _hsa_properties = {
        'segment': (
            enums.HSA_REGION_INFO_SEGMENT,
            drvapi.hsa_region_segment_t
        ),
        '_flags': (
            enums.HSA_REGION_INFO_GLOBAL_FLAGS,
            drvapi.hsa_region_global_flag_t
        ),
        'host_accessible': (enums_ext.HSA_AMD_REGION_INFO_HOST_ACCESSIBLE,
                            ctypes.c_bool),
        'size': (enums.HSA_REGION_INFO_SIZE, ctypes.c_size_t),
        'alloc_max_size': (enums.HSA_REGION_INFO_ALLOC_MAX_SIZE,
                           ctypes.c_size_t),
        'alloc_alignment': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_ALIGNMENT,
                            ctypes.c_size_t),
        'alloc_granule': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_GRANULE,
                          ctypes.c_size_t),
        'alloc_allowed': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_ALLOWED,
                          ctypes.c_bool),
    }

    _segment_name_map = {
        enums.HSA_REGION_SEGMENT_GLOBAL: 'global',
        enums.HSA_REGION_SEGMENT_READONLY: 'readonly',
        enums.HSA_REGION_SEGMENT_PRIVATE: 'private',
        enums.HSA_REGION_SEGMENT_GROUP: 'group',
    }

    def __init__(self, agent, region_id):
        """Do not instantiate MemRegion objects directly, use the factory class
        method 'instance_for' to ensure MemRegion identity"""
        self._id = region_id
        self._owner_agent = agent
        self._as_parameter_ = self._id

    @property
    def kind(self):
        return self._segment_name_map[self.segment]

    @property
    def agent(self):
        return self._owner_agent

    def supports(self, check_flag):
        """
        Determines if a given feature is supported by this MemRegion.
        Feature flags are found in "./enums.py" under:

        * hsa_region_global_flag_t

        Params:
        check_flag: Feature flag to test
        """
        if self.kind == 'global':
            return self._flags & check_flag
        else:
            return False

    def allocate(self, nbytes):
        assert self.alloc_allowed
        assert nbytes <= self.alloc_max_size
        assert nbytes >= 0
        buff = ctypes.c_void_p()
        hsa.hsa_memory_allocate(self._id, nbytes, ctypes.byref(buff))
        return buff

    def free(self, ptr):
        hsa.hsa_memory_free(ptr)

    _instance_dict = {}

    @classmethod
    def instance_for(cls, owner, _id):
        try:
            return cls._instance_dict[_id]
        except KeyError:
            new_instance = cls(owner, _id)
            cls._instance_dict[_id] = new_instance
            return new_instance


class Queue(object):
    def __init__(self, agent, queue_ptr):
        """The id in a queue is a pointer to the queue object returned by
        hsa_queue_create.  The Queue object has ownership on that queue
        object"""
        self._agent = weakref.proxy(agent)
        self._id = queue_ptr
        self._as_parameter_ = self._id
        self._finalizer = hsa.hsa_queue_destroy

    def release(self):
        self._agent.release_queue(self)

    def __getattr__(self, fname):
        return getattr(self._id.contents, fname)

    @contextmanager
    def _get_packet(self, packet_type):
        # Write AQL packet at the calculated queue index address
        queue_struct = self._id.contents
        queue_mask = queue_struct.size - 1
        assert (ctypes.sizeof(packet_type) ==
                ctypes.sizeof(drvapi.hsa_kernel_dispatch_packet_t))
        packet_array_t = (packet_type * queue_struct.size)

        # Obtain the current queue write index
        index = hsa.hsa_queue_add_write_index_acq_rel(self._id, 1)

        while True:
            read_offset = hsa.hsa_queue_load_read_index_acquire(self._id)
            if read_offset <= index < read_offset + queue_struct.size:
                break

        queue_offset = index & queue_mask
        queue = packet_array_t.from_address(queue_struct.base_address)
        packet = queue[queue_offset]

        # zero
import os
import re

import sublime
import sublime_plugin


class ExpandTabsOnLoad(sublime_plugin.EventListener):
    # Run ST's 'expand_tabs' command when opening a file,
    # only if there are any tab characters in the file
    def on_load(self, view):
        expand_tabs = view.settings().get("expand_tabs_on_load", False)
        if expand_tabs and view.find("\t", 0):
            view.run_command("expand_tabs", {"set_translate_tabs": True})
            tab_size = view.settings().get("tab_size", 0)
            message = "Converted tab characters to {0} spaces".format(tab_size)
            sublime.status_message(message)
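# A hedged usage sketch: the listener above only fires when the
# "expand_tabs_on_load" view setting is truthy. That flag would normally be
# set in the user settings JSON, e.g.:
#
#     { "expand_tabs_on_load": true }
#
# The per-view equivalent from the Sublime Text API (illustrative only):
#
#     view.settings().set("expand_tabs_on_load", True)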
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import hmac

from cryptography.hazmat.bindings._constant_time import lib


if hasattr(hmac, "compare_digest"):
    def bytes_eq(a, b):
        if not isinstance(a, bytes) or not isinstance(b, bytes):
            raise TypeError("a and b must be bytes.")

        return hmac.compare_digest(a, b)

else:
    def bytes_eq(a, b):
        if not isinstance(a, bytes) or not isinstance(b, bytes):
            raise TypeError("a and b must be bytes.")

        return lib.Cryptography_constant_time_bytes_eq(
            a, len(a), b, len(b)
        ) == 1
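# Usage sketch: constant-time equality check for secrets such as MACs or
# tokens; the values below are illustrative.
if __name__ == "__main__":
    expected = b"secret-token"
    received = b"secret-token"
    print(bytes_eq(expected, received))   # True
    print(bytes_eq(expected, b"other"))   # False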
lg.norm(A, ord='fro')
    for i in [5, 10, 50]:
        U, s, Vt = randomized_svd(X, n_components, n_iter=i,
                                  power_iteration_normalizer=normalizer,
                                  random_state=0)
        A = X - U.dot(np.diag(s).dot(Vt))
        error = linalg.norm(A, ord='fro')
        assert 15 > np.abs(error_2 - error)


def test_randomized_svd_sparse_warnings():
    # randomized_svd throws a warning for lil and dok matrix
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
    n_components = 5
    for cls in (sparse.lil_matrix, sparse.dok_matrix):
        X = cls(X)
        assert_warns_message(
            sparse.SparseEfficiencyWarning,
            "Calculating SVD of a {} is expensive. "
            "csr_matrix is more efficient.".format(cls.__name__),
            randomized_svd, X, n_components, n_iter=1,
            power_iteration_normalizer='none')


def test_svd_flip():
    # Check that svd_flip works in both situations, and reconstructs input.
    rs = np.random.RandomState(1999)
    n_samples = 20
    n_features = 10
    X = rs.randn(n_samples, n_features)

    # Check matrix reconstruction
    U, S, Vt = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, Vt, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)

    # Check transposed matrix reconstruction
    XT = X.T
    U, S, Vt = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, Vt, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)

    # Check that different flip methods are equivalent under reconstruction
    U_flip1, V_flip1 = svd_flip(U, Vt, u_based_decision=True)
    assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
    U_flip2, V_flip2 = svd_flip(U, Vt, u_based_decision=False)
    assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)


def test_randomized_svd_sign_flip():
    a = np.array([[2.0, 0.0], [0.0, 1.0]])
    u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
    for seed in range(10):
        u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
        assert_almost_equal(u1, u2)
        assert_almost_equal(v1, v2)
        assert_almost_equal(np.dot(u2 * s2, v2), a)
        assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
        assert_almost_equal(np.dot(v2.T, v2), np.eye(2))


def test_randomized_svd_sign_flip_with_transpose():
    # Check if the randomized_svd sign flipping is always done based on u
    # irrespective of transpose.
    # See https://github.com/scikit-learn/scikit-learn/issues/5608
    # for more details.
    def max_loading_is_positive(u, v):
        """
        returns bool tuple indicating if the values maximising np.abs
        are positive across all rows for u and across all columns for v.
        """
        u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
        v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
        return u_based, v_based

    mat = np.arange(10 * 8).reshape(10, -1)

    # Without transpose
    u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
    u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
    assert u_based
    assert not v_based

    # With transpose
    u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
        mat, 3, flip_sign=True, transpose=True)
    u_based, v_based = max_loading_is_positive(
        u_flipped_with_transpose, v_flipped_with_transpose)
    assert u_based
    assert not v_based


def test_cartesian():
    # Check if cartesian product delivers the right results
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))

    true_out = np.array([[1, 4, 6],
                         [1, 4, 7],
                         [1, 5, 6],
                         [1, 5, 7],
                         [2, 4, 6],
                         [2, 4, 7],
                         [2, 5, 6],
                         [2, 5, 7],
                         [3, 4, 6],
                         [3, 4, 7],
                         [3, 5, 6],
                         [3, 5, 7]])

    out = cartesian(axes)
    assert_array_equal(true_out, out)

    # check single axis
    x = np.arange(3)
    assert_array_equal(x[:, np.newaxis], cartesian((x,)))


def test_logistic_sigmoid():
    # Check correctness and robustness of logistic sigmoid implementation
    def naive_log_logistic(x):
        return np.log(expit(x))

    x = np.linspace(-2, 2, 50)
    assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))

    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])


def test_incremental_variance_update_formulas():
    # Test Youngs and Cramer incremental variance formulas.
    # Doggie data from https://www.mathsisfun.com/data/standard-deviation.html
    A = np.array([[600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300]]).T
    idx = 2
    X1 = A[:idx, :]
    X2 = A[idx:, :]

    old_means = X1.mean(axis=0)
    old_variances = X1.var(axis=0)
    old_sample_count = np.full(X1.shape[1], X1.shape[0], dtype=np.int32)
    final_means, final_variances, final_count = \
        _incremental_mean_and_var(X2, old_means, old_variances,
                                  old_sample_count)
    assert_almost_equal(final_means, A.mean(axis=0), 6)
    assert_almost_equal(final_variances, A.var(axis=0), 6)
    assert_almost_equal(final_count, A.shape[0])


def test_incremental_mean_and_variance_ignore_nan():
    old_means = np.array([535., 535., 535., 535.])
    old_variances = np.array([4225., 4225., 4225., 4225.])
    old_sample_count = np.array([2, 2, 2, 2], dtype=np.int32)

    X = np.array([[170, 170, 170, 170],
                  [430, 430, 430, 430],
                  [300, 300, 300, 300]])

    X_nan = np.array([[170, np.nan, 170, 170],
                      [np.nan, 170, 430, 430],
                      [430, 430, np.nan, 300],
                      [300, 300, 300, np.nan]])

    X_means, X_variances, X_count = _incremental_mean_and_var(
        X, old_means, old_variances, old_sample_count)
    X_nan_means, X_nan_variances, X_nan_count = _incremental_mean_and_var(
        X_nan, old_means, old_variances, old_sample_count)

    assert_allclose(X_nan_means, X_means)
    assert_allclose(X_nan_variances, X_variances)
    assert_allclose(X_nan_count, X_count)


@skip_if_32bit
def test_incremental_variance_numerical_stability():
    # Test Youngs and Cramer incremental variance formulas.

    def np_var(A):
        return A.var(axis=0)

    # Naive one pass variance computation - not numerically stable
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    def one_pass_var(X):
        n = X.shape[0]
        exp_x2 = (X ** 2).sum(axis=0) / n
        expx_2 = (X.sum(axis=0) / n) ** 2
        return exp_x2 - expx_2

    # Two-pass algorithm, stable.
    # We use it as a benchmark. It is not an online algorithm
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    def two_pass_var(X):
        mean = X.mean(axis=0)
        Y = X.copy()
        return np.mean((Y - mean) ** 2, axis=0)

    # Naive online implementation
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks for size 1
    def naive_mean_variance_update(x, last_mean, last_variance,
                                   last_sample_count):
        updated_sample_count = (last_sample_count + 1)
        samples_ratio = last_sample_count / float(updated_sample_count)
        updated_mean = x / updated_sample_count + last_mean * samples_ratio
        updated_variance = last_variance * samples_ratio + \
            (x - last_mean) * (x - updated_mean) / updated_sample_count
        return updated_mean, updated_variance, updated_sample_count

    # We want to show a case when one_pass_var has error > 1e-3 while
    # _batch_mean_variance_update has les
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#

import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process

ERR_SLEEP = 15
MAX_NONCE = 1000000L

settings = {}
pp = pprint.PrettyPrinter(indent=4)


class BitcoinRPC:
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        self.OBJID += 1
        obj = {'version': '1.1',
               'method': method,
               'id': self.OBJID}
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          {'Authorization': self.authhdr,
                           'Content-type': 'application/json'})

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] is not None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        return self.rpc('getwork', data)


def uint32(x):
    return x & 0xffffffffL


def bytereverse(x):
    return uint32((((x) << 24) | (((x) << 8) & 0x00ff0000) |
                   (((x) >> 8) & 0x0000ff00) | ((x) >> 24)))


def bufreverse(in_buf):
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i + 4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)


def wordreverse(in_buf):
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i + 4])
    out_words.reverse()
    return ''.join(out_words)


class Miner:
    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [solution]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'], work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        self.max_nonce = long((hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done, (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)


def miner_thread(id):
    miner = Miner(id)
    miner.loop()


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 7046
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
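# A minimal example CONFIG-FILE for the miner above; the keys match those
# parsed and defaulted in __main__, and the values are illustrative.
#
#   host=127.0.0.1
#   port=7046
#   rpcuser=exampleuser
#   rpcpass=examplepass
#   threads=2
#   hashmeter=1
#   scantime=30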
tin)s['%(op)s'](%(left)s, %(right)s)" % jsvars
        except StopIteration:
            pass
        return left

    def node_assert_stmt(self, node):
        jsvars = self.jsvars.copy()
        childs = node.children.__iter__()
        self.assert_value(node, childs.next().value, 'assert')
        test = self.dispatch(childs.next())
        arg = ''
        if len(node.children) > 2:
            child = childs.next()
            self.assert_value(node, child.value, ',')
            arg = ', %s' % self.dispatch(childs.next())
        jsvars.update(locals())
        # TODO: return %(builtin)s['raise'](%(builtin)s['AssertionError']%(arg)s, %(None)s)
        return """if (!%(booljs)s(%(test)s)) {
+	return %(builtin)s['raise'](%(module)s['$new'](%(builtin)s['AssertionError']%(arg)s));
}""" % jsvars

    def node_atom(self, node):
        jsvars = self.jsvars.copy()
        items = []
        cls = None
        if node.children[0].value == '(':
            cls = jsvars['tuple']
            if len(node.children) == 3:
                items = self.dispatch(node.children[1])
                if not isinstance(items, list):
                    return items
            elif len(node.children) != 2:
                self.not_implemented(node)
        elif node.children[0].value == '[':
            cls = jsvars['list']
            if len(node.children) == 3:
                items = self.dispatch(node.children[1])
                if not isinstance(items, list):
                    if isinstance(node.children[1], Leaf):
                        items = [items]
                    elif type_repr(node.children[1].type) == 'listmaker':
                        pass
                    else:
                        items = [items]
            elif len(node.children) != 2:
                self.not_implemented(node)
        elif node.children[0].value == '{':
            cls = jsvars['dict']
            if len(node.children) == 3:
                items = self.dispatch(node.children[1])
                if not isinstance(items, dict):
                    self.not_implemented(node)
            elif len(node.children) != 2:
                self.not_implemented(node)
            if items:
                items = ["[%s, %s]" % (k, v) for k, v in items.iteritems()]
            else:
                items = []
        elif node.children[0].value == '`':
            assert len(node.children) == 3
            what = self.dispatch(node.children[1])
            jsvars.update(locals())
            return "%(builtin)s['repr'](%(what)s)" % jsvars
        elif leaf_type.get(node.children[0].type) == 'str':
            s = ''
            for child in node.children:
                assert leaf_type.get(child.type) == 'str'
                s += child.value
            return self.add_const_str(eval(s))
        else:
            self.not_implemented(node)
        if isinstance(items, list):
            items = '[%s]' % ', '.join([str(i) for i in items])
        jsvars.update(locals())
        return "%(module)s['$new'](%(cls)s, %(items)s)" % jsvars

    def node_augassign(self, node):
        self.not_implemented(node)

    def node_break_stmt(self, node):
        return 'break'

    def node_classdef(self, node):
        jsvars = self.jsvars.copy()
        #print node.depth()
        childs = node.children.__iter__()
        self.assert_value(node, childs.next().value, 'class')
        name = childs.next().value
        self.add_name(name)
        tok = childs.next()
        if tok.value == ':':
            bases = self.get_jsname('object')
        else:
            self.assert_value(node, tok.value, '(')
            bases = childs.next()
            if isinstance(bases, Leaf):
                if bases.value == ')':
                    bases = None
                else:
                    bases = [self.get_jsname(bases.value)]
            else:
                bases = self.dispatch(bases)
            if bases is None:
                bases = self.get_jsname('object')
            elif isinstance(bases, list):
                bases = ', '.join([str(i) for i in bases])
                self.assert_value(node, childs.next().value, ')')
            else:
                self.assert_value(node, childs.next().value, ')')
            self.assert_value(node, childs.next().value, ':')
        lineno = self.track_lineno(node)
        jsvars.update(locals())
        if isinstance(self.names[-1], ClassNames):
            namespace = "%(funcbase)s['$dict']" % jsvars
        else:
            namespace = "%(locals)s" % jsvars
        jsvars.update(namespace=namespace)
        self.add_lines("""\
%(namespace)s[%(name)r] = %(builtin)s['B$type'](%(module)s, %(name)r, [%(bases)s], {});
(function(%(funcbase)s){
+	//var %(locals)s = %(funcbase)s['$dict'];""" % jsvars)
        names = ClassNames()
        self.names.append(names)
        indent_level = self.indent()
        self.next_func_type.append(func_type['function'])
        try:
            while True:
                child = childs.next()
                self.dispatch(child)
        except StopIteration:
            pass
        self.next_func_type.pop()
        self.assert_dedent(self.dedent(), indent_level)
        assert names is self.names.pop(), self.TranslationError(
            "names pop error", self.get_lineno(node))
        if '__slots__' in names:
            self.add_lines("""\
+	%(funcbase)s['__slots__'] = %(module)s['$new'](%(tuple)s, %(locals)s['__slots__']).__array;\
""" % jsvars)
        self.add_lines("})(%(namespace)s[%(name)r]);" % jsvars)
        return name

    def node_comp_for(self, node):
        assert False, "Shouldn't get here..."

    def node_comp_if(self, node):
        assert False, "Shouldn't get here..."

    def node_comp_iter(self, node):
        self.not_implemented(node)

    def node_comp_op(self, node):
        if node.children[0].value == 'is':
            if node.children[1].value == 'not':
                return op_compare['is not']
        elif node.children[0].value == 'not':
            if node.children[1].value == 'in':
                return op_compare['not in']
        self.not_implemented(node)

    def node_comparison(self, node):
        jsvars = self.jsvars.copy()
        left = op = right = None
        childs = node.children.__iter__()
        first_left = left = self.dispatch(childs.next())
        prev_right = None
        cmp_expr = []
        tmp = None
        try:
            while True:
                op_node = childs.next()
                right = self.dispatch(childs.next())
                if isinstance(op_node, Leaf):
                    op = op_compare[op_node.value]
                elif type_repr(op_node.type) == 'comp_op':
                    op = self.dispatch(op_node)
                jsvars.update(locals())
                if prev_right is None:
                    cmp_expr.append("""\
%(builtin)s['%(op)s'](%(left)s, %(right)s)""" % jsvars)
                else:
                    if tmp is None:
                        tmp = self.get_tmp_jsname('comp$')
                        jsvars['tmp'] = tmp
                        self.add_lines("var %s;" % tmp)
                        cmp_expr = ["""\
%(builtin)s['%(op)s'](%(first_left)s, %(tmp)s=%(prev_right)s)""" % jsvars]
                    cmp_expr.append("""\
%(builtin)s['%(op)s'](%(tmp)s, %(tmp)s=%(right)s)""" % jsvars)
                left = right
                prev_right = right
        except StopIteration:
            pass
        if cmp_expr:
            if len(cmp_expr) == 1:
                return cmp_expr[0]
            s = ' && '.join(["(%s).valueOf()" % i for i in cmp_expr])
            jsvars.update(locals())
            return "(%(s)s ? %(True)s : %(False)s)" % jsvars
        self.not_implemented(node)

    def node_compound_stmt(self, node):
        self.not_implemented(node)

    def node_continue_stmt(self, node):
        self.not_implemented(node)

    def node_decorated(self, node):
        self.assert_instance(node, node.children[0], Node)
        self.assert_instance(node, node.children[1], Node)
        assert len(node.children) == 2
        decor
""" FileDialogDelegateQt.py: Delegate that pops up a file dialog when double clicked. Sets the model data to the selected file name. """ import os.path try: from PyQt5.QtCore import Qt, QT_VERSION_STR from PyQt5.QtWidgets import QStyledItemDelegate, QFileDialog except ImportError: try: from PyQt4.QtCore import Qt, QT_VERSION_STR from PyQt4.QtGui import QStyledItemDelegate, QFileDialog except ImportError: raise ImportError("FileDialogDelegateQt: Requires PyQt5 or PyQt4.") __author__ = "Marcel Goldschen-Ohm <marcel.goldschen@gmail.com>" class FileDialogDelegateQt(QStyledItemDelegate): """ Delegate that pops up a file dialog when double clicked. Sets the model data to the selected file name. """ def __init__(self, parent=None): QStyledItemDelegate.__init__(self, pa
rent) def createEditor(self, parent, option, index): """ Instead of creating an editor, just popup a modal file dialog and set the model data to the selected file name, if any. """ pathToFileNam
e = "" if QT_VERSION_STR[0] == '4': pathToFileName = QFileDialog.getOpenFileName(None, "Open") elif QT_VERSION_STR[0] == '5': pathToFileName, temp = QFileDialog.getOpenFileName(None, "Open") pathToFileName = str(pathToFileName) # QString ==> str if len(pathToFileName): index.model().setData(index, pathToFileName, Qt.EditRole) index.model().dataChanged.emit(index, index) # Tell model to update cell display. return None def displayText(self, value, locale): """ Show file name without path. """ try: if QT_VERSION_STR[0] == '4': pathToFileName = str(value.toString()) # QVariant ==> str elif QT_VERSION_STR[0] == '5': pathToFileName = str(value) path, fileName = os.path.split(pathToFileName) return fileName except: return ""
#! /usr/bin/env python # -*- coding: utf-8 -*- # This file has been created by ARSF Data Analysis Node and # is licensed under the MIT Licence. A copy of this # licence is available to download with this file. # # Author: Robin Wilson # Created: 2015-11-16 import sys import numpy as np import pandas as pd # Python 2/3 imports try: from StringIO import StringIO except ImportError: if sys.version_info[0] >= 3: from io import StringIO else: raise from . import spectra_reader class DARTFormat(spectra_reader.SpectraReader): """ Class to read spectra from DART format files """ def get_spectra(self, filename): """ Ex
tract spectra from a DART format file Requires: * filename - the filename to the DART format file to read Returns: * Spectra object with values, radiance, pixel and line """ f = open(filename, 'r') s = StringIO() within_comment = False while True: try:
                line = next(f)  # works on both Python 2 and 3 file objects
            except StopIteration:
                break

            if "*" in line and within_comment:
                within_comment = False
                continue
            elif "*" in line and not within_comment:
                within_comment = True

            if not within_comment and not line.isspace():
                s.write(line)

        f.close()
        s.seek(0)

        df = pd.read_table(s, header=None,
                           names=["wavelength", "reflectance",
                                  "refractive_index", "A", "Alpha", "wHapke",
                                  "AHapkeSpec", "AlphaHapkeSpec", "TDirect",
                                  "TDiffuse"])

        df.reflectance = df.reflectance / 100

        wavelengths = np.array(df.wavelength)
        reflectance = np.array(df.reflectance)

        self.spectra.file_name = filename
        self.spectra.wavelengths = wavelengths
        self.spectra.values = reflectance
        self.spectra.pixel = None
        self.spectra.line = None
        self.spectra.latitude = None
        self.spectra.longitude = None
        self.spectra.wavelength_units = "nm"
        self.spectra.value_units = "reflectance"
        self.spectra.value_scaling = 1

        return self.spectra
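

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): read a
    # DART format file and inspect the returned Spectra object. The file name
    # is a hypothetical example, and this assumes SpectraReader's default
    # constructor; run via ``python -m`` so the relative import above resolves.
    reader = DARTFormat()
    spectra = reader.get_spectra("example_dart_file.txt")
    print(spectra.wavelengths)
    print(spectra.values)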
ble cls: A custom type or function that will be passed the direct response :return: NatGateway, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_04_01.models.NatGateway :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_m
ap.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" content_type = kwargs.pop("content_type", "application/
json") accept = "application/json" # Construct URL url = self.update_tags.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('NatGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore def list_all( self, **kwargs: Any ) -> AsyncIterable["_models.NatGatewayListResult"]: """Gets all the Nat Gateways in a subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either NatGatewayListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NatGatewayListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_all.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('NatGatewayListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'} # type: ignore def list( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.NatGatewayListResult"]: """Gets all nat gateways in a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either NatGatewayListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NatGatewayListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('NatGatewayListResult', pipeline_response
# -*- coding: utf-8 -*- from ..internal.DeadCrypter import DeadCrypter class Movie2KTo(DeadCrypter): __name__ = "Movie2KTo" __type__ = "crypter" __version__ = "0.56" __status__ = "stable" __pattern__ = r'http://(?:www\.)?movie2k\.to/(.+)\.html' __config__ = [("activated", "bool", "Activated", True)] __des
cription__ = """Mo
vie2k.to decrypter plugin""" __license__ = "GPLv3" __authors__ = [("4Christopher", "4Christopher@gmx.de")]
config = { "interfaces": { "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], "non_idempotent": [] }, "retry_params": { "default": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, "initial_rpc_timeout_millis": 20000, "r
pc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000 } }, "methods": { "ReportErrorEvent": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }
} } } }
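

if __name__ == "__main__":
    # Illustrative sketch (not part of the config above): the delay schedule
    # implied by the "default" retry_params -- exponential backoff starting at
    # initial_retry_delay_millis, scaled by retry_delay_multiplier, and capped
    # at max_retry_delay_millis.
    delay = 100.0       # initial_retry_delay_millis
    multiplier = 1.3    # retry_delay_multiplier
    cap = 60000.0       # max_retry_delay_millis
    schedule = []
    for _ in range(10):
        schedule.append(int(delay))
        delay = min(delay * multiplier, cap)
    print(schedule)  # [100, 130, 169, 219, 285, 371, 482, 627, 815, 1060]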
import random

rand = random.SystemRandom()


def rabinMiller(num):
    # Miller-Rabin probabilistic primality test with 64 rounds.
    # Handle small inputs explicitly: randrange(2, num - 1) below
    # requires num >= 4.
    if num < 2:
        return False
    if num < 4:
        # 2 and 3 are prime
        return True
    if num % 2 == 0:
        return False
    # Write num - 1 as (2 ** t) * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for trials in range(64):
        a = rand.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    # a is a witness that num is composite
                    return False
                else:
                    i = i + 1
                    v = (v ** 2) % num
    return True
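

if __name__ == "__main__":
    # Quick sanity check (illustrative): list the primes below 50 according to
    # the Miller-Rabin test above.
    print([n for n in range(2, 50) if rabinMiller(n)])
    # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]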
''' salt.targeting ~~~~~~~~~~~~~~ ''' import logging log = logging.getLogger(__name__) from .parser import * from .query import * from .rules import * from .subjects import * #: defines minion targeting minion_targeting = Query(default_rule=GlobRule) minion_targeting.register(GlobRule, None, 'glob') minion_targeting
.register(GrainRule, 'G', 'grain') minion_targeting.register(PillarRule, 'I', 'pillar') minion_targeting.register(PCRERule, 'E', 'pcre') minion_targeting.register(GrainPCRERule, 'P', 'grain_pcre') minion_targeting.register(SubnetIPRule, 'S') minion_targeting.register(ExselRule, 'X', 'exsel') minion_targeting.register(LocalStoreRule, 'D') minion_targeting.register(YahooRangeRule, 'R') minion_targeting.register(ListEvaluator, 'L', 'list') minion_targeting.register(Nod
eGroupEvaluator, 'N')
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # #
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing
, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.messaging.notify import notifier class NoOpDriver(notifier._Driver): def notify(self, ctxt, message, priority): pass
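

# Illustrative sketch (not part of oslo.messaging): a second minimal driver
# against the same notifier._Driver interface, logging each notification
# instead of silently discarding it. The LoggingDriver name is hypothetical.
import logging


class LoggingDriver(notifier._Driver):

    def notify(self, ctxt, message, priority):
        # Record the notification rather than dropping it like NoOpDriver.
        logging.getLogger(__name__).info('notification (%s): %s',
                                         priority, message)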
oind <0.7 or Eloipool <20120513) if not self.OldGMP: self.OldGMP = True self.logger.warning('Upstream server is not BIP 22 compatible') oMP = deepcopy(MP) prevBlock = bytes.fromhex(MP['previousblockhash'])[::-1] if 'height' in MP: height = MP['height'] else: height = self.access.getinfo()['blocks'] + 1 bits = bytes.fromhex(MP['bits'])[::-1] if (prevBlock, height, bits) != self.currentBlock: self.updateBlock(prevBlock, height, bits, _HBH=(MP['previousblockhash'], MP['bits'])) txnlist = MP['transactions'] if len(txnlist) and isinstance(txnlist[0], dict): txninfo = txnlist txnlist = tuple(a['data'] for a in txnlist) txninfo.insert(0, { }) elif 'transactionfees' in MP: # Backward compatibility with pre-BIP22 gmp_fees branch txninfo = [{'fee':a} for a in MP['transactionfees']] else: # Backward compatibility with pre-BIP22 hex-only (bitcoind <0.7, Eloipool <future) txninfo = [{}] * len(txnlist) # TODO: cache Txn or at least txid from previous merkle roots? txnlist = [a for a in map(bytes.fromhex, txnlist)] self._makeBlockSafe(MP, txnlist, txninfo) cbtxn = self.makeCoinbaseTxn(MP['coinbasevalue']) cbtxn.setCoinbase(b'\0\0') cbtxn.assemble() txnlist.insert(0, cbtxn.data) txnlist = [a for a in map(Txn, txnlist[1:])] txnlist.insert(0, cbtxn) txnlist = list(txnlist) newMerkleTree = MerkleTree(txnlist) if newMerkleTree.merkleRoot() != self.currentMerkleTree.merkleRoot(): newMerkleTree.POTInfo = MP.get('POTInfo') newMerkleTree.oMP = oMP if (not self.OldGMP) and 'proposal' in MP.get('capabilities', ()): (prevBlock, height, bits) = self.currentBlock coinbase = self.makeCoinbase(height=height) cbtxn.setCoinbase(coinbase) cbtxn.assemble() merkleRoot = newMerkleTree.merkleRoot() MRD = (merkleRoot, newMerkleTree, coinbase, prevBlock, bits) blkhdr = MakeBlockHeader(MRD) data = assembleBlock(blkhdr, txnlist) propose = self.access.getblocktemplate({ "mode": "proposal", "data": b2a_hex(data).decode('utf8'), }) if propose is None: self.logger.debug('Updating merkle tree (upstream accepted proposal)') self.currentMerkleTree = newMerkleTree else: self.RejectedProposal = (newMerkleTree, propose) try: propose = propose['reject-reason'] except: pass self.logger.error('Upstream rejected proposed block: %s' % (propose,)) else: self.logger.debug('Updating merkle tree (no proposal support)') self.currentMerkleTree = newMerkleTree self.lastMerkleUpdate = now self.nextMerkleUpdate = now + self.MinimumTxnUpdateWait if self.needMerkle == 2: self.needMerkle = 1 self.needMerkleSince = now def makeCoinbase(self, height): now = int(time()) if now > _makeCoinbase[0]: _makeCoinbase[0] = now _makeCoinbase[1] = 0 else: _makeCoinbase[1] += 1 rv = self.CoinbasePrefix rv += pack('>L', now) + pack('>Q', _makeCoinbase[1]).lstrip(b'\0') # NOTE: Not using varlenEncode, since this is always guaranteed to be < 100 rv = bytes( (len(rv),) ) + rv for v in self.CoinbaseAux.values(): rv += v if len(rv) > 95: t = time() if self.overflowed < t - 300: self.logger.warning('Overflowing coinbase data! 
%d bytes long' % (len(rv),)) self.overflowed = t self.isOverflowed = True rv = rv[:95] else: self.isOverflowed = False rv = bitcoin.script.encodeUNum(height) + rv return rv def makeMerkleRoot(self, merkleTree, height): cbtxn = merkleTree.data[0] cb = self.makeCoinbase(height=height) cbtxn.setCoinbase(cb) cbtxn.assemble() merkleRoot = merkleTree.merkleRoot() return (merkleRoot, merkleTree, cb) _doing_last = None def _doing(self, what): if self._doing_last == what: self._doing_i += 1 return global now if self._doing_last: self.logger.debug("Switching from (%4dx in %5.3f seconds) %s => %s" % (self._doing_i, now - self._doing_s, self._doing_last, what)) se
lf._doing_last = what self._doing_i = 1 self._doing_s = now def _floodWarning(self, now, wid, wmsgf = None, doin = True, logf = None): if doin is True: doin = self._doing_last def a
(f = wmsgf): return lambda: "%s (doing %s)" % (f(), doin) wmsgf = a() winfo = self.lastWarning.setdefault(wid, [0, None]) (lastTime, lastDoing) = winfo if now <= lastTime + max(5, self.MinimumTxnUpdateWait): return winfo[0] = now nowDoing = doin winfo[1] = nowDoing if logf is None: logf = self.logger.warning logf(wmsgf() if wmsgf else doin) def _makeOne(self, putf, merkleTree, height): MT = self.currentMerkleTree height = self.currentBlock[1] MR = self.makeMerkleRoot(MT, height=height) # Only add it if the height hasn't changed in the meantime, to avoid a race if self.currentBlock[1] == height: putf(MR) def makeClear(self): self._doing('clear merkle roots') self._makeOne(self.clearMerkleRoots.put, self.curClearMerkleTree, height=self.currentBlock[1]) def makeNext(self): self._doing('longpoll merkle roots') self._makeOne(self.nextMerkleRoots.put, self.nextMerkleTree, height=self.currentBlock[1] + 1) def makeRegular(self): self._doing('regular merkle roots') self._makeOne(self.merkleRoots.append, self.currentMerkleTree, height=self.currentBlock[1]) def merkleMaker_II(self): global now # No bits = no mining :( if not self.ready: return self.updateMerkleTree() # First, ensure we have the minimum clear, next, and regular (in that order) if self.clearMerkleRoots.qsize() < self.WorkQueueSizeClear[0]: return self.makeClear() if self.nextMerkleRoots.qsize() < self.WorkQueueSizeLongpoll[0]: return self.makeNext() if len(self.merkleRoots) < self.WorkQueueSizeRegular[0]: return self.makeRegular() # If we've met the minimum requirements, consider updating the merkle tree if self.nextMerkleUpdate <= now: return self.updateMerkleTree() # Finally, fill up clear, next, and regular until we've met the maximums if self.clearMerkleRoots.qsize() < self.WorkQueueSizeClear[1]: return self.makeClear() if self.nextMerkleRoots.qsize() < self.WorkQueueSizeLongpoll[1]: return self.makeNext() if len(self.merkleRoots) < self.WorkQueueSizeRegular[1] or self.merkleRoots[0][1] != self.currentMerkleTree: return self.makeRegular() # Nothing left to do, fire onBlockUpdate event (if appropriate) and sleep if self.needMerkle == 1: self.onBlockUpdate() self.needMerkle = False self._doing('idle') # TODO: rather than sleepspin, block until MinimumTxnUpdateWait expires or threading.Condition(?) sleep(self.IdleSleepTime) def merkleMaker_I(self): global now now = time() self.merkleMaker_II() if self.needMerkle == 1 and now > self.needMerkleSince + self.WarningDelayTxnLongpoll: self._floodWarning(now, 'NeedMerkle', lambda: 'Transaction-longpoll requested %d seconds ago, and still not ready. Is your server fast enough to keep up with your configured WorkQueueSizeRegular maximum?' % (now - self.needMerkleSince,)) if now > self.nextMerkleUpdate + self.WarningDelayMerkleUpdate: self._floodWarning(now, 'MerkleUpdate', lambda: "Haven't updated the merkle tree in at least %d seconds! Is your server fast enough to keep up with your configured work queue minimums?" % (now - self.lastMerkleUpdate,)) def run(self): while True: try: self.merkleMaker_I() except: self.logger.critical(traceback.format_exc()) def start(self, *a, **k): self._prepare() super().start(*a, **k) def getMRD(self): try: MRD = self.merkleRoots.pop() self.LowestMerkleRoots = min(len(self.merkleRoots), self.LowestMerkleRoots) rollPrevBlk = False except IndexError: qsz = self.clearMerkleRoots.qsize() if qsz < 0x10: self.logger.warning('clearMerkleRoots running out! 
only %d left' % (qsz,)) MRD = self.clearMerkleRoots.get() self.LowestClearMerkleRoots = min(self.clearMerkleRoots.qsize(), self.LowestClearMerkleRoots) rollPrevBlk = True (merkleRoot, merkleTree, cb) = MRD (prevBlock, height, bits) = self.currentBlock return (merkleRoot, merkleTree, cb, prevBlock, bits
# # Copyright (c) 2014, Arista Networks, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of Arista Networks nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCH
ANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDI
NG NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # __version__ = '1.1.0' __author__ = 'Arista Networks'
se, centred = False): super().__init__() self.text = text self.pos = pos self.colour = colour self.font = font self.size = size self.variable = variable self.centred = centred def update(self): pos = self.pos font = py.font.Font(self.font, self.size) if not self.variable: label = font.render(self.text, 1, self.colour) if self.variable: label = font.render(str(getattr(v, self.text)), 1, self.colour) if self.centred: pos = list(self.pos) pos[0] -= font.size(self.text)[0] / 2 pos[1] -= font.size(self.text)[1] / 2 pos = tuple(pos) v.screen.blit(label, pos) class Button(py.sprite.Sprite): def __init__(self, text, pos, size, hovercolour, normalcolour, font, ID, centred = False, bsize=(0,0)): super().__init__() self.ID = ID self.hovered = False self.text = text self.pos = pos self.hcolour = hovercolour self.ncolour = normalcolour self.font = font self.font = py.font.Font(font, int(size)) self.centred = centred self.size = bsize self.set_rect() def update(self): self.set_rend() py.draw.rect(v.screen, self.get_color(), self.rect) v.screen.blit(self.rend, self.rect) if self.rect.collidepoint(v.mouse_pos): self.hovered = True else: self.hovered = False def set_rend(self): self.rend = self.font.render(self.text, True, (0,0,0)) def get_color(self): if self.hovered: return self.hcolour else: return self.ncolour def set_rect(self): self.set_rend() self.rect = self.rend.get_rect() if not self.centred: self.rect.topleft = self.pos if self.centred: self.rect.center = self.pos if not self.size[0] == 0: self.rect.width = self.size[0] if not self.size[1] == 0: self.rect.height = self.size[1] def pressed(self): mouse = v.mouse_pos if mouse[0] > self.rect.topleft[0]: if mouse[1] > self.rect.topleft[1]: if mouse[0] < self.rect.bottomright[0]: if mouse[1] < self.rect.bottomright[1]: return True else: return False else: return False else: return False else: return False import os, shutil try: shutil.copyfile("Resources/Fonts/Vecna.otf", "Update/Vecna.otf") theFont = "Update/Vecna.otf" except: theFont = None py.init() v.screen = py.display.set_mode((640, 480)) v.screen.fill((20, 20, 20)) textLabel("Checking For Updates...", (320, 240), (255, 255, 255), theFont, 50, False, True).update() py.display.flip() tries = 0 def reporthook(count, blockSize, totalSize): if totalSize == -1: print("FAILED TOTALSIZE") raise Exception() #Shows percentage of download py.event.pump() for event in py.event.get(): if event.type == py.QUIT: sys.exit() percent = int(count*blockSize*100/totalSize) rect = py.Rect(100, 240, percent*4.4, 30) v.screen.fill((20, 20, 20)) py.draw.rect(v.screen, (255, 0, 0), rect) py.draw.rect(v.screen, (0, 0, 0), rect, 2) py.draw.rect(v.screen, (0, 0, 0), (100, 240, 440, 30), 2) #font = py.font.Font(theFont, 25) #title = font.render("Downloading...", 1, (255, 255, 255)) #progress = font.render(str(percent) + "%", 1, (255, 255, 255)) #v.screen.blit(title, (200, 200)) #v.screen.blit(progress, (200, 250)) textLabel("Downloading...", (320, 150), (255, 255, 255), theFont, 50, False, True).update() textLabel(str(percent) + "%", (320, 255), (255, 255, 255), theFont, 20, False, True).update() py.display.flip() #sys.stdout.write("\r" + "...%d%%" % percent) #sys.stdout.flush() def recursive_overwrite(src, dest, ignore=None): if os.path.isdir(src): if not os.path.isdir(dest): os.makedirs(dest) files = os.listdir(src) if ignore is not None: ignored = ignore(src, files) else: ignored = set() for f in files: if f not in ignored: recursive_overwrite(os.path.join(src, f), os.path.join(dest, f), ignore) else: 
shutil.copyfile(src, dest) def updateCheck(): global latest page = urllib.request.urlopen('https://github.com/Lightning3105/Legend-Of-Aiopa-RPG/commits/master') page = str(page.read()) ind = page.find('class="sha btn btn-outline"') latest = page[ind + 38:ind + 45] print(latest) #CHECK IF LATEST IS PROPER try: f = open("Saves/current.version", "rb") current = pickle.lo
ad(f) f.close() except: print("create new file") try: os.mkdir("Saves") except: pass f = open
("Saves/current.version", "wb") current = 0000 pickle.dump(current, f) f.close() print(current, "vs", latest) if current != latest: from os import remove try: remove("Update/download.zip") except: pass print("downloading latest") buttons = py.sprite.Group() buttons.add(Button("Update", (220, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "Y", centred=True)) buttons.add(Button("Ignore", (420, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "N", centred=True)) buttons.add(Button("Skip Update", (320, 300), 40, (100, 100, 100), (255, 255, 255), theFont, "S", centred=True)) labels = py.sprite.Group() labels.add(textLabel("An Update Is Available:", (320, 150), (255, 255, 255), theFont, 50, False, True)) labels.add(textLabel(str(str(current) + " ==> " + str(latest)), (320, 180), (255, 255, 255), theFont, 20, False, True)) while True: py.event.pump() v.screen.fill((20, 20, 20)) buttons.update() labels.update() for event in py.event.get(): if event.type == py.QUIT: sys.exit() elif event.type == py.MOUSEBUTTONDOWN: for button in buttons: if button.pressed(): id = button.ID if id == "Y": global tries tries = 0 download() return if id == "N": return if id == "S": f = open("Saves/current.version", "wb") current = latest pickle.dump(current, f) f.close() return py.display.flip() else: v.screen.fill((20, 20, 20)) t = textLabel("No Update!", (320, 250), (255, 0, 0), theFont, 70, False, True) v.current = current t2 = textLabel("current", (320, 300), (255, 200, 200), theFont, 50, True, True) t.update() t2.update() py.display.update() if __name__ == "__main__": py.time.wait(2000) def download(): global tries try: try: os.mkdir("Update") except: pass urllib.request.urlretrieve("https://github.com/Lightning3105/Legend-Of-Aiopa-RPG/archive/master.zip", "Update/download.zip", reporthook) f = open("Saves/current.version", "wb") current = latest pi
"""P
redicted Electoral Vote Count""" import re from madcow.util.http import getsoup from madcow.util.col
or import ColorLib from madcow.util import Module, strip_html class Main(Module): pattern = re.compile(r'^\s*(election|ev)\s*$', re.I) help = u'ev - current election 2008 vote prediction' baseurl = u'http://www.electoral-vote.com/' def init(self): if self.madcow is None: self.colorlib = ColorLib('ansi') else: self.colorlib = self.madcow.colorlib def colorize(self, color, key, val): return u'%s: %s' % (key, val) def render(self, node): pass def response(self, nick, args, kwargs): soup = getsoup(self.baseurl) out = [] for box in soup.find('div', 'score-box').findAll('div', 'box'): score = [] for key in 'name', 'score': val = strip_html(box.find('span', key).renderContents()).replace(u'\xa0', u'').strip() if key == 'name': if val == u'Obama': color = 'blue' elif val == 'Romney': color = 'red' else: color = None if color: val = self.colorlib.get_color(color, text=val) if val: score.append(val) if score: out.append(u'%s: %s' % tuple(score)) return u'%s: %s' % (nick, u', '.join(out)) #from IPython.Shell import IPShellEmbed as S; #S()()
hoices: - half - full ssl_send_empty_frags: description: - Enable/disable sending empty fragments to avoid attack on CBC IV. type: str choices: - enable - disable url_rewrite: description: - Enable/disable rewriting the URL. type: str choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure SSL servers. fortios_firewall_ssl_server: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" firewall_ssl_server: add_header_x_forwarded_proto: "enable" ip: "<your_own_value>" mapped_port: "5" name: "default_name_6" port: "7" ssl_algorithm: "high" ssl_cert: "<your_own_value> (source vpn.certificate.local.name)" ssl_client_renegotiation: "allow" ssl_dh_bits: "768" ssl_max_version: "tls-1.0" ssl_min_version: "tls-1.0" ssl_mode: "half" ssl_send_empty_frags: "enable" url_rewrite: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.http
s('off') else: fos.https('on') fos.login(host, username, password, verify
=ssl_verify) def filter_firewall_ssl_server_data(json): option_list = ['add_header_x_forwarded_proto', 'ip', 'mapped_port', 'name', 'port', 'ssl_algorithm', 'ssl_cert', 'ssl_client_renegotiation', 'ssl_dh_bits', 'ssl_max_version', 'ssl_min_version', 'ssl_mode', 'ssl_send_empty_frags', 'url_rewrite'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for i, elem in enumerate(data): data[i] = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def firewall_ssl_server(data, fos): vdom = data['vdom'] if 'state' in data and data['state']: state = data['state'] elif 'state' in data['firewall_ssl_server'] and data['firewall_ssl_server']: state = data['firewall_ssl_server']['state'] else: state = True firewall_ssl_server_data = data['firewall_ssl_server'] filtered_data = underscore_to_hyphen(filter_firewall_ssl_server_data(firewall_ssl_server_data)) if state == "present": return fos.set('firewall', 'ssl-server', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('firewall', 'ssl-server', mkey=filtered_data['name'], vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_firewall(data, fos): if data['firewall_ssl_server']: resp = firewall_ssl_server(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "firewall_ssl_server": { "required": False, "type": "dict", "default": None, "options": { "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "add_header_x_forwarded_proto": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "ip": {"required": False, "type": "str"}, "mapped_port": {"required": False, "type": "int"}, "name": {"required": True, "type": "str"}, "port": {"required": False, "type": "int"}, "ssl_algorithm": {"required": False, "type": "str", "choices": ["high", "medium", "low"]}, "ssl_cert": {"required": False, "type": "str"}, "ssl_client_renegotiation": {"required": False, "type": "str", "choices": ["allow", "deny", "secure"]}, "ssl_dh_bits": {"required": False, "type": "str", "choices": ["768", "1024", "1536", "2048"]}, "ssl_max_version": {"required": False, "type": "str", "choices": ["tls-1.0", "tls-1.1", "tls-1.2"]}, "ssl_min_version": {"required": False, "type": "str", "choices": ["tls-1.0", "tls-1.1", "tls-1.2"]}, "ssl_mode": {"required": False, "type": "str", "choices": ["half", "full"]}, "ssl_send_empty_frags": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "url_rewrite": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in 
module.params and module.params['host'] is not None and \
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2015, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import math import numpy import random from nupic.encoders.utils import bitsToString class LanguageEncoder(object): """ An encoder converts a value to a sparse distributed representation (SDR). The Encoder superclass implements: - bitmapToSDR() returns binary SDR of a bitmap - bitmapFromSDR() returns the bitmap rep of an SDR - pprintHeader() prints a header describing the encoding to the terminal - pprint() prints an encoding to the terminal - decodedToStr() returns pretty print string of decoded SDR Methods/properties that must be implemented by subclasses: - encode() returns a numpy array encoding the input - decode() returns a list of strings representing a decoded SDR - getWidth() returns the output width, in bits - getDescription() returns a dict describing the encoded output """ def __init__(self, n=16384, w=328): """The SDR dimensions are standard for Cortical.io fingerprints.""" self.n = n self.w = w self.targetSparsity = 5.0 def encode(self, inputText): """ Encodes inputText and puts the encoded value into the numpy output array, which is a 1-D array of length returned by getWidth(). Note: The numpy output array is reused, so clear it before updating it. @param inputData (str) Data to encode. This should be validated by the encoder subclass. @param output (numpy) 1-D array of same length returned by getWidth(). """ raise NotImplementedError def encodeIntoArray(self, inputText, output): """ Encodes inputData and puts the encoded value into the numpy output array, which is a 1-D array of length returned by getWidth(). Note: The numpy output array is reused, so clear it before updating it. @param inputData Data to encode. This should be validated by the encoder. @param output numpy 1-D array of same length returned by getWidth() """ raise NotImplementedError def decode(self, encoded): """ Decodes the SDR encoded. See subclass imp
lementation for details; the decoding approaches and return objects vary depending on the encoder. To pretty print the return value from this method, use decodedToStr(). @param encoded (numpy) Encoded 1-d array (an SDR).
""" raise NotImplementedError def getWidth(self): """ Get an encoding's output width in bits. See subclass implementation for details. """ raise NotImplementedError() def getDescription(self): """ Returns a tuple, each containing (name, offset). The name is a string description of each sub-field, and offset is the bit offset of the sub-field for that encoder; should be 0. """ raise NotImplementedError() def bitmapToSDR(self, bitmap): """Convert SDR encoding from bitmap to binary numpy array.""" sdr = numpy.zeros(self.n) sdr[bitmap] = 1 return sdr def bitmapFromSDR(self, sdr): """Convert SDR encoding from binary numpy array to bitmap.""" return numpy.array([i for i in range(len(sdr)) if sdr[i]==1]) def encodeRandomly(self, text): """Return a random bitmap representation of the sample.""" random.seed(sample) return numpy.sort(random.sample(xrange(self.n), self.w)) def compare(self, bitmap1, bitmap2): """ Compare bitmaps, returning a dict of similarity measures. @param bitmap1 (list) Indices of ON bits. @param bitmap2 (list) Indices of ON bits. @return distances (dict) Key-values of distance metrics and values. Example return dict: { "cosineSimilarity": 0.6666666666666666, "euclideanDistance": 0.3333333333333333, "jaccardDistance": 0.5, "overlappingAll": 6, "overlappingLeftRight": 0.6666666666666666, "overlappingRightLeft": 0.6666666666666666, "sizeLeft": 9, "sizeRight": 9 } """ if not len(bitmap1) > 0 or not len(bitmap2) > 0: raise ValueError("Bitmaps must have ON bits to compare.") sdr1 = self.bitmapToSDR(bitmap1) sdr2 = self.bitmapToSDR(bitmap2) distances = { "sizeLeft": float(len(bitmap1)), "sizeRight": float(len(bitmap2)), "overlappingAll": float(len(numpy.intersect1d(bitmap1, bitmap2))), "euclideanDistance": numpy.linalg.norm(sdr1 - sdr2) } distances["overlappingLeftRight"] = (distances["overlappingAll"] / distances["sizeLeft"]) distances["overlappingRightLeft"] = (distances["overlappingAll"] / distances["sizeRight"]) distances["cosineSimilarity"] = (distances["overlappingAll"] / (math.sqrt(distances["sizeLeft"]) * math.sqrt(distances["sizeRight"]))) distances["jaccardDistance"] = 1 - (distances["overlappingAll"] / len(numpy.union1d(bitmap1, bitmap2))) return distances def sparseUnion(self, counts): """ Bits from the input patterns are unionized and then sparsified. @param counts (Counter) A count of the ON bits for the union bitmap. @return (list) A sparsified union bitmap. """ max_sparsity = int((self.targetSparsity / 100) * self.n) w = min(len(counts), max_sparsity) return [c[0] for c in counts.most_common(w)] def pprintHeader(self, prefix=""): """ Pretty-print a header that labels the sub-fields of the encoded output. This can be used in conjuction with pprint(). @param prefix printed before the header if specified """ print prefix, description = self.getDescription() + [("end", self.getWidth())] for i in xrange(len(description) - 1): name = description[i][0] width = description[i+1][1] - description[i][1] formatStr = "%%-%ds |" % width if len(name) > width: pname = name[0:width] else: pname = name print formatStr % pname, print print prefix, "-" * (self.getWidth() + (len(description) - 1)*3 - 1) def pprint(self, output, prefix=""): """ Pretty-print the encoded output using ascii art. 
@param output to print @param prefix printed before the header if specified """ print prefix, description = self.getDescription() + [("end", self.getWidth())] for i in xrange(len(description) - 1): offset = description[i][1] nextoffset = description[i+1][1] print "%s |" % bitsToString(output[offset:nextoffset]), print def decodedToStr(self, decodeResults): """ Return a pretty print string representing the return value from decode(). """ (fieldsDict, fieldsOrder) = decodeResults desc = '' for fieldName in fieldsOrder: (ranges, rangesStr) = fieldsDict[fieldName] if len(desc) > 0: desc += ", %s:" % (fieldName) else: desc += "%s:" % (fieldName) desc += "[%s]" % (rangesStr) return desc
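

if __name__ == "__main__":
  # Illustrative sketch (not part of the original module): the distance
  # metrics compare() produces for two small, overlapping bitmaps. The
  # abstract encode()/decode() methods are not needed for this.
  encoder = LanguageEncoder(n=16, w=4)
  distances = encoder.compare([0, 1, 2], [1, 2, 3])
  print distances["overlappingAll"]   # 2.0 ON bits in common
  print distances["jaccardDistance"]  # 1 - 2/4 = 0.5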
# Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os

from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
import six

import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
import nova.privsep.libvirt
from nova import utils
from nova.virt.libvirt.volume import fs

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
VALID_SYSD_STATES = ["starting", "running", "degraded"]
SYSTEMCTL_CHECK_PATH = "/run/systemd/system"


_is_systemd = None


def is_systemd():
    """Checks if the host is running systemd"""
    global _is_systemd

    if _is_systemd is not None:
        return _is_systemd

    tmp_is_systemd = False

    if psutil.Process(1).name() == "systemd" or os.path.exists(
            SYSTEMCTL_CHECK_PATH):
        # NOTE(kaisers): exit code might be >1 in theory but in practice this
        # is hard coded to 1. Due to backwards compatibility and systemd
        # CODING_STYLE this is unlikely to change.
        sysdout, sysderr = processutils.execute("systemctl",
                                                "is-system-running",
                                                check_exit_code=[0, 1])
        for state in VALID_SYSD_STATES:
            if state == sysdout.strip():
                tmp_is_systemd = True
                break

    _is_systemd = tmp_is_systemd
    return _is_systemd


def mount_volume(volume, mnt_base, configfile=None):
    """Wraps execute calls for mounting a Quobyte volume"""
    fileutils.ensure_tree(mnt_base)

    # Note(kaisers): with systemd this requires a separate CGROUP to
    # prevent Nova service stop/restarts from killing the mount.
    if is_systemd():
        LOG.debug('Mounting volume %s at mount point %s via systemd-run',
                  volume, mnt_base)
        nova.privsep.libvirt.systemd_run_qb_mount(volume, mnt_base,
                                                  cfg_file=configfile)
    else:
        LOG.debug('Mounting volume %s at mount point %s via mount.quobyte',
                  volume, mnt_base)
        nova.privsep.libvirt.unprivileged_qb_mount(volume, mnt_base,
                                                   cfg_file=configfile)
    LOG.info('Mounted volume: %s', volume)


def umount_volume(mnt_base):
    """Wraps execute calls for unmounting a Quobyte volume"""
    try:
        if is_systemd():
            nova.privsep.libvirt.umount(mnt_base)
        else:
            nova.privsep.libvirt.unprivileged_umount(mnt_base)
    except processutils.ProcessExecutionError as exc:
        if 'Device or resource busy' in six.text_type(exc):
            LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
        else:
            LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
                          mnt_base)


def validate_volume(mount_path):
    """Determine if the volume is a valid Quobyte mount.

    Runs a number of tests to be sure this is a (working) Quobyte mount
    "
"" parti
tions = psutil.disk_partitions(all=True) for p in partitions: if mount_path != p.mountpoint: continue if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte": statresult = os.stat(mount_path) # Note(kaisers): Quobyte always shows mount points with size 0 if statresult.st_size == 0: # client looks healthy return # we're happy here else: msg = (_("The mount %(mount_path)s is not a " "valid Quobyte volume. Stale mount?") % {'mount_path': mount_path}) raise nova_exception.StaleVolumeMount(msg, mount_path=mount_path) else: msg = (_("The mount %(mount_path)s is not a valid " "Quobyte volume according to partition list.") % {'mount_path': mount_path}) raise nova_exception.InvalidVolume(msg) msg = (_("No matching Quobyte mount entry for %(mount_path)s" " could be found for validation in partition list.") % {'mount_path': mount_path}) raise nova_exception.InvalidVolume(msg) class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver): """Class implements libvirt part of volume driver for Quobyte.""" def _get_mount_point_base(self): return CONF.libvirt.quobyte_mount_point_base def get_config(self, connection_info, disk_info): conf = super(LibvirtQuobyteVolumeDriver, self).get_config(connection_info, disk_info) data = connection_info['data'] conf.source_protocol = SOURCE_PROTOCOL conf.source_type = SOURCE_TYPE conf.driver_cache = DRIVER_CACHE conf.driver_io = DRIVER_IO conf.driver_format = data.get('format', 'raw') conf.source_path = self._get_device_path(connection_info) return conf @utils.synchronized('connect_qb_volume') def connect_volume(self, connection_info, instance): """Connect the volume.""" if is_systemd(): LOG.debug("systemd detected.") else: LOG.debug("No systemd detected.") data = connection_info['data'] quobyte_volume = self._normalize_export(data['export']) mount_path = self._get_mount_path(connection_info) try: validate_volume(mount_path) mounted = True except nova_exception.StaleVolumeMount: mounted = False LOG.info('Fixing previous mount %s which was not ' 'unmounted correctly.', mount_path) umount_volume(mount_path) except nova_exception.InvalidVolume: mounted = False if not mounted: mount_volume(quobyte_volume, mount_path, CONF.libvirt.quobyte_client_cfg) try: validate_volume(mount_path) except (nova_exception.InvalidVolume, nova_exception.StaleVolumeMount) as nex: LOG.error("Could not mount Quobyte volume: %s", nex) @utils.synchronized('connect_qb_volume') def disconnect_volume(self, connection_info, instance): """Disconnect the volume.""" mount_path = self._get_mount_path(connection_info) try: validate_volume(mount_path) except (nova_exception.InvalidVolume, nova_exception.StaleVolumeMount) as exc: LOG.warning("Could not disconnect Quobyte volume mount: %s", exc) else: umount_volume(mount_path) def _normalize_export(self, export): protocol = SOURCE_PROTOCOL + "://" if export.startswith(protocol): export = export[len(protocol):] return export
""" WSGI con
fig for astrology project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on th
is file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.prod") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
import os import sys import sqlite3 import logging
from tqdm import tqdm from pathlib import Path from whoosh.index import create_in, open_dir from whoosh.fields import Schema, TEXT, NUMERIC from whoosh.qparser import QueryParser from whoosh.spelling import ListCorrector from whoosh.highlight impor
t UppercaseFormatter logging.basicConfig(level=logging.INFO) if getattr(sys, 'frozen', False): APPLICATION_PATH = os.path.dirname(sys.executable) elif __file__: APPLICATION_PATH = os.path.dirname(__file__) PATH = APPLICATION_PATH PATH_DATA = Path(PATH) / 'data' FILE_DB = PATH_DATA / "data.db" class Searcher: def __init__(self): self.scope = 20 self.terms = set() self.index_path = "index" self.common_terms = set() self.schema = Schema( title=TEXT(stored=True), path=TEXT(stored=True), page=NUMERIC(stored=True), content=TEXT(stored=True)) self.ix = None self.index_files = False if not os.path.exists(self.index_path): os.mkdir(self.index_path) self.ix = create_in(self.index_path, self.schema) self.index_files = True else: self.ix = open_dir(self.index_path) self.writer = self.ix.writer() self.read() self.writer.commit() self.searcher = self.ix.searcher() self.corrector = ListCorrector(sorted(list(self.common_terms))) self.parser = QueryParser("content", self.ix.schema) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.searcher.close() def search(self, term): results = [] suggestions = [term]+(self.corrector.suggest(term, limit=5)) for t in suggestions: query = self.parser.parse(t) query_res = self.searcher.search(query, limit=100) query_res.fragmenter.maxchars = 300 query_res.fragmenter.surround = 100 query_res.formatter = UppercaseFormatter() results.append((t, query_res)) return results def read(self): logging.info("Indexing") con = sqlite3.connect(str(FILE_DB)) cur = con.cursor() cur.execute(r"SELECT BOOKS.NAME, PAGE, CONTENT " r"FROM TEXT, BOOKS " r"WHERE BOOK = BOOKS.ID " r"ORDER BY BOOKS.NAME, PAGE") for row in tqdm(cur): book, page, content = row book, page, content = str(book), str(page), str(content) for i in content.split(' '): self.common_terms.add(i) if self.index_files: self.writer.add_document(title=book, content=content, path=book, page=page)
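

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): build or
    # open the index from data.db and run a query. The search term is a
    # hypothetical example; data.db must already exist under the data folder.
    with Searcher() as searcher:
        for term, results in searcher.search("example"):
            for hit in results:
                print(hit["title"], hit["page"])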
import avango import avango.script import avango.gua from examples_common.GuaVE import GuaVE class TimedRotate(avango.script.Script): TimeIn = avango.SFFloat() MatrixOut = avango.gua.SFMatrix4() def evaluate(self): self.MatrixOut.value = avango.gua.make_rot_mat( self.TimeIn.value * 2.0, 0.0, 1.0, 0.0) def start(): # setup scenegraph graph = avango.gua.nodes.SceneGraph(Name="scenegraph") loader = avango.gua.nodes.TriMeshLoader() monkey1 = loader.create_geometry_from_file( "monkey", "data/objects/monkey.obj", avango.gua.LoaderFlags.NORMALIZE_SCALE) monkey2 = loader.create_geometry_from_file( "monkey", "data/objects/monkey.obj", avango.gua.LoaderFlags.NORMALIZE_SCALE) monkey1.Material.value.set_uniform( "Color", avango.gua.Vec4(1.0, 0.766, 0.336, 1.0)) monkey1.Material.value.set_uniform("Roughness", 0.3) monkey1.Material.value.set_uniform("Metalness", 1.0) monkey2.Material.value.set_uniform( "Color", avango.gua.Vec4(1.0, 0.266, 0.136, 1.0)) monkey2.Material.value.set_uniform("Roughness", 0.6) monkey2.Material.value.set_uniform("Metalness", 0.0) transform1 = avango.gua.nodes.TransformNode(Children=[monkey1]) transform2 = avango.gua.nodes.TransformNode( Transform=avango.gua.make_trans_mat(-0.5, 0.0, 0.0), Children=[monkey2]) light = avango.gua.nodes.LightNode( Type=avango.gua.LightType.POINT, Name="light", Color=avango.gua.Color(1.0, 1.0, 1.0), Brightness=100.0, Transform=(avango.gua.make_trans_mat(1, 1, 5) * avango.gua.make_scale_mat(30, 30, 30))) size = avango.gua.Vec2ui(1024, 768) window = avango.gua.nodes.GlfwWindow(Size=size, LeftResolution=size) avango.gua.register_window("window", window) cam = avango.gua.nodes.CameraNode( LeftScreenPath="/screen", SceneGraph="scenegraph", Resolution=size, OutputWindowName="window", Transform=avango.gua.make_trans_mat(0.0, 0.0, 3.5)) res_pass = avango.gua.nodes.ResolvePassDescription() res_pass.EnableSSAO.value = True res_pass.SSAOIntensity.value = 4.0 res_pass.SSAOFalloff.value = 10.0 res_pass.SSAORadius.value = 7.0 #res_pass.EnableScreenSpaceShadow.value = True res_pass.EnvironmentLightingColor.value = avango.gua.Color(0.1, 0.1, 0.1) res_pass.ToneMappingMode.value = avango.gua.ToneMappingMode.UNCHARTED res_pass.Exposure.value = 1.0 res_pass.BackgroundColor.value = avango.gua.Color(0.45, 0.5, 0.6) anti_aliasing = avango.gua.nodes.SSAAPassDescription() pipeline_description = avango.gua.nodes.PipelineDescription( Passes=[ avango.gua.
nodes.TriMeshPassDescription(), avango.gua.nodes.LightVisibilityPassDescription(), res_pass,
anti_aliasing, ]) cam.PipelineDescription.value = pipeline_description screen = avango.gua.nodes.ScreenNode( Name="screen", Width=2, Height=1.5, Children=[cam]) graph.Root.value.Children.value = [transform1, transform2, light, screen] #setup viewer viewer = avango.gua.nodes.Viewer() viewer.SceneGraphs.value = [graph] viewer.Windows.value = [window] monkey_updater = TimedRotate() timer = avango.nodes.TimeSensor() monkey_updater.TimeIn.connect_from(timer.Time) transform1.Transform.connect_from(monkey_updater.MatrixOut) guaVE = GuaVE() guaVE.start(locals(), globals()) viewer.run() if __name__ == '__main__': start()
#!/usr/bin/python # -*- coding: utf-8 -*- """ The MIT license Copyright (c) 2010 Jonas Nockert Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --- XEP-0012: Last Activity handler """ from datetime import datetime import time from twisted.words.protocols.jabber.xmlstream import toResponse from wokkel.subprotocols import IQHandlerMixin, XMPPHandler NS_LAST_ACTIVITY =
'jabber:iq:last' LAST_ACTIVITY = '/iq[@type="get"]/query[@xmlns="' + NS_LAST_ACTIVITY +'"]' class LastActivityHandler(XMPPHandler, IQHandlerMixin): """ XMPP subprotocol handler for Last Activity extension. This protocol is described in U{XEP-0012<http://www.xmpp.org/extensions/xep-0012.html>}. """ iqHandlers = {LAST_ACTIVITY: 'onLastActivityGet'} def __init__(self, get_last=lambda: 0):
self.get_last = get_last def connectionInitialized(self): self.xmlstream.addObserver(LAST_ACTIVITY, self.handleRequest) def onLastActivityGet(self, iq): """Handle a request for last activity.""" response = toResponse(iq, 'result') # TODO: Replace 'hello world!' string with something proper. query = response.addElement((NS_LAST_ACTIVITY, 'query'), content="Hello world!") query['seconds'] = str(self.get_last()) self.send(response) iq.handled = True
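

# Usage sketch (illustrative, not part of the original module): attaching the
# handler to a wokkel XMPP client. The JID, password, and uptime closure below
# are hypothetical assumptions.
#
#     from twisted.words.protocols.jabber.jid import JID
#     from wokkel.client import XMPPClient
#
#     client = XMPPClient(JID("bot@example.org"), "secret")
#     start = time.time()
#     handler = LastActivityHandler(get_last=lambda: int(time.time() - start))
#     handler.setHandlerParent(client)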
import numpy as np
import pytest

from nilabels.tools.image_colors_manipulations.relabeller import relabeller, permute_labels, erase_labels, \
    assign_all_other_labels_the_same_value, keep_only_one_label, relabel_half_side_one_label


def test_relabeller_basic():
    data = np.array(range(10)).reshape(2, 5)
    relabelled_data = relabeller(data, range(10), range(10)[::-1])
    np.testing.assert_array_equal(relabelled_data, np.array(range(10)[::-1]).reshape(2, 5))


def test_relabeller_one_element():
    data = np.array(range(10)).reshape(2, 5)
    relabelled_data = relabeller(data, 0, 1, verbose=1)
    # use a real copy: a bare data[:] slice is a view, so writing into it
    # would silently rewrite the test input as well
    expected_output = data.copy()
    expected_output[0, 0] = 1
    np.testing.assert_array_equal(relabelled_data, expected_output)


def test_relabeller_one_element_not_in_array():
    data = np.array(range(10)).reshape(2, 5)
    relabelled_data = relabeller(data, 15, 1, verbose=1)
    np.testing.assert_array_equal(relabelled_data, data)


def test_relabeller_wrong_input():
    data = np.array(range(10)).reshape(2, 5)
    with np.testing.assert_raises(IOError):
        relabeller(data, [1, 2], [3, 4, 4])


def test_permute_labels_invalid_permutation():
    invalid_permutation = [[3, 3, 3], [1, 1, 1]]
    with pytest.raises(IOError):
        permute_labels(np.zeros([3, 3]), invalid_permutation)


def test_permute_labels_valid_permutation():
    data = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    valid_permutation = [[1, 2, 3], [1, 3, 2]]
    perm_data = permute_labels(data, valid_permutation)
    expected_data = np.array([[1, 3, 2], [1, 3, 2], [1, 3, 2]])
    np.testing.assert_equal(perm_data, expected_data)


def test_erase_label_simple():
    data = np.array(range(10)).reshape(2, 5)
    data_erased_1 = erase_labels(data, 1)
    expected_output = data.copy()
    expected_output[0, 1] = 0
    np.testing.assert_array_equal(data_erased_1, expected_output)


def test_assign_all_other_labels_the_same_values_simple():
    data = np.array(range(10)).reshape(2, 5)
    data_erased_1 = erase_labels(data, 1)
    data_labels_to_keep = assign_all_other_labels_the_same_value(data, range(2, 10), same_value_label=0)
    np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)


def test_assign_all_other_labels_the_same_values_single_value():
    data = np.array(range(10)).reshape(2, 5)
    data_erased_1 = np.zeros_like(data)
    data_erased_1[0, 1] = 1
    data_labels_to_keep = assign_all_other_labels_the_same_value(data, 1, same_value_label=0)
    np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)


def test_keep_only_one_label_label_simple():
    data = np.array(range(10)).reshape(2, 5)
    new_data = keep_only_one_label(data, 1)
    expected_data = np.zeros([2, 5])
    expected_data[0, 1] = 1
    np.testing.assert_array_equal(new_data, expected_data)


def test_keep_only_one_label_label_not_present():
    data = np.array(range(10)).reshape(2, 5)
    new_data = keep_only_one_label(data, 120)
    np.testing.assert_array_equal(new_data, data)


def test_relabel_half_side_one_label_wrong_input_shape():
    data = np.array(range(10)).reshape(2, 5)
    with np.testing.assert_raises(IOError):
        relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1],
                                    side_to_modify='above', axis='x', plane_intercept=2)


def test_relabel_half_side_one_label_wrong_input_side():
    data = np.array(range(27)).reshape(3, 3, 3)
    with np.testing.assert_raises(IOError):
        relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1],
                                    side_to_modify='spam', axis='x', plane_intercept=2)


def test_relabel_half_side_one_label_wrong_input_axis():
    data = np.array(range(27)).reshape(3, 3, 3)
    with np.testing.assert_raises(IOError):
        relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1],
                                    side_to_modify='above', axis='spam', plane_intercept=2)


def test_relabel_half_side_one_label_simple():
    # happy-path test, so the misleading 'wrong_input' suffix is dropped from its name
    data = np.array(range(3 ** 3)).reshape(3, 3, 3)
    # Z above
    new_data = relabel_half_side_one_label(data, label_old=1, label_new=100,
                                           side_to_modify='above', axis='z', plane_intercept=1)
    expected_data = data.copy()
    expected_data[0, 0, 1] = 100
    np.testing.assert_array_equal(new_data, expected_data)
    # Z below
    new_data = relabel_half_side_one_label(data, label_old=3, label_new=300,
                                           side_to_modify='below', axis='z', plane_intercept=2)
    expected_data = data.copy()
    expected_data[0, 1, 0] = 300
    np.testing.assert_array_equal(new_data, expected_data)
    # Y above
    new_data = relabel_half_side_one_label(data, label_old=8, label_new=800,
                                           side_to_modify='above', axis='y', plane_intercept=1)
    expected_data = data.copy()
    expected_data[0, 2, 2] = 800
    np.testing.assert_array_equal(new_data, expected_data)
    # Y below
    new_data = relabel_half_side_one_label(data, label_old=6, label_new=600,
                                           side_to_modify='below', axis='y', plane_intercept=2)
    expected_data = data.copy()
    expected_data[0, 2, 0] = 600
    np.testing.assert_array_equal(new_data, expected_data)
    # X above
    new_data = relabel_half_side_one_label(data, label_old=18, label_new=180,
                                           side_to_modify='above', axis='x', plane_intercept=1)
    expected_data = data.copy()
    expected_data[2, 0, 0] = 180
    np.testing.assert_array_equal(new_data, expected_data)
    # X below
    new_data = relabel_half_side_one_label(data, label_old=4, label_new=400,
                                           side_to_modify='below', axis='x', plane_intercept=2)
    expected_data = data.copy()
    expected_data[0, 1, 1] = 400
    np.testing.assert_array_equal(new_data, expected_data)


if __name__ == '__main__':
    test_relabeller_basic()
    test_relabeller_one_element()
    test_relabeller_one_element_not_in_array()
    test_relabeller_wrong_input()
    test_permute_labels_invalid_permutation()
    test_permute_labels_valid_permutation()
    test_erase_label_simple()
    test_assign_all_other_labels_the_same_values_simple()
    test_assign_all_other_labels_the_same_values_single_value()
    test_keep_only_one_label_label_simple()
    test_keep_only_one_label_label_not_present()
    test_relabel_half_side_one_label_wrong_input_shape()
    test_relabel_half_side_one_label_wrong_input_side()
    test_relabel_half_side_one_label_wrong_input_axis()
    test_relabel_half_side_one_label_simple()
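

# Editor's note (illustration, not part of the original test suite): the
# data.copy() calls above matter because a bare [:] slice of a NumPy array is
# a view that aliases the original buffer. A minimal sketch:
def _view_vs_copy_demo():
    a = np.arange(4)
    v = a[:]           # view: shares memory with a
    v[0] = 99
    assert a[0] == 99  # the write through the view reached a
    c = a.copy()       # independent copy
    c[1] = 7
    assert a[1] == 1   # a is untouched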
= 0.3048 MILE_PER_KM = 0.621371 DEFAULT_PORT = '/dev/ttyS0' DEBUG_READ = 0 def logmsg(level, msg): syslog.syslog(level, 'ws1: %s' % msg) def logdbg(msg): logmsg(syslog.LOG_DEBUG, msg) def loginf(msg): logmsg(syslog.LOG_INFO, msg) def logerr(msg): logmsg(syslog.LOG_ERR, msg) class WS1Driver(weewx.drivers.AbstractDevice): """weewx driver that communicates with an ADS-WS1 station port - serial port [Required. Default is /dev/ttyS0] max_tries - how often to retry serial communication before giving up [Optional. Default is 5] retry_wait - how long to wait, in seconds, before retrying after a failure [Optional. Default is 10] """ def __init__(self, **stn_dict): self.port = stn_dict.get('port', DEFAULT_PORT) self.max_tries = int(stn_dict.get('max_tries', 5)) self.retry_wait = int(stn_dict.get('retry_wait', 10)) self.last_rain = None loginf('driver version is %s' % DRIVER_VERSION) loginf('using serial port %s' % self.port) global DEBUG_READ DEBUG_READ = int(stn_dict.get('debug_read', DEBUG_READ)) self.station = Station(self.port) self.station.open() def closePort(self): if self.station is not None: self.station.close() self.station = None @property def hardware_name(self): return "WS1" def genLoopPackets(self): while True: packet = {'dateTime': int(time.time() + 0.5), 'usUnits': weewx.US} readings = self.station.get_readings_with_retry(self.max_tries, self.retry_wait) data = Station.parse_readings(readings) packet.update(data) self._augment_packet(packet) yield packet def _augment_packet(self, packet): # calculate the rain delta from rain total if self.last_rain is not None: packet['rain'] = packet['long_term_rain'] - self.last_rain else: packet['rain'] = None self.last_rain = packet['long_term_rain'] # no wind direction when wind speed is zero if 'windSpeed' in packet and not packet['windSpeed']: packet['windDir'] = None class Station(object): def __init__(self, port): self.port = port self.baudrate = 2400 self.timeout = 3 self.serial_port = None def __enter__(self): self.open() return self def __exit__(self, _, value, traceback): self.close() def open(self): logdbg("open serial port %s" % self.port) self.serial_port = serial.Serial(self.port, self.baudrate, timeout=self.timeout) def close(self): if self.serial_port is not None: logdbg("close serial port %s" % self.port) self.serial_port.close() self.serial_port = None # FIXME: use either CR or LF as line terminator. apparently some ws1 # hardware occasionally ends a line with only CR instead of the standard # CR-LF, resulting in a line that is too long. 
def get_readings(self): buf = self.serial_port.readline() if DEBUG_READ: logdbg("bytes: '%s'" % ' '.join(["%0.2X" % ord(c) for c in buf])) buf = buf.strip() return buf def get_readings_with_retry(self, max_tries=5, retry_wait=10): for ntries in range(0, max_tries): try: buf = self.get_readings() Station.validate_string(buf) return buf except (serial.serialutil.SerialException, weewx.WeeWxIOError), e: loginf("Failed attempt %d of %d to get readings: %s" % (ntries + 1, max_tries, e)) time.sleep(retry_wait) else: msg = "Max retries (%d) exceeded for readings" % max_tries logerr(msg) raise weewx.RetriesExceeded(msg) @staticmethod def validate_string(buf): if len(buf) != 50: raise weewx.WeeWxIOError("Unexpected buffer length %d" % len(buf)) if buf[0:2] != '!!': raise weewx.WeeWxIOError("Unexpected header bytes '%s'" % buf[0:2]) return buf @staticmethod def parse_readings(raw): """WS1 station emits data in PeetBros format: http://www.peetbros.com/shop/custom.aspx?recid=29 Each line has 50 characters - 2 header bytes and 48 data bytes: !!000000BE02EB000027700000023A023A0025005800000000 SSSSXXDDTTTTLLLLPPPPttttHHHHhhhhddddmmmmRRRRWWWW SSSS - wind speed (0.1 kph) XX - wind direction calibration DD - wind direction (0-255) TTTT - outdoor temperature (0.1 F) LLLL - long term rain (0.01 in) PPPP - pressure (0.1 mbar) tttt - indoor temperature (0.1 F) HHHH - outdoor humidity (0.1 %) hhhh - indoor humidity (0.1 %) dddd - date (day of year) mmmm - time (minute of day) RRRR - daily rain (0.01 in) WWWW - one minute wind average (0.1 kph) """ # FIXME: peetbros could be 40 bytes or 44 bytes, what about ws1? # FIXME: peetbros uses two's complement for temp, what about ws1? # FIXME: for ws1 is the pressure reading 'pressure' or 'barometer'? buf = raw[2:] data = dict() data['windSpeed'] = Station._decode(buf[0:4], 0.1 * MILE_PER_KM) # mph data['windDir'] = Station._decode(buf[6:8], 1.411764) # compass deg data['outTemp'] = Station._decode(buf[8:12], 0.1) # degree_F data['long_term_rain'] = Station._decode(buf[12:16], 0.01) # inch data['pressure'] = Station._decode(buf[16:20], 0.1 * INHG_PER_MBAR) # inHg data['inTemp'] = Station._decode(buf[20:24], 0.1) # degree_F data['outHumidity'] = Station._decode(buf[24:28], 0.1) # percent data['inHumidity'] = Station._decode(buf[28:32], 0.1) # percent data['day_of_year'] = Station._decode(buf[32:36]) data['minute_of_day'] = Station._decode(buf[36:40]) data['daily_rain'] = Station._decode(buf[40:44], 0.01) # inch data['wind_average'] = Station._decode(buf[44:48], 0.1 * MILE_PER_KM) # mph return data @staticmethod d
ef _decode(s, multiplier=None, neg=False): v = None try: v = int(s, 16) if neg: bits = 4 * len(s) if v & (1 << (bits - 1)) != 0: v -= (1 << bits) if multiplier is not None: v *= multiplier except ValueError, e: if s !
= '----': logdbg("decode failed for '%s': %s" % (s, e)) return v class WS1ConfEditor(weewx.drivers.AbstractConfEditor): @property def default_stanza(self): return """ [WS1] # This section is for the ADS WS1 series of weather stations. # Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0 port = /dev/ttyUSB0 # The driver to use: driver = weewx.drivers.ws1 """ def prompt_for_settings(self): print "Specify the serial port on which the station is connected, for" print "example /dev/ttyUSB0 or /dev/ttyS0." port = self._prompt('port', '/dev/ttyUSB0') return {'port': port} # define a main entry point for basic testing of the station without weewx # engine and service overhead. invoke this as follows from the weewx root dir: # # PYTHONPATH=bin python bin/weewx/drivers/ws1.py if __name__ == '__main__': import optparse usage = """%prog [options] [--help]""" syslog.openlog('ws1', syslog.LOG_PID | syslog.LOG_CONS) syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG)) parser = optparse.OptionParser(usage=usage) parser.add_option('--version', dest='version', action='store_true', help='display driver version') parser.add_option('--port', dest='port', metavar='PORT', help='serial port to which the station is con
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
import uuid, pprint

def generate(data):
    gdata = []
    for grade in range(1, 4):
        for clazz in range(1, 10):
            # skip only the one (grade, class) pair that already exists; the
            # previous `grade != ... and clazz != ...` test wrongly skipped
            # every class sharing either the grade or the class number
            if not (grade == data['grade_number'] and clazz == data['class_number']):
                gdata.append("insert into classes(uuid, grade_number, class_number, school_uuid) values('%s', %d, %d, '%s');" % (unicode(uuid.uuid4()), grade, clazz, data['school_uuid']))
    return gdata

def main():
    config = {'user': 'root', 'passwd': 'oseasy_db', 'db': 'banbantong', 'use_unicode': True, 'charset': 'utf8'}
    conn = mdb.connect(**config)
    if not conn:
        return
    cursor = conn.cursor()
    cursor.execute('select grade_number, class_number, school_uuid from classes;')
    base = {}
    desc = cursor.description
    data = cursor.fetchone()
    if data is None:
        # nothing to clone from: the classes table is empty
        return
    for i, x in enumerate(data):
        base[desc[i][0]] = x
    moreData = generate(base)
    #cursor.executemany('insert into classes(uuid, grade_number, class_number, school_uuid) values(%s, %d, %d, %s)', moreData)
    for sql in moreData:
        cursor.execute(sql)
    conn.commit()
    cursor.close()
    conn.close()

if __name__ == "__main__":
    main()
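

# Editor's sketch (not in the original script): the same insert without
# hand-built SQL strings. MySQLdb quotes every parameter itself and uses %s
# placeholders for all types, which is also why the commented-out
# executemany() above, with its %d placeholders, would fail.
def generate_rows(data):
    rows = []
    for grade in range(1, 4):
        for clazz in range(1, 10):
            if not (grade == data['grade_number'] and clazz == data['class_number']):
                rows.append((unicode(uuid.uuid4()), grade, clazz, data['school_uuid']))
    return rows

# usage inside main(), instead of the execute() loop:
#   cursor.executemany(
#       "insert into classes(uuid, grade_number, class_number, school_uuid) "
#       "values(%s, %s, %s, %s)", generate_rows(base))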
#!/usr/bin/env python # coding=utf-8 import errno import os import sys import fileinput import string import logging import traceback import hashlib import time import re from datetime import date, timedelta import datetime from subprocess import call import redis from datasource import DataSource class Items(DataSource): def __init__(self, redisClientManager, config, act): DataSource.__init__(self, config, act) self.redisClientManager = redisClientManager self.downloadedDir = "" self.key = "" if os.path.exists(self.dir + "/downloaded.txt"): with open(self.dir + "/downloaded.txt", 'r') as content_file: self.downloadedDir = content_file.read() def saveDownloadedDir(self, dir): self.downloadedDir = dir with open(self.dir + "/downloaded.txt", "w") as text_file: text_file.write(dir) def isOkFloatString(self, value): for
c in value: if c == '.': continue if ord(c) <48 or ord(c) > 57: return False return True def download(self): try: cmd = "rm -rf " + self.dir + "/*" call(cmd, shell=True) cmd = "hadoop fs -get " + self.download_url + " " + self.dir logging.info("[" + self.name + "]" + "
 Downloading file:" + self.download_url)
            retcode = call(cmd, shell=True)
            if retcode != 0:
                logging.error("Child was terminated by signal:" + str(retcode) + " for cmd:" + cmd)
                return False
            else:
                self.saveDownloadedDir(self.datedir)
                return True
        except:
            tb = traceback.format_exc()
            logging.error("Some error occurred:\n" + tb)
            return False

    def __parseImport(self, filename, name):
        file = open(filename, 'r')
        count = 0
        ff = name.split('_')
        prefix = self.config["prefix"] + ":"
        while 1:
            lines = file.readlines(10000)
            if not lines:
                break
            for line in lines:
                line = line.strip()
                if count % 100000 == 0 and count != 0:
                    logging.info("[" + self.name + "]" + str(count) + " lines parsed and imported to redis for file:" + filename)
                count = count + 1
                #ss = re.split(r'\t+', line.rstrip('\t'))
                line = line.rstrip("\n")
                ss = line.split("\t")
                if len(ss) != 11:
                    # a malformed dump is fatal: log a proper message and stop
                    logging.error("filename:" + filename + ", line:" + str(count) + ", expected 11 fields but got " + str(len(ss)))
                    exit(1)
                #poi_id = ss[0]
                # discard lines whose poi_id contains non-ASCII (garbled) characters
                poi_id = ss[0]
                if not all(ord(c) < 128 for c in poi_id):
                    logging.error("[" + self.name + "]Discard invalid line:" + line + "\n")
                    continue
                if len(poi_id) > 50:
                    logging.error("filename:" + filename + ", line:" + str(count) + ", poi_id too long!")
                    continue
                key = prefix + poi_id
                value = ""
                i = 1
                tag = 0
                while i < len(ss):
                    # if not self.isOkFloatString(ss[i]):
                    #     tag = 1
                    #     break
                    if i == 1:
                        value = ss[i]
                    else:
                        value = value + ":" + ss[i]
                    i = i + 1
                # if tag == 1:
                #     logging.error("filename:" + filename + ", line:" + str(count) + ", not all nums are right")
                #     continue
                clients = self.redisClientManager.getClientsByShardKey("items", poi_id)
                self.cnt += 1
                if self.key != key:
                    for client in clients:
                        client.pipeline.delete(key)
                    self.key = key
                for client in clients:
                    client.pipeline.sadd(key, value)
                    client.IncrPipeCount()
                    if client.pipecount >= 100:
                        client.commit()
        file.close()
        return True

    def parseImport(self):
        fs = os.listdir(self.dir)
        for file in fs:
            if file == "status.txt" or file == "downloaded.txt":
                continue
            while True:
                try:
                    logging.info("[" + self.name + "]Start parsing import data from file:" + file)
                    self.__parseImport(self.dir + "/" + file, file)
                    self.redisClientManager.commitClients("items")
                    break
                except:
                    tb = traceback.format_exc()
                    logging.error("Some error occurred while parsing import file:" + file + "\n" + tb)
                    time.sleep(60)
        return True

    def __delete(self, filename):
        fi = open(filename, 'r')
        count = 0
        prefix = self.config["prefix"] + ":"
        while 1:
            lines = fi.readlines(10000)
            if not lines:
                break
            for line in lines:
                line = line.strip()
                if count % 100000 == 0 and count != 0:
                    logging.info("[" + self.name + "]" + str(count) + " lines parsed and deleted from redis for file:" + filename)
                count = count + 1
                ss = re.split(r'\t+', line.rstrip('\t'))
                #poi_id = ss[0]
                poi_id = ss[0]
                if not all(ord(c) < 128 for c in poi_id):
                    logging.error("[" + self.name + "]Discard invalid line:" + line + "\n")
                    continue
                if len(poi_id) > 50:
                    logging.error("filename:" + filename + ", line:" + str(count) + ", poi_id too long!")
                    continue
                key = prefix + poi_id
                clients = self.redisClientManager.getClientsByShardKey("items", poi_id)
                for client in clients:
                    client.pipeline.delete(key)
                    client.IncrPipeCount()
                    if client.pipecount >= 100:
                        client.commit()
        fi.close()
        return True

    def delete(self):
        fs = os.listdir(self.dir)
        for fi in fs:
            if fi == "status.txt" or fi == "downloaded.txt":
                continue
            while True:
                try:
                    logging.info("[" + self.name + "]Start parsing delete data from file:" + fi)
                    self.__delete(self.dir + "/" + fi)
                    self.redisClientManager.commitClients("items")
                    break
                except:
                    tb = traceback.format_exc()
                    logging.error("Some error occurred while parsing delete file:" + fi + "\n" + tb)
                    time.sleep(60)
        return True

    def checkAvailable(self):
        try:
            if self.action == "import":
                yesterday = date.today() - timedelta(1)
                self.datedir = yesterday.strftime('%Y%m%d')
                #self.datedir = "."
                if self.datedir == self.downloadedDir:
                    return 0
            elif self.action == "delete":
                self.datedir = self.del_date
                if self.datedir == self.downloadedDir:
                    return 2
            self.download_url = self.config["url"].replace("${date}", self.datedir)
            donefile = self.config["checkfile"].replace("${date}", self.datedir)
            cmd = "hadoop fs -test -e " + donefile
            retcode = call(cmd, shell=True)
            if retcode == 0:
                return 1
            return 0
        except:
            tb = traceback.format_exc()
            logging.error("Some error occurred:\n" + tb)
            return 0
        return 0
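

# Editor's sketch (not part of the original class): the flush-every-100
# pipelining pattern used in __parseImport/__delete, shown in isolation with
# a plain redis-py client. Host and port are assumptions.
def bulk_sadd(key_value_pairs, batch_size=100):
    r = redis.StrictRedis(host='localhost', port=6379)
    pipe = r.pipeline(transaction=False)
    pending = 0
    for key, value in key_value_pairs:
        pipe.sadd(key, value)
        pending += 1
        if pending >= batch_size:
            # flush a full batch in one round trip
            pipe.execute()
            pending = 0
    if pending:
        pipe.execute()  # flush the remainder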
################################################
############################## # Copyright (c) 20
13-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RRngtools(RPackage): """This package contains a set of functions for working with Random Number Generators (RNGs). In particular, it defines a generic S4 framework for getting/setting the current RNG, or RNG data that are embedded into objects for reproducibility. Notably, convenient default methods greatly facilitate the way current RNG settings can be changed.""" homepage = "https://renozao.github.io/rngtools" url = "https://cran.r-project.org/src/contrib/rngtools_1.2.4.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/rngtools" version('1.2.4', '715967f8b3af2848a76593a7c718c1cd') depends_on('r-pkgmaker', type=('build', 'run')) depends_on('r-stringr', type=('build', 'run')) depends_on('r-digest', type=('build', 'run'))
"""Test the roon config flow.""" from homeassistant import config_entries, setup from homeassistant.components.roon.const import DOMAIN from homeassistant.const import CONF_HOST from tests.async_mock import patch from tests.common import MockConfigEntry class RoonApiMock: """Mock to handle returning tokens for testing the RoonApi.""" def __init__(s
elf, token): """Initialize.""" self._token = token @property def token(self): """Return the auth token from the api.""" return self._token def stop(self): # pylint: disable=no-self-use """Close down the api.""" return async def test_form_and_auth(hass): """Test we get the form.""" await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch( "homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT", 0, ), patch( "homeassistant.components.roon.config_flow.RoonApi", return_value=RoonApiMock("good_token"), ), patch( "homeassistant.components.roon.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.roon.async_setup_entry", return_value=True, ) as mock_setup_entry: await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "1.1.1.1"} ) result2 = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result2["type"] == "create_entry" assert result2["title"] == "Roon Labs Music Player" assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"} await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1 async def test_form_no_token(hass): """Test we handle no token being returned (timeout or not authorized).""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch( "homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT", 0, ), patch( "homeassistant.components.roon.config_flow.RoonApi", return_value=RoonApiMock(None), ): await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "1.1.1.1"} ) result2 = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result2["type"] == "form" assert result2["errors"] == {"base": "invalid_auth"} async def test_form_unknown_exception(hass): """Test we handle cannot connect error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.roon.config_flow.RoonApi", side_effect=Exception, ): await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "1.1.1.1"} ) result2 = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result2["type"] == "form" assert result2["errors"] == {"base": "unknown"} async def test_form_host_already_exists(hass): """Test we add the host if the config exists and it isn't a duplicate.""" MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass) await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch( "homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT", 0, ), patch( "homeassistant.components.roon.config_flow.RoonApi", return_value=RoonApiMock("good_token"), ), patch( "homeassistant.components.roon.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.roon.async_setup_entry", return_value=True, ) as mock_setup_entry: await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "1.1.1.1"} ) result2 = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result2["type"] == "create_entry" assert result2["title"] == "Roon Labs Music Player" assert result2["data"] == {"host": "1.1.1.1", "api_key": 
"good_token"} await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 2 async def test_form_duplicate_host(hass): """Test we don't add the host if it's a duplicate.""" MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass) await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"host": "existing_host"} ) assert result2["type"] == "form" assert result2["errors"] == {"base": "duplicate_entry"}
"""engine.SCons.Tool.aixf77 Tool-specific initialization for IBM Visual Age f77 Fortran compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHO
UT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CON
TRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/aixf77.py 2014/08/24 12:12:31 garyo" import os.path #import SCons.Platform.aix import f77 # It would be good to look for the AIX F77 package the same way we're now # looking for the C and C++ packages. This should be as easy as supplying # the correct package names in the following list and uncommenting the # SCons.Platform.aix_get_xlc() call the in the function below. packages = [] def get_xlf77(env): xlf77 = env.get('F77', 'xlf77') xlf77_r = env.get('SHF77', 'xlf77_r') #return SCons.Platform.aix.get_xlc(env, xlf77, xlf77_r, packages) return (None, xlf77, xlf77_r, None) def generate(env): """ Add Builders and construction variables for the Visual Age FORTRAN compiler to an Environment. """ path, _f77, _shf77, version = get_xlf77(env) if path: _f77 = os.path.join(path, _f77) _shf77 = os.path.join(path, _shf77) f77.generate(env) env['F77'] = _f77 env['SHF77'] = _shf77 def exists(env): path, _f77, _shf77, version = get_xlf77(env) if path and _f77: xlf77 = os.path.join(path, _f77) if os.path.exists(xlf77): return xlf77 return None # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
# -*- coding: utf-8 -*- from __future__ import unicode_literals import autocomplete_light from django.utils.encoding import force_text from .settings import USER_MODEL from .utils.module_loading import get_real_model_class class UserAutocomplete(autocomplete_light.AutocompleteModelBase): search_fields = [ '^first_name', 'last_name', 'username' ] mode
l = get_real_model_class(USER_MODEL) order_by = ['first_name', 'last_name'] # choice_template = 'django_documentos/user_choice_autocomplete.html' l
imit_choices = 10
    attrs = {
        # fixed typo: was 'data-autcomplete-minimum-characters', which the
        # widget JavaScript would silently ignore
        'data-autocomplete-minimum-characters': 0,
        'placeholder': 'Pessoa que irá assinar',  # "Person who will sign"
    }
    # widget_attrs = {'data-widget-maximum-values': 3}

    def choice_value(self, choice):
        """
        Return the pk of the choice by default.
        """
        return choice.pk

    def choice_label(self, choice):
        """
        Return the textual representation of the choice by default.
        """
        # return force_text("{}-{}".format(choice.pk, choice.get_full_name().title()))
        return force_text(choice.get_full_name().title())

    # def choice_label(self, choice):
    #     return choice.get_full_name().title()

    def choices_for_request(self):
        return super(UserAutocomplete, self).choices_for_request()


autocomplete_light.register(UserAutocomplete)
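

# Editor's sketch (not in the original module): consuming the registered
# autocomplete from a form. The form and field names are illustrative, and
# ChoiceWidget is the autocomplete_light v2 widget API that the register()
# call above implies.
from django import forms


class SignerForm(forms.Form):
    signer = forms.ModelChoiceField(
        queryset=get_real_model_class(USER_MODEL).objects.all(),
        widget=autocomplete_light.ChoiceWidget('UserAutocomplete'))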
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Muduo ''' FastSync ''' from setuptools import setup, find_packages setup( name='FastSync', version='0.2.0.3', packages=find_packages(), install_requires=[ 'requests',
'watchdog', 'pycrypto', 'future', 'web.py' ], entry_points={ 'console_scripts': [ 'fsnd = sync:sending', 'frcv = sync:receiving', ], }, license='Apache License', author='Muduo', author_email='imuduo@16
3.com', url='https://github.com/iMuduo/FastSync', description='Event driven fast synchronization tool', keywords=['sync'], )
covariance_type='full', random_state=rng).fit(X).bic(X) for covariance_type in ['tied', 'diag', 'spherical']: bic = GaussianMixture(n_components=n_components, covariance_type=covariance_type, random_state=rng).fit(X).bic(X) assert_almost_equal(bic_full, bic) def test_gaussian_mixture_aic_bic(): # Test the aic and bic criteria rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 3, 2 X = rng.randn(n_samples, n_features) # standard gaussian entropy sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi))) for cv_type in COVARIANCE_TYPE: g = GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=rng, max_iter=200) g.fit(X) aic = 2 * n_samples * sgh + 2 * g._n_parameters() bic = (2 * n_samples * sgh + np.log(n_samples) * g._n_parameters()) bound = n_features / np.sqrt(n_samples) assert (g.aic(X
) - aic) / n_samples < bound
        assert (g.bic(X) - bic) / n_samples < bound


def test_gaussian_mixture_verbose():
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng)
    n_components = rand_data.n_components
    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
                            random_state=rng, covariance_type=covar_type,
                            verbose=1)
        h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
                            random_state=rng, covariance_type=covar_type,
                            verbose=2)
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            g.fit(X)
            h.fit(X)
        finally:
            sys.stdout = old_stdout


@pytest.mark.filterwarnings('ignore:.*did not converge.*')
@pytest.mark.parametrize("seed", (0, 1, 2))
def test_warm_start(seed):
    random_state = seed
    rng = np.random.RandomState(random_state)
    n_samples, n_features, n_components = 500, 2, 2
    X = rng.rand(n_samples, n_features)

    # Assert that warm_start gives the same result for the same number of iter
    g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,
                        reg_covar=0, random_state=random_state,
                        warm_start=False)
    h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,
                        reg_covar=0, random_state=random_state,
                        warm_start=True)

    g.fit(X)
    score1 = h.fit(X).score(X)
    score2 = h.fit(X).score(X)

    assert_almost_equal(g.weights_, h.weights_)
    assert_almost_equal(g.means_, h.means_)
    assert_almost_equal(g.precisions_, h.precisions_)
    assert score2 > score1

    # Assert that by using warm_start we can converge to a good solution
    g = GaussianMixture(n_components=n_components, n_init=1,
                        max_iter=5, reg_covar=0, random_state=random_state,
                        warm_start=False, tol=1e-6)
    h = GaussianMixture(n_components=n_components, n_init=1,
                        max_iter=5, reg_covar=0, random_state=random_state,
                        warm_start=True, tol=1e-6)

    g.fit(X)
    assert not g.converged_

    h.fit(X)
    # depending on the data there is large variability in the number of
    # refits needed to converge due to the complete randomness of the data
    for _ in range(1000):
        h.fit(X)
        if h.converged_:
            break
    assert h.converged_


@ignore_warnings(category=ConvergenceWarning)
def test_convergence_detected_with_warm_start():
    # We check that convergence is detected when warm_start=True
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng)
    n_components = rand_data.n_components
    X = rand_data.X['full']

    for max_iter in (1, 2, 50):
        gmm = GaussianMixture(n_components=n_components, warm_start=True,
                              max_iter=max_iter, random_state=rng)
        for _ in range(100):
            gmm.fit(X)
            if gmm.converged_:
                break
        assert gmm.converged_
        assert max_iter >= gmm.n_iter_


def test_score():
    covar_type = 'full'
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    n_components = rand_data.n_components
    X = rand_data.X[covar_type]

    # Check the error message if we don't call fit
    gmm1 = GaussianMixture(n_components=n_components, n_init=1,
                           max_iter=1, reg_covar=0, random_state=rng,
                           covariance_type=covar_type)
    msg = (
        "This GaussianMixture instance is not fitted yet. Call 'fit' with "
        "appropriate arguments before using this estimator."
    )
    with pytest.raises(NotFittedError, match=msg):
        gmm1.score(X)

    # Check score value
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ConvergenceWarning)
        gmm1.fit(X)
    gmm_score = gmm1.score(X)
    gmm_score_proba = gmm1.score_samples(X).mean()
    assert_almost_equal(gmm_score, gmm_score_proba)

    # Check that the score increases
    gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
                           random_state=rng,
                           covariance_type=covar_type).fit(X)
    assert gmm2.score(X) > gmm1.score(X)


def test_score_samples():
    covar_type = 'full'
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    n_components = rand_data.n_components
    X = rand_data.X[covar_type]

    # Check the error message if we don't call fit
    gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
                          random_state=rng, covariance_type=covar_type)
    msg = (
        "This GaussianMixture instance is not fitted yet. Call 'fit' with "
        "appropriate arguments before using this estimator."
    )
    with pytest.raises(NotFittedError, match=msg):
        gmm.score_samples(X)

    gmm_score_samples = gmm.fit(X).score_samples(X)
    assert gmm_score_samples.shape[0] == rand_data.n_samples


def test_monotonic_likelihood():
    # We check that each step of the EM without regularization improves
    # the training set likelihood monotonically
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    n_components = rand_data.n_components

    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        gmm = GaussianMixture(n_components=n_components,
                              covariance_type=covar_type, reg_covar=0,
                              warm_start=True, max_iter=1, random_state=rng,
                              tol=1e-7)
        current_log_likelihood = -np.infty
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ConvergenceWarning)
            # Do one training iteration at a time so we can make sure that the
            # training log likelihood increases after each iteration.
            for _ in range(600):
                prev_log_likelihood = current_log_likelihood
                current_log_likelihood = gmm.fit(X).score(X)
                assert current_log_likelihood >= prev_log_likelihood

                if gmm.converged_:
                    break

        assert gmm.converged_


def test_regularisation():
    # We train the GaussianMixture on degenerate data by defining two clusters
    # of a 0 covariance.
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 5

    X = np.vstack((np.ones((n_samples // 2, n_features)),
                   np.zeros((n_samples // 2, n_features))))

    for covar_type in COVARIANCE_TYPE:
        gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
                              covariance_type=covar_type, random_state=rng)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            msg = re.escape(
#
# A sample service to be 'compiled' into an exe-file with py2exe.
#
# See also
#  setup.py - the distutils' setup script
#  setup.cfg - the distutils' config file for this
#  README.txt - detailed usage notes
#
# A minimal service, doing nothing more than
# - write 'start' and 'stop' entries into the NT event log
# - when started, waits to be stopped again.
#
import win32serviceutil
import win32service
import win32event
import win32evtlogutil


class MyService(win32serviceutil.ServiceFramework):
    _svc_name_ = "MyService"
    _svc_display_name_ = "My Service"
    _svc_deps_ = ["EventLog"]

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        import servicemanager
        # Write a 'started' event to the event log...
        win32evtlogutil.ReportEvent(self._svc_name_,
                                    servicemanager.PYS_SERVICE_STARTED,
                                    0, # category
                                    servicemanager.EVENTLOG_INFORMATION_TYPE,
                                    (self._svc_name_, ''))

        # wait for being stopped...
        win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)

        # and
write a 'stopped' event to the event
log. win32evtlogutil.ReportEvent(self._svc_name_, servicemanager.PYS_SERVICE_STOPPED, 0, # category servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, '')) if __name__ == '__main__': # Note that this code will not be run in the 'frozen' exe-file!!! win32serviceutil.HandleCommandLine(MyService)
#!/usr/bin/env python import os import argparse import numpy as np import pandas as pd import pycondor import comptools as comp if __name__ == "__main__": p = argparse.ArgumentParser( description='Extracts and saves desired information from simulation/data .i3 files') p.add_argument('-c', '--config', dest='config', default='IC86.2012', choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'], help='Detector configuration') p.add_argument('--low_energy', dest='low_energy', default=False, action='store_true', help='Only use events with energy < 10**6.75 GeV') p.add_argument('--n_side', dest='n_side', type=int, default=64, help='Number of times to split the DataFrame') p.add_argument('--chunksize', dest='chunksize', type=int, default=1000, help='Number of lines used when reading in DataFrame') p.add_argument('--n_batches', dest='n_batches', type=int, default=50, help='Number batches running in parallel for each ks-test trial') p.add_argument('--ks_trials', dest='ks_trials', type=int, default=100, help='Number of random maps to generate') p.
add_argument('--overwrite', dest='overwrite',
                   default=False, action='store_true',
                   help='Option to overwrite reference map file, '
                        'if it already exists')
    p.add_argument('--test', dest='test',
default=False, action='store_true', help='Option to run small test version') args = p.parse_args() if args.test: args.ks_trials = 20 args.n_batches = 10000 args.chunksize = 100 # Define output directories error = comp.paths.condor_data_dir + '/ks_test_{}/error'.format(args.config) output = comp.paths.condor_data_dir + '/ks_test_{}/output'.format(args.config) log = comp.paths.condor_scratch_dir + '/ks_test_{}/log'.format(args.config) submit = comp.paths.condor_scratch_dir + '/ks_test_{}/submit'.format(args.config) # Define path to executables make_maps_ex = os.path.join(comp.paths.project_home, 'processing/anisotropy/ks_test_multipart', 'make_maps.py') merge_maps_ex = os.path.join(comp.paths.project_home, 'processing/anisotropy/ks_test_multipart', 'merge_maps.py') save_pvals_ex = os.path.join(comp.paths.project_home, 'processing/anisotropy/ks_test_multipart', 'save_pvals.py') # Create Dagman instance dag_name = 'anisotropy_kstest_{}'.format(args.config) if args.test: dag_name += '_test' dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1) # Create Job for saving ks-test p-values for each trial save_pvals_name = 'save_pvals_{}'.format(args.config) if args.low_energy: save_pvals_name += '_lowenergy' save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex, error=error, output=output, log=log, submit=submit, verbose=1) save_pvals_infiles_0 = [] save_pvals_infiles_1 = [] dagman.add_job(save_pvals_job) outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data', 'anisotropy', 'random_splits') if args.test: outdir = os.path.join(outdir, 'test') for trial_num in range(args.ks_trials): # Create map_maps jobs for this ks_trial make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num) if args.low_energy: make_maps_name += '_lowenergy' make_maps_job = pycondor.Job(make_maps_name, make_maps_ex, error=error, output=output, log=log, submit=submit, verbose=1) dagman.add_job(make_maps_job) merge_maps_infiles_0 = [] merge_maps_infiles_1 = [] for batch_idx in range(args.n_batches): if args.test and batch_idx > 2: break outfile_sample_1 = os.path.join(outdir, 'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx)) outfile_sample_0 = os.path.join(outdir, 'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx)) make_maps_arg_list = [] make_maps_arg_list.append('--config {}'.format(args.config)) make_maps_arg_list.append('--n_side {}'.format(args.n_side)) make_maps_arg_list.append('--chunksize {}'.format(args.chunksize)) make_maps_arg_list.append('--n_batches {}'.format(args.n_batches)) make_maps_arg_list.append('--batch_idx {}'.format(batch_idx)) make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0)) make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1)) make_maps_arg = ' '.join(make_maps_arg_list) if args.low_energy: make_maps_arg += ' --low_energy' make_maps_job.add_arg(make_maps_arg) # Add this outfile to the list of infiles for merge_maps_job merge_maps_infiles_0.append(outfile_sample_0) merge_maps_infiles_1.append(outfile_sample_1) for sample_idx, input_file_list in enumerate([merge_maps_infiles_0, merge_maps_infiles_1]): merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx) if args.low_energy: merge_maps_name += '_lowenergy' merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex, error=error, output=output, log=log, submit=submit, verbose=1) # Ensure that make_maps_job completes before merge_maps_job begins make_maps_job.add_child(merge_maps_job) 
merge_maps_job.add_child(save_pvals_job) dagman.add_job(merge_maps_job) merge_infiles_str = ' '.join(input_file_list) # Assemble merged output file path merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num)) merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile) merge_maps_job.add_arg(merge_maps_arg) if sample_idx == 0: save_pvals_infiles_0.append(merge_outfile) else: save_pvals_infiles_1.append(merge_outfile) save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0) save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1) if args.low_energy: outfile_basename = 'ks_test_dataframe_lowenergy.hdf' else: outfile_basename = 'ks_test_dataframe.hdf' outfile = os.path.join(outdir, outfile_basename) save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \ '--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile) save_pvals_job.add_arg(save_pvals_arg) dagman.build_submit(fancyname=True)
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy from numpy import array, shape from pyspark import SparkContext from pyspark.mllib._common import \ _dot, _get_unmangled_rdd, _get_unmangled_double_vector_rdd, \ _serialize_double_matrix, _deserialize_double_matrix, \ _serialize_double_vector, _deserialize_double_vector, \ _get_initial_weights, _serialize_rating, _regression_train_wrapper, \ _linear_predictor_typecheck, _get_unmangled_labeled_point_rdd from pyspark.mllib.linalg import SparseVector from pyspark.mllib.regression import LabeledPoint, LinearModel from math import exp, log class LogisticRegressionModel(LinearModel): """A linear binary classification model derived from logistic regression. >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data)) >>> lrm.predict(array([1.0])) > 0 True >>> lrm.predict(array([0.0])) <= 0 True >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data)) >>> lrm.predict(array([0.0, 1.0])) > 0 True >>> lrm.predict(array([0.0, 0.0])) <= 0 True >>> lrm.predict(SparseVector(2, {1: 1.0})) > 0 True >>> lrm.predict(SparseVector(2, {1: 0.0})) <= 0 Tr
ue """ def predict(self, x): _linear_predictor_typecheck(x, self._coeff) margin = _dot(x, self._coeff) + self._intercept prob = 1/(1 + exp(-margin)) return 1 if prob > 0.5 else 0 class LogisticRegressionWithSGD(object): @classmethod def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0, initialWeights=None): """Train a logistic regression model on the given data.""" sc = data.context train_func = lambda d, i: sc._jvm.PythonMLLibAPI().trainLogisticRegressionModelWithSGD( d._jrdd, iterations, step, miniBatchFraction, i) return _regression_train_wrapper(sc, train_func, LogisticRegressionModel, data, initialWeights) class SVMModel(LinearModel): """A support vector machine. >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> svm = SVMWithSGD.train(sc.parallelize(data)) >>> svm.predict(array([1.0])) > 0 True >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: -1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> svm = SVMWithSGD.train(sc.parallelize(sparse_data)) >>> svm.predict(SparseVector(2, {1: 1.0})) > 0 True >>> svm.predict(SparseVector(2, {0: -1.0})) <= 0 True """ def predict(self, x): _linear_predictor_typecheck(x, self._coeff) margin = _dot(x, self._coeff) + self._intercept return 1 if margin >= 0 else 0 class SVMWithSGD(object): @classmethod def train(cls, data, iterations=100, step=1.0, regParam=1.0, miniBatchFraction=1.0, initialWeights=None): """Train a support vector machine on the given data.""" sc = data.context train_func = lambda d, i: sc._jvm.PythonMLLibAPI().trainSVMModelWithSGD( d._jrdd, iterations, step, regParam, miniBatchFraction, i) return _regression_train_wrapper(sc, train_func, SVMModel, data, initialWeights) class NaiveBayesModel(object): """ Model for Naive Bayes classifiers. Contains two parameters: - pi: vector of logs of class priors (dimension C) - theta: matrix of logs of class conditional probabilities (CxD) >>> data = [ ... LabeledPoint(0.0, [0.0, 0.0]), ... LabeledPoint(0.0, [0.0, 1.0]), ... LabeledPoint(1.0, [1.0, 0.0]), ... ] >>> model = NaiveBayes.train(sc.parallelize(data)) >>> model.predict(array([0.0, 1.0])) 0.0 >>> model.predict(array([1.0, 0.0])) 1.0 >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {1: 0.0})), ... LabeledPoint(0.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {0: 1.0})) ... ] >>> model = NaiveBayes.train(sc.parallelize(sparse_data)) >>> model.predict(SparseVector(2, {1: 1.0})) 0.0 >>> model.predict(SparseVector(2, {0: 1.0})) 1.0 """ def __init__(self, labels, pi, theta): self.labels = labels self.pi = pi self.theta = theta def predict(self, x): """Return the most likely class for a data vector x""" return self.labels[numpy.argmax(self.pi + _dot(x, self.theta.transpose()))] class NaiveBayes(object): @classmethod def train(cls, data, lambda_=1.0): """ Train a Naive Bayes model given an RDD of (label, features) vectors. This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can handle all kinds of discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a 0-1 vector, it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}). @param data: RDD of NumPy vectors, one per element, where the first coordinate is the label and the rest is the feature vector (e.g. a count vector). 
@param lambda_: The smoothing parameter """ sc = data.context dataBytes = _get_unmangled_labeled_point_rdd(data) ans = sc._jvm.PythonMLLibAPI().trainNaiveBayes(dataBytes._jrdd, lambda_) return NaiveBayesModel( _deserialize_double_vector(ans[0]), _deserialize_double_vector(ans[1]), _deserialize_double_matrix(ans[2])) def _test(): import doctest globs = globals().copy() globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2) (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_python_2d_ns ---------------------------------- Tests for `python_2d_ns` module. """ import sys import unittest from python_2d_ns.python_2d_ns import * class TestPython_2d_ns(unittest.TestCase): #test x, y coordinates generated by function IC_coor #assume use 2 threads and rank==1 #y coordinate should be the same as serial code def test_IC_coor_y_coor(self): x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2) self.assertTrue(y[3,0]==-32) self.assertTrue(y[3,5]==-27) #x coordinate for rank 2 should start from 0 def test_IC_coor_x_coor(self): x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
#this coordinate should be 0 self.assertTrue(x[0,2]==0) #test initial condition, Taylor green forcing, test whether the value is given on specific wavenu
mber
    def test_IC_con(self):
        #generate kx, ky, assume 2 threads, rank==0
        x, y, kx, ky, k2, k2_exp=IC_coor(32, 32, 16, 1, 1, 0, 2)
        Vxhat, Vyhat=IC_condition(1, 2, kx, ky, 32, 16)
        #this wavenumber should be zero
        self.assertTrue(Vyhat[2,5]==0)
        #this wavenumber should be non-zero
        self.assertTrue(Vxhat[14,14]==0.5j)
    #test dealiasing function, which will remove values in wavenumber >= Nx/3
    def test_delias(self):
        #Nx, Np and k2 come from the python_2d_ns module globals (star import above)
        Vxhat=zeros((Nx, Np), dtype=complex)
        Vyhat=zeros((Nx, Np), dtype=complex)
        Vxhat[:]=1
        Vxhat, Vyhat=delias(Vxhat, Vyhat, Nx, Np, k2)
        #this should be zero
        self.assertTrue(Vxhat[Nx-1,Np-1]==0)
        self.assertTrue(Vyhat[Nx-1,Np-1]==0)
    #test FFT and IFFT. Take FFT and IFFT on array, it will transform back (with some numerical errors)
    def test_FFT(self):
        testa=zeros((Np, Ny), dtype=float)
        testahat=empty((N, Np), dtype=complex)
        if rank==0:
            testa[2,0]=1
        testa=ifftn_mpi(fftn_mpi(testa, testahat), testa)
        #after FFT and IFFT, this value should be the same
        #use abs() so that a round-trip undershoot also fails the test
        if rank==0:
            self.assertTrue(abs(testa[2,0]-1)<0.0001)

if __name__ == '__main__':
    sys.exit(unittest.main())
e for reading and writing. * `IOStream`: Implementation of BaseIOStream using non-blocking sockets. * `SSLIOStream`: SSL-aware version of IOStream. * `PipeIOStream`: Pipe-based IOStream implementation. """ from __future__ import absolute_import, division, print_function, with_statement import collections import errno import numbers import os import socket import ssl import sys import re from tornado import ioloop from tornado.log import gen_log, app_log from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError from tornado import stack_context from tornado.util import bytes_type try: from tornado.platform.posix import _set_nonblocking except ImportError: _set_nonblocking = None class StreamClosedError(IOError): """Exception raised by `IOStream` methods when the stream is closed. Note that the close callback is scheduled to run *after* other callbacks on the stream (to allow for buffered data to be processed), so you may see this error before you see the close callback. """ pass class BaseIOStream(object): """A utility class to write to and read from a non-blocking file or socket. We support a non-blocking ``write(
)`` and a family of ``read_*()`` methods. All of the methods take callbacks (since writing and reading are non-blocking and asynchronous). When a stream is closed due to an error, the IOStream's ``error`` attribute contains the exception object. Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, `read_from_fd`, and optionally `get_fd_error`. """ def __init__(self, io_loop=None, max_buffer_si
ze=None, read_chunk_size=4096): self.io_loop = io_loop or ioloop.IOLoop.current() self.max_buffer_size = max_buffer_size or 104857600 self.read_chunk_size = read_chunk_size self.error = None self._read_buffer = collections.deque() self._write_buffer = collections.deque() self._read_buffer_size = 0 self._write_buffer_frozen = False self._read_delimiter = None self._read_regex = None self._read_bytes = None self._read_until_close = False self._read_callback = None self._streaming_callback = None self._write_callback = None self._close_callback = None self._connect_callback = None self._connecting = False self._state = None self._pending_callbacks = 0 self._closed = False def fileno(self): """Returns the file descriptor for this stream.""" raise NotImplementedError() def close_fd(self): """Closes the file underlying this stream. ``close_fd`` is called by `BaseIOStream` and should not be called elsewhere; other users should call `close` instead. """ raise NotImplementedError() def write_to_fd(self, data): """Attempts to write ``data`` to the underlying file. Returns the number of bytes written. """ raise NotImplementedError() def read_from_fd(self): """Attempts to read from the underlying file. Returns ``None`` if there was nothing to read (the socket returned `~errno.EWOULDBLOCK` or equivalent), otherwise returns the data. When possible, should return no more than ``self.read_chunk_size`` bytes at a time. """ raise NotImplementedError() def get_fd_error(self): """Returns information about any error on the underlying file. This method is called after the `.IOLoop` has signaled an error on the file descriptor, and should return an Exception (such as `socket.error` with additional information, or None if no such information is available. """ return None def read_until_regex(self, regex, callback): """Run ``callback`` when we read the given regex pattern. The callback will get the data read (including the data that matched the regex and anything that came before it) as an argument. """ self._set_read_callback(callback) self._read_regex = re.compile(regex) self._try_inline_read() def read_until(self, delimiter, callback): """Run ``callback`` when we read the given delimiter. The callback will get the data read (including the delimiter) as an argument. """ self._set_read_callback(callback) self._read_delimiter = delimiter self._try_inline_read() def read_bytes(self, num_bytes, callback, streaming_callback=None): """Run callback when we read the given number of bytes. If a ``streaming_callback`` is given, it will be called with chunks of data as they become available, and the argument to the final ``callback`` will be empty. Otherwise, the ``callback`` gets the data as an argument. """ self._set_read_callback(callback) assert isinstance(num_bytes, numbers.Integral) self._read_bytes = num_bytes self._streaming_callback = stack_context.wrap(streaming_callback) self._try_inline_read() def read_until_close(self, callback, streaming_callback=None): """Reads all data from the socket until it is closed. If a ``streaming_callback`` is given, it will be called with chunks of data as they become available, and the argument to the final ``callback`` will be empty. Otherwise, the ``callback`` gets the data as an argument. Subject to ``max_buffer_size`` limit from `IOStream` constructor if a ``streaming_callback`` is not used. 
""" self._set_read_callback(callback) self._streaming_callback = stack_context.wrap(streaming_callback) if self.closed(): if self._streaming_callback is not None: self._run_callback(self._streaming_callback, self._consume(self._read_buffer_size)) self._run_callback(self._read_callback, self._consume(self._read_buffer_size)) self._streaming_callback = None self._read_callback = None return self._read_until_close = True self._streaming_callback = stack_context.wrap(streaming_callback) self._try_inline_read() def write(self, data, callback=None): """Write the given data to this stream. If ``callback`` is given, we call it when all of the buffered write data has been successfully written to the stream. If there was previously buffered write data and an old write callback, that callback is simply overwritten with this new callback. """ assert isinstance(data, bytes_type) self._check_closed() # We use bool(_write_buffer) as a proxy for write_buffer_size>0, # so never put empty strings in the buffer. if data: # Break up large contiguous strings before inserting them in the # write buffer, so we don't have to recopy the entire thing # as we slice off pieces to send to the socket. WRITE_BUFFER_CHUNK_SIZE = 128 * 1024 if len(data) > WRITE_BUFFER_CHUNK_SIZE: for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE): self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE]) else: self._write_buffer.append(data) self._write_callback = stack_context.wrap(callback) if not self._connecting: self._handle_write() if self._write_buffer: self._add_io_state(self.io_loop.WRITE) self._maybe_add_error_listener() def set_close_callback(self, callback): """Call the given callback when the stream is closed.""" self._close_callback = stack_context.wrap(callback) def close(self, exc_info=False): """Close this stream. If ``exc_info`` is true, set the ``error`` attribute to the current exception from `sys.exc_info` (or if ``exc_i
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyYtopt(PythonPackag
e):
    """Ytopt package implements search using Random Forest (SuRF), an
    autotuning search method developed within the Y-Tune ECP project."""

    maintainers = ['Kerilk']

    homepage = "https://github.com/ytopt-team/ytopt"
    url
= "https://github.com/ytopt-team/ytopt/archive/refs/tags/v0.0.1.tar.gz" version('0.0.2', sha256='5a624aa678b976ff6ef867610bafcb0dfd5c8af0d880138ca5d56d3f776e6d71') version('0.0.1', sha256='3ca616922c8e76e73f695a5ddea5dd91b0103eada726185f008343cc5cbd7744') depends_on('python@3.6:', type=('build', 'run')) depends_on('py-scikit-learn@0.23.1', type=('build', 'run')) depends_on('py-dh-scikit-optimize', type=('build', 'run')) depends_on('py-configspace', type=('build', 'run')) depends_on('py-numpy', type=('build', 'run')) depends_on('py-ytopt-autotune@1.1:', type=('build', 'run')) depends_on('py-joblib', type=('build', 'run')) depends_on('py-deap', type=('build', 'run')) depends_on('py-tqdm', type=('build', 'run')) depends_on('py-ray', type=('build', 'run')) depends_on('py-mpi4py@3.0.0:', type=('build', 'run'))
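# Hedged usage note (not part of the original recipe): with this file on
# Spack's package search path, the package would typically be installed as
#   spack install py-ytopt@0.0.2
# with dependency pins expressed through the usual Spack spec syntax.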
import _plotly_utils.basevalidators class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="tickvalssrc"
, parent_name="scatter3d.marker.colorbar", **kwargs
): super(TickvalssrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), **kwargs )
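# Hedged usage sketch (not in the original generated module): validators like
# this one are normally instantiated by plotly's generated schema code, but
# direct construction also works:
#   v = TickvalssrcValidator()
#   v.plotly_name   # 'tickvalssrc'
#   v.parent_name   # 'scatter3d.marker.colorbar'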
import sys import pandas as pd import seaborn as sns import matplotlib.pyplot as plt class Visualizer(): def __init__(self, *args): pass def show_performance(self, list_of_tuples, fig_size=(9,9), font_scale=1.1, file=''): """ Parameters: list_of_tuples: - list containing (clf_name, clf_performance) tuples for each classifier we wish to visualize fig_size: - set figure size (default: (9,9))
font_scale:
        - text scale in seaborn plots (default: 1.1)
        file:
        - string containing a valid filename (default: '')

        Output:
        f: (matplotlib.pyplot.figure object)
        """
        if not (isinstance(list_of_tuples, list) and list_of_tuples
                and isinstance(list_of_tuples[0], tuple)):
            raise ValueError("Expecting a non-empty list of tuples")
        sns.set(font_scale=font_scale)
        sns.set_style("whitegrid")
        data = list()
        for name, value in list_of_tuples:
            data.append([name, value])
        data = pd.DataFrame(data, columns=['classifier', 'performance'])
        data.sort_values('performance', inplace=True, ascending=False)
        # Close all figures (an individual figure can be closed with
        # plt.close(f), where f is a matplotlib.pyplot.figure object)
        plt.close('all')
        f = plt.figure(figsize=fig_size)
        sns.barplot(x='performance', y='classifier', data=data)
        plt.xlabel('performance')
        if file:
            try:
                plt.savefig(file)
            except Exception:
                # ignore save failures (e.g. an unwritable path); the figure
                # object is still returned to the caller
                pass
        return f


if __name__ == '__main__':
    sys.exit(-1)
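# Hedged usage sketch (illustrative classifier names and scores, not from the
# original module):
#   viz = Visualizer()
#   f = viz.show_performance([('logreg', 0.87), ('svm', 0.91), ('tree', 0.79)],
#                            file='performance.png')
#   plt.show()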
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url

from .views import (AvailableMapListview, AvailableMapsDetailview, index_view,
                    MyArmiesListView, ArmyCreateView, ArmyDetailView,
                    RobotCreateView)

urlpatterns = patterns(
    '',
    url(r'^maingame/maps$', AvailableMapListview.as_view(),
        name='list_available_maps'),
    url(r'^maingame/map/(?P<pk>\d+)$', AvailableMapsDetailview.as_view(),
        name="available_map_detail"),
    url(r'^maingame/my_armies$', MyArmiesListView.as_view(), name='my_armies'),
    url(r'^maingame/army/(?P<pk>\d+)$', ArmyDetailView.as_view(),
        name="army_detail"),
    url(r'^maingame/create_armies$', ArmyCreateView.as_view(), name='add_army'),
    url(r'^maingame/create_robot$', RobotCreateView.as_view(),
        name='add_robot_to_army'),
    url(r'^$', index_view, name="index"),
)
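# Hedged usage note (not part of the original urlconf): the named routes above
# are meant to be resolved with Django's reverse(), e.g.
#   from django.core.urlresolvers import reverse
#   reverse('available_map_detail', kwargs={'pk': 1})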
#!/usr/bin/env python #######
################################################################# # File : dirac-ve
rsion # Author : Ricardo Graciani ######################################################################## """ Print version of current DIRAC installation Usage: dirac-version [option] Example: $ dirac-version """ import argparse import DIRAC from DIRAC.Core.Base.Script import Script @Script() def main(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.parse_known_args() print(DIRAC.version) if __name__ == "__main__": main()
def is_palindrome(obj): obj = str(obj) obj_list = list(obj) obj_list_reversed = obj_list[::-1] return obj_list == obj_list_reversed def generate_rotations(word): letters = list(word) string_rotations = [] counter = len(letters) temp = letters while counter != 0: current_letter = temp.pop(0) temp.append(current_letter) word = "".join(temp)
string_rotations.append(word) counter -= 1 return string_rotations def get_rotated_palindromes(string_rotations): is_empty = True for word in string_rotations: if is_
palindrome(word) is True: print(word) is_empty = False if is_empty is True: print("NONE") def main(): user_input = input("Enter a string: ") string_rotations = generate_rotations(user_input) get_rotated_palindromes(string_rotations) if __name__ == '__main__': main()
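# Hedged worked example (illustrative input, not part of the original script):
# generate_rotations("level") returns
#   ['evell', 'velle', 'ellev', 'lleve', 'level']
# and get_rotated_palindromes() prints only 'level', the single rotation that
# is_palindrome() accepts.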
#!/usr/bin/env python #-*- coding: utf-8 -*- from __future__ import unicode_literals import sqlite3 from flask import Flask, render_template, g, current_app, request from flask.ext.paginate import Pagination app = Flask(__name__) app.config.from_pyfile('app.cfg') @app.before_request def before_request(): g.conn = sqlite3.connect('test.db') g.conn.row_factory = sqlite3.Row g.cur = g.conn.cursor() @app.teardown_request def teardown(error): if hasattr(g, 'conn'): g.conn.close() @app.route('/') def index(): g.cur.execute('select count(*) from users') total = g.cur.fetchone()[0] page, per_page, offset = get_page_items() sql = 'select name from users order by name limit {}, {}'\ .format(offset, per_page) g.cur.execute(sql) users = g.
cur.fetchall() pagination = get_pagination(page=page, per_page=per_page, total=total, record_name='users', ) return render_template('index.html', users=users, page=page, per_page=per_page,
pagination=pagination, ) def get_css_framework(): return current_app.config.get('CSS_FRAMEWORK', 'bootstrap3') def get_link_size(): return current_app.config.get('LINK_SIZE', 'sm') def show_single_page_or_not(): return current_app.config.get('SHOW_SINGLE_PAGE', False) def get_page_items(): page = int(request.args.get('page', 1)) per_page = request.args.get('per_page') if not per_page: per_page = current_app.config.get('PER_PAGE', 10) else: per_page = int(per_page) offset = (page - 1) * per_page return page, per_page, offset def get_pagination(**kwargs): kwargs.setdefault('record_name', 'records') return Pagination(css_framework=get_css_framework(), link_size=get_link_size(), show_single_page=show_single_page_or_not(), **kwargs ) if __name__ == '__main__': app.run(debug=True)
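# Hedged setup note (not in the original): the app expects an app.cfg file for
# app.config.from_pyfile() plus an SQLite database test.db containing a
# users(name) table, e.g. seeded with
#   sqlite3 test.db "create table users (name text);"
# after which /?page=2&per_page=5 pages through the rows.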
from django.contrib.contenttypes.models import ContentType import json from django.http import Http404, HttpResponse from django.contrib import messages from django.contrib.auth import get_user_model from django.contrib.auth.decorators import login_required, user_passes_test from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404, redirect, render from guardian.decorators import permission_required from guardian.shortcuts import get_objects_for_user from account.models import DepartmentGroup from backend.tasks import TestConnectionTask from event.models import NotificationPreferences from .models import Application, Department, Environment, Server, ServerRole from task.models import Execution @login_required def index(request): data = {} executions = Execution.objects.filter(task__application__department_id=request.current_department_id) if not executions.count(): return redirect(reverse('first_steps_page')) return render(request, 'page/index.html', data) @permission_required('core.view_application', (Application, 'id', 'application_id')) def application_page(request, application_id): data = {} data['application'] = get_object_or_404(Application, pk=application_id) return render(request, 'page/application.html', data) @permission_required('core.view_environment', (Environment, 'id', 'environment_id')) def environment_page(request, environment_id): data = {} data['environment'] = get_object_or_404(Environment, pk=environment_id) data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles')) return render(request, 'page/environment.html', data) @permission_required('core.view_environment', (Environment, 'servers__id', 'server_id')) def server_test(request, server_id): data = {} data['server'] = get_object_or_404(Server, pk=server_id) data['task_id'] = TestConnectionTask().delay(server_id).id return render(request, 'partial/server_test.html', data) @login_required def server_test_ajax(request, task_id): data = {} task = TestConnectionTask().AsyncResult(task_id) if task.status == 'SUCCESS': status, output = task.get() data['status'] = status data['output'] = output elif task.status == 'FAILED': data['status'] = False else: data['status'] = None return HttpResponse(json.dumps(data), content_type="application/json") @login_required def first_steps_page(request): data = {} return render(request, 'page/first_steps.html', data) @login_required def settings_page(request, section='user', subsection='profile'): data = {} data['section'] = section data['subsection'] = subsection data['department'] = Departmen
t(pk=request.current_department_id) data['on_settings'] = True handler = '_settings_%
s_%s' % (section, subsection) if section == 'system' and request.user.is_superuser is not True: return redirect('index') if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']): return redirect('index') if handler in globals(): data = globals()[handler](request, data) else: raise Http404 return render(request, 'page/settings.html', data) def _settings_account_profile(request, data): data['subsection_template'] = 'partial/account_profile.html' from account.forms import account_create_form form = account_create_form('user_profile', request, request.user.id) form.fields['email'].widget.attrs['readonly'] = True data['form'] = form if request.method == 'POST': if form.is_valid(): form.save() data['user'] = form.instance messages.success(request, 'Saved') return data def _settings_account_password(request, data): data['subsection_template'] = 'partial/account_password.html' from account.forms import account_create_form form = account_create_form('user_password', request, request.user.id) data['form'] = form if request.method == 'POST': if form.is_valid(): user = form.save(commit=False) user.set_password(user.password) user.save() data['user'] = form.instance messages.success(request, 'Saved') return data def _settings_account_notifications(request, data): data['subsection_template'] = 'partial/account_notifications.html' data['applications'] = get_objects_for_user(request.user, 'core.view_application') content_type = ContentType.objects.get_for_model(Application) if request.method == 'POST': for application in data['applications']: key = 'notification[%s]' % application.id notification, created = NotificationPreferences.objects.get_or_create( user=request.user, event_type='ExecutionFinish', content_type=content_type, object_id=application.id) if notification.is_active != (key in request.POST): notification.is_active = key in request.POST notification.save() messages.success(request, 'Saved') data['notifications'] = NotificationPreferences.objects.filter( user=request.user, event_type='ExecutionFinish', content_type=content_type.id).values_list('object_id', 'is_active') data['notifications'] = dict(data['notifications']) return data def _settings_department_applications(request, data): data['subsection_template'] = 'partial/application_list.html' data['applications'] = Application.objects.filter(department_id=request.current_department_id) data['empty'] = not bool(data['applications'].count()) return data def _settings_department_users(request, data): data['subsection_template'] = 'partial/user_list.html' from guardian.shortcuts import get_users_with_perms department = Department.objects.get(pk=request.current_department_id) data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name') data['department_user_list'] = True data['form_name'] = 'user' return data def _settings_department_groups(request, data): data['subsection_template'] = 'partial/group_list.html' data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id) return data def _settings_department_serverroles(request, data): data['subsection_template'] = 'partial/serverrole_list.html' data['serverroles'] = ServerRole.objects.filter(department_id=request.current_department_id) data['empty'] = not bool(data['serverroles'].count()) return data @user_passes_test(lambda u: u.is_superuser) def _settings_system_departments(request, data): data['subsection_template'] = 'partial/department_list.html' 
data['departments'] = Department.objects.all()
    return data


@user_passes_test(lambda u: u.is_superuser)
def _settings_system_users(request, data):
    data['subsection_template'] = 'partial/user_list.html'
    data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related(
        'groups__departmentgroup__department').order_by('name')
    data['form_name'] = 'usersystem'
    return data


def department_switch(request, id):
    department = get_object_or_404(Department, pk=id)
    if request.user.has_perm('core.view_department', department):
        request.session['current_department_id'] = int(id)
    else:
        messages.error(request, 'Access forbidden')
    return redirect('index')


def handle_403(request):
    messages.error(request, 'Access forbidden')
    return redirect('index')
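# Hedged illustration (not part of the original views): settings_page composes
# a handler name such as '_settings_account_profile' from the URL's section and
# subsection and dispatches through globals(); unknown pairs raise Http404, so
# adding a settings panel only requires defining a matching
# _settings_<section>_<subsection>(request, data) function above.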
"""Support for Acmeda Roller Blind Batteries.""" from __future__ import annotations from homeassistant.components.sensor import SensorDeviceClass, SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import PERCENTAGE from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity_platform import AddEntitiesCallback from .base import AcmedaBase from .const import ACMEDA_HUB_UPDATE, DOMAIN from .helpers import async_add_acmeda_entities async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the Acmeda Rollers from a config entry.""" hub = hass.data[DOMAIN][config_ent
ry.entry_id] current: set[int] = set() @callback def async_add_acmeda_sensors(): async_add_acmeda_entities(
hass, AcmedaBattery, config_entry, current, async_add_entities
        )

    hub.cleanup_callbacks.append(
        async_dispatcher_connect(
            hass,
            ACMEDA_HUB_UPDATE.format(config_entry.entry_id),
            async_add_acmeda_sensors,
        )
    )


class AcmedaBattery(AcmedaBase, SensorEntity):
    """Representation of an Acmeda roller blind battery sensor."""

    device_class = SensorDeviceClass.BATTERY
    _attr_native_unit_of_measurement = PERCENTAGE

    @property
    def name(self):
        """Return the name of the roller."""
        return f"{super().name} Battery"

    @property
    def native_value(self):
        """Return the state of the device."""
        return self.roller.battery
#!/usr/bin/env python # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # (1) Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # (2) Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # (3)The name of the author may not be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER C
AUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""
Python setup script.
"""

from setuptools import setup, find_packages


def extract_requirements(filename):
    with open(filename, 'r') as requirements_file:
        # strip whitespace/newlines instead of blindly chopping the last
        # character, which would mangle a final line with no trailing newline
        return [line.strip() for line in requirements_file.readlines()]


install_requires = extract_requirements('requirements.txt')
test_require = extract_requirements('test-requirements.txt')


setup(
    name='etcdobj',
    version='0.0.0',
    description='Basic ORM for etcd',
    author='Steve Milner',
    url='https://github.com/ashcrow/etcdobj',
    license="MBSD",
    install_requires=install_requires,
    tests_require=test_require,
    package_dir={'': 'src'},
    packages=find_packages('src'),
)
"quotas", "provider"] binding_view = "extension:port_binding:view" binding_set = "extension:port_binding:set" def __init__(self): LOG.info(_('Neutron PLUMgrid Director: Starting Plugin')) super(NeutronPluginPLUMgridV2, self).__init__() self.plumgrid_init() LOG.debug(_('Neutron PLUMgrid Director: Neutron server with ' 'PLUMgrid Plugin has started')) def plumgrid_init(self): """PLUMgrid initialization.""" director_plumgrid = cfg.CONF.plumgriddirector.director_server director_port = cfg.CONF.plumgriddirector.director_server_port director_admin = cfg.CONF.plumgriddirector.username director_password = cfg.CONF.plumgriddirector.password timeout = cfg.CONF.plumgriddirector.servertimeout plum_driver = cfg.CONF.plumgriddirector.driver # PLUMgrid Director info validation LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid) self._plumlib = importutils.import_object(plum_driver) self._plumlib.director_conn(director_plumgrid, director_port, timeout, director_admin, director_password) def create_network(self, context, network): """Create Neutron network. Creates a PLUMgrid-based bridge. """ LOG.debug(_('Neutron PLUMgrid Director: create_network() called')) # Plugin DB - Network Create and validation tenant_id = self._get_tenant_id_for_create(context, network["network"]) self._network_admin_state(network) with context.session.begin(subtransactions=True): net_db = super(NeutronPluginPLUMgridV2, self).create_network(context, network) # Propagate all L3 data into DB self._process_l3_create(context, net_db, network['network']) try: LOG.debug(_('PLUMgrid Library: create_network() called')) self._plumlib.create_network(tenant_id, net_db, network) except Exception as err_message: raise plum_excep.PLUMgridException(err_msg=err_message) # Return created network return net_db def update_network(self, context, net_id, network): """Update Neutron network. Updates a PLUMgrid-based bridge. """ LOG.debug(_("Neutron PLUMgrid Director: update_network() called")) self._network_admin_state(network) tenant_id = self._get_tenant_id_for_create(context, network["network"]) with context.session.begin(subtransactions=True): # Plugin DB - Network Update net_db = super( NeutronPluginPLUMgridV2, self).update_network(context, net_id, network) self._process_l3_update(context, net_db, network['network']) try: LOG.debug(_("PLUMgrid Library: update_network() called")) self._plumlib.update_network(tenant_id, net_id) except Exception as err_message: raise plum_excep.
PLUMgridException(err_msg=err_message) # Return
updated network
        return net_db

    def delete_network(self, context, net_id):
        """Delete Neutron network.

        Deletes a PLUMgrid-based bridge.
        """
        LOG.debug(_("Neutron PLUMgrid Director: delete_network() called"))
        net_db = super(NeutronPluginPLUMgridV2,
                       self).get_network(context, net_id)

        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, net_id)
            # Plugin DB - Network Delete
            super(NeutronPluginPLUMgridV2, self).delete_network(context,
                                                                net_id)

            try:
                LOG.debug(_("PLUMgrid Library: delete_network() called"))
                self._plumlib.delete_network(net_db, net_id)

            except Exception as err_message:
                raise plum_excep.PLUMgridException(err_msg=err_message)

    def create_port(self, context, port):
        """Create Neutron port.

        Creates a PLUMgrid-based port on the specific Virtual Network
        Function (VNF).
        """
        LOG.debug(_("Neutron PLUMgrid Director: create_port() called"))

        # Port operations on PLUMgrid Director are automatic operations
        # from the VIF driver operations in Nova.
        # It requires admin_state_up to be True
        port["port"]["admin_state_up"] = True

        with context.session.begin(subtransactions=True):
            # Plugin DB - Port Create and Return port
            port_db = super(NeutronPluginPLUMgridV2, self).create_port(context,
                                                                       port)
            device_id = port_db["device_id"]
            if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
                router_db = self._get_router(context, device_id)
            else:
                router_db = None
            try:
                LOG.debug(_("PLUMgrid Library: create_port() called"))
                self._plumlib.create_port(port_db, router_db)

            except Exception as err_message:
                raise plum_excep.PLUMgridException(err_msg=err_message)

        # Plugin DB - Port Create and Return port
        return self._port_viftype_binding(context, port_db)

    def update_port(self, context, port_id, port):
        """Update Neutron port.

        Updates a PLUMgrid-based port on the specific Virtual Network
        Function (VNF).
        """
        LOG.debug(_("Neutron PLUMgrid Director: update_port() called"))

        with context.session.begin(subtransactions=True):
            # Plugin DB - Port Update
            port_db = super(NeutronPluginPLUMgridV2, self).update_port(
                context, port_id, port)
            device_id = port_db["device_id"]
            if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
                router_db = self._get_router(context, device_id)
            else:
                router_db = None
            try:
                LOG.debug(_("PLUMgrid Library: update_port() called"))
                self._plumlib.update_port(port_db, router_db)

            except Exception as err_message:
                raise plum_excep.PLUMgridException(err_msg=err_message)

        # Plugin DB - Port Update
        return self._port_viftype_binding(context, port_db)

    def delete_port(self, context, port_id, l3_port_check=True):
        """Delete Neutron port.

        Deletes a PLUMgrid-based port on the specific Virtual Network
        Function (VNF).
""" LOG.debug(_("Neutron PLUMgrid Director: delete_port() called")) with context.session.begin(subtransactions=True): # Plugin DB - Port Create and Return port port_db = super(NeutronPluginPLUMgridV2, self).get_port(context, port_id) self.disassociate_floatingips(context, port_id) super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id) if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: device_id = port_db["device_id"] router_db = self._get_router(context, device_id) else: router_db = None try: LOG.debug(_("PLUMgrid Library: delete_port() called")) self._plumlib.delete_port(port_db, router_db) except Exception as err_message: raise plum_excep.PLUMgridException(err_msg=err_message) def get_port(self, context, id, fields=None): with context.session.begin(subtransactions=True): port_db = super(NeutronPluginPLUMgridV2, self).get_port(context, id, fields) self._port_viftype_binding(context, port_db) return self._fields(port_db, fields) def get_ports(self, context, filters=None,
rt", MagicMock(return_value=True), ), patch( "salt.utils.process.SignalHandlingProcess.join", MagicMock(return_value=True), ): try: mock_opts = salt.config.DEFAULT_MINION_OPTS.copy() mock_opts["cachedir"] = str(tmp_path) minion = salt.minion.Minion( mock_opts, io_loop=salt.ext.tornado.ioloop.IOLoop(), ) minion.schedule = salt.utils.schedule.Schedule(mock_opts, {}, returners={}) assert not hasattr(minion, "beacons") minion.module_refresh() assert hasattr(minion, "beacons") assert hasattr(minion.beacons, "beacons") assert "service.beacon" in minion.beacons.beacons minion.destroy() finally: minion.destroy() @pytest.mark.slow_test def test_when_ping_interval_is_set_the_callback_should_be_added_to_periodic_callbacks(): with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch( "salt.minion.Minion.sync_connect_master", MagicMock(side_effect=RuntimeError("stop execution")), ), patch( "salt.utils.process.SignalHandlingProcess.start", MagicMock(return_value=True), ), patch( "salt.utils.process.SignalHandlingProcess.join", MagicMock(return_value=True), ): mock_opts = salt.config.DEFAULT_MINION_OPTS.copy() mock_opts["ping_interval"] = 10 io_loop = salt.ext.tornado.ioloop.IOLoop() io_loop.make_current() minion = salt.minion.Minion(mock_opts, io_loop=io_loop) try: try: minion.connected = MagicMock(side_effect=(False, True)) minion._fire_master_minion_start = MagicMock() minion.tune_in(start=False) except RuntimeError: pass # Make sure the scheduler is initialized but the beacons are not assert "ping" in minion.periodic_callbacks finally: minion.destroy() @pytest.mark.slow_test def test_when_passed_start_event_grains(): mock_opts = salt.config.DEFAULT_MINION_OPTS.copy() # provide mock opts an os grain since we'll look for it later. mock_opts["grains"]["os"] = "linux" mock_opts["start_event_grains"] = ["os"] io_loop = salt.ext.tornado.ioloop.IOLoop() io_loop.make_current() minion = salt.minion.Minion(mock_opts, io_loop=io_loop) try: minion.tok = MagicMock() minion._send_req_sync = MagicMock() minion._fire_master( "Minion has started", "minion_start", include_startup_grains=True ) load = minion._send_req_sync.call_args[0][0] assert "grains" in load assert "os" in load["grains"] finally: minion.destroy() @pytest.mark.slow_test def test_when_not_passed_start_event_grains(): mock_opts = salt.config.DEFAULT_MINION_OPTS.copy() io_loop = salt.ext.tornado.ioloop.IOLoop() io_loop.make_current() minion = salt.minion.Minion(mock_opts, io_loop=io_loop) try: minion.tok = MagicMock() minion._send_req_sync = MagicMock() minion._fire_master("Minion has started", "minion_start")
load = minion._send_req_sync.call_args[0][0] assert "grains" not in load finally: minion.destroy() @pytest.mark.slow_test def test_when_other_events_fired_and_start_event_grains_are_set(): mock_opts = salt.config.DEFAULT_MINION_OPTS.copy() mock_opts["start_event_grains"] = ["os"] io_loop = salt.ext.tornado.ioloop.IOLoop() io_l
oop.make_current()
    minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
    try:
        minion.tok = MagicMock()
        minion._send_req_sync = MagicMock()
        minion._fire_master("Custm_event_fired", "custom_event")
        load = minion._send_req_sync.call_args[0][0]
        assert "grains" not in load
    finally:
        minion.destroy()


@pytest.mark.slow_test
def test_minion_retry_dns_count():
    """
    Tests that the resolve_dns will retry dns lookups for a maximum of
    3 times before raising a SaltMasterUnresolvableError exception.
    """
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    with patch.dict(
        opts,
        {
            "ipv6": False,
            "master": "dummy",
            "master_port": "4555",
            "retry_dns": 1,
            "retry_dns_count": 3,
        },
    ):
        pytest.raises(SaltMasterUnresolvableError, salt.minion.resolve_dns, opts)


@pytest.mark.slow_test
def test_gen_modules_executors():
    """
    Ensure gen_modules is called with the correct arguments #54429
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    io_loop = salt.ext.tornado.ioloop.IOLoop()
    io_loop.make_current()
    minion = salt.minion.Minion(mock_opts, io_loop=io_loop)

    class MockPillarCompiler:
        def compile_pillar(self):
            return {}

    try:
        with patch("salt.pillar.get_pillar", return_value=MockPillarCompiler()):
            with patch("salt.loader.executors") as execmock:
                minion.gen_modules()
                # Mock.called_with is not an assertion method (it merely
                # returns a truthy child Mock); assert_called_with actually
                # verifies the call arguments.
                execmock.assert_called_with(minion.opts, minion.functions)
    finally:
        minion.destroy()


@patch("salt.utils.process.default_signals")
@pytest.mark.slow_test
def test_reinit_crypto_on_fork(def_mock):
    """
    Ensure salt.utils.crypt.reinit_crypto() is executed when forking for new job
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    mock_opts["multiprocessing"] = True

    io_loop = salt.ext.tornado.ioloop.IOLoop()
    io_loop.make_current()
    minion = salt.minion.Minion(mock_opts, io_loop=io_loop)

    job_data = {"jid": "test-jid", "fun": "test.ping"}

    def mock_start(self):
        # pylint: disable=comparison-with-callable
        assert (
            len(
                [
                    x
                    for x in self._after_fork_methods
                    if x[0] == salt.utils.crypt.reinit_crypto
                ]
            )
            == 1
        )
        # pylint: enable=comparison-with-callable

    with patch.object(salt.utils.process.SignalHandlingProcess, "start", mock_start):
        io_loop.run_sync(lambda: minion._handle_decoded_payload(job_data))


def test_minion_manage_schedule():
    """
    Tests that the manage_schedule will call the add function, adding
    schedule data into opts.
    """
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.minion.Minion.sync_connect_master",
        MagicMock(side_effect=RuntimeError("stop execution")),
    ), patch(
        "salt.utils.process.SignalHandlingMultiprocessingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingMultiprocessingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        io_loop = salt.ext.tornado.ioloop.IOLoop()
        io_loop.make_current()

        with patch("salt.utils.schedule.clean_proc_dir", MagicMock(return_value=None)):
            try:
                mock_functions = {"test.ping": None}
                minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
                minion.schedule = salt.utils.schedule.Schedule(
                    mock_opts,
                    mock_functions,
                    returners={},
                    new_instance=True,
                )
                minion.opts["foo"] = "bar"
                schedule_data = {
                    "test_job": {
                        "function": "test.ping",
                        "return_job": False,
                        "jid_include": True,
                        "maxrunning": 2,
                        "seconds": 10,
                    }
                }
                data = {
                    "name": "test-item",
                    "schedule": schedule_data,
                    "func": "add",
                    "persist": False,
                }
                tag = "manage_schedule"
                minion.manage_schedule(tag, data)
                assert "test_job" i
#!/usr/bin/env python import sys # sys.dont_write_bytecode = True import glob import os import time import logging import os.path from argparse import ArgumentParser class RtmBot(object): def __init__(self, token): self.last_ping = 0 self.token = token self.bot_plugins = [] self.slack_client = None def connect(self): """Convenience method that creates Server instance""" from slackclient import SlackClient self.slack_client = SlackClient(self.token) self.slack_client.rtm_connect() def start(self): self.connect() self.load_plugins() while True: for reply in self.slack_client.rtm_read(): self.input(reply) self.crons() self.output() self.autoping() time.sleep(.5) def autoping(self): # hardcode the interval to 3 seconds now = int(time.time()) if now > self.last_ping + 3: self.slack_client.server.ping() self.last_ping = now def input(self, data): if "type" in data: function_name = "process_" + data["type"] logging.debug("got {}".format(function_name)) for plugin in self.bot_plugins: plugin.register_jobs() plugin.do(function_name, data) def output(self): for plugin in self.bot_plugins: limiter = False for output in plugin.do_output(): channel = self.slack_client.server.channels.find(output[0]) if channel != None and output[1] != None: if limiter == True: time.sleep(.1) limiter = False message = output[1].encode('ascii', 'ignore') channel.send_message("{}".format(message)) limiter = True def crons(self): for plugin in self.bot_plugins: plugin.do_jobs() def load_plugins(self): for plugin in glob.glob(directory + '/plugins/*'): sys.path.insert(0, plugin) sys.path.insert(0, directory + '/plugins/') for plugin in glob.glob(directory + '/plugins/*.py') + glob.glob( directory + '/plugins/*/*.py'): logging.info(plugin) name = plugin.split('/')[-1][:-3] try: self.bot_plugins.append(Plugin(name)) except: import traceback traceback_msg = traceback.format_exc() logging.error("error loading plugin {name} {traceback_msg}".format(name=name, traceback_msg=traceback_msg)) class Plugin(object): def __init__(self, name, plugin_config={}): self.name = name self.jobs = [] self.module = __import__(name) self.register_jobs() self.outputs = [] if name in config: logging.info("config found for: " + name) self.module.config = config[name] if 'setup' in dir(self.module): self.module.setup() def register_jobs(self): if 'crontable' in dir(self.module): for interval, function in self.module.crontable: self.jobs.append(Job(interval, eval("self.module." + function))) logging.info(self.module.crontable) self.module.crontable = [] else: self.module.crontable = [] def do(self, function_name, dat
a): if function_name in dir(self.module): # this makes the plugin fail with stack trace in debug mode if not debug: try: eval("self.module." + function_name)(data) except: logging.debug("problem in module {} {}".format(function_name, data)) else: eval("self.module."
+ function_name)(data) if "catch_all" in dir(self.module): try: self.module.catch_all(data) except: logging.debug("problem in catch all") def do_jobs(self): for job in self.jobs: job.check() def do_output(self): output = [] while True: if 'outputs' in dir(self.module): if len(self.module.outputs) > 0: logging.info("output from {}".format(self.module)) output.append(self.module.outputs.pop(0)) else: break else: self.module.outputs = [] return output class Job(object): def __init__(self, interval, function): self.function = function self.interval = interval self.lastrun = 0 def __str__(self): return "{} {} {}".format(self.function, self.interval, self.lastrun) def __repr__(self): return self.__str__() def check(self): if self.lastrun + self.interval < time.time(): if not debug: try: self.function() except: logging.debug("problem") else: self.function() self.lastrun = time.time() pass class UnknownChannel(Exception): pass def main_loop(): if "LOGFILE" in config: logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s') logging.info(directory) try: bot.start() except KeyboardInterrupt: sys.exit(0) except: logging.exception('OOPS') def parse_args(): parser = ArgumentParser() parser.add_argument( '-c', '--config', help='Full path to config file.', metavar='path' ) return parser.parse_args() if __name__ == "__main__": try: from config import Config args = parse_args() directory = os.path.dirname(sys.argv[0]) if not directory.startswith('/'): directory = os.path.abspath("{}/{}".format(os.getcwd(), directory )) config = Config() if os.path.exists('./rtmbot.conf'): config.load_yaml(args.config or 'rtmbot.conf') else: config.load_os_environ_vars('FB__') logging.basicConfig(stream=sys.stdout, filename='debug.log', level=logging.DEBUG if config["DEBUG"] else logging.INFO) logging.info('Bot is') token = config["SLACK_TOKEN"] debug = config["DEBUG"] bot = RtmBot(token) site_plugins = [] files_currently_downloading = [] job_hash = {} if config["DAEMON"] in ['True', True]: import daemon with daemon.DaemonContext(): main_loop() else: main_loop() except: import traceback print traceback.format_exc()
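# Hedged configuration sketch (not part of the original): the keys read above
# suggest an rtmbot.conf roughly like the following (YAML, loaded through
# Config.load_yaml), with values here purely illustrative:
#   SLACK_TOKEN: "xoxb-..."
#   DEBUG: False
#   DAEMON: False
#   LOGFILE: rtmbot.log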
CFG_SITE_NAME_INTL, \
    CFG_SITE_NAME, \
    CFG_SITE_ADMIN_EMAIL, \
    CFG_MISCUTIL_SMTP_HOST, \
    CFG_MISCUTIL_SMTP_PORT, \
    CFG_VERSION, \
    CFG_DEVEL_SITE
from invenio.errorlib import register_exception
from invenio.messages import wash_language, gettext_set_language
from invenio.miscutil_config import InvenioMiscUtilError
from invenio.textutils import guess_minimum_encoding

try:
    from invenio.config import \
        CFG_MISCUTIL_SMTP_USER,\
        CFG_MISCUTIL_SMTP_PASS,\
        CFG_MISCUTIL_SMTP_TLS
except ImportError:
    CFG_MISCUTIL_SMTP_USER = ''
    CFG_MISCUTIL_SMTP_PASS = ''
    CFG_MISCUTIL_SMTP_TLS = False


def scheduled_send_email(fromaddr,
                         toaddr,
                         subject="",
                         content="",
                         header=None,
                         footer=None,
                         copy_to_admin=0,
                         attempt_times=1,
                         attempt_sleeptime=10,
                         user=None,
                         other_bibtasklet_arguments=None,
                         replytoaddr="",
                         bccaddr="",
                         ):
    """
    Like send_email, but send an email via the bibsched infrastructure.
    @param fromaddr: sender
    @type fromaddr: string
    @param toaddr: list of receivers
    @type toaddr: string (comma separated) or list of strings
    @param subject: the subject
    @param content: the body of the message
    @param header: optional header, otherwise default is used
    @param footer: optional footer, otherwise default is used
    @param copy_to_admin: set to 1 in order to send email to the admins
    @param attempt_times: try at least n times before giving up sending
    @param attempt_sleeptime: number of seconds to sleep between two attempts
    @param user: the user name to use when scheduling the bibtasklet. If
        None, the sender will be used
    @param other_bibtasklet_arguments: other arguments to append to the list
        of arguments to the call of task_low_level_submission
    @param replytoaddr: [string or list-of-strings] to be used for the
        reply-to header of the email (if string, then receivers are separated
        by ',')
    @param bccaddr: [string or list-of-strings] to be used for BCC header
        of the email (if string, then receivers are separated by ',')
    @return: the scheduled bibtasklet
    """
    from invenio.bibtask import task_low_level_submission
    if not isinstance(toaddr, (unicode, str)):
        toaddr = ','.join(toaddr)
    if not isinstance(replytoaddr, (unicode, str)):
        replytoaddr = ','.join(replytoaddr)
    toaddr = remove_temporary_emails(toaddr)
    if user is None:
        user = fromaddr
    if other_bibtasklet_arguments is None:
        other_bibtasklet_arguments = []
    else:
        other_bibtasklet_arguments = list(other_bibtasklet_arguments)
    if header is not None:
        other_bibtasklet_arguments.extend(("-a", "header=%s" % header))
    if footer is not None:
        other_bibtasklet_arguments.extend(("-a", "footer=%s" % footer))
    return task_low_level_submission(
        "bibtasklet", user, "-T", "bst_send_email",
        "-a", "fromaddr=%s" % fromaddr,
        "-a", "toaddr=%s" % toaddr,
        "-a", "replytoaddr=%s" % replytoaddr,
        "-a", "subject=%s" % subject,
        "-a", "content=%s" % content,
        "-a", "copy_to_admin=%s" % copy_to_admin,
        "-a", "attempt_times=%s" % attempt_times,
        "-a", "attempt_sleeptime=%s" % attempt_sleeptime,
        "-a", "bccaddr=%s" % bccaddr,
        *other_bibtasklet_arguments)


def send_email(fromaddr,
               toaddr,
               subject="",
               content="",
               html_content='',
               html_images=None,
               header=None,
               footer=None,
               html_header=None,
               html_footer=None,
               copy_to_admin=0,
               attempt_times=1,
               attempt_sleeptime=10,
               debug_level=0,
               ln=CFG_SITE_LANG,
               charset=None,
               replytoaddr="",
               attachments=None,
               bccaddr="",
               forward_failures_to_admin=True,
               ):
    """Send a forged email to TOADDR from FROMADDR with message created from
    subject, content and possibly header and footer.
@param fromaddr: [string] sender
    @param toaddr: [string or list-of-strings] list of receivers (if string,
        then receivers are separated by ','). BEWARE: If more than one
        recipient is given, the receivers are put in BCC and the To header
        will be "Undisclosed.Recipients:".
    @param subject: [string] subject of the email
    @param content: [string] content of the email
    @param html_content: [string] html version of the email
    @param html_images: [dict] dictionary of image id, image path
    @param header: [string] header to add, None for the Default
    @param footer: [string] footer to add, None for the Default
    @param html_header: [string] header to add to the html part, None for
        the Default
    @param html_footer: [string] footer to add to the html part, None for
        the Default
    @param copy_to_admin: [int] if 1 add CFG_SITE_ADMIN_EMAIL in receivers
    @param attempt_times: [int] number of tries
    @param attempt_sleeptime: [int] seconds in between tries
    @param debug_level: [int] debug level
    @param ln: [string] invenio language
    @param charset: [string] the content charset. By default is None which
        means to try to encode the email as ascii, then latin1 then utf-8.
    @param replytoaddr: [string or list-of-strings] to be used for the
        reply-to header of the email (if string, then receivers are separated
        by ',')
    @param attachments: list of paths of files to be attached. Alternatively,
        every element of the list could be a tuple: (filename, mimetype)
    @param bccaddr: [string or list-of-strings] to be used for BCC header
        of the email (if string, then receivers are separated by ',')
    @param forward_failures_to_admin: [bool] prevents infinite recursion in
        case of admin reporting, when the problem is not in the e-mail
        address format, but rather in the network

    If sending fails, try to send it ATTEMPT_TIMES times, and wait for
    ATTEMPT_SLEEPTIME seconds in between tries.

    e.g.:
    send_email('foo.bar@cern.ch', 'bar.foo@cern.ch', 'Let\'s try!',
        'check 1234', '<strong>check</strong> <em>1234</em><img src="cid:image1">',
        {'image1': '/tmp/quantum.jpg'})

    @return: [bool]: True if
email was sent okay, False if it was not.
    """
    if html_images is None:
        html_images = {}

    if isinstance(toaddr, (unicode, str)):
        toaddr = toaddr.strip().split(',')
    toaddr = remove_temporary_emails(toaddr)
    if isinstance(bccaddr, (unicode, str)):
        bccaddr = bccaddr.strip().split(',')

    usebcc = len(toaddr) > 1  # More than one address, let's use Bcc in place of To
    if copy_to_admin:
        if CFG_SITE_ADMIN_EMAIL not in toaddr:
            toaddr.append(CFG_SITE_ADMIN_EMAIL)
    if CFG_DEVEL_SITE:
        # if we are on a development site, we don't want to send external e-mails
        content = """
--------------------------------------------------------------
This message would have been sent to the following recipients:
%s
--------------------------------------------------------------
%s""" % (toaddr, content)
        toaddr = CFG_SITE_ADMIN_EMAIL
        usebcc = False
    body = forge_email(fromaddr, toaddr, subject, content, html_content,
                       html_images, usebcc, header, footer, html_header,
                       html_footer, ln, charset, replytoaddr, attachments,
                       bccaddr)
    _ = gettext_set_language(CFG_SITE_LANG)
    if attempt_times < 1 or not toaddr:
import os from torch.utils.ffi import create_extension sources = ["src/lib_cffi.cpp"] headers = ["src/lib_cffi.h"] extra_objects = ["src/bn.o"] with_cuda = True this_file = os.path.dirname(os.path.realpath(__file__)) extra_objects = [os
.path.join(this_file, fname) for fname in extra_objects] ffi = create_extension( "_ext", headers=headers, sources=sources,
relative_to=__file__, with_cuda=with_cuda, extra_objects=extra_objects, extra_compile_args=["-std=c++11"], ) if __name__ == "__main__": ffi.build()
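# Hedged build note (not part of the original script): create_extension only
# links the pre-built object listed in extra_objects, so src/bn.o must be
# compiled beforehand (e.g. with nvcc for this CUDA build); running this script
# afterwards produces the importable _ext module. torch.utils.ffi was removed
# in PyTorch 1.0, so this build file assumes an older (<= 0.4) PyTorch.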
#!/usr/bin/env python import gtk, sys, string class Socket: def __init_
_(self): window = gtk.Window() window.set_default_size(200, 200) socket = gtk.Socket() window.add(socket) print "Socket ID:", socket.get_id() if len(sys.argv) == 2: socket.add_id(long(sys.argv[1])) window.
connect("destroy", gtk.main_quit) socket.connect("plug-added", self.plugged_event) window.show_all() def plugged_event(self, widget): print "A plug has been inserted." Socket() gtk.main()
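# Hedged usage note (not part of the original example; PyGTK 2 / Python 2):
# run this socket side first and note the printed Socket ID; a separate
# gtk.Plug program created with that ID then embeds its widgets here.
# Alternatively, pass an existing plug's ID as sys.argv[1] so add_id() embeds
# it directly.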
ay the video And I watch 5 seconds of it And I pause the video Then a "load_video" event is emitted And a "play_video" event is emitted And a "pause_video" event is emitted """ def is_video_event(event): """Filter out anything other than the video events of interest""" return event['event_type'] in ('load_video', 'play_video', 'pause_video') captured_events = [] with self.capture_events(is_video_event, number_of_matches=3, captured_events=captured_events): self.navigate_to_video() self.video.click_player_button('play') self.video.wait_for_position('0:05') self.video.click_player_button('pause') for idx, video_event in enumerate(captured_events): self.assert_payload_contains_ids(video_event) if idx == 0: assert_event_matches({'event_type': 'load_video'}, video_event) elif idx == 1: assert_event_matches({'event_type': 'play_video'}, video_event) self.assert_valid_control_event_at_time(video_event, 0) elif idx == 2: assert_event_matches({'event_type': 'pause_video'}, video_event) self.assert_valid_control_event_at_time(video_event, self.video.seconds) def test_strict_event_format(self): """ This test makes a very strong assertion about the fields present in events. The goal of it is to ensure that new fields are not added to all events mistakenly. It should be the only existing test that is updated when new top level fields are added to all events. """ captured_events = [] with self.capture_events(lambda e: e['event_type'] == 'load_video', captured_events=captured_events): self.navigate_to_video() load_video_event = captured_events[0] # Validate the event payload self.assert_payload_contains_ids(load_video_event) # We cannot predict the value of these fields so we make weaker assertions about them dynamic_string_fields = ( 'accept_language', 'agent', 'host', 'ip', 'event', 'session' ) for field in dynamic_string_fields: self.assert_field_type(load_video_event, field, basestring) self.assertIn(field, load_video_event, '{0} not found in the root of the event'.format(field)) del load_video_event[field] # A weak assertion for the timestamp as well self.assert_field_type(load_video_event, 'time', datetime.datetime) del load_video_event['time'] # Note that all unpredictable fields have been deleted from the event at this point course_key = CourseKey.from_string(self.course_id) static_fields_pattern = { 'context': { 'course_id': unicode(course_key), 'org_id': course_key.org, 'path': '/event', 'user_id': self.user_info['user_id'] }, 'event_source': 'browser', 'event_type': 'load_video', 'username': self.user_info['username'], 'page': self.browser.current_url, 'referer': self.browser.current_url, 'name': 'load_video', } assert_events_equal(static_fields_pattern, load_video_event) @attr(shard=8) @ddt.ddt class VideoBumperEventsTest(VideoEventsTestMixin): """ Test bumper video event emission """ # helper methods def watch_video_and_skip(self): """ Wait 5 seconds and press "skip" button. """ self.video.wait_for_position('0:05') self.video.click_player_button('skip_bumper') def watch_video_and_dismiss(self): """ Wait 5 seconds and press "do not show again" button. """ self.video.wait_for_position('0:05') self.video.click_player_button('do_not_show_again') def wait_for_state(self, state='finished'): """ Wait until video will be in given state. Finished state means that video is played to the end. """ self.video.wait_for_state(state) def add_bumper(self): """ Add video bumper to the course. 
""" additional_data = { u'video_bumper': { u'value': { "transcripts": {}, "video_id": "video_001" } } } self.course_fixture.add_advanced_settings(additional_data) @ddt.data( ('edx.video.bumper.skipped', watch_video_and_skip), ('edx.video.bumper.dismissed', watch_video_and_dismiss), ('edx.video.bumper.stopped', wait_for_state) ) @ddt.unpack def test_video_control_events(self, event_type, action): """ Scenario: Video component with pre-roll emits events correctly Given the course has a Video component in "Youtube" mode with pre-roll enabled And I click on the video poster And the pre-roll video start playing And I watch (5 seconds/5 seconds/to the end of) it And I click (skip/do not show again) video button Then a "edx.video.bumper.loaded" event is emitted And a "edx.video.bumper.played" event is emitted And a "edx.video.bumper.skipped/dismissed/stopped" event is emitted And a "load_video" event is emitted And a "play_video" event is emitted """ def is_video_event(event): """Filter out anything other than the video events of interest""" return event['event_type'] in ( 'edx.video.bumper.loaded', 'edx.video.bumper.played', 'edx.video.bumper.skipped', 'edx.video.bumper.dismissed', 'edx.video.bumper.stopped', 'load_video', 'play_video', 'pause_video' ) and self.video.state != 'buffering' captured_events = [] self.add_bumper() with self.capture_events(is_video_event, number_of_matches=5, captured_events=captured_events): self.navigate_to_video_no_render() self.video.click_on_poster() self.video.wait_for_video_bumper_render() sources, duration = self.video.sources[0], self.video.duration action(self) # Filter subsequent events that appear due to bufferisation: edx.video.bumper.played # As bumper does not emit pause event, we filter subsequent edx.video.bumper.played events from # the list, except first. filtered_events = [] for video_event in captured_events: is_played_event = video_event['event_type'] == 'edx.video.bumper.played' appears_again = filtered_events and video_event['event_type'] == filtered_events[-1]
['event_type'] if is_played_event and appears_again: continue filtered_events.append(video_event) for idx, video_event in enumerate(filtered_events): if idx < 3: self.assert_bumper_payload_contains_ids(video_event, sources, duration) else: self.assert_payload_contains_ids(video_event) if idx == 0:
assert_event_matches({'event_type': 'edx.video.bumper.loaded'}, video_event) elif idx == 1: assert_event_matches({'event_type': 'edx.video.bumper.played'}, video_event) self.assert_valid_control_event_at_time(video_event, 0) elif idx == 2: assert_event_matches({'event_type': event_type}, video_event) elif idx == 3: assert_event_matches({'event_type': 'load_video'}, video_event) elif idx == 4: assert_event_matches({'event_type': 'play_video'}, video_event) self.assert_valid_control_event_at_time(video_event, 0) def assert_bumper_payload_contains_ids(self, video_event, sources, duration): """
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "test/QT/up-to-date.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

"""
Validate that a stripped-down real-world Qt configuration (thanks
to Leanid Nazdrynau) with a generated .h file is correctly
up-to-date after a build.

(This catches a bug that was introduced during a signature refactoring
ca. September 2005.)
"""

import os

import TestSCons

_obj = TestSCons._obj

test = TestSCons.TestSCons()

if not os.environ.get('QTDIR', None):
    x = "External environment variable $QTDIR not set; skipping test(s).\n"
    test.skip_test(x)

test.subdir('layer',
            ['layer', 'aclock'],
            ['layer', 'aclock', 'qt_bug'])

test.write('SConstruct', """\
import os
aa=os.getcwd()
env=Environment(tools=['default','expheaders','qt'],toolpath=[aa])
env["EXP_HEADER_ABS"]=os.path.join(os.getcwd(),'include')
if not os.access(env["EXP_HEADER_ABS"],os.F_OK):
   os.mkdir (env["EXP_HEADER_ABS"])
Export('env')
env.SConscript('layer/aclock/qt_bug/SConscript')
""")

test.write('expheaders.py', """\
import SCons.Defaults
def ExpHeaderScanner(node, env, path):
   return []
def generate(env):
   HeaderAction=SCons.Action.Action([SCons.Defaults.Copy('$TARGET','$SOURCE'),SCons.Defaults.Chmod('$TARGET',0755)])
   HeaderBuilder= SCons.Builder.Builder(action=HeaderAction)
   env['BUILDERS']['ExportHeaders'] = HeaderBuilder
def exists(env):
   return 0
""")

test.write(['layer', 'aclock', 'qt_bug', 'SConscript'], """\
import os

Import ("env")

env.ExportHeaders(os.path.join(env["EXP_HEADER_ABS"],'main.h'), 'main.h')
env.ExportHeaders(os.path.join(env["EXP_HEADER_ABS"],'migraform.h'), 'migraform.h')
env.Append(CPPPATH=env["EXP_HEADER_ABS"])
env.StaticLibrary('all',['main.ui','migraform.ui','my.cc'])
""")

test.write(['layer', 'aclock', 'qt_bug', 'main.ui'], """\
<!DOCTYPE UI><UI version="3.3" stdsetdef="1">
<class>Main</class>
<widget class="QWizard">
    <property name="name">
        <cstring>Main</cstring>
    </property>
    <property name="geometry">
        <rect>
<x>0</x> <y>0</y> <width>600</width> <height>385</height> </rect> </property> </widget> <includes> <include location="local" impldecl="in implementation">migraform.h</include> </includes> </UI> """) test.write(['layer', 'aclock', 'qt_bug', 'mi
graform.ui'], """\ <!DOCTYPE UI><UI version="3.3" stdsetdef="1"> <class>MigrateForm</class> <widget class="QWizard"> <property name="name"> <cstring>MigrateForm</cstring> </property> <property name="geometry"> <rect> <x>0</x> <y>0</y> <width>600</width> <height>385</height> </rect> </property> </widget> </UI> """) test.write(['layer', 'aclock', 'qt_bug', 'my.cc'], """\ #include <main.h> """) my_obj = 'layer/aclock/qt_bug/my'+_obj test.run(arguments = my_obj, stderr=None) expect = my_obj.replace( '/', os.sep ) test.up_to_date(options = '--debug=explain', arguments = (expect), stderr=None) test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
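# Hedged usage note (not in the original): within an SCons source checkout this
# test would normally be invoked through the project's runner, e.g.
#   python runtest.py test/QT/up-to-date.py
# with $QTDIR pointing at a Qt 3 installation, since the test skips otherwise.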
esult[bucket] = res return result class CacheMissRatio: def run(self, accessor): result = {} cluster = 0 for bucket, stats_info in stats_buffer.buckets.iteritems(): values = stats_info[accessor["scale"]][accessor["counter"]] timestamps = values["timestamp"] timestamps = [x - timestamps[0] for x in timestamps] nodeStats = values["nodeStats"] samplesCount = values["samplesCount"] trend = [] total = 0 data = [] num_error = [] for node, vals in nodeStats.iteritems(): #a, b = util.linreg(timestamps, vals) value = sum(vals) / samplesCount total += value if value > accessor["threshold"]: num_error.append({"node":node, "value":value}) trend.append((node, util.pretty_float(value))) data.append(value) total /= len(nodeStats) trend.append(("total", util.pretty_float(total))) trend.append(("variance", util.two_pass_variance(data))) if len(num_error) > 0: trend.append(("error", num_error)) cluster += total result[bucket] = trend if len(stats_buffer.buckets) > 0: result["cluster"] = util.pretty_float(cluster / len(stats_buffer.buckets)) return result class MemUsed: def run(self, accessor): result = {} cluster = 0 for bucket, stats_info in stats_buffer.buckets.iteritems(): values = stats_info[accessor["scale"]][accessor["counter"]] timestamps = values["timestamp"] timestamps = [x - timestamps[0] for x in timestamps] nodeStats = values["nodeStats"] samplesCount = values["samplesCount"] trend = [] total = 0 data = [] for node, vals in nodeStats.iteritems(): avg = sum(vals) / samplesCount trend.append((node, util.size_label(avg))) data.append(avg) #print data trend.append(("variance", util.two_pass_variance(data))) result[bucket] = trend return result class ItemGrowth: def run(self, accessor): result = {} start_cluster = 0 end_cluster = 0 for bucket, stats_info in stats_buffer.buckets.iteritems(): trend = [] values = stats_info[accessor["scale"]][accessor["counter"]] timestamps = values["timestamp"] timestamps = [x - timestamps[0] for x in timestamps] nodeStats = values["nodeStats"] samplesCount = values["samplesCount"] for node, vals in nodeStats.iteritems(): a, b = util.linreg(timestamps, vals) if b < 1: trend.append((node, 0)) else: start_val = b start_cluster += b end_val = a * timestamps[-1] + b end_cluster += end_val rate = (end_val * 1.0 / b - 1.0) * 100 trend.append((node, util.pretty_float(rate) + "%")) result[bucket] = trend if len(stats_buffer.buckets) > 0: rate = (end_cluster * 1.0 / start_cluster - 1.0) * 100 result["cluster"] = util.pretty_float(rate) + "%" return result class NumVbuckt: def run(self, accessor): result = {} for bucket, stats_info in stats_buffer.buckets.iteritems(): num_error = [] values = stats_info[accessor["scale"]][accessor["counter"]] nodeStats = values["nodeStats"] for node, vals in nodeStats.iteritems(): if vals[-1] < accessor["threshold"]: num_error.append({"node":node, "value": int(vals[-1])}) if len(num_error) > 0: result[bucket] = {"error" : num_error} return result class RebalanceStuck: def run(self, accessor): result = {} for bucket, bucket_stats in stats_buffer.node_stats.iteritems(): num_error = [] for node, stats_info in bucket_stats.iteritems(): for key, value in stats_info.iteritems(): if key.find(accessor["counter"]) >= 0: if accessor.has_key("threshold"): if int(value) > accessor["threshold"]: num_error.append({"node":node, "value": (key, value)}) else: num_error.append({"node":node, "value": (key, value)}) if len(num_error) > 0:
result[bucket] = {"error" : num_error} return result class MemoryFramentation: def run(self, accessor): result = {} for bucket, bucket_stats in stats_buffer.node_stats.iteritems(): num_error = [] for node, stats_info in bucket_stats.iteritems(): for key, value in stats_info.ite
ritems(): if key.find(accessor["counter"]) >= 0: if accessor.has_key("threshold"): if int(value) > accessor["threshold"]: if accessor.has_key("unit"): if accessor["unit"] == "time": num_error.append({"node":node, "value": (key, util.time_label(value))}) elif accessor["unit"] == "size": num_error.append({"node":node, "value": (key, util.size_label(value))}) else: num_error.append({"node":node, "value": (key, value)}) else: num_error.append({"node":node, "value": (key, value)}) if len(num_error) > 0: result[bucket] = {"error" : num_error} return result class EPEnginePerformance: def run(self, accessor): result = {} for bucket, bucket_stats in stats_buffer.node_stats.iteritems(): num_error = [] for node, stats_info in bucket_stats.iteritems(): for key, value in stats_info.iteritems(): if key.find(accessor["counter"]) >= 0: if accessor.has_key("threshold"): if accessor["counter"] == "flusherState" and value != accessor["threshold"]: num_error.append({"node":node, "value": (key, value)}) elif accessor["counter"] == "flusherCompleted" and value == accessor["threshold"]: num_error.append({"node":node, "value": (key, value)}) else: if value > accessor["threshold"]: num_error.append({"node":node, "value": (key, value)}) if len(num_error) > 0: result[bucket] = {"error" : num_error} return result class TotalDataSize: def run(self, accessor): result = [] total = 0 for node, nodeinfo in stats_buffer.nodes.iteritems(): if nodeinfo["StorageInfo"].has_key("hdd"): total += nodeinfo['StorageInfo']['hdd']['usedByData'] result.append(util.size_label(total)) return result class AvailableDiskSpace: def run(self, accessor): result = [] total = 0 for node, nodeinfo in stats_buffer.nodes.iteritems(): if nodeinfo["StorageInfo"].has_key("hdd"): total += nodeinfo['StorageInfo']['hdd']['free'] result.append(util.size_label(total)) return result ClusterCapsule = [ {"name" : "TotalDataSize", "ingredients" : [ { "name" : "totalDataSize", "description" : "Total Data Size across cluster", "code" : "TotalDataSize", } ], "clusterwi
"""`main` is the top level module for your Flask application.""" # Import the Flask Framework import os import json from flask import Flask, request, send_from_directory, render_template app = Flask(__name__, static_url_path='') # Note: We don't need to call run() since our application is embedded within # the App Engine WSGI application server. @app.route('/') def hello(): """Return a friendly HTTP greeting.""" return 'Hello World!' @app.errorhandler(404) def page_not_found(e): """Return a custom 404 error.""" return 'Sorry, Nothing at this URL.', 404 @app.errorhandler(500) def application_error(e): """Return a custom 500 error.""" return 'Sorry, unexpected error: {}'.format(e), 500 @app.route("/spk/json/<path:path>", methods=['POST', 'GET']) def send_js(path): file, ext = os.pat
h.splitext(path) if ext == "": ext = ".json" SITE_ROOT = os.path.realpath(os.path.dirname(__file__)) json_url = os.path.join(SITE_ROOT, "static", "json", file + ext) s = '' with open(json_url) as f:
for line in f: s += line return s if __name__ == '__main__': app.run()
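# A quick sanity check using Flask's built-in test client -- a sketch only;
# it assumes this module is importable as `main` and that a hypothetical
# static/json/demo.json file exists next to it:
#
#     from main import app
#     client = app.test_client()
#     assert client.get('/').data == b'Hello World!'
#     print(client.get('/spk/json/demo').data)  # send_js appends ".json"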
me": "Always Disable"}, ] LIGHT_MODE_MOTION = "On Motion - Always" LIGHT_MODE_MOTION_DARK = "On Motion - When Dark" LIGHT_MODE_DARK = "When Dark" LIGHT_MODE_OFF = "Manual" LIGHT_MODES = [LIGHT_MODE_MOTION, LIGHT_MODE_DARK, LIGHT_MODE_OFF] LIGHT_MODE_TO_SETTINGS = { LIGHT_MODE_MOTION: (LightModeType.MOTION.value, LightModeEnableType.ALWAYS.value), LIGHT_MODE_MOTION_DARK: ( LightModeType.MOTION.value, LightModeEnableType.DARK.value, ), LIGHT_MODE_DARK: (LightModeType.WHEN_DARK.value, LightModeEnableType.DARK.value), LIGHT_MODE_OFF: (LightModeType.MANUAL.value, None), } MOTION_MODE_TO_LIGHT_MODE = [ {"id": LightModeType.MOTION.value, "name": LIGHT_MODE_MOTION}, {"id": f"{LightModeType.MOTION.value}Dark", "name": LIGHT_MODE_MOTION_DARK}, {"id": LightModeType.WHEN_DARK.value, "name": LIGHT_MODE_DARK}, {"id": LightModeType.MANUAL.value, "name": LIGHT_MODE_OFF}, ] DEVICE_RECORDING_MODES = [ {"id": mode.value, "name": mode.value.title()} for mode in list(RecordingMode) ] DEVICE_CLASS_LCD_MESSAGE: Final = "unifiprotect__lcd_message" @dataclass class ProtectSelectEntityDescription(ProtectRequiredKeysMixin, SelectEntityDescription): """Describes UniFi Protect Select entity.""" ufp_options: list[dict[str, Any]] | None = None ufp_enum_type: type[Enum] | None = None ufp_set_function: str | None = None CAMERA_SELECTS: tuple[ProtectSelectEntityDescription, ...] = ( ProtectSelectEntityDescription( key=_KEY_REC_MODE, name="Recording Mode", icon="mdi:video-outline", entity_category=EntityCategory.CONFIG, ufp_options=DEVICE_RECORDING_MODES, ufp_enum_type=RecordingMode, ufp_value="recording_settings.mode", ufp_set_function="set_recording_mode", ), ProtectSelectEntityDescription( key=_KEY_IR, name="Infrared Mode", icon="mdi:circle-opacity", entity_category=EntityCategory.CONFIG, ufp_required_field="feature_flags.has_led_ir", ufp_options=INFRARED_MODES, ufp_enum_type=IRLEDMode, ufp_value="isp_settings.ir_led_mode", ufp_set_function="set_ir_led_model", ), ProtectSelectEntityDescription( key=_KEY_DOORBELL_TEXT, name="Doorbell Text", icon="mdi:card-text", entity_category=EntityCategory.CONFIG, device_class=DEVICE_CLASS_LCD_MESSAGE, ufp_required_field="feature_flags.has_lcd_screen", ufp_value="lcd_message", ), ) LIGHT_SELECTS: tuple[ProtectSelectEntityDescription, ...] = ( ProtectSelectEntityDescription( key=_KEY_LIGHT_MOTION, name="Light Mode", icon="mdi:spotlight", entity_category=EntityCategory.CONFIG, ufp_options=MOTION_MODE_TO_LIGHT_MODE, ufp_value="light_mode_settings.mode", ), ProtectSelectEntityDescription( key=_KEY_PAIRED_CAMERA, name="Paired Camera", icon="mdi:cctv", entity_category=EntityCategory.CONFIG, ufp_value="camera_id", ), ) VIEWER_SELECTS: tuple[ProtectSelectEntityDescription, ...] 
= ( ProtectSelectEntityDescription( key=_KEY_VIEWER, name="Liveview", icon="mdi:view-dashboard", entity_category=None, ufp_value="liveview", ufp_set_function="set_liveview", ), ) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: entity_platform.AddEntitiesCallback, ) -> None: """Set up number entities for UniFi Protect integration.""" data: ProtectData = hass.data[DOMAIN][entry.entry_id] entities: list[ProtectDeviceEntity] = async_all_device_entities( data, ProtectSelects, camera_descs=CAMERA_SELECTS, light_descs=LIGHT_SELECTS, viewer_descs=VIEWER_SELECTS, ) async_add_entities(entities) platform = entity_platform.async_get_current_platform() platform.async_register_entity_service( SERVICE_SET_DOORBELL_MESSAGE, SET_DOORBELL_LCD_MESSAGE_SCHEMA, "async_set_doorbell_message", ) class ProtectSelects(ProtectDeviceEntity, SelectEntity): """A UniFi Protect Select Entity.""" def __init__( self, data: ProtectData, device: Camera | Light | Viewer, description: ProtectSelectEntityDescription, ) -> None: """Initialize the unifi protect select entity.""" assert description.ufp_value is not None self.device: Camera | Light | Viewer = device self.entity_description: ProtectSelectEntityDescription = description super().__init__(data) self._attr_name = f"{self.device.name} {self.entity_description.name}" options = description.ufp_options if options is not None: self._attr_options = [item["name"] for item in options] self._hass_to_unifi_options: dict[str, Any] = { item["name"]: item["id"] for item in options }
self._unifi_to_hass_options: dict[Any, str] = { item["id"]: item["name"] for item in options }
self._async_set_dynamic_options() @callback def _async_update_device_from_protect(self) -> None: super()._async_update_device_from_protect() # entities with categories are not exposed for voice and safe to update dynamically if self.entity_description.entity_category is not None: _LOGGER.debug( "Updating dynamic select options for %s", self.entity_description.name ) self._async_set_dynamic_options() @callback def _async_set_dynamic_options(self) -> None: """Options that do not actually update dynamically. This is due to possible downstream platforms dependencies on these options. """ if self.entity_description.ufp_options is not None: return if self.entity_description.key == _KEY_VIEWER: options = [ {"id": item.id, "name": item.name} for item in self.data.api.bootstrap.liveviews.values() ] elif self.entity_description.key == _KEY_DOORBELL_TEXT: default_message = ( self.data.api.bootstrap.nvr.doorbell_settings.default_message_text ) messages = self.data.api.bootstrap.nvr.doorbell_settings.all_messages built_messages = ( {"id": item.type.value, "name": item.text} for item in messages ) options = [ {"id": "", "name": f"Default Message ({default_message})"}, *built_messages, ] elif self.entity_description.key == _KEY_PAIRED_CAMERA: options = [{"id": TYPE_EMPTY_VALUE, "name": "Not Paired"}] for camera in self.data.api.bootstrap.cameras.values(): options.append({"id": camera.id, "name": camera.name}) self._attr_options = [item["name"] for item in options] self._hass_to_unifi_options = {item["name"]: item["id"] for item in options} self._unifi_to_hass_options = {item["id"]: item["name"] for item in options} @property def current_option(self) -> str: """Return the current selected option.""" assert self.entity_description.ufp_value is not None unifi_value = get_nested_attr(self.device, self.entity_description.ufp_value) if unifi_value is None: unifi_value = TYPE_EMPTY_VALUE elif isinstance(unifi_value, Liveview): unifi_value = unifi_value.id elif self.entity_description.key == _KEY_LIGHT_MOTION: assert isinstance(self.device, Light) # a bit of extra to allow On Motion Always/Dark if ( self.device.light_mode_settings.mode == LightModeType.MOTION and self.device.light_mode_settings.enable_at == LightModeEnableType.DARK ): unifi_value = f"{LightModeType.MOTION.value}Dark" elif self.entity_description.key == _KEY_DOORBELL_TEXT: assert isin
from django.db.backends.postgresql.creation import *  # NOQA
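# The wildcard import above makes this module a thin alias for Django's
# stock PostgreSQL creation backend (re-exporting its public names, notably
# DatabaseCreation, unchanged); the NOQA marker silences star-import lint
# warnings.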
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

config = {
    'description': 'My Project',
    'author': 'Wouter Oosterveld',
    'url': 'URL to get it at.',
    'download_url': 'Where to download it.',
    'author_email': 'wouter@fizzyflux.nl',
    'version': '0.1',
    'install_requires': ['nose', 'what', 'boto'],
    'packages': ['snaps'],
    'scripts': ['scripts/snaps'],
    'name': 'snaps'
}

setup(**config)
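# Typical invocations for a setup script like this one (standard
# distutils/setuptools commands, not specific to this project):
#
#     python setup.py sdist      # build a source distribution
#     python setup.py install    # install the 'snaps' package and script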
# This file is part of Indico. # Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from hashlib import sha1 from flask import render_template from indico.modules.events.agreements.models.agreements import Agreement from indico.modules.events.settings import EventSettingsProxy from indico.util.caching import make_hashable, memoize_request from indico.util.decorators import cached_classproperty, classproperty from indico.util.i18n import _ from indico.util.string import return_ascii from indico.web.flask.templating import get_overridable_template_name, get_template_module class AgreementPersonInfo(object): def __init__(self, name=None, email=None, user=None, data=None): if user: if not name: name = user.full_name if not email: email = user.email if not name: raise ValueError('name is missing') self.name = name # Note: If you have persons with no email, you *MUST* have data that uniquely identifies such persons self.email = email or None self.user = user self.data = data @return_ascii def __repr__(self): return '<AgreementPersonInfo({}, {}, {})>'.format(self.name, self.email, self.identifier) @property def identifier(self): data_string = None if self.data: data_string = '-'.join('{}={}'.format(k, make_hashable(v)) for k, v in sorted(self.data.viewitems())) identifier = '{}:{}'.format(self.email, data_string or None) return sha1(identifier).hexdigest() class AgreementDefinitionBase(object): """Base class for agreement definitions""" #: unique name of the agreement definition name = None #: readable name of the agreement definition title = None #: optional and short description of the agreement definition description = None #: url to obtain the paper version of the agreement form paper_form_url = None #: template of the agreement form - agreement definition name by default form_template_name = None #: template of the email body - emails/agreement_default_body.html by default email_body_template_name = None #: plugin containing this agreement definition - assigned automatically plugin = None #: default settings for an event default_event_settings = {'manager_notifications_enabled': True} #: default message to display when the agreement definition type is disabled disabled_reason = _('No signatures needed.') @classproperty @classmethod def locator(cls): return {'definition': cls.name} @cached_classproperty @classmethod def event_settings(cls): return EventSettingsProxy('agreement_{}'.format(cls.name), cls.default_event_settings) @classmethod def can_access_api(cls, user, event): """Checks if a user can list the agreements for an event""" return event.can_manage(user) @classmethod def extend_api_data(cls, event, person, agreement, data): # pragma: no cover """Extends the data returned in the HTTP API :param event: the event :param person: the :class:`AgreementPersonInfo` :param agreement: the :class:`Agreement` if available 
:param data: a dict containing the default data for the agreement """ pass @classmethod def get_email_body_template(cls, event, **kwargs): """Returns the template of the email body for th
is agreement definition""" template_n
ame = cls.email_body_template_name or 'emails/agreement_default_body.html' template_path = get_overridable_template_name(template_name, cls.plugin, 'events/agreements/') return get_template_module(template_path, event=event) @classmethod @memoize_request def get_people(cls, event): """Returns a dictionary of :class:`AgreementPersonInfo` required to sign agreements""" people = cls.iter_people(event) if people is None: return {} return {p.identifier: p for p in people} @classmethod def get_people_not_notified(cls, event): """Returns a dictionary of :class:`AgreementPersonInfo` yet to be notified""" people = cls.get_people(event) sent_agreements = {a.identifier for a in event.agreements.filter_by(type=cls.name)} return {k: v for k, v in people.items() if v.identifier not in sent_agreements} @classmethod def get_stats_for_signed_agreements(cls, event): """Returns a digest of signed agreements on an event :param event: the event :return: (everybody_signed, num_accepted, num_rejected) """ people = cls.get_people(event) identifiers = [p.identifier for p in people.itervalues()] query = event.agreements.filter(Agreement.type == cls.name, Agreement.identifier.in_(identifiers)) num_accepted = query.filter(Agreement.accepted).count() num_rejected = query.filter(Agreement.rejected).count() everybody_signed = len(people) == (num_accepted + num_rejected) return everybody_signed, num_accepted, num_rejected @classmethod def is_active(cls, event): """Checks if the agreement type is active for a given event""" return bool(cls.get_people(event)) @classmethod def is_agreement_orphan(cls, event, agreement): """Checks if the agreement no longer has a corresponding person info record""" return agreement.identifier not in cls.get_people(event) @classmethod def render_form(cls, agreement, form, **kwargs): template_name = cls.form_template_name or '{}.html'.format(cls.name.replace('-', '_')) template_path = get_overridable_template_name(template_name, cls.plugin, 'events/agreements/') return render_template(template_path, agreement=agreement, form=form, **kwargs) @classmethod def render_data(cls, event, data): # pragma: no cover """Returns extra data to display in the agreement list If you want a column to be rendered as HTML, use a :class:`~markupsafe.Markup` object instead of a plain string. :param event: The event containing the agreements :param data: The data from the :class:`AgreementPersonInfo` :return: List of extra columns for a row """ return None @classmethod def handle_accepted(cls, agreement): # pragma: no cover """Handles logic on agreement accepted""" pass @classmethod def handle_rejected(cls, agreement): # pragma: no cover """Handles logic on agreement rejected""" pass @classmethod def handle_reset(cls, agreement): # pragma: no cover """Handles logic on agreement reset""" pass @classmethod def iter_people(cls, event): # pragma: no cover """Yields :class:`AgreementPersonInfo` required to sign agreements""" raise NotImplementedError
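# A minimal sketch of a concrete agreement definition built on the base
# class above. The class name, title and `event.speakers` attribute are
# hypothetical, for illustration only:
#
#     class SpeakerReleaseAgreement(AgreementDefinitionBase):
#         name = 'speaker-release'
#         title = _('Speaker release form')
#
#         @classmethod
#         def iter_people(cls, event):
#             # yield one AgreementPersonInfo per person who must sign
#             for speaker in event.speakers:  # hypothetical attribute
#                 yield AgreementPersonInfo(user=speaker)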
m_cache[vm]["instance"]["state"]["state"].title() server_mac_address = vm_cache[vm]['id'] server_mac_address = str(server_mac_address).replace(':','-') if(vm_state=="Running"): isotope_filter_classes = " linux " if(data_median<17): color = "lightBlue " if(data_median>=17 and data_median<=35): color = "green " isotope_filter_classes += " busy" if(data_median>35 and data_median<=50): color = "darkGreen " isotope_filter_classes += " busy" if(data_median>50 and data_median<=70): color = "lightOrange " isotope_filter_classes += " busy" if(data_median>70): isotope_filter_classes += " busy critical" color = "red " if data_median>85: vm_state = "Hot hot hot!" if(vm_state=="Stopping"): color = "pink " if(vm_state=="Pending"): color = "pink " if(vm_state=="Shutting-Down"): color = "pink " if(vm_state=="Stopped"): isotope_filter_classes += " offline" if(vm_cache[vm]['provider']!='agent'): isotope_filter_classes += " cloud" ajax_vms_response += "\"" ajax_vms_response += server_mac_address ajax_vms_response += "\": {" ajax_vms_response += "\"vmcolor\":\"" ajax_vms_response += color ajax_vms_response += "\"," ajax_vms_response += "\"vmname\":\"" ajax_vms_response += instance_name ajax_vms_response += "\"," ajax_vms_response += "\"vmtitle\":\"" ajax_vms_response += isotope_filter_classes ajax_vms_response += "\"," ajax_vms_response += "\"averge\":\"" ajax_vms_response += data ajax_vms_response += "\"," ajax_vms_response += "\"state\":\"" ajax_vms_response += vm_state ajax_vms_response += "\"," ajax_vms_response += "\"link\":\"" if(vm_cache[vm]['provider']=='agent'): ajax_vms_response += "/server/"+vm+"/" else: ajax_vms_response += "/aws/"+vm+"/" ajax_vms_response += "\"" ajax_vms_response += "}," if(c==len(vm_cache)-1): ajax_vms_response += "}" c+=1 #print '-_'*80 #print vm_cache[vm]["instance"]["state"]["state"].title(), vm ajax_vms_response = ajax_vms_response.replace(",}","}") if(not vm_cache): ajax_vms_response = {} return render_to_response('ajax_virtual_machines.html', {'user':user,'ajax_vms_response':ajax_vms_response,'vms_cached_response':vm_cache,}, context_instance=RequestContext(request)) @login_required() def ajax_aws_graphs(request, instance_id, graph_type="all"): print '-- ajax_aws_graphs', request.user user = request.user profile = userprofile.objects.get(user=request.user) vms_cache = Cache.objects.get(user=user) vm_cache = vms_cache.vms_response vm_cache = base64.b64decode(vm_cache) try: vm_cache = pickle.loads(vm_cache)[instance_id] except: return HttpResponse("XXX " + instance_id) if(vm_cache['user_id']!=request.user.id): return HttpResponse("access denied") aws_access_key = profile.aws_access_key aws_secret_key = profile.aws_secret_key aws_ec2_verified = profile.aws_ec2_verified ec2_region = vm_cache['instance']['region']['name'] ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key) cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key) reservations = ec2conn.get_all_instances(instance_ids=[instance_id,]) instance = reservations[0].instances[0] end = datetime.datetime.utcnow() start = end - datetime.timedelta(days=10) metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance_id}, metric_name="CPUUtilization")[0] cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent',period=3600) return HttpResponse("data " + instance_id + "=" + str(instance) + " ** " + graph_type.upper()) @login_required() def 
ajax_server_graphs(request, hwaddr, graph_type=""): print '-- ajax_server_graphs, type', graph_type print request.user graphs_mixed_respose = [] secret = request.POST['secret'] uuid = request.POST['server'] uuid = uuid.replace('-',':') server = mongo.servers.find_one({'secret':secret,'uuid':uuid,}) print 'debug', secret, uuid try: uuid = server['uuid'] except: return HttpResponse("access denied") server_status = "Running" if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20): server_status = "Stopped" if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800): server_status = "Offline" #activity = mongo.activity.find({'uuid':uuid,}).sort('_id',-1).limit(3) if(graph_type=="server_info"): graphs_mixed_respose = {} graphs_mixed_respose['name'] = server['name'] graphs_mixed_respose['server_info_hostname'] = server['hostname'] graphs_mixed_respose['cpu_used'] = server['cpu_usage']['cpu_used'] graphs_mixed_respose['memory_used'] = server['memory_usage']['memory_used_percentage'] graphs_mixed_respose['swap_used'] = server['memory_usage']['swap_used_percentage'] graphs_mixed_respose['loadavg_used'] = server['loadavg'][1] graphs_mixed_respose['server_info_uptime'] = server['uptime'] graphs_mixed_respose['server_info_loadavg'] = server['loadavg'] graphs_mixed_respose['server_info_status'] = server_status graphs_mixed_respose = str(graphs_mixed_respose).replace('u"','"') graphs_mixed_respose = graphs_mixed_respose.replace("'",'"') graphs_mixed_respose = str(graphs_mixed_respose).replace('u"','"') return HttpResponse(graphs_mixed_respose, content_type="application/json") if(graph_type=="processes"): pr
ocesses_ = [] processes = server['processes'] c=0 for line in processes: if(c>0): if not line:break line = line.split(' ') line_ = [] for i in line:
if i: line_.append(i) line = line_ process_user = line[0] process_pid = line[1] process_cpu = line[2] process_mem = line[3] process_vsz = line[4] process_rss = line[5] process_tty = line[6] process_stat = line[7] process_start_time = line[8]+'-'+line[9] process_command = line[10:] process_name = clean_ps_command(process_command[0]) process = { 'pid': process_pid, 'cpu': process_cpu+'%', 'mem': process_mem+'%', # 'vsz': process_vsz, # 'rss': process_rss, # 'tty': process_tty, # 'stat': process_stat, # 'start_time': process_start_time, 'process': process_name, 'command': ' '.join(str(x) for x in process_command).replace("[", "").replace("]","") } process['user'] = '<span class=\\"label label-success\\">' if int(float(process_cpu)) > 50: process['user'] = '<span class=\\"label label-warning\\">' if int(float(process_cpu)) > 75: process['user'] = '<span class=\\"label label-danger\\">' process['user'] += process_user
# Python - 3.6.0
century = lambda year: year // 100 + ((year % 100) > 0)
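# Worked examples of the integer arithmetic above:
#   century(1905) -> 20   (1905 // 100 = 19; remainder 5 > 0 adds 1)
#   century(2000) -> 20   (2000 // 100 = 20; remainder 0 adds nothing)
#   century(2001) -> 21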
from datetime import datetime from django.http import HttpResponse, HttpResponseRedirect from django.views.generic import View, ListView, DetailView from django.views.generic.edit import CreateView, UpdateView from content.models import Sub, SubFollow, Post, Commit from content.forms import SubForm, PostForm, CommitForm from notify.models import Noty from core.core import random_avatar_sub class Create
SubView(CreateView): template_name = 'content/sub_create.html' form_class = SubForm def form_valid(self, form): obj = form.save(commit=False) obj.save() obj.image = 'sub/%s.png' % (obj.slug) obj.save() random_avatar_sub(obj.slug) return HttpResponseRedirect('/sub') class SubView(ListView): template_name = 'content/sub.html' model = Sub class FrontView(ListView): template_name = 'layouts/post_list.ht
ml' paginate_by = 4 def get(self, request, *args, **kwargs): if request.is_ajax(): self.template_name = 'ajax/post_list.html' return super(FrontView, self).get(request, *args, **kwargs) def get_queryset(self): if self.kwargs['tab'] == 'top': return Post.objects.last_commited() else: return Post.objects.created() def get_context_data(self, **kwargs): context = super(FrontView, self).get_context_data(**kwargs) context['list'] = 'portada' context['tab_show'] = self.kwargs['tab'] if self.kwargs['tab'] == 'top': context['list_url'] = '/' else: context['list_url'] = '/new' return context class SubPostListView(ListView): template_name = 'content/sub_post_list.html' paginate_by = 4 def get(self, request, *args, **kwargs): if request.is_ajax(): self.template_name = 'ajax/post_list.html' return super(SubPostListView, self).get(request, *args, **kwargs) def get_queryset(self): if self.kwargs['tab'] == 'top': return Post.objects.sub_last_commited(self.kwargs['sub']) else: return Post.objects.sub_created(self.kwargs['sub']) def get_context_data(self, **kwargs): context = super(SubPostListView, self).get_context_data(**kwargs) sub = Sub.objects.get(pk=self.kwargs['sub']) user = self.request.user if self.kwargs['tab'] == 'followers': context['followers'] = True context['tab_show'] = self.kwargs['tab'] context['list'] = sub context['tab'] = self.kwargs['tab'] if self.kwargs['tab'] == 'top': context['list_url'] = '/sub/%s' % sub else: context['list_url'] = '/sub/%s/new' % sub context['action'] = 'follow' if user.is_authenticated(): follow_state = SubFollow.objects.by_id(sub_followid='%s>%s' % (user.pk, sub.pk)) if follow_state: context['action'] = 'unfollow' else: context['action'] = 'follow' return context class PostCommitView(CreateView): template_name = 'layouts/post_detail.html' form_class = CommitForm def get_context_data(self, **kwargs): context = super(PostCommitView, self).get_context_data(**kwargs) pk, slug = self.kwargs['pk'], self.kwargs['slug'] context['object'] = Post.objects.by_post(pk, slug) return context def form_valid(self, form): if self.request.user.is_authenticated(): user = self.request.user post = Post.objects.get(postid=self.kwargs['pk']) obj = form.save(commit=False) obj.create_commit(user, post) if not obj.post.user.pk == user.pk: noty = Noty.objects.create(user_id=obj.post.user_id, category='C', commit=obj) noty.create_noty() return HttpResponseRedirect(obj.get_commit_url()) else: commit_url = '/post/%s/%s/' % (self.kwargs['pk'], self.kwargs['slug']) return HttpResponseRedirect('/login/?next=%s' % (commit_url)) class CreatePostView(CreateView): template_name = 'layouts/post_create.html' form_class = PostForm def form_valid(self, form): obj = form.save(commit=False) obj.user = self.request.user obj.save() if obj.draft: return HttpResponseRedirect('/created') else: obj.user.last_commited = obj.created obj.user.save() obj.sub.last_commited = obj.created obj.sub.save() obj.last_commited = obj.created obj.save() return HttpResponseRedirect(obj.get_absolute_url()) class UpdatePostView(UpdateView): template_name = 'layouts/post_create.html' form_class = PostForm def get_queryset(self): return Post.objects.by_user(self.request.user) def form_valid(self, form): obj = form.save(commit=False) if not obj.last_commited and not obj.draft: now = datetime.now() obj.last_commited = now obj.user.last_commited = now obj.user.save() obj.sub.last_commited = now obj.sub.save() obj.save() if obj.draft: return HttpResponseRedirect('/created') else: return HttpResponseRedirect(obj.get_absolute_url()) 
class PostUserCreatedView(ListView): template_name = 'content/post_user_created.html' def get_queryset(self): return Post.objects.by_user(self.request.user) class SubFollowCreate(View): def post(self, request, *args, **kwargs): user = self.request.user sub_followed = self.kwargs['followed'] sub_followed_obj = SubFollow.objects.create(follower=user, sub_id=sub_followed) sub_followed_obj.save() sub_followed_obj.follower.sub_following_number += 1 sub_followed_obj.follower.save() sub_followed_obj.sub.follower_number += 1 sub_followed_obj.sub.save() return HttpResponse(status=200) class SubFollowDelete(View): def post(self, request, *args, **kwargs): sub_unfollowed = self.kwargs['unfollowed'] sub_unfollowed_obj = SubFollow.objects.get(follower=self.request.user, sub_id=sub_unfollowed) sub_unfollowed_obj.follower.sub_following_number -= 1 sub_unfollowed_obj.follower.save() sub_unfollowed_obj.sub.follower_number -= 1 sub_unfollowed_obj.sub.save() sub_unfollowed_obj.delete() return HttpResponse(status=200)
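# A hypothetical urls.py wiring for the views above -- a sketch only; the
# route patterns and the 'tab'/'sub'/'followed' kwargs are inferred from each
# view's get_queryset()/kwargs usage, not taken from the project:
#
#     from django.conf.urls import url
#     urlpatterns = [
#         url(r'^$', FrontView.as_view(), {'tab': 'top'}),
#         url(r'^new$', FrontView.as_view(), {'tab': 'new'}),
#         url(r'^sub$', SubView.as_view()),
#         url(r'^sub/(?P<sub>[-\w]+)$', SubPostListView.as_view(), {'tab': 'top'}),
#         url(r'^post/(?P<pk>\d+)/(?P<slug>[-\w]+)/$', PostCommitView.as_view()),
#         url(r'^follow/(?P<followed>\d+)$', SubFollowCreate.as_view()),
#     ]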
# -*- coding: utf-8 -*- import time from openerp import api, models import datetime class ReportSampleReceivedvsReported(models.AbstractModel): _name = 'report.olims.report_sample_received_vs_reported' def _get_samples(self, samples): datalines = {} footlines = {} total_received_count = 0 total_published_count = 0 for sample in samples: # For each sample, retrieve check is has results published # and add it to datalines published = False analyses = self.env['olims.analysis_request'].search([('Sample_id', '=', sample.id)]) if analyses: for analysis in analyses: if not (analysis.DatePublished is False): published = True break datereceived = datetime.datetime.strptime(sample.DateReceived, \ "%Y-%m-%d %H:%M:%S") monthyear = datereceived.strftime("%B") + " " + datereceived.strftime( "%Y") received = 1 publishedcnt = published and 1 or 0 if (monthyear in datalines): received = datalines[monthyear]['ReceivedCount'] + 1 publishedcnt = published and datalines[monthyear][ 'PublishedCount'] + 1 or \ datalines[monthyear]['PublishedCount'] ratio = publishedcnt / received dataline = {'MonthYear': monthyear, 'ReceivedCount': received, 'PublishedCount': publishedcnt, 'UnpublishedCount': received - publishedcnt, 'Ratio': ratio, 'RatioPercentage': '%02d' % ( 100 * (float(publishedcnt) / float(received))) + '%'} datalines[monthyear] = dataline total_received_count += 1 total_published_count = published and total_published_count + 1 or total_published_count
# Footer total data if total_received_count > 0: ratio = total_published_count / total_received_count else: ratio = total_published_count / 1 try: footline = {'ReceivedCount': total_received
_count, 'PublishedCount': total_published_count, 'UnpublishedCount': total_received_count - total_published_count, 'Ratio': ratio, 'RatioPercentage': '%02d' % (100 * ( float(total_published_count) / float( total_received_count))) + '%' } except: footline = {'ReceivedCount': total_received_count, 'PublishedCount': total_published_count, 'UnpublishedCount': total_received_count - total_published_count, 'Ratio': ratio, 'RatioPercentage': '%02d' % (100 * ( float(total_published_count) / float( 1))) + '%' } footlines['Total'] = footline return datalines, footlines @api.multi def render_html(self, data): startdate = datetime.datetime.strptime(data['form'].get('date_from'), \ "%Y-%m-%d %H:%M:%S").strftime("%Y/%m/%d %H:%M:%S") enddate = datetime.datetime.strptime(data['form'].get('date_to'), \ "%Y-%m-%d %H:%M:%S").strftime("%Y/%m/%d %H:%M:%S") self.model = self.env.context.get('active_model') docs = self.env[self.model].browse(self.env.context.get('active_id')) samples = self.env['olims.sample'].search([('SamplingDate', '>=', startdate), \ ('SamplingDate', '<=', enddate), \ ('state', 'in', ['sample_received','expired','disposed'])]) samples_res, footlines= self.with_context(data['form'].get('used_context'))._get_samples(samples) docargs = { 'doc_ids': self.ids, 'doc_model': self.model, 'data': data['form'], 'docs': docs, 'time': time, 'Samples': samples_res, 'footlines' : footlines #sum(samples_res.values()) } return self.env['report'].render('olims.report_sample_received_vs_reported', docargs)
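# Worked example of the percentage math above: with 8 samples received and
# 6 of them published, RatioPercentage is '%02d' % (100 * (6.0 / 8.0)) + '%'
# == '75%'. Note that the plain `Ratio` value uses integer division under
# Python 2, so it is 0 unless every received sample was published.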
_iter, end_iter) # if it's a ${cursor} variable we don't want to insert # any new text. Just go to the else and get it's start # offset, used later to mark that location if not var.group() == "${cursor}": # insert the variable identifier into the buffer # at the start location self.editor.buff.insert(start_iter, var.group(1)) current_offset = current_offset-3 # record our start and end offsets used later # to mark these variables so we can select the text start_and_end_offsets = { "start" : occur_offset_start, "end" : occur_offset_end-3 } #print "START = %d | END = %d" % (start_and_end_offsets["start"], start_and_end_offsets["end"]) else: # if we have a ${cursor} then we want a # marker added with no text so we can # tab to it. start_and_end_offsets = { "start" : occur_offset_start, "end" : occur_offset_start } current_offset = current_offset-len(var.group()) # put the start/end offsets into a list of dictionaries offsets.append( start_and_end_offsets ) return offsets # This functions purpose is to add spaces/tabs to the snippets according # to what level we have indented to def auto_indent_snippet(self, snippet): cursor_iter = self.get_cursor_iter() line_number = cursor_iter.get_line() start_of_current_line_iter = self.editor.buff.get_iter_at_line(line_number) text = self.editor.buff.get_text(cursor_iter, start_of_current_line_iter) space_re = re.compile(' ') tab_re = re.compile('\t') tab_count = len(tab_re.findall(text)) space_count = len(space_re.findall(text)) lines = snippet.split("\n") new_lines = [] tabs = "" spaces = "" if tab_count > 0: for i in range(tab_count): tabs = tabs + "\t" if space_count > 0: for i in range(space_count): spaces = spaces + " " for i,line in enumerate(lines): # don't add any of the spaces/tabs to the first # line in the snippet if not i == 0: snip = tabs + spaces + line new_lines.append(snip) else: new_lines.append(line) return "\n".join(new_lines) def snippet_completion(self): cursor_iter = self.get_cursor_iter() line_number = cursor_iter.get_line() start_of_current_line_iter = self.editor.buff.get_iter_at_line(line_number) text = self.editor.buff.get_text(start_of_current_line_iter, cursor_iter) words = text.split() if words: word_last_typed = words.pop() word_index = text.find(word_last_typed) # Run through all snippets trying to find a match for s in self.SNIPPETS: key=s.shortcut value=s.snippet if word_last_typed == key: self.TABBED = True value = self.auto_indent_snippet(value) word_index = text.rfind(word_last_typed) index_iter = self.editor.buff.get_iter_at_line_offset(line_number, word_index) end_iter = self.editor.buff.get_iter_at_line_offset(line_number, word_index+len(word_last_typed)) self.editor.buff.delete(index_iter, end_iter) overall_offset = index_iter.get_offset() self.editor.buff.insert(index_iter, value) start_mark_iter = self.editor.buff.get_iter_at_line_offset(line_number, word_index) end_mark_iter = self.editor.buff.get_iter_at_offset(start_mark_iter.get_offset()+len(value)) self.SNIPPET_START_MARK = self.editor.buff.create_mark(None, start_mark_iter, True) self.SNIPPET_END_MARK = self.editor.buff.create_mark(None, end_mark_iter, False) offsets = self.get_varia
ble_offsets(value, overall_offset) if offsets: marks = self.mark_variables(offsets) if marks: _iter = self.editor.buff.get_iter_at_offset( offsets[0]["start"] ) self.editor.buff.place_cursor(_i
ter) marks.reverse() for mark in marks: self.SNIPPET_MARKS.insert(0, mark) offsets.reverse() for offset in offsets: self.SNIPPET_OFFSETS.insert(0,offset) self.IN_SNIPPET = True else: self.HAS_NO_VARIABLES=True def pair_text(self, pair_chars): selection = self.editor.buff.get_selection_bounds() if(selection): selected_text = self.editor.buff.get_text(selection[0], selection[1]) self.editor.buff.delete(selection[0], selection[1]) self.editor.buff.insert_at_cursor("%s%s%s" % (pair_chars[0],selected_text,pair_chars[1])) return True return False def comment_line(self, comment_char): selection = self.editor.buff.get_selection_bounds() if(selection): selected_text = self.editor.buff.get_text(selection[0], selection[1]) self.editor.buff.delete(selection[0], selection[1]) for line in selected_text.splitlines(True): self.editor.buff.insert_at_cursor("%s %s" % (comment_char, line)) return True return False def key_event(self, widget, key_press): keycodes = { "space" : 32, "tab" : 65289, "quote" : 34, "open_brace" : 123, "open_bracket" : 91, "open_paren" : 40, "less_than" : 60, "single_quote" : 39, "pound" : 35 } # Need to add a new key, just uncomment this, run the program # and look at the output from the key press #print key_press.keyval if not key_press.keyval == keycodes["tab"]: self.TABBED = False if key_press.keyval == keycodes["pound"]: if key_press.state & gtk.gdk.SHIFT_MASK: comment_char = None if(self.mime_type == ("text/x-python") or self.mime_type == ("application/x-ruby") or self.mime_type == ("application/x-shellscript") ): comment_char = "#" elif (self.mime_type == ("text/x-java") or self.mime_type == ("text/x-c++src") ): comment_char = "//" if(comment_char): if(self.comment_line(comment_char)): return True if key_press.keyval == keycodes["quote"]: if (self.pair_text(["\"", "\""])): return True elif key_press.keyval == keycodes["open_brace"]: if (self.pair_text(["{", "}"])): return True elif key_press.keyval == keycodes["open_bracket"]: if (self.pair_text(["[", "]"])): return True elif key_press.keyval == keycodes["open_paren"]: if (self.pair_text(["(", ")"])): return True elif key_press.keyval == keycodes["less_than"]: if (self.pair_text(["<", ">"])): return True elif key_press.keyval == keycodes["single_quote"]: if (self.pair_text(["\'", "\'"])): return True elif key_press.keyval == keycodes["tab"]: if not self.TABBED: self.snippet_completion() if self.HAS_NO_VARIABLES: self.HAS_NO_VARIABLES=False return True if(len(self.SNIPPET_MARKS)>0): for i, v in enumerate(self.SNIPPET_MARKS): if len(self.SNIPPET_MARKS)>1: self.editor.source_view.scroll_mark_onscreen(self.SNIPPET_MARKS[i+1]["start"]) _iter = self.editor.buff.get_iter_at_mark(v["start"]) mark_offset = _iter.get_offset() self.editor.buff.select_range( self.editor.buff.get_iter_at_mark(v["start"]), self.editor.buff.get_iter_at_mark(v["end"])) self.editor.buff.delete_mark(v["start"]) self.editor.buff.delete_mark(v["end"]) del self.SNIPPET_MARKS[i] del self.SNIPPET_OFFSETS[i] if (i==len(self.SNIPPET_OFFSETS)): self.IN_SNIPPET = False self.editor.buff.delete_mark(self.SNIPPET_START_MARK) self.editor.buff.delete_mark(self.SNIPPET_END_MARK) break return True return False def load(self): pass def unload(self): pass def __get_language_for_mime_type(self, mime): from gtksourceview2 import language_manager_get_default lang_manager = language_manager_get_default() lang_ids = lang_manager.get_language_ids() for i in lang_ids: lang = lang_manager.get_language(i) for m in lang.get_mime_types(): if m == mime: return lang return None def 
get_language(self, uri): try: if uri is None: return None from gnomevfs import get_mime_type self.mime_type = gio.File(uri.strip()).query_info("*").get_content_type() language = self.__get_language_for_mime_type(self.mime_type) except RuntimeE
ort os import itertools from collections import defaultdict import angr UNIQUE_STRING_COUNT = 20 # strings longer than MAX_UNIQUE_STRING_LEN will be truncated MAX_UNIQUE_STRING_LEN = 70 def get_basic_info(ar_path: str) -> Dict[str,str]: """ Get basic information of the archive file. """ with tempfile.TemporaryDirectory() as tempdirname: cwd = os.getcwd() os.chdir(tempdirname) subprocess.call(["ar", "x", ar_path]) # Load arch and OS information from the first .o file o_files = [ f for f in os.listdir(".") if f.endswith(".o") ] if o_files: proj = angr.Project(o_files[0], auto_load_libs=False) arch_name = proj.arch.name.lower() os_name = proj.simos.name.lower() os.chdir(cwd) return { 'arch': arch_name, 'platform': os_name, } def get_unique_strings(ar_path: str) -> List[str]: """ For Linux libraries, this method requires ar (from binutils), nm (from binutils), and strings. """ # get symbols nm_output = subprocess.check_output(["nm", ar_path]) nm_lines = nm_output.decode("utf-8").split("\n") symbols = set() for nm_line in nm_lines: symbol_types = "UuVvTtRrDdWwBbNn" for symbol_type in symbol_types: if f" {symbol_type} " in nm_line: # parse it symbol = nm_line[nm_line.find(f" {symbol_type}") + 3: ].strip(" ") if "." in symbol: symbols |= set(symbol.split(".")) else: symbols.add(symbol) break # extract the archive file into a temporary directory all_strings = set() with tempfile.TemporaryDirectory() as tempdirname: cwd = os.getcwd() os.chdir(tempdirname) subprocess.call(["ar", "x", ar_path]) for filename in os.listdir("."): if filename.endswith(".o"): strings = subprocess.check_output(["strings", "-n", "8", filename]) strings = strings.decode("utf-8").split("\n") non_symbol_strings = set() for s in strings: if s in symbols: continue if "." in s and any(subs in symbols for subs in s.split(".")): continue # C++ specific if "::" in s: continue if "_" in s: # make sure it's not a substring of any symbol is_substring = False for symbol in symbols: if s in symbol: is_substring = True break if is_substring: continue non_symbol_strings.add(s) all_strings |= non_symbol_strings os.chdir(cwd) grouped_strings = defaultdict(set) for s in all_strings: grouped_strings[s[:5]].add(s) sorted_strings = list(sorted(all_strings, key=len, reverse=True)) ctr = 0 pic
ked = set() unique_strings = [ ] for s in sorted_strings: if s[:5] in picked: co
ntinue unique_strings.append(s[:MAX_UNIQUE_STRING_LEN]) picked.add(s[:5]) ctr += 1 if ctr >= UNIQUE_STRING_COUNT: break return unique_strings def run_pelf(pelf_path: str, ar_path: str, output_path: str): subprocess.check_call([pelf_path, "-r43:0:0", ar_path, output_path]) def run_sigmake(sigmake_path: str, sig_name: str, pat_path: str, sig_path: str): if " " not in sig_name: sig_name_arg = f"-n{sig_name}" else: sig_name_arg = f"-n\"{sig_name}\"" proc = subprocess.Popen([sigmake_path, sig_name_arg, pat_path, sig_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) _, stderr = proc.communicate() if b"COLLISIONS:" in stderr: return False return True def process_exc_file(exc_path: str): """ We are doing the stupidest thing possible: For each batch of conflicts, we pick the most likely result baed on a set of predefined rules. TODO: Add caller-callee-based de-duplication. """ with open(exc_path, "r") as f: data = f.read() lines = data.split("\n") # parse groups ctr = itertools.count() idx = 0 groups = defaultdict(dict) for line in lines: if line.startswith(";"): continue if not line: idx = next(ctr) else: # parse the function name func_name = line[:line.index("\t")].strip(" ") groups[idx][func_name] = line # for each group, decide the one to keep for idx in list(groups.keys()): g = groups[idx] if len(g) == 1: # don't pick anything. This is a weird case that I don't understand continue if all(func_name.endswith(".cold") for func_name in g): # .cold functions. doesn't matter what we pick continue non_cold_names = [ ] for func_name in g: if func_name.endswith(".cold"): continue non_cold_names.append(func_name) # sort it non_cold_names = list(sorted(non_cold_names, key=len)) # pick the top one the_chosen_one = non_cold_names[0] line = g[the_chosen_one] g[the_chosen_one] = "+" + line # output with open(exc_path, "w") as f: for g in groups.values(): for line in g.values(): f.write(line + "\n") f.write("\n") def main(): parser = argparse.ArgumentParser() parser.add_argument("ar_path", help="Path of the .a file to build signatures for") parser.add_argument("sig_name", help="Name of the signature (a string inside the signature file)") parser.add_argument("sig_path", help="File name of the generated signature") parser.add_argument("--compiler", help="Name of the compiler (e.g., gcc, clang). It will be stored in the meta " "data file.") parser.add_argument("--compiler_version", help="Version of the compiler (e.g., 6). It will be stored in the meta " "data file.") # parser.add_argument("--platform", help="Name of the platform (e.g., windows/linux/macos). It will be stored in # the meta data file.") parser.add_argument("--os", help="Name of the operating system (e.g., ubuntu/debian). It will be stored in the " "meta data file.") parser.add_argument("--os_version", help="Version of the operating system (e.g., 20.04). 
It will be stored in the " "meta data file.") parser.add_argument("--pelf_path", help="Path of pelf") parser.add_argument("--sigmake_path", help="Path of sigmake") args = parser.parse_args() if args.pelf_path: pelf_path = args.pelf_path elif "pelf_path" in os.environ: pelf_path = os.environ['pelf_path'] else: raise ValueError("pelf_path must be specified.") if args.sigmake_path: sigmake_path = args.sigmake_path elif "sigmake_path" in os.environ: sigmake_path = os.environ['sigmake_path'] else: raise ValueError("sigmake_path must be specified.") compiler = args.compiler if compiler: compiler = compiler.lower() compiler_version = args.compiler_version if compiler_version: compiler_version = compiler_version.lower() os_name = args.os if os_name: os_name = os_name.lower() os_version = args.os_version if os_version: os_version = os_version.lower() # Get basic information basic_info = get_basic_info(args.ar_path) # Get unique strings from the library unique_strings = get_unique_strings(args.ar_path) # Build necessary file paths sig_path_basename
of data values. The data values are displayed in successive columns after the tree label.""" def __init__(self, master=None, **kw): """Construct a Ttk Treeview with parent master. STANDARD OPTIONS class, cursor, style, takefocus, xscrollcommand, yscrollcommand WIDGET-SPECIFIC OPTIONS columns, displaycolumns, height, padding, selectmode, show ITEM OPTIONS text, image, values, open, tags TAG OPTIONS foreground, background, font, image """ Widget.__init__(self, master, "ttk::treeview", kw) def bbox(self, item, column=None): """Returns the bounding box (relative to the treeview widget's window) of the specified item in the form x y width height. If column is specified, returns the bounding box of that cell. If the item is not visible (i.e., if it is a descendant of a closed item or is scrolled offscreen), returns an empty string.""" return self.tk.call(self._w, "bbox", item, column) def get_children(self, item=None): """Returns a tuple of children belonging to item. If item is not specified, returns root children.""" return self.tk.call(self._w, "children", item or '') or () def set_children(self, item, *newchildren): """Replaces item's child with newchildren. Children present in item that are not present in newchildren are detached from tree. No items in newchildren may be an ancestor of item.""" self.tk.call(self._w, "children", item, newchildren) def column(self, column, option=None, **kw): """Query or modify the options for the specified column. If kw is not given, returns a dict of the column option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "column", column) def delete(self, *items): """Delete all specified items and all their descendants. The root item may not be deleted.""" self.tk.call(self._w, "delete", items) def detach(self, *items): """Unlinks all of the specified items from the tree. The items and all of their descendants are still present, and may be reinserted at another point in the tree, but will not be displayed. The root item may not be detached.""" self.tk.call(self._w, "detach", items) def exists(self, item): """Returns True if the specified item is present in the tree, False otherwise.""" return bool(self.tk.call(self._w, "exists", item)) def focus(self, item=None): """If item is specified, sets the focus item to item. Otherwise, returns the current focus item, or '' if there is none.""" return self.tk.call(self._w, "focus", item) def heading(self, column, option=None, **kw): """Query or modify the heading options for the specified column. If kw is not given, returns a dict of the heading option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values. Valid options/values are: text: text The text to display in the column heading image: image_name Specifies an image to display to the right of the column heading anchor: anchor Specifies how the heading text should be aligned. One of the standard Tk anchor values command: callback A callback to be invoked when the heading label is pressed. To configure the tree column heading, call this with column = "#0" """ cmd = kw.get('command') if cmd and not isinstance(cmd, str): # callback not registered yet, do it now kw['command'] = self.master.register(cmd, self._substitute) if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, 'heading', colum
n) def identify(self, component, x, y):
"""Returns a description of the specified component under the point given by x and y, or the empty string if no such component is present at that position.""" return self.tk.call(self._w, "identify", component, x, y) def identify_row(self, y): """Returns the item ID of the item at position y.""" return self.identify("row", 0, y) def identify_column(self, x): """Returns the data column identifier of the cell at position x. The tree column has ID #0.""" return self.identify("column", x, 0) def identify_region(self, x, y): """Returns one of: heading: Tree heading area. separator: Space between two columns headings; tree: The tree area. cell: A data cell. * Availability: Tk 8.6""" return self.identify("region", x, y) def identify_element(self, x, y): """Returns the element at position x, y. * Availability: Tk 8.6""" return self.identify("element", x, y) def index(self, item): """Returns the integer index of item within its parent's list of children.""" return self.tk.call(self._w, "index", item) def insert(self, parent, index, iid=None, **kw): """Creates a new item and return the item identifier of the newly created item. parent is the item ID of the parent item, or the empty string to create a new top-level item. index is an integer, or the value end, specifying where in the list of parent's children to insert the new item. If index is less than or equal to zero, the new node is inserted at the beginning, if index is greater than or equal to the current number of children, it is inserted at the end. If iid is specified, it is used as the item identifier, iid must not already exist in the tree. Otherwise, a new unique identifier is generated.""" opts = _format_optdict(kw) if iid: res = self.tk.call(self._w, "insert", parent, index, "-id", iid, *opts) else: res = self.tk.call(self._w, "insert", parent, index, *opts) return res def item(self, item, option=None, **kw): """Query or modify the options for the specified item. If no options are given, a dict with options/values for the item is returned. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values as given by kw.""" if option is not None: kw[option] = None return _val_or_dict(kw, self.tk.call, self._w, "item", item) def move(self, item, parent, index): """Moves item to position index in parent's list of children. It is illegal to move an item under one of its descendants. If index is less than or equal to zero, item is moved to the beginning, if greater than or equal to the number of children, it is moved to the end. If item was detached it is reattached.""" self.tk.call(self._w, "move", item, parent, index) reattach = move # A sensible method name for reattaching detached items def next(self, item): """Returns the identifier of item's next sibling, or '' if item is the last child of its parent.""" return self.tk.call(self._w, "next", item) def parent(self, item): """Returns the ID of the parent of item, or '' if item is at the top level of the hierarchy.""" return self.tk.call(self._w, "parent", item) def prev(self, item): """Returns the identifier of item's previous sibling, or '' if item is the first child of its parent.""" return self.tk.cal
'/pywb/') def head_insert_func(rule, cdx): if rule.js_rewrite_location != 'urls': return '<script src="/static/__pywb/wombat.js"> </script>' else: return '' def test_csrf_token_headers(): rewriter = LiveRewriter() env = {'HTTP_X_CSRFTOKEN': 'wrong', 'HTTP_COOKIE': 'csrftoken=foobar'} req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env) assert req_headers == {'X-CSRFToken': 'foobar', 'Cookie': 'csrftoken=foobar'} def test_forwarded_scheme(): rewriter = LiveRewriter() env = {'HTTP_X_FORWARDED_PROTO': 'https', 'Other': 'Value'} req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env) assert req_headers == {'X-Forwarded-Proto': 'http'} def test_req_cookie_rewrite_1(): rewriter = LiveRewriter() env = {'HTTP_COOKIE': 'A
=B'} urlkey = 'example,example,test)/' url = 'test.example.example/' req_headers = rewriter.translate_headers(url, urlkey, env) assert req_headers == {'Cookie': 'A=B; FOO=&bar=1'} def test_req_cookie_rewrite_2(): rewriter = L
iveRewriter() env = {'HTTP_COOKIE': 'FOO=goo'} urlkey = 'example,example,test)/' url = 'test.example.example/' req_headers = rewriter.translate_headers(url, urlkey, env) assert req_headers == {'Cookie': 'FOO=&bar=1'} def test_req_cookie_rewrite_3(): rewriter = LiveRewriter() env = {} urlkey = 'example,example,test)/' url = 'test.example.example/' req_headers = rewriter.translate_headers(url, urlkey, env) assert req_headers == {'Cookie': '; FOO=&bar=1'} def test_local_1(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html', urlrewriter, head_insert_func, 'com,example,test)/') # wombat insert added assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff # JS location and JS link rewritten assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff # link rewritten assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff def test_local_no_head(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html', urlrewriter, head_insert_func, 'com,example,test)/') # wombat insert added assert '<script src="/static/__pywb/wombat.js"> </script>' in buff # location rewritten assert 'window.WB_wombat_location = "/other.html"' in buff # link rewritten assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff def test_local_no_head_banner_only(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html', bn_urlrewriter, head_insert_func, 'com,example,test)/') # wombat insert added assert '<script src="/static/__pywb/wombat.js"> </script>' in buff # location NOT rewritten assert 'window.location = "/other.html"' in buff # link NOT rewritten assert '"another.html"' in buff def test_local_banner_only_no_rewrite(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html', bn_urlrewriter, head_insert_func, 'com,example,test)/') # wombat insert added assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff # JS location NOT rewritten, JS link NOT rewritten assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff, buff # link NOT rewritten assert '"another.html"' in buff def test_local_2_link_only_rewrite(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html', urlrewriter, head_insert_func, 'example,example,test)/nolocation_rewrite') # no wombat insert assert '<head><script src="/static/__pywb/wombat.js"> </script>' not in buff # JS location NOT rewritten, JS link rewritten assert 'window.location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff # still link rewrite assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff def test_local_2_js_loc_only_rewrite(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html', urlrewriter, head_insert_func, 'example,example,test,loconly)/') # wombat insert added assert '<script src="/static/__pywb/wombat.js"> </script>' in buff # JS location rewritten, JS link NOT rewritten assert 'window.WB_wombat_location = "http:\/\/example.com/dynamic_page.html"' in buff # still link rewrite in HTML assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff def test_local_2_no_rewrite(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html', urlrewriter, head_insert_func, 'example,example,test,norewrite)/') # wombat insert added assert '<script 
src="/static/__pywb/wombat.js"> </script>' in buff # JS location NOT rewritten, JS link NOT rewritten assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff # still link rewrite in HTML assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff def test_local_unclosed_script(): status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_unclosed_script.html', urlrewriter, head_insert_func, 'com,example,test)/') # wombat insert added assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff # JS location and JS link rewritten assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html";\n}\n</script>' in buff, buff def test_example_1(): status_headers, buff = get_rewritten('http://example.com/', urlrewriter, req_headers={'Connection': 'close'}) # verify header rewriting assert (('X-Archive-Orig-connection', 'close') in status_headers.headers), status_headers # verify utf-8 charset detection assert status_headers.get_header('content-type') == 'text/html; charset=utf-8' assert '/pywb/20131226101010/http://www.iana.org/domains/example' in buff, buff def test_example_2_redirect(): status_headers, buff = get_rewritten('http://httpbin.org/redirect-to?url=http://example.com/', urlrewriter) # redirect, no content assert status_headers.get_statuscode() == '302' assert len(buff) == 0 def test_example_3_rel(): status_headers, buff = get_rewritten('//example.com/', urlrewriter) assert status_headers.get_statuscode() == '200' def test_example_4_rewrite_err(): # may occur in case of rewrite mismatch, the /// gets stripped off status_headers, buff = get_rewritten('http://localhost:8080///example.com/', urlrewriter) assert status_headers.get_statuscode() == '200' def test_example_domain_specific_3(): status_headers, buff = get_rewritten('http://facebook.com/digitalpreservation', urlrewriter, follow_redirects=True) # comment out Bootloader.configurePage, if it is still there if 'Bootloader.configurePage' in buff: assert '/* Bootloader.configurePage' in buff def test_wombat_top(): #status_headers, buff = get_rewritten('https://as
# -*- coding: utf-8 -*- # # Copyright © 2009-2010 Pierre Raybaut # Licensed under the terms of the MIT License # (see spyderlib/__init__.py for details) """Online Help Plugin""" from spyderlib.qt.QtCore import Signal import os.path as osp # Local imports from spyderlib.baseconfig import get_conf_path, _ from spyderlib.widgets.pydocgui import PydocBrowser from spyderlib.plugins import SpyderPluginMixin class OnlineHelp(PydocBrowser, SpyderPluginMixin): """ Online Help Plugin """ sig_option_changed = Signal(str, object) CONF_SECTION = 'onlinehelp' LOG_PATH = get_conf_path('.onlinehelp') def __init__(self, parent): self.main = parent PydocBrowser.__init__(self, parent) SpyderPluginMixin.__init__(self, parent) # Initialize plugin self.initialize_plugin() self.register_widget_shortcuts("Editor", self.find_widget) self.webview.set_zoom_factor(self.get_option('zoom_factor')) self.url_combo.setMaxCount(self.get_option('max_history_entries')) self.url_combo.addItems( self.load_history() ) #------ Public API --------------------------------------------------------- def load_history(self, obj=None): """Load history from a text file in user home directory""" if osp.isfile(self.LOG_PATH): history = [line.replace('\n','') for line in file(self.LOG_PATH, 'r').readlines()] else: history = [] return history def save_history(self): """Save history to a text file in user home directory""" file(self.LOG_PATH, 'w').write("\n".join( \ [ unicode( self.url_combo.itemText(index) ) for index in range(self.url_combo.count()) ] )) #------ SpyderPluginMixin API --------------------------------------------- def visibility_changed(self, enable): """DockWidget visibility has ch
anged""" SpyderPluginMixin.visibility_changed(self, enable) if enable and not self.is_server_running():
self.initialize() #------ SpyderPluginWidget API --------------------------------------------- def get_plugin_title(self): """Return widget title""" return _('Online help') def get_focus_widget(self): """ Return the widget to give focus to when this plugin's dockwidget is raised on top-level """ self.url_combo.lineEdit().selectAll() return self.url_combo def closing_plugin(self, cancelable=False): """Perform actions before parent main window is closed""" self.save_history() self.set_option('zoom_factor', self.webview.get_zoom_factor()) return True def refresh_plugin(self): """Refresh widget""" pass def get_plugin_actions(self): """Return a list of actions related to plugin""" return [] def register_plugin(self): """Register plugin in Spyder's main window""" self.main.add_dockwidget(self)
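# A minimal standalone sketch of the history persistence used above: one URL
# per line in a plain text file. `log_path` and the entries are hypothetical;
# the real plugin derives LOG_PATH from Spyder's configuration directory and
# feeds the loaded entries into url_combo.
import os.path as osp


def load_history_sketch(log_path):
    """Return saved history entries, or [] when no file exists yet."""
    if osp.isfile(log_path):
        with open(log_path, 'r') as fh:
            return [line.rstrip('\n') for line in fh]
    return []


def save_history_sketch(log_path, entries):
    """Persist history entries, one per line."""
    with open(log_path, 'w') as fh:
        fh.write("\n".join(entries))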
"""Support for the Hive switches.""" from datetime import timedelta from homeassistant.components.switch import SwitchEntity from . import ATTR_AVAILABLE, ATTR_MODE, DATA_HIVE, DOMAIN, HiveEntity, refresh_system PARALLEL_UPDATES = 0 SCAN_INTERVAL = timedelta(seconds=15) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Hive Switch.""" if discovery_info is None: return hive = hass.data[DOMAIN].get(DATA_HIVE) devices = hive.devices.get("switch") entities = [] if devices: for dev in devices: entities.append(HiveDevicePlug(hive, dev)) async_add_entities(entities, True) class HiveDevicePlug(HiveEntity, SwitchEntity): """Hive Active Plug.""" @property def unique_id(self): """Return unique ID of entity.""" return self._unique_id @property def device_info(self): """Return device information.""" return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name} @property def name(self): """Return the name of this Switch device if any.""" return self.device["haName"] @property def available(self): """Return if the device is available.""" return self.device["deviceData"].get("online") @property def device_state_attributes(self): """Show Device Attributes.""" return { ATTR_AVAILABLE: self.attributes.get(ATTR_AVAILABLE), ATTR_MODE: self.attributes.get(ATTR_MODE), } @property def current_power_w(self): """Return the current power usage in W.""" return self.device["status"]["power_usage"] @property def is_on(self): """Return true if switch is on.""" return self.device["status"]["state"] @refresh_system async def async_turn_on(self, **kwargs):
"""Turn the switch on.""" if self.device["hiveType"] == "activeplug": await self.hive.switch.turn_on(self.device) @refresh_system async def async_turn_off(self, **kwargs): """Turn the device off.""" if self.device["hiveType"] == "activeplug": await self.hive.switch.turn_off(self.device) async def async_update(self): """Update a
ll Node data from Hive.""" await self.hive.session.updateData(self.device) self.device = await self.hive.switch.get_plug(self.device)
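# `refresh_system` above is imported from the integration package; a plausible
# minimal sketch of such a decorator is shown here as an assumption (the real
# implementation lives in the integration's __init__.py and may differ): run
# the wrapped service call, then broadcast a dispatcher signal so sibling Hive
# entities refresh promptly instead of waiting out SCAN_INTERVAL.
from functools import wraps

from homeassistant.helpers.dispatcher import async_dispatcher_send


def refresh_system_sketch(func):
    """Call the wrapped coroutine, then nudge all Hive entities to update."""

    @wraps(func)
    async def wrapper(entity, *args, **kwargs):
        await func(entity, *args, **kwargs)
        # "hive_update" is a hypothetical signal name for this sketch.
        async_dispatcher_send(entity.hass, "hive_update")

    return wrapper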
troveTuple[0].split(':', 1)[0] == troveName: # exact matches take priority return (jobId, troveTuple) elif troveTuple[0].startswith(troveName) and startsWith is None: startsWith = (jobId, troveTuple) return startsWith def getTroveState(self, jobId, troveTuple): return self.states[jobId, troveTuple] def getBuildingTroves(self): return [ x[0] for x in self.states.iteritems() if x[1] in (buildtrove.TroveState.BUILDING, buildtrove.TroveState.RESOLVING) ] def updateTrovesForJob(self, jobId): self.troves = [] self.states = {} for state, troveTupleList in self.client.listTrovesByState(jobId).items(): for troveTuple in troveTupleList: self.troves.append((jobId, troveTuple)) self.states[jobId, troveTuple] = state self.troves.sort() def _troveStateUpdated(self, (jobId, troveTuple), state, status): if (jobId, troveTuple) not in self.states: self.updateTrovesForJob(jobId) else: self.states[jobId, troveTuple] = state def _jobStateUpdated(self, jobId, state, status): self.jobState = state if self._isBuilding(): self.updateTrovesForJob(jobId) def _jobTrovesSet(self, jobId, troveList): self.updateTrovesForJob(jobId) def _isBuilding(self): return self.jobState in (buildjob.JOB_STATE_BUILD, buildjob.JOB_STATE_STARTED) def _isFinished(self): return self.jobState in ( buildjob.JOB_STATE_FAILED, buildjob.JOB_STATE_BUILT) class DisplayManager(object):#xmlrpc.BasicXMLRPCStatusSubscriber): displayClass = JobLogDisplay stateClass = DisplayState
def __init__(self, client, showBuildLogs, out=None, exitOnFinish=None): self.termInfo = set_raw_mode() if out is None: out = open('/dev/tty', 'w') self.state = self.stateClass(client) self.display = self.displayClass(client, self.state, out) self.client =
client self.troveToWatch = None self.troveIndex = 0 self.showBuildLogs = showBuildLogs if exitOnFinish is None: exitOnFinish = False self.exitOnFinish = exitOnFinish def _receiveEvents(self, *args, **kw): methodname = '_receiveEvents' method = getattr(self.state, methodname, None) if method: try: method(*args) except errors.uncatchableExceptions: raise except Exception, err: print 'Error in handler: %s\n%s' % (err, traceback.format_exc()) method = getattr(self.display, methodname, None) if method: try: method(*args) except errors.uncatchableExceptions: raise except Exception, err: print 'Error in handler: %s\n%s' % (err, traceback.format_exc()) return '' def getCurrentTrove(self): if self.state.troves: return self.state.troves[self.troveIndex] else: return None def _primeOutput(self, jobId): self.state._primeOutput(jobId) self.display._msg('Watching job %s' % jobId) if self.getCurrentTrove(): self.displayTrove(*self.getCurrentTrove()) def displayTrove(self, jobId, troveTuple): self.display.setTroveToWatch(jobId, troveTuple) state = self.state.getTroveState(jobId, troveTuple) state = buildtrove.stateNames[state] def _serveLoopHook(self): ready = select.select([sys.stdin], [], [], 0.1)[0] if ready: cmd = sys.stdin.read(1) if cmd == '\x1b': cmd += sys.stdin.read(2) if cmd == ' ': self.do_switch_log() elif cmd == 'n' or cmd == '\x1b[C': self.do_next() elif cmd == 'p' or cmd == '\x1b[D': self.do_prev() elif cmd == 'q': sys.exit(0) elif cmd == 'h': self.do_help() elif cmd == 'b': self.do_next_building() elif cmd == 'f': self.do_next_failed() elif cmd == 'i': self.do_info() elif cmd == 'l': self.do_log() elif cmd == 's': self.do_status() elif cmd == 'g': self.do_goto() if self.showBuildLogs: for jobId, troveTuple in self.state.getBuildingTroves(): self.display.updateBuildLog(jobId, troveTuple) def do_next(self): if not self.state.troves: return self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) if self.getCurrentTrove(): self.displayTrove(*self.getCurrentTrove()) def do_next_building(self): if not self.state.troves: return startIndex = self.troveIndex self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) while (not self.state.isBuilding(*self.getCurrentTrove()) and self.troveIndex != startIndex): self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) if self.troveIndex != startIndex: self.displayTrove(*self.getCurrentTrove()) def do_goto(self): if not self.state.troves: print 'No troves loaded yet' return self.display.erasePrompt() restore_terminal(*self.termInfo) try: troveName = raw_input("\nName or part of name of trove: ") troveInfo = self.state.findTroveByName(troveName) if not troveInfo: print 'No trove starting with "%s"' % troveName self.display.writePrompt() return while not self.getCurrentTrove() == troveInfo: self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) self.displayTrove(*self.getCurrentTrove()) finally: self.termInfo = set_raw_mode() def do_next_failed(self): if not self.state.troves: return startIndex = self.troveIndex self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) while (not self.state.isFailed(*self.getCurrentTrove()) and self.troveIndex != startIndex): self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) if self.troveIndex != startIndex: self.displayTrove(*self.getCurrentTrove()) def do_prev(self): if not self.state.troves: return self.troveIndex = (self.troveIndex - 1) % len(self.state.troves) if self.getCurrentTrove(): self.displayTrove(*self.getCurrentTrove()) def do_info(self): if not 
self.getCurrentTrove(): return jobId, troveTuple = self.getCurrentTrove() job = self.client.getJob(jobId) trove = job.getTrove(*troveTuple) dcfg = query.DisplayConfig(self.client, showTracebacks=True) self.display.setWatchTroves(False) self.display.erasePrompt() query.displayTroveDetail(dcfg, job, trove, out=self.display.out) self.display.writePrompt() def do_log(self): if not self.getCurrentTrove(): return jobId, troveTuple = self.getCurrentTrove() job = self.client.getJob(jobId) trove = job.getTrove(*troveTuple) moreData, data, mark = self.client.getTroveBuildLog(jobId, troveTuple, 0) if not data: self.display._msg('No log yet.') return fd, path = tempfile.mkstemp() os.fdopen(fd, 'w').write(data) try: os.system('less %s' % path) finally: os.remove(path) def do_
#!/usr/bin/env
python # encoding: utf-8 class MyRange(object): def __init__(self, n): self.idx = 0 self.n = n def __iter__(self): return self def next(self): if self.idx < self.n: val = self.idx self.idx += 1 return val else: raise StopIteration() myRange = MyRange(3) for i in myRange: print
i
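# The same 0..n-1 sequence via a generator function, which implements the
# iterator protocol (next()/StopIteration) automatically instead of by hand:
def my_range(n):
    idx = 0
    while idx < n:
        yield idx
        idx += 1

for i in my_range(3):
    print i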
# -*- coding: utf-8 -*- import os.path import re import warnings try: from setuptools import setup, find_packages except ImportError: from distribute_setup import use_setuptools use_setuptools()
from setuptools import setup, find_packages

version = '0.2.1'

news = os.path.join(os.path.dirname(__file__), 'docs', 'news.rst')
news = open(news).read()
parts = re.split(r'([0-9\.]+)\s*\n\r?-+\n\r?', news)
found_news = ''
for i in range(len(parts)-1):
    if parts[i] == version:
        # parts alternates [text, version, text, ...]; the element after the
        # matched version heading is that version's news body.
        found_news = parts[i+1]
        break
if not found_news:
    warnings.warn('No news for this version found.'
) long_description = """ keepassdb is a Python library that provides functionality for reading and writing KeePass 1.x (and KeePassX) password databases. This library brings together work by multiple authors, including: - Karsten-Kai König <kkoenig@posteo.de> - Brett Viren <brett.viren@gmail.com> - Wakayama Shirou <shirou.faw@gmail.com> """ if found_news: title = 'Changes in %s' % version long_description += "\n%s\n%s\n" % (title, '-'*len(title)) long_description += found_news setup( name = "keepassdb", version = version, author = "Hans Lellelid", author_email = "hans@xmpl.org", url = "http://github.com/hozn/keepassdb", license = "GPLv3", description = "Python library for reading and writing KeePass 1.x databases.", long_description = long_description, packages = find_packages(), include_package_data=True, package_data={'keepassdb': ['tests/resources/*']}, install_requires=['pycrypto>=2.6,<3.0dev'], tests_require = ['nose>=1.0.3'], test_suite = 'keepassdb.tests', classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.0', 'Programming Language :: Python :: 3.1', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Topic :: Security :: Cryptography', 'Topic :: Software Development :: Libraries :: Python Modules' ], use_2to3=True, zip_safe=False # Technically it should be fine, but there are issues w/ 2to3 )
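# Why `parts[i+1]` above is the news body: re.split with a capturing group
# keeps each captured version heading in the result, so the list alternates
# [preamble, version, body, version, body, ...]. A small self-contained check
# (the news text here is made up for illustration):
import re

sample_news = "0.2.1\n-----\nFixed header parsing.\n\n0.2.0\n-----\nFirst beta.\n"
sample_parts = re.split(r'([0-9\.]+)\s*\n\r?-+\n\r?', sample_news)
# -> ['', '0.2.1', 'Fixed header parsing.\n\n', '0.2.0', 'First beta.\n']
assert sample_parts[sample_parts.index('0.2.1') + 1].startswith('Fixed')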
#!/Users/harvey/Projects/face-hack/venv/face/bin/python # # The Python Imaging Library # $Id$ # from __future__ import print_function try: from tkinter import * except ImportError: from Tkinter import * from PIL import Image, ImageTk import sys # -------------------------------------------------------------------- # an image animation player class UI(Label): def __init__(self, master, im): if isinstance(im, list): # list of images self.im = im[1:] im = self.im[0] else: # sequence self.im = im if im.mode == "1": self.image = ImageTk.BitmapImage(im, foreground="white") else: self.image = ImageTk.PhotoImage(im) Label.__init__(self, master, image=self.image, bg="black", bd=0) self.update() try: duration = im.info["duration"] except KeyError: duration = 100 self.after(duration, self.next) def next(self): if isinstance(self.im, list): try: im = self.im[0] del self.im[0] self.image.paste(im) except IndexError: return # end of list else: try: im = self.im im.seek(im.tell() + 1) self.image.paste(im) except EOFError: return # end of file try: duration = im.info["duration"] except KeyError: duration = 100 self.after(duration, self.next)
self.update_idletasks() # -------------------------------------------------------------------- # scr
ipt interface if __name__ == "__main__": if not sys.argv[1:]: print("Syntax: python player.py imagefile(s)") sys.exit(1) filename = sys.argv[1] root = Tk() root.title(filename) if len(sys.argv) > 2: # list of images print("loading...") im = [] for filename in sys.argv[1:]: im.append(Image.open(filename)) else: # sequence im = Image.open(filename) UI(root, im).pack() root.mainloop()
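# The sequence branch of UI.next() above relies on PIL's frame protocol:
# Image.seek() advances to a frame and raises EOFError past the last one.
# A minimal standalone frame counter built on the same protocol; the
# filename in the usage comment is a placeholder.
from PIL import Image


def count_frames(path):
    im = Image.open(path)
    frames = 0
    try:
        while True:
            im.seek(frames)
            frames += 1
    except EOFError:
        pass
    return frames

# Example: print(count_frames("animation.gif"))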
Thomas J Fan <thomasjpfan@gmail.com> # License: BSD 3 clause import numpy as np from ._base import _BaseImputer from ..utils.validation import FLOAT_DTYPES from ..metrics import pairwise_distances_chunked from ..metrics.pairwise import _NAN_METRICS from ..neighbors._base import _get_weights from ..neighbors._base import _check_weights from ..utils import is_scalar_nan from ..utils._mask import _get_mask from ..utils.validation import check_is_fitted class KNNImputer(_BaseImputer): """Imputation for completing missing values using k-Nearest Neighbors. Each sample's missing values are imputed using the mean value from `n_neighbors` nearest neighbors found in the training set. Two samples are close if the features that neither is missing are close. Read more in the :ref:`User Guide <knnimpute>`. .. versionadded:: 0.22 Parameters ---------- missing_values : int, float, str, np.nan or None, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to np.nan, since `pd.NA` will be converted to np.nan. n_neighbors : int, default=5 Number of neighboring samples to use for imputation. weights : {'uniform', 'distance'} or callable, default='uniform' Weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - callable : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. metric : {'nan_euclidean'} or callable, default='nan_euclidean' Distance metric for searching neighbors. Possible values: - 'nan_euclidean' - callable : a user-defined function which conforms to the definition of ``_pairwise_callable(X, Y, metric, **kwds)``. The function accepts two arrays, X and Y, and a `missing_values` keyword in `kwds` and returns a scalar distance value. copy : bool, default=True If True, a copy of X will be created. If False, imputation will be done in-place whenever possible. add_indicator : bool, default=False If True, a :class:`MissingIndicator` transform will stack onto the output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. Attributes ---------- indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. ``None`` if add_indicator is False. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 References ---------- * Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17 no. 6, 2001 Pages 520-525. Examples -------- >>> import numpy as np >>> from sklearn.impute import KNNImputer >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]] >>> imputer = KNNImputer(n_neighbors=2) >>> imputer.fit_transform(X) array([[1. , 2. , 4. ], [3. , 4. , 3. ], [5.5, 6. , 5. ], [8. , 8. , 7. 
]]) """ def __init__(self, *, missing_values=np.nan, n_neighbors=5, weights="uniform", metric="nan_euclidean", copy=True, add_indicator=False): super().__init__( missing_values=missi
ng_values, add_indicator=add_indicator ) self.n_neighbors = n_neighbors self.weights = weights self.metric = metric self.copy = copy def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col): """Helper function to impu
te a single column.

        Parameters
        ----------
        dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
            Distance matrix between the receivers and potential donors from
            training set. There must be at least one non-nan distance between
            a receiver and a potential donor.

        n_neighbors : int
            Number of neighbors to consider.

        fit_X_col : ndarray of shape (n_potential_donors,)
            Column of potential donors from training set.

        mask_fit_X_col : ndarray of shape (n_potential_donors,)
            Missing mask for fit_X_col.

        Returns
        -------
        imputed_values: ndarray of shape (n_receivers,)
            Imputed values for receiver.
        """
        # Get donors
        donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1,
                                     axis=1)[:, :n_neighbors]

        # Get weight matrix from distance matrix
        donors_dist = dist_pot_donors[
            np.arange(donors_idx.shape[0])[:, None], donors_idx]

        weight_matrix = _get_weights(donors_dist, self.weights)

        # fill nans with zeros
        if weight_matrix is not None:
            weight_matrix[np.isnan(weight_matrix)] = 0.0

        # Retrieve donor values and calculate kNN average
        donors = fit_X_col.take(donors_idx)
        donors_mask = mask_fit_X_col.take(donors_idx)
        donors = np.ma.array(donors, mask=donors_mask)

        return np.ma.average(donors, axis=1, weights=weight_matrix).data

    def fit(self, X, y=None):
        """Fit the imputer on X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        self : object
        """
        # Check data integrity and calling arguments
        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = "allow-nan"

        if self.metric not in _NAN_METRICS and not callable(self.metric):
            raise ValueError(
                "The selected metric does not support NaN values")

        if self.n_neighbors <= 0:
            raise ValueError(
                "Expected n_neighbors > 0. Got {}".format(self.n_neighbors))

        X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
                                force_all_finite=force_all_finite,
                                copy=self.copy)
        _check_weights(self.weights)

        self._fit_X = X
        self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)

        super()._fit_indicator(self._mask_fit_X)

        return self

    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input data to complete.

        Returns
        -------
        X : array-like of shape (n_samples, n_output_features)
            The imputed dataset. `n_output_features` is the number of features
            that is not always missing during `fit`.
        """
        check_is_fitted(self)
        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = "allow-nan"
        X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
                                force_all_finite=force_all_finite,
                                copy=self.copy, reset=False)

        mask = _get_mask(X, self.
#!/usr/bin/python -Wall

# ================================================================
# Copyright (c) John Kerl 2007
# kerl.john.r@gmail.com
# ================================================================

from __future__ import division # 1/2 = 0.5, not 0.
from math import *
from sackmat_m import *
import copy
import sys  # needed for sys.argv in gt_something() below

# ----------------------------------------------------------------
# Let
#   F: R^m -> R^n
# i.e.
#          [ F_1(x_1, ..., x_m) ]
#   F(x) = [   :      :      :  ]
#          [ F_n(x_1, ..., x_m) ].
# Then Dij = dFi/dxj, i=1..n, j=1..m (an n x m matrix).
# This is numerically approximated (forward-difference approximation) by
#   (F(x1,...,xj+h,...,xn) - F(x1,...,xj,...,xn)) / h
# or (centered-difference approximation)
#   (F(x1,...,xj+h/2,...,xn) - F(x1,...,xj-h/2,...,xn)) / h.

def jac(F, q, h=1e-6):
    m = len(q)
    n = len(F(q))
    DFq = make_zero_matrix(n, m)

    # Centered-difference approximation
    h2 = 0.5 * h
    for j in range(0, m):
        qb = copy.copy(q)
        qf = copy.copy(q)
        qb[j] -= h2
        qf[j] += h2
        Fqb = F(qb)
        Fqf = F(qf)
        for i in range(0, n):
            DFq[i][j] = (Fqf[i] - Fqb[i]) / h
    return DFq

# ----------------------------------------------------------------
def F1(q):
    [x, y, z] = q
    #f1 = x**2
    #f2 = y**2
    #f3 = z**2

    #f1 = x**2 * y**2
    #f2 = y**2 * z**2
    #f3 = z**2 * x**2

    f1 = x * y
    f2 = y * z
    f3 = z * x

    #f1 = 1.0 * y * y
    #f2 = 2.0 * x
    #f3 = 3.0 * z
    return [f1, f2, f3]

# ----------------------------------------------------------------
def F2(q):
    [x, y, z] = q
    return [x**2 + y**2 + z**2]

# ----------------------------------------------------------------
def do_point(F,q):
    print "q =", q
    DFq = jac(F, q)
    print "DFq="
    print DFq
    #print "det(DFq) =", DFq.det()

# ----------------------------------------------------------------
def do_point_with_det(F,q):
    print "-" * 40
    print "q =", q
    DFq = jac(F, q)
    print "DFq="
    print DFq
    print "det(DFq) =", DFq.det()

# ----------------------------------------------------------------
def frufru():
    F = F1
    do_point_with_det(F, [0,0,0])
    print
    do_point_with_det(F, [0,0,1])
    do_point_with_det(F, [0,1,0])
    do_point_with_det(F, [1,0,0])
    print
    do_point_with_det(F, [1,1,0])
    do_point_with_det(F, [1,0,1])
    do_point_with_det(F, [0,1,1])
    print
    do_point_with_det(F, [1,1,1])
    do_point_with_det(F, [1,2,3])

    do_point_with_det(F, [sqrt(0.5),sqrt(0.5),0])
    a=0.1
    do_point_with_det(F, [cos(a),sin(a),0])

    a = 0.2
    b = 0.3
    c = sqrt(1 - a**2 - b**2)
    do_point_with_det(F, [a,b,c])

    a = 0.8
    b = 0.2
    c = sqrt(1 - a**2 - b**2)
    do_point_with_det(F, [a,b,c])
    print

# ----------------------------------------------------------------
def F(q):
    [x, y, z] = q
    #f1 = x**2
    #f2 = y**2
    #f3 = z**2

    #f1 = x**2 * y**2
    #f2 = y**2 * z**2
    #f3 = z**2 * x**2

    f1 = x * y
    f2 = y * z
    f3 = z * x

    #f1 = 1.0 * y * y
    #f2 = 2.0 * x
    #f3 = 3.0 * z
    return [f1, f2, f3]

# ----------------------------------------------------------------
def G(q):
    [x, y, z] = q
    return [x**2 + y**2 + z**2]

# ----------------------------------------------------------------
def gt_something():
    thetalo = 0
    thetahi = 2*pi  # pi comes from "from math import *" above
    philo = 0
    phihi = pi
    nphi = 12
    ntheta = 12
    if (len(sys.argv) == 3):
        nphi = int(sys.argv[1])
        ntheta = int(sys.argv[2])
    dtheta = (thetahi-thetalo)/ntheta
    dphi = (phihi-philo)/nphi

    phi = 0
    for ii in range(0, nphi):
        theta = 0
        for jj in range(0, ntheta):
            x = sin(phi) * cos(theta)
            y = sin(phi) * sin(theta)
            z = cos(phi)
            q = [x,y,z]
            DF = jac(F, q)
            d = DF.det()

            # Let G(x,y,z) = x^2 + y^2 + z^2. The unit sphere is the level set
            # for G(x,y,z) = 1.
            # Tangent plane at (u,v,w):
            #   dG/dx(x-u) + dG/dy(y-v) + dG/dz(z-w)
            # where (u,v,w) are the coordinates of the point q and (x,y,z) are variable.
DG = jac(G, q) # For DF restricted to this tangent plane: # * DG (i.e. grad G) is the normal vector # * This gives a point-normal form for the tangent plane # * Project the standard basis for R3 onto the tangent plane # * Row-reduce DF = jac(F, q) # * Form an orthonormal basis # * Compute DF of the basis # * Row-reduce that to get the rank of DF on TM|q #print "q = ", q, #print "det(DF) = ", d #print "%7.4f %7.4f %7.4f %7.4f %7.4f,%7.4f %7.4f,%7.4f %7.4f,%7.4f" % ( # x,y,z, d, DG[0][0], -DG[0][0]*x, DG[0][1], -DG[0][1]*y, DG[0][2], -DG[0][2]*z) nx = DG[0][0] ny = DG[0][1] nz = DG[0][2]
nml = [nx, ny, nz] e0 = [1,0,0] e1 = [0,1,0] e2 = [0,0,1] # Project the standard basis for R3 down to the tange
nt plane TM|q. proj_e0 = projperp(e0, nml) proj_e1 = projperp(e1, nml) proj_e2 = projperp(e2, nml) proj_e = sackmat([proj_e0, proj_e1, proj_e2]) # Row-reduce, compute rank, and trim proj_e.row_echelon_form() rank = proj_e.rank_rr() proj_e.elements = proj_e.elements[0:rank] # Orthonormalize proj_e = gram_schmidt(proj_e) #print "q=[%7.4f,%7.4f,%7.4f]" % (x, y, z), #print "nml=[%7.4f,%7.4f,%7.4f]" % (nx, ny, nz), #print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % ( #proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]), # Take DF of the orthonormal basis. proj_e = proj_e.transpose() proj_e = DF * proj_e proj_e = proj_e.transpose() rank = proj_e.rank() #print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % ( #proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]), #print "rank=", proj_e.rank_rr(), #print "d=%11.3e" % (d), # xxx hack if (rank == 1): d = 0.7 #print "%11.3e" % (d), print "%8.4f" % (d), #print theta += dtheta print phi += dphi gt_something()
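# A quick sanity check of the centered-difference jac() above. For the
# bilinear map F(x, y, z) = [xy, yz, zx] the exact Jacobian is
#   [[y, x, 0], [0, z, y], [z, 0, x]],
# and a centered difference is exact for such maps up to rounding. The
# tolerance below is an assumption comfortably above the expected error.
def check_jac_at(q, tol=1e-6):
    x, y, z = q
    exact = [[y, x, 0.0], [0.0, z, y], [z, 0.0, x]]
    approx = jac(F, q)
    for i in range(3):
        for j in range(3):
            assert abs(approx[i][j] - exact[i][j]) < tol

check_jac_at([1.0, 2.0, 3.0])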
information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from copy import deepcopy from typing import Any, Awaitable, Optional, TYPE_CHECKING from azure.core.rest import AsyncHttpResponse, HttpRequest from azure.mgmt.core import AsyncARMPipelineClient from msrest import Deserializer, Serializer from .. import models from ._configuration import WebSiteManagementClientConfiguration from .operations import AppServiceCertificateOrdersOperations, AppServiceEnvironmentsOperations, AppServicePlansOperations, CertificateRegistrationProviderOperations, CertificatesOperations, DeletedWebAppsOperations, DiagnosticsOperations, DomainRegistrationProviderOperations, DomainsOperations, ProviderOperations, RecommendationsOperations, ResourceHealthMetadataOperations, StaticSitesOperations, TopLevelDomainsOperations, WebAppsOperations, WebSiteManagementClientOperationsMixin if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class WebSiteManagementClient(WebSiteManagementClientOperationsMixin): """WebSite Management Client. :ivar app_service_certificate_orders: AppServiceCertificateOrdersOperations operations :vartype app_service_certificate_orders: azure.mgmt.web.v2020_06_01.aio.operations.AppServiceCertificateOrdersOperations :ivar certificate_registration_provider: CertificateRegistrationProviderOperations operations :vartype certificate_registration_provider: azure.mgmt.web.v2020_06_01.aio.operatio
ns.CertificateRegistrationProviderOperations :ivar domains: DomainsOperations operations :vartype domains: azure.mgmt.web.v2020_06_01.aio.operations.DomainsOperations :ivar top_level_domains: TopLevelDomainsOperations operations :vartype top_level_domains: azure.mgmt.web.v2020_06_01.aio.operations.TopLevelDomainsOperations :ivar domain_registration_provider: DomainRegistrationProviderOperations operations :vartype do
main_registration_provider: azure.mgmt.web.v2020_06_01.aio.operations.DomainRegistrationProviderOperations :ivar certificates: CertificatesOperations operations :vartype certificates: azure.mgmt.web.v2020_06_01.aio.operations.CertificatesOperations :ivar deleted_web_apps: DeletedWebAppsOperations operations :vartype deleted_web_apps: azure.mgmt.web.v2020_06_01.aio.operations.DeletedWebAppsOperations :ivar diagnostics: DiagnosticsOperations operations :vartype diagnostics: azure.mgmt.web.v2020_06_01.aio.operations.DiagnosticsOperations :ivar provider: ProviderOperations operations :vartype provider: azure.mgmt.web.v2020_06_01.aio.operations.ProviderOperations :ivar recommendations: RecommendationsOperations operations :vartype recommendations: azure.mgmt.web.v2020_06_01.aio.operations.RecommendationsOperations :ivar web_apps: WebAppsOperations operations :vartype web_apps: azure.mgmt.web.v2020_06_01.aio.operations.WebAppsOperations :ivar static_sites: StaticSitesOperations operations :vartype static_sites: azure.mgmt.web.v2020_06_01.aio.operations.StaticSitesOperations :ivar app_service_environments: AppServiceEnvironmentsOperations operations :vartype app_service_environments: azure.mgmt.web.v2020_06_01.aio.operations.AppServiceEnvironmentsOperations :ivar app_service_plans: AppServicePlansOperations operations :vartype app_service_plans: azure.mgmt.web.v2020_06_01.aio.operations.AppServicePlansOperations :ivar resource_health_metadata: ResourceHealthMetadataOperations operations :vartype resource_health_metadata: azure.mgmt.web.v2020_06_01.aio.operations.ResourceHealthMetadataOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000). :type subscription_id: str :param base_url: Service URL. Default value is 'https://management.azure.com'. :type base_url: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
""" def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, base_url: str = "https://management.azure.com", **kwargs: Any ) -> None: self._config = WebSiteManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self._serialize.client_side_validation = False self.app_service_certificate_orders = AppServiceCertificateOrdersOperations(self._client, self._config, self._serialize, self._deserialize) self.certificate_registration_provider = CertificateRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize) self.domains = DomainsOperations(self._client, self._config, self._serialize, self._deserialize) self.top_level_domains = TopLevelDomainsOperations(self._client, self._config, self._serialize, self._deserialize) self.domain_registration_provider = DomainRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize) self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize) self.deleted_web_apps = DeletedWebAppsOperations(self._client, self._config, self._serialize, self._deserialize) self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) self.provider = ProviderOperations(self._client, self._config, self._serialize, self._deserialize) self.recommendations = RecommendationsOperations(self._client, self._config, self._serialize, self._deserialize) self.web_apps = WebAppsOperations(self._client, self._config, self._serialize, self._deserialize) self.static_sites = StaticSitesOperations(self._client, self._config, self._serialize, self._deserialize) self.app_service_environments = AppServiceEnvironmentsOperations(self._client, self._config, self._serialize, self._deserialize) self.app_service_plans = AppServicePlansOperations(self._client, self._config, self._serialize, self._deserialize) self.resource_health_metadata = ResourceHealthMetadataOperations(self._client, self._config, self._serialize, self._deserialize) def _send_request( self, request: HttpRequest, **kwargs: Any ) -> Awaitable[AsyncHttpResponse]: """Runs the network request through the client's chained policies. >>> from azure.core.rest import HttpRequest >>> request = HttpRequest("GET", "https://www.example.org/") <HttpRequest [GET], url: 'https://www.example.org/'> >>> response = await client._send_request(request) <AsyncHttpResponse: 200 OK> For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart :param request: The network request you want to make. Required. :type request: ~azure.core.rest.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to False. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.rest.AsyncHttpResponse """ request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs) async def close(self) -> None: await self._client.cl
from FortyTwo.fortytwo import * def Start(): """No Clue what to
ad
d here"""
''Saves an uploaded data source to MEDIA_ROOT/data_sources ''' with open(os.path.join(settings.MEDIA_ROOT, 'data_sources', f.name), 'wb+') as destination: for chunk in f.chunks(): destination.write(chunk) return destination @task() def extract_features(dataset_id, instance_id, audiofile_path): dataset = Dataset.objects.get(pk=dataset_id) inst = Instance.objects.get(pk=instance_id) n_frames, sample_rate, duration = 0, 0, 0 # Calculate the sample rate and duration with contextlib.closing(wave.open(audiofile_path, 'r')) as audiofile: n_frames = audiofile.getnframes() sample_rate = audiofile.getframerate() duration = n_frames / float(sample_rate) # Format - {'Display name': 'name: Definition'} FEATURES = [ {'display_name': 'Spectral Shape Characteristics', 'yaafe_name': 'sss', 'yaafe_definition': 'SpectralShapeStatistics', 'subfeatures': ['Spectral centroid', 'Spectral spread', 'Spectral kurtosis', 'Spectral skewness'] }, {'display_name': 'Temporal Shape Characteristics', 'yaafe_name': 'tss', 'yaafe_definition': 'TemporalShapeStatistics', 'subfeatures': ['Temporal centroid', 'Temporal spread', 'Temporal kurtosis', 'Temporal skewness'] }, {'display_name': 'ZCR', 'yaafe_name': 'zcr', 'yaafe_definition': 'ZCR', 'unit': 'Hz' }, {'display_name': 'Energy', 'yaafe_name': 'energy', 'yaafe_definition': 'Energy', }, {'display_name': 'Loudness', 'yaafe_name': 'loudness', 'yaafe_definition': 'Loudness', }, {'display_name': 'Spectral rolloff', 'yaafe_name': 'spectral_rolloff', 'yaafe_definition': 'SpectralRolloff', }, {'display_name': 'Perceptual sharpness', 'yaafe_name': 'perceptual_sharpness', 'yaafe_definition': 'PerceptualSharpness', }, {'display_name': 'Perceptual spread', 'yaafe_name': 'perceptual_spread', 'yaafe_definition': 'PerceptualSpread', }, {'display_name': 'Duration', 'unit': 's', }, {'display_name': 'Sample rate', 'unit': 'Hz', }, {'display_name': 'Spectral decrease', 'yaafe_name': 'spectral_decrease', 'yaafe_definition': 'SpectralDecrease', }, {'display_name': "Spectral flatness", 'yaafe_name': 'spectral_flatness', 'yaafe_definition': 'SpectralFlatness', }, # {'display_name': "Spectral flux", # 'yaafe_name': 'spectral_flux', # 'yaafe_definition': 'SpectralFlux', # }, {'display_name': "Spectral slope", 'yaafe_name': 'spectral_slope', 'yaafe_definition': 'SpectralSlope', }, # {'display_name': "Spectral variation", # 'yaafe_name': 'spectral_variation', # 'yaafe_definition': 'SpectralVariation', # } ] # Add features to extract feature_plan = yf.FeaturePlan(sample_rate=sample_rate, resample=False) for feature in FEATURES: if 'yaafe_definition' in feature: # YAAFE feature plans take definitions of the form: 'zcr: ZCR' full_definition = feature['yaafe_name'] + ': ' + feature['yaafe_definition'] # Add the feature to the feature plan to be extracted feature_plan.addFeature(full_definition) # Configure an Engine engine = yf.Engine() engine.load(feature_plan.getDataFlow()) # Extract features afp = yf.AudioFileProcessor() afp.processFile(engine, audiofile_path) # outputs dict format - {'Spectral centroid': [[2.33], [4.34],...[2.55]]} outputs = {} # Read and store output arrays to outputs dict for feature in FEATURES: if 'yaafe_definition' in feature: # Exclude duration and sample rate output_name = feature['yaafe_name']
# If the feature has subfeatures, e.g. Spec shape stats if 'subfeatures' in feature: full_output = engine.readOutput(output_name) for i, subfeature_display_name in enumerate(feature['subfeatures']): outputs[subfeature_display_name] = full_output[:, i] # If the feature has only 1 dimension(1 X T array) else: display_name = feature['display_name']
a = engine.readOutput(output_name) # 2D array # Transpose data to make it a 1D array outputs[display_name] = a.transpose()[0] # Create YAAFE feature objects feature_obj_list = [] for display_name in outputs.keys(): feature = find_dict_by_item(('display_name', display_name), FEATURES) f, created = Feature.objects.get_or_create( name=display_name.lower(), display_name=display_name ) if feature and ('unit' in feature): f.unit = feature['unit'] f.save() feature_obj_list.append(f) # Create Sample rate and Duration objects rate_obj, created = Feature.objects.get_or_create(name='sample rate') if not rate_obj.unit: rate_obj.unit = 'Hz' rate_obj.save() feature_obj_list.append(rate_obj) duration_obj, created = Feature.objects.get_or_create(name='duration') if not duration_obj.unit: duration_obj.unit = 's' duration_obj.save() feature_obj_list.append(duration_obj) # Associate features with instance # for feature in feature_obj_list: # inst.features.add(feature) # If dataset has labels if dataset.labels(): # NOTE: This assumes there's only one label name per dataset. # Just indexes the first label name label_name = dataset.labels()[0] else: # attach a placeholder LabelName called 'variable' filtered = LabelName.objects.filter(name='variable') # make sure that 'get' doesn't return an error if there are more than 1 # LabelName called 'variable' if len(filtered) <= 1: label_name, c = LabelName.objects.get_or_create(name='variable') else: label_name = filtered[0] # Add a placeholder label value called "none" to instance # This is necessary in order for plotting to work filtered = LabelValue.objects.filter(value="none", label_name=label_name) if len(filtered) <= 1: no_label, c = LabelValue.objects.get_or_create(value="none", label_name=label_name) else: no_label = filtered[0] inst.label_values.add(no_label) inst.save() # Save output data and associate it with inst for display_name, output in outputs.iteritems(): if output.size > 0: # Avoid empty data for i in range(output[0].size): output_mean = output[i].mean() FeatureValue.objects.create(value=output_mean, feature=Feature.objects.get(name__iexact=display_name.lower()), instance=inst) # Save sample_rate and duration data FeatureValue.objects.create(value=sample_rate, feature=Feature.objects.get(name='sample rate'), instance=inst) FeatureValue.objects.cr
import unittest import hashlib import httpsig.sign as sign from httpsig.utils import parse_authorization_header from requests.models import RequestEncodingMixin c
lass CrossPlatformTestCase(unittest.TestCase): def test_content_md5(self): data = {'signature': "HPMOHRgPSMKdXrU6AqQs/i9S7alOakkHsJiqLGmInt05Cxj6b/WhS7kJxbIQxKmDW08YKzoFnbVZIoTI2qofEzk="} assert RequestEncodingMixin._encode_params(data) == "signature=HPMOHRgPSMKdXrU6AqQs%2Fi9S7alOakkHsJiqLGmInt05Cxj6b%2FWhS7kJxbIQxKmDW08YKzoFnbVZIoTI2qofEzk%3D" assert hashlib.md5(RequestEncodingMixin._encode_params(data).encode(
"utf-8")).hexdigest() == "fdfc1a717d2c97649f3b8b2142507129" def test_hmac(self): hs = sign.HeaderSigner(key_id='pda', algorithm='hmac-sha256', secret='secret', headers=['(request-target)', 'Date']) unsigned = { 'Date': 'today', 'accept': 'llamas' } signed = hs.sign(unsigned, method='GET', path='/path?query=123') auth = parse_authorization_header(signed['authorization']) params = auth[1] self.assertIn('keyId', params) self.assertIn('algorithm', params) self.assertIn('signature', params) self.assertEqual(params['keyId'], 'pda') self.assertEqual(params['algorithm'], 'hmac-sha256') self.assertEqual(params['signature'], 'SFlytCGpsqb/9qYaKCQklGDvwgmrwfIERFnwt+yqPJw=') if __name__ == "__main__": unittest.main()
"""Resource manage module.""" import os from .utils import RequestUtil class ResourceAPI(object): """Resource wechat api.""" ADD_TEMP_URI = ('https://api.weixin.qq.com/cgi-bin/media/' 'upload?access_token={}&type={}') @classmethod def upload(cls, path, token, rtype, upload_type='temp'): """Upload resource. :path str: Resource local path :token str: Wechat access token :rtype str: Resource type such as image, voice ... :upload_type: Upload type, Now support temp and forever """ if not os.path.exists(path): return False method = getattr(cls, '_upload_{}'.format(upload_type), None) if method: return method(path, token, rtype) return False @classmethod def _upload_temp(cls, path, token, rtype): """Upload temp m
edia to wechat server. :path str: Upload e
ntity local path :token str: Wechat access token :rtype str: Upload entity type :Return dict: """ uri = cls.ADD_TEMP_URI.format(token, rtype) resp = RequestUtil.upload(uri, {}, path) return resp
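# Hypothetical usage of ResourceAPI.upload; the file path and token below
# are placeholders. For a successful temp upload the wechat endpoint
# responds with JSON including a media_id; on a missing file or unsupported
# upload_type the method returns False.
if __name__ == '__main__':
    result = ResourceAPI.upload('/tmp/banner.jpg', 'ACCESS_TOKEN', 'image')
    if result:
        print(result.get('media_id'))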
# coding=utf-8 from __futur
e__ import unicode_literals, print_function from flask import request, jsonify, url_for from flask_login import current_user import bugsnag from . import load from webhookdb.tasks.pull_request_file import spawn_page_tasks_for_pull_request_files @load.route('/repos/<owner>/<repo>/pulls/<int:number>/files', methods=["POST"]) def pull_request_files(owner, repo, number): "
"" Queue tasks to load the pull request files (diffs) for a single pull request into WebhookDB. :statuscode 202: task successfully queued """ bugsnag_ctx = {"owner": owner, "repo": repo, "number": number} bugsnag.configure_request(meta_data=bugsnag_ctx) children = bool(request.args.get("children", False)) result = spawn_page_tasks_for_pull_request_files.delay( owner, repo, number, children=children, requestor_id=current_user.get_id(), ) resp = jsonify({"message": "queued"}) resp.status_code = 202 resp.headers["Location"] = url_for("tasks.status", task_id=result.id) return resp
import math
import pkg_resources
import itertools
import pandas as pd
import networkx as nx
from postman_problems.viz import add_node_attributes
from postman_problems.graph import (
    read_edgelist,
    create_networkx_graph_from_edgelist,
    get_odd_nodes,
    get_shortest_paths_distances
)
from postman_problems.solver import rpp, cpp

# ###################
# PARAMETERS / DATA #
# ###################

EDGELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/edgelist_sleeping_giant.csv')
NODELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/nodelist_sleeping_giant.csv')
START_NODE = 'b_end_east'

#########
# TESTS #
#########

def test_read_sleeping_giant_edgelist():
    df = read_edgelist(EDGELIST, keep_optional=True)

    # check that our Sleeping Giant example dataset contains the correct fields and values
    assert set(['node1', 'node2', 'trail', 'color', 'distance', 'estimate', 'required']).issubset(df.columns.values)
    assert math.isclose(df[df['required'] == 1]['distance'].sum(), 26.01)
    assert math.isclose(df['distance'].sum(), 30.48)

    df_req = read_edgelist(EDGELIST, keep_optional=False)
    assert math.isclose(df_req['distance'].sum(), 26.01)
    assert 'req' not in df_req.columns


def test_create_networkx_graph_from_edgelist():
    df = read_edgelist(EDGELIST, keep_optional=True)
    graph = create_networkx_graph_from_edgelist(df, edge_id='id')

    # check that our starting graph is created correctly
    assert isinstance(graph, nx.MultiGraph)
    assert len(graph.edges()) == 133
    assert len(graph.nodes()) == 78
    assert graph['b_end_east']['b_y'][0]['color'] == 'blue'
    assert graph['b_end_east']['b_y'][0]['trail'] == 'b'
    assert graph['b_end_east']['b_y'][0]['distance'] == 1.32

    # check that starting graph with required trails only is correct
    df_req = read_edgelist(EDGELIST, keep_optional=False)
    graph_req = create_networkx_graph_from_edgelist(df_req, edge_id='id')
    assert isinstance(graph_req, nx.MultiGraph)
    assert len(graph_req.edges()) == 121
    assert len(graph_req.nodes()) == 74


def test_add_node_attributes():
    # create objects for testing
    df = read_edgelist(EDGELIST)
    graph = create_networkx_graph_from_edgelist(df, edge_id='id')
    nodelist_df = pd.read_csv(NODELIST)
    graph_node_attrs = add_node_attributes(graph, nodelist_df)

    assert len(graph_node_attrs.nodes()) == 74

    # check that each node attribute has an X and Y coordinate
    for k, v in graph_node_attrs.nodes(data=True):
        assert 'X' in v
        assert 'Y' in v

    # spot check node attributes for first node
    node_data_from_graph = list(graph_node_attrs.nodes(data=True))
    node_names = [n[0] for n in node_data_from_graph]
    assert 'rs_end_north' in node_names
    key = node_names.index('rs_end_north')
    assert node_data_from_graph[key][1]['X'] == 1772
    assert node_data_from_graph[key][1]['Y'] == 172


def test_get_shortest_paths_distances():
    df = read_edgelist(EDGELIST)
    graph = create_networkx_graph_from_edgelist(df, edge_id='id')

    odd_nodes = get_odd_nodes(graph)
    odd_node_pairs = list(itertools.combinations(odd_nodes, 2))

    # coarsely checking structure of `get_shortest_paths_distances` return value
    odd_node_pairs_shortest_paths = get_shortest_paths_distances(graph, odd_node_pairs, 'distance')
    assert len(odd_node_pairs_shortest_paths) == 630
    assert isinstance(odd_node_pairs_shortest_paths, dict)

    # check that each node name appears the same number of times in `get_shortest_paths_distances` return value
    node_names = list(itertools.chain(*[i[0] for i in odd_node_pairs_shortest_paths.items()]))
    assert set(pd.value_counts(node_names)) == set([35])


def 
test_nodelist_edgelist_overlap(): """ Test that the nodelist and the edgelist contain the same node names. If using X,Y coordinates for plotting and not all nodes have attributes, this could get messy. """ eldf = read_edgelist(EDGELIST, keep_optional=True) nldf = pd.read_csv(NODELIST) edgelist_nodes = set(eldf['node1'].append(eldf['node2'])) nodelist_nodes = set(nldf['id']) nodes_in_el_but_not_nl = edgelist_nodes - nodelist_nodes assert nodes_in_el_but_not_
nl == set(), \ "Warning: The following nodes are in the edgelist, but not the nodelist: {}".format(nodes_in_el_but_not_nl) nodes_in_nl_but_not_el = nodelist_nodes - edgelist_nodes assert nodes_in_nl_but_not_el == set(), \ "Warning: The following nodes are in the nodelist, but not the edgelist: {}".format(nodes_in_nl_but_not_el) def test_sleeping_giant_cpp_solution():
    cpp_solution, graph = cpp(edgelist_filename=EDGELIST, start_node=START_NODE)

    # make sure the number of edges in the solution is correct
    assert len(cpp_solution) == 155

    # make sure our total mileage is correct
    cpp_solution_distance = sum([edge[3]['distance'] for edge in cpp_solution])
    assert math.isclose(cpp_solution_distance, 33.25)

    # make sure our circuit begins and ends at the same place
    assert cpp_solution[0][0] == cpp_solution[-1][1] == START_NODE

    # make sure the original graph is properly returned, with the expected
    # number of augmented (duplicated) edges
    assert len(graph.edges()) == 121
    assert [e[2].get('augmented') for e in graph.edges(data=True)].count(True) == 35


def test_sleeping_giant_rpp_solution():
    rpp_solution, graph = rpp(edgelist_filename=EDGELIST, start_node=START_NODE)

    # make sure the number of edges in the solution is correct
    assert len(rpp_solution) == 151

    # make sure our total mileage is correct
    rpp_solution_distance = sum([edge[3]['distance'] for edge in rpp_solution])
    assert math.isclose(rpp_solution_distance, 32.12)

    # make sure our circuit begins and ends at the same place
    assert rpp_solution[0][0] == rpp_solution[-1][1] == START_NODE

    # make sure the original graph is properly returned, with the expected
    # number of augmented edges
    assert len(graph.edges()) == 133
    assert [e[3].get('augmented') for e in graph.edges(data=True, keys=True)].count(True) == 30