| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
02e29e6f7904315dec5079a66cd242c1e48330f9
|
Add filters to separate logging events from non-events
|
nodeconductor/core/log.py
|
nodeconductor/core/log.py
|
from __future__ import absolute_import, unicode_literals
import json
import logging
from logging.handlers import SocketHandler
from datetime import datetime
class EventLoggerAdapter(logging.LoggerAdapter, object):
"""
LoggerAdapter
"""
def __init__(self, logger):
super(EventLoggerAdapter, self).__init__(logger, {})
def process(self, msg, kwargs):
if 'extra' in kwargs:
kwargs['extra']['event'] = True
else:
kwargs['extra'] = {'event': True}
return msg, kwargs
class EventLogFilter(logging.Filter):
"""
A filter that allows only event records that have event=True as extra parameter.
"""
def filter(self, record):
return hasattr(record, 'event')
class EventFormatter(logging.Formatter):
def format_timestamp(self, time):
return datetime.utcfromtimestamp(time).isoformat() + 'Z'
def levelname_to_importance(self, levelname):
if levelname == 'DEBUG':
return 'low'
elif levelname == 'INFO':
return 'normal'
elif levelname == 'WARNING':
return 'high'
elif levelname == 'ERROR':
return 'very high'
else:
return 'critical'
def get_customer_from_relative(self, *relatives):
for r in relatives:
customer = getattr(r, 'customer', None)
if customer is not None:
return customer
def format(self, record):
# base message
message = {
# basic
'@timestamp': self.format_timestamp(record.created),
'@version': 1,
'message': record.getMessage(),
'path': record.pathname,
# logging details
'levelname': record.levelname,
'logger': record.name,
'importance': self.levelname_to_importance(record.levelname),
'importance_code': record.levelno,
'event_type': getattr(record, 'event_type', 'undefined'),
}
# user
user = getattr(record, 'user', None)
if user is not None:
message.update({
"user_name": getattr(user, 'full_name', ''),
"user_uuid": str(getattr(user, 'uuid', '')),
})
# placeholder for a potential link
membership = None
# instance
instance = getattr(record, 'instance', None)
if instance is not None:
membership = getattr(instance, 'cloud_project_membership', None)
message['vm_instance_uuid'] = str(getattr(instance, 'uuid', ''))
# project
project = getattr(record, 'project', None)
if project is None and membership is not None:
project = getattr(membership, 'project', None)
if project is not None:
message.update({
"project_name": getattr(project, 'name', ''),
"project_uuid": str(getattr(project, 'uuid', '')),
})
# project group
project_group = getattr(record, 'project_group', None)
if project_group is not None:
message.update({
"project_group_name": getattr(project_group, 'name', ''),
"project_group_uuid": str(getattr(project_group, 'uuid', '')),
})
# cloud
cloud = getattr(record, 'cloud', None)
if cloud is None and membership is not None:
cloud = getattr(membership, 'cloud', None)
if cloud is not None:
message.update({
"cloud_account_name": getattr(cloud, 'name', ''),
"cloud_account_uuid": str(getattr(cloud, 'uuid', '')),
})
# customer
customer = getattr(record, 'customer', None)
if customer is None:
customer = self.get_customer_from_relative(project, cloud, project_group)
if customer is not None:
message.update({
"customer_name": getattr(customer, 'name', ''),
"customer_uuid": str(getattr(customer, 'uuid', '')),
})
return json.dumps(message)
class TCPEventHandler(SocketHandler, object):
def __init__(self, host='localhost', port=5959):
super(TCPEventHandler, self).__init__(host, port)
self.formatter = EventFormatter()
def makePickle(self, record):
return self.formatter.format(record) + b'\n'
|
Python
| 0
|
@@ -548,22 +548,20 @@
 ass
-EventLogFilter
+RequireEvent
 (log
@@ -629,48 +629,184 @@
 ords
- that have event=True as extra parameter
+.
+    """
+    def filter(self, record):
+        return hasattr(record, 'event')
+
+
+class RequireNotEvent(logging.Filter):
+    """
+    A filter that allows only non-event records
 .
@@ -848,35 +848,39 @@
 
     return 
-has
+not get
 attr(record, 'ev
@@ -875,32 +875,39 @@
 (record, 'event'
+, False
 )
 
 
 class EventF
|
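Decoded, the diff above renames `EventLogFilter` to `RequireEvent` and adds a complementary `RequireNotEvent`. A minimal sketch of how such a pair could route events and ordinary records to separate handlers via `dictConfig`; the handler layout and names here are illustrative assumptions, not part of the commit:

```python
import logging
import logging.config

# The two filters, reconstructed from the decoded delta above.
class RequireEvent(logging.Filter):
    def filter(self, record):
        return hasattr(record, 'event')

class RequireNotEvent(logging.Filter):
    def filter(self, record):
        return not getattr(record, 'event', False)

# Hypothetical config: events go to one stream, everything else to another.
logging.config.dictConfig({
    'version': 1,
    'filters': {
        'require-event': {'()': RequireEvent},
        'require-not-event': {'()': RequireNotEvent},
    },
    'handlers': {
        'events': {'class': 'logging.StreamHandler', 'filters': ['require-event']},
        'app': {'class': 'logging.StreamHandler', 'filters': ['require-not-event']},
    },
    'root': {'level': 'INFO', 'handlers': ['events', 'app']},
})

log = logging.getLogger('demo')
log.info('plain record')                         # passes only RequireNotEvent
log.info('event record', extra={'event': True})  # passes only RequireEvent
```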
8801038cd56885a4aec79736c4f5ab682166e8b9
|
Optimize out clear loop
|
bfcomp.py
|
bfcomp.py
|
import sys
import os
import subprocess
OUTPUT=0
INPUT=1
LOOPSTART=2
LOOPEND=3
MOVE=4
ADD=5
def parse(code):
code = ''.join(i for i in code if i in '.,[]+-<>')
tokens = []
for i in code:
if i == '+':
tokens.append((ADD, 1))
elif i == '-':
tokens.append((ADD, -1))
elif i == '>':
tokens.append((MOVE, 1))
elif i == '<':
tokens.append((MOVE, -1))
elif i == '[':
tokens.append((LOOPSTART, None))
elif i == ']':
tokens.append((LOOPEND, None))
elif i == ',':
tokens.append((INPUT, None))
if i == '.':
tokens.append((OUTPUT, None))
return tokens
def optimize(tokens):
add = 0
move = 0
newtokens = []
for token, value in tokens:
if add and token != ADD:
newtokens.append((ADD, add))
add = 0
elif move and token != MOVE:
newtokens.append((MOVE, move))
move = 0
if token == ADD:
add += value
elif token == MOVE:
move += value
else:
newtokens.append((token, value))
return newtokens
def compile(code):
tokens = parse(code)
tokens = optimize(tokens)
output = """.section .bss
.lcomm mem, 8192
.set startidx, mem + 4096
.section .text
.global _start
_start:
movq $0, %r12
movq $startidx, %rbx
"""
loops = []
loopnum = 0
for token, value in tokens:
if token == ADD:
if value == 1:
output += " inc %r12\n"
elif value > 1:
output += " add $" + str(value) + ", %r12\n"
elif value == -1:
output += " dec %r12\n"
elif value < -1:
output += " sub $" + str(-value) + ", %r12\n"
elif token == MOVE:
if value:
output += " movq %r12, (%rbx)\n"
if value > 0:
output += " add $" + str(8*value) + ", %rbx\n"
else:
output += " sub $" + str(-8*value) + ", %rbx\n"
output += " movq (%rbx), %r12\n"
elif token == LOOPSTART:
loopnum += 1
loops.append(loopnum)
output += " loop" + str(loopnum) + ":\n"
elif token == LOOPEND:
output += " cmp $0, %r12\n" \
" jnz loop" + str(loops.pop()) + '\n'
elif token == INPUT:
output += """
movq $0, %rax
movq $0, %rdi
movq %rbx, %rsi
movq $1, %rdx
syscall
movq (%rbx), %r12
"""
elif token == OUTPUT:
output += """
movq %r12, (%rbx)
movq $1, %rax
movq $1, %rdi
movq %rbx, %rsi
movq $1, %rdx
syscall
"""
# Exit syscall
output += """
movq $60, %rax
movq $0, %rdi
syscall
"""
return output
if __name__ == '__main__':
print("Compiling...")
with open(sys.argv[1]) as bffile:
output = compile(bffile.read())
name = os.path.splitext(sys.argv[1])[0]
with open(name + '.s', 'w') as asmfile:
asmfile.write(output)
print("Assembling...")
status = subprocess.call(['as', '-g', name+'.s', '-o', name+'.o'])
if status == 0:
print("Linking...")
subprocess.call(['ld', name+'.o', '-o', name])
|
Python
| 0.000273
|
@@ -84,16 +84,22 @@
 =4
 ADD=5
+SET=6
 
 def pa
@@ -1175,16 +1175,462 @@
 alue))
 
+    # Optimize out clear loop
+    i = 0
+    loop = 0
+    while i < len(newtokens):
+        if newtokens[i][0] == LOOPSTART:
+            loop += 1
+            j = i + 1
+            while j < len(newtokens) and newtokens[j][0] != LOOPEND:
+                if newtokens[j][0] != ADD:
+                    break
+                j += 1
+            else:
+                del newtokens[i:j+1]
+                newtokens.insert(i, (SET, 0))
+        i += 1
+
 retu
@@ -2231,24 +2231,24 @@
 value < -1:
-
@@ -2292,32 +2292,124 @@
 e) + ", %r12\n"
+        elif token == SET:
+            output += "    movq $" + str(value) + ", %r12\n"
 elif tok
|
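For context: the pass added above detects Brainfuck clear loops such as `[-]`, whose body consists only of ADD tokens and therefore always leaves the current cell at zero, so the whole loop collapses to a single SET 0. A condensed, runnable re-statement of that idea (token codes copied from bfcomp.py; the dataset's version additionally keeps a loop counter):

```python
LOOPSTART, LOOPEND, ADD, SET = 2, 3, 5, 6  # token codes from bfcomp.py

def collapse_clear_loops(tokens):
    """Replace loops whose body is only ADD tokens (e.g. "[-]") with SET 0."""
    out = list(tokens)
    i = 0
    while i < len(out):
        if out[i][0] == LOOPSTART:
            j = i + 1
            while j < len(out) and out[j][0] == ADD:
                j += 1
            if j < len(out) and out[j][0] == LOOPEND:
                out[i:j + 1] = [(SET, 0)]
        i += 1
    return out

# "[-]" parses to LOOPSTART, ADD -1, LOOPEND and collapses to SET 0:
print(collapse_clear_loops([(LOOPSTART, None), (ADD, -1), (LOOPEND, None)]))
# -> [(SET, 0)]
```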
e1315bb8ce57ab24419d8db761ac831e12db9b17
|
Rename _subsequent_partner
|
l10n_br_fiscal/models/subsequent_document.py
|
l10n_br_fiscal/models/subsequent_document.py
|
# Copyright 2018 KMEE INFORMATICA LTDA
# Gabriel Cardoso de Faria <gabriel.cardoso@kmee.com.br>
# License AGPL-3 or later (http://www.gnu.org/licenses/agpl)
#
from __future__ import division, print_function, unicode_literals
from odoo import api, fields, models
from ..constants.fiscal import (
SITUACAO_EDOC_CANCELADA,
MODELO_FISCAL_CFE,
MODELO_FISCAL_NFCE,
MODELO_FISCAL_CUPOM_FISCAL_ECF
)
SITUACAO_SUBSEQUENTE = (
('manual', 'Manualmente'),
('nota_de_cupom', 'Gerar Nota Fiscal de Cupons Fiscais'),
('nota_de_remessa', 'Gerar Nota Fiscal de Remessa'),
)
class SubsequentDocument(models.Model):
_name = 'l10n_br_fiscal.subsequent.document'
_description = 'Subsequent Document'
source_document_id = fields.Many2one(
string='Source document',
comodel_name='l10n_br_fiscal.document',
required=True,
ondelete='cascade',
)
subsequent_operation_id = fields.Many2one(
string='Subsequent Operation',
comodel_name='l10n_br_fiscal.subsequent.operation',
required=True,
)
fiscal_operation_id = fields.Many2one(
string='Related operation',
comodel_name='l10n_br_fiscal.operation',
required=True,
)
subsequent_document_id = fields.Many2one(
string='Subsequent Document',
comodel_name='l10n_br_fiscal.document',
ondelete='set null',
copy=False,
)
operation_performed = fields.Boolean(
string='Operation Performed',
compute='_compute_operation_performed',
default=False,
copy=False,
)
# def _subsequent_payment_type(self):
# return (self.operation_id.ind_forma_pagamento or
# self.source_document_id.ind_forma_pagamento)
#
# def _subsequent_payment_condition(self):
# return (self.operation_id.condicao_pagamento_id or
# self.source_document_id.condicao_pagamento_id)
def _subsequent_company(self):
return (self.fiscal_operation_id.company_id or
self.source_document_id.company_id)
def _subsequent_participant(self):
return (self.subsequent_operation_id.partner_id or
self.source_document_id.partner_id)
def _subsequent_referenced(self):
if self.subsequent_operation_id.reference_document:
return self.env.context.get(
'referenciado_ids',
self.source_document_id._prepare_referenced_subsequent()
)
return []
def generate_subsequent_document(self):
self._generate_subsequent_document()
def _generate_subsequent_document(self):
if self.operation_performed:
return self.subsequent_document_id
new_doc = self.source_document_id.copy()
new_doc.partner_id = self._subsequent_partner()
new_doc.company_id = self._subsequent_company()
new_doc.fiscal_operation_id = self.fiscal_operation_id
new_doc.document_type_id = self.subsequent_operation_id.\
operation_document_type_id.document_type_id
new_doc.document_serie_id = self.subsequent_operation_id.\
operation_document_type_id.document_serie_id
# new_doc.condicao_pagamento_id = \
# self._subsequent_payment_condition()
# new_doc.tipo_pagamento = self._subsequent_payment_type()
#
# Reference document
#
reference_ids = self._subsequent_referenced()
new_doc._document_reference(reference_ids)
new_doc._onchange_fiscal_operation_id()
new_doc.line_ids.write({'fiscal_operation_id': new_doc.fiscal_operation_id.id})
for item in new_doc.line_ids:
item._onchange_fiscal_operation_id()
item._onchange_fiscal_operation_line_id()
item._onchange_fiscal_taxes()
document = new_doc
document.action_document_confirm()
document.number = False
document.date_in_out = False
self.subsequent_document_id = document
@api.depends('subsequent_document_id.state_edoc')
def _compute_operation_performed(self):
for subsequent in self:
if not subsequent.subsequent_document_id:
continue
if subsequent.subsequent_document_id.state_edoc == \
SITUACAO_EDOC_CANCELADA:
subsequent.operation_performed = False
else:
subsequent.operation_performed = True
@api.multi
def show_subsequent_document(self):
return {
'name': 'Subsequent Document',
'type': 'ir.actions.act_window',
'target': 'current',
'views': [[False, 'form']],
'res_model': 'l10n_br_fiscal.document',
'domain': [['id', 'in', [self.subsequent_document_id.id]]],
'res_id': self.subsequent_document_id.id,
}
@api.multi
def show_source_document(self):
return {
'name': 'Source Document',
'type': 'ir.actions.act_window',
'target': 'current',
'views': [[False, 'form']],
'res_model': 'l10n_br_fiscal.document',
'domain': [['id', 'in', [self.source_document_id.id]]],
'res_id': self.source_document_id.id,
}
@api.multi
def unlink(self):
for subsequent_id in self:
if subsequent_id.operation_performed:
raise UserWarning("The document cannot be deleted: the "
"subsequent document has already been "
"generated.")
return super(SubsequentDocument, self).unlink()
@api.multi
def _confirms_document_generation(self):
""" We check if we can generate the subsequent document
:return: True: allowing generation
"""
result = False
if self.subsequent_operation_id.generation_situation in \
[x for x, y in SITUACAO_SUBSEQUENTE]:
coupon = self.source_document_id.filtered(
lambda document: document.document_type_id.code in (
MODELO_FISCAL_CFE,
MODELO_FISCAL_NFCE,
MODELO_FISCAL_CUPOM_FISCAL_ECF,))
if coupon and self.subsequent_operation_id.\
generation_situation == 'nota_de_cupom':
result = True
elif self.subsequent_operation_id.generation_situation == \
'manual' and self.env.context.get('manual', False):
result = True
elif self.subsequent_operation_id.generation_situation == \
'nota_de_remessa':
result = True
elif self.source_document_id.state_edoc == \
self.subsequent_operation_id.generation_situation:
result = True
return result
|
Python
| 0.000004
|
@@ -2112,15 +2112,11 @@
part
-icipant
+ner
(sel
|
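Decoded, this is a rename of `_subsequent_participant` to `_subsequent_partner`, which matches the existing call site `new_doc.partner_id = self._subsequent_partner()` in `_generate_subsequent_document`; before the rename, that call would have raised an AttributeError at runtime. The method after the change:

```python
def _subsequent_partner(self):
    return (self.subsequent_operation_id.partner_id or
            self.source_document_id.partner_id)
```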
04b343ec18c4708f9e873e55510e07a9305835ee
|
add on_delete
|
analytics/models.py
|
analytics/models.py
|
from django.db import models
class Report(models.Model):
""" Represents a report.
"""
SUBACCOUNT_ACTIVITY = "subaccount_activity"
TYPE_CHOICES = (
(SUBACCOUNT_ACTIVITY, "SubAccount Activity"),
)
report_type = models.CharField(max_length=80, choices=TYPE_CHOICES)
started_date = models.DateTimeField()
finished_date = models.DateTimeField(null=True)
class SubaccountActivity(models.Model):
""" Represents activity by sub-account and term
"""
report = models.ForeignKey(Report, on_delete=models.CASCADE)
term_id = models.CharField(max_length=20)
subaccount_id = models.CharField(max_length=100)
subaccount_name = models.CharField(max_length=200)
courses = models.PositiveIntegerField(default=0)
active_courses = models.PositiveIntegerField(default=0)
ind_study_courses = models.PositiveIntegerField(default=0)
active_ind_study_courses = models.PositiveIntegerField(default=0)
teachers = models.PositiveIntegerField(default=0)
unique_teachers = models.PositiveIntegerField(default=0)
students = models.PositiveIntegerField(default=0)
unique_students = models.PositiveIntegerField(default=0)
discussion_topics = models.PositiveIntegerField(default=0)
discussion_replies = models.PositiveIntegerField(default=0)
media_objects = models.PositiveIntegerField(default=0)
attachments = models.PositiveIntegerField(default=0)
assignments = models.PositiveIntegerField(default=0)
submissions = models.PositiveIntegerField(default=0)
announcements_views = models.PositiveIntegerField(default=0)
assignments_views = models.PositiveIntegerField(default=0)
collaborations_views = models.PositiveIntegerField(default=0)
conferences_views = models.PositiveIntegerField(default=0)
discussions_views = models.PositiveIntegerField(default=0)
files_views = models.PositiveIntegerField(default=0)
general_views = models.PositiveIntegerField(default=0)
grades_views = models.PositiveIntegerField(default=0)
groups_views = models.PositiveIntegerField(default=0)
modules_views = models.PositiveIntegerField(default=0)
other_views = models.PositiveIntegerField(default=0)
pages_views = models.PositiveIntegerField(default=0)
quizzes_views = models.PositiveIntegerField(default=0)
class WeeklyDataTimePeriod(models.Model):
""" Tracks period of time for a weekly report. Should be... weekly...
but it does the best it can.
"""
start_date = models.DateTimeField()
end_date = models.DateTimeField()
term = models.CharField(max_length=50, db_index=True)
class WeeklyDataDataPoint(models.Model):
time_period = models.ForeignKey(WeeklyDataTimePeriod, db_index=True)
course_id = models.CharField(max_length=100, db_index=True, null=True)
login_name = models.CharField(max_length=100, db_index=True)
key = models.CharField(max_length=500, db_index=True)
value = models.TextField(null=True)
class ManagedCurrentTerm(models.Model):
"""
Somewhat hacky - there can/should only be one.
Configured through the web front-end.
"""
start_date = models.DateField()
end_date = models.DateField()
quarter = models.CharField(max_length=50)
year = models.PositiveIntegerField()
class ManagedCourseSISIDs(models.Model):
"""
Just tracks the list of current courses. Nothing tracks past courses,
except any data that may have been stored!
"""
sis_id = models.CharField(max_length=200)
|
Python
| 0.000002
|
@@ -2724,16 +2724,114 @@
 ePeriod,
+    on_delete=models.CASCADE,
 db_inde
|
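The change supplies the `on_delete` argument that has been required for ForeignKey since Django 2.0 (older code relied on the implicit CASCADE default); without it the model would fail to load on modern Django. The resulting field on WeeklyDataDataPoint:

```python
time_period = models.ForeignKey(
    WeeklyDataTimePeriod,
    on_delete=models.CASCADE,  # drop data points together with their period
    db_index=True,
)
```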
314a43c52373f1cc8400dddcfa3335e3aadde133
|
Remove bedtools coverage iobuffer limit
|
binCov.py
|
binCov.py
|
#!/usr/bin/env python
# Copyright (c) 2016 Ryan Collins <rcollins@chgr.mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
Calculates non-duplicate primary-aligned binned coverage
of a chromosome from an input BAM file
"""
#Import libraries
import argparse
import sys
from subprocess import call
import pysam
import pybedtools
import pandas as pd
#Define exception class for invalid coverage modes
class InvalidModeError(Exception):
"""Invalid coverage mode"""
#Function to return read or fragment intervals from pysam.AlignmentFile
def filter_mappings(bam, mode='nucleotide'):
"""
Generates bed intervals from a bam for a specific chromosome corresponding
either to read coordinates or fragment coordinates
Parameters
----------
bam : pysam.AlignmentFile
Input bam
mode : str
'physical' or 'nucleotide' (default: 'physical')
Returns
------
mappings : BedTool
Read or fragment intervals (depending on mode)
"""
#Sanity check mode
if mode not in 'nucleotide physical'.split():
raise InvalidModeError('Invalid mode: ' + mode +
' (options: nucleotide, physical)')
#For nucleotide mode, return non-duplicate primary read mappings
for read in bam:
if (not any([read.is_duplicate, read.is_unmapped,
read.is_secondary, read.is_supplementary])
and all([read.reference_start>0, read.next_reference_start])):
if mode == 'nucleotide':
yield '\t'.join([read.reference_name,
str(read.reference_start),
str(read.reference_end)]) + '\n'
else:
if read.is_read1 and read.is_proper_pair:
fstart, fend = sorted([int(read.reference_start),
int(read.next_reference_start)])
if fstart < fend:
yield '\t'.join([read.reference_name,
str(fstart), str(fend)]) + '\n'
#Function to evaluate nucleotide or physical coverage
def binCov(bam, chr, binsize, mode='nucleotide', overlap=0.05,
blacklist=None, presubbed=False, oldBT=False):
"""
Generates non-duplicate, primary-aligned nucleotide or physical coverage
in regular bin sizes on a specified chromosome from a coordinate-sorted
bamfile
Parameters
----------
bam : pysam.AlignmentFile
Input bam
chr : string
Chromosome to evaluate
binsize : int
Size of bins in bp
mode : str
Evaluate 'nucleotide' or 'physical' coverage
overlap : float
Maximum tolerated blacklist overlap before excluding bin
blacklist : string
Path to blacklist BED file
presubbed : boolean
Has the bam already been subsetted to the desired chromosome?
oldBT : boolean
Are you using a version of bedtools pre-2.24.0?
Returns
------
coverage : pybedtools.BedTool
chr, start, end, coverage
"""
#Create coverage bins and convert to BedTool
maxchrpos = {d['SN']: d['LN'] for d in bam.header['SQ']}[chr]
bin_starts = range(0, maxchrpos - binsize, binsize)
bin_stops = range(binsize, maxchrpos, binsize)
bins = []
for i in range(0, len(bin_starts)-1):
bins.append([chr, bin_starts[i], bin_stops[i]])
bins = pybedtools.BedTool(bins)
#Remove bins that have at least 5% overlap with blacklist by size
if blacklist is not None:
blist = pybedtools.BedTool(blacklist)
bins_filtered = bins.intersect(blist, v=True, f=overlap)
else:
bins_filtered = bins
#Filter bam
if presubbed == True:
mappings = filter_mappings(bam, mode)
else:
mappings = filter_mappings(bam.fetch(chr), mode)
bambed = pybedtools.BedTool(mappings)
#Generate coverage & write to file
if oldBT == True:
coverage = bambed.coverage(bins_filtered, counts=True)
else:
coverage = bins_filtered.coverage(bambed, counts=True,
iobuf='4G', sorted=True)
return coverage
#Main function
def main():
#Add arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('bam', type=pysam.AlignmentFile,
help='Input bam')
parser.add_argument('chr', help='Contig to evaluate')
parser.add_argument('cov_out', help='Output bed file of raw coverage')
parser.add_argument('-n', '--norm_out', type=str,
help='Output bed file of normalized coverage')
parser.add_argument('-b', '--binsize', type=int, default=1000,
help='Bin size in bp (default: 1000)')
parser.add_argument('-m', '--mode', default='nucleotide',
choices = ['nucleotide', 'physical'],
help='Evaluate nucleotide or physical coverage '
'(default: nucleotide)')
parser.add_argument('-x', '--blacklist', type=str, default=None,
help='BED file of regions to ignore')
parser.add_argument('-p', '--presubsetted', dest='presubbed',
action='store_true', help='Boolean flag to indicate'
' if input bam is already subsetted to desired chr',
default=False)
parser.add_argument('-v', '--overlap', nargs=1, type=float, default=0.05,
help='Maximum tolerated blacklist overlap before '
'excluding bin')
parser.add_argument('--oldBT', dest='oldBT', default=False,
action='store_true', help='Boolean flag to indicate'
' if you are using a bedtools version pre-2.24.0')
parser.set_defaults(presubbed=False)
args = parser.parse_args()
#Get coverage & write out
coverage = binCov(args.bam, args.chr, args.binsize,
args.mode, args.overlap, args.blacklist,
args.presubbed, args.oldBT)
coverage.saveas(args.cov_out)
call('sort -Vk1,1 -k2,2n -o ' + args.cov_out + ' ' + args.cov_out,
shell=True)
#Normalize coverage (if optioned) & write out
if args.norm_out is not None:
ncoverage = coverage.to_dataframe(names = 'chr start end cov'.split())
medcov = ncoverage.loc[ncoverage['cov'] > 0, 'cov'].median()
ncoverage['cov'] = ncoverage['cov'] / medcov
ncoverage.to_csv(args.norm_out, sep='\t', index=False, header=False)
call(' '.join(['sort -Vk1,1 -k2,2n -o', args.norm_out,
args.norm_out]), shell=True)
#Main block
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -3942,31 +3942,24 @@
 ate 
-coverage & write to fil
+& return coverag
 e
@@ -4117,62 +4117,8 @@
 rue,
-
-        iobuf='4G',
 sor
|
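Decoded, the change removes the `iobuf='4G'` keyword from the pybedtools coverage call (and rewords the comment above it to say the function returns coverage rather than writing it), letting bedtools fall back to its default I/O buffer size. The call after the change:

```python
coverage = bins_filtered.coverage(bambed, counts=True, sorted=True)
```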
4a179d877ec9499448da4de05fdd03a0bbcf2300
|
Change meta_updated instead of meta_created at update
|
collectors/base/record.py
|
collectors/base/record.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import uuid
import scrapy
import logging
from datetime import datetime
from abc import abstractmethod
from . import fields
logger = logging.getLogger(__name__)
# Module API
class Record(scrapy.Item):
# Public
def __repr__(self):
template = '<%s: %s>'
text = template % (self.table.upper(), self.get(self.__primary_key))
return text
@property
@abstractmethod
def table(self):
"""Source name.
"""
pass # pragma: no cover
@classmethod
def create(cls, source, data):
# Init dict
self = cls()
# Get primary_key
self.__primary_key = None
for key, field in self.fields.items():
if field.primary_key:
self.__primary_key = key
break
if self.__primary_key is None:
raise TypeError('Record %s requires primary key' % cls)
if not isinstance(self.fields[self.__primary_key], fields.Text):
raise TypeError('Record %s requires text primary key' % cls)
# Get column types
self.__column_types = {}
for key, field in self.fields.items():
self.__column_types[key] = field.column_type
# Add metadata
ident = uuid.uuid1().hex
timestamp = datetime.utcnow()
self.fields['meta_id'] = fields.Text()
self.fields['meta_source'] = fields.Text()
self.fields['meta_created'] = fields.Datetime()
self.fields['meta_updated'] = fields.Datetime()
self['meta_id'] = ident
self['meta_source'] = source
self['meta_created'] = timestamp
self['meta_updated'] = timestamp
# Add data
undefined = []
for key, value in data.items():
field = self.fields.get(key)
if field is None:
undefined.append(key)
continue
if value is None:
continue
try:
value = field.parse(value)
except Exception as exception:
message = 'Parsing error: %s=%s: %s'
message = message % (key, value, exception)
logger.exception(message)
continue
self[key] = value
for key in undefined:
logger.warning('Undefined field: %s - %s' % (self, key))
return self
def write(self, conf, conn):
"""Write record to warehouse.
Args:
conf (dict): config dictionary
conn (dict): connections dictionary
"""
if self.table not in conn['warehouse'].tables:
if conf['ENV'] in ['development', 'testing']:
table = conn['warehouse'].create_table(
self.table,
primary_id=self.__primary_key,
primary_type='String')
table = conn['warehouse'][self.table]
action = 'created'
if table.find_one(**{self.__primary_key: self[self.__primary_key]}):
action = 'updated'
for key in ['meta_id', 'meta_updated']:
del self[key]
try:
ensure_fields = False
if conf['ENV'] in ['development', 'testing']:
ensure_fields = True
table.upsert(
self, [self.__primary_key],
ensure=ensure_fields, types=self.__column_types)
except Exception as exception:
logger.exception('Saving error: %s: %s', self, repr(exception))
else:
logger.debug('Record - %s: %s - %s fields', action, self, len(self))
|
Python
| 0
|
@@ -3256,27 +3256,27 @@
 _id', 'meta_
-upd
+cre
 ated']:
|
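Decoded, the fix swaps which metadata key is dropped before the warehouse upsert: deleting `meta_created` (instead of `meta_updated`) means an update leaves the row's stored creation timestamp untouched while `meta_updated` is written with the new value, matching the commit subject. After the change:

```python
# on update, keep the stored meta_created and refresh meta_updated
for key in ['meta_id', 'meta_created']:
    del self[key]
```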
c6160abcb4fa716ed595e2d2e420656fef0d66a7
|
Allow override base port when running tests locally
|
nginx.bzl
|
nginx.bzl
|
# Copyright (C) Endpoints Server Proxy Authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
################################################################################
#
# Skylark macros for nginx tests.
load("//:perl.bzl", "perl_test")
def nginx_test(name, nginx, data=None, env=None, config=None, **kwargs):
if nginx == None or len(nginx) == 0:
fail("'nginx' parameter must be a non-empty string (target).")
if data == None:
data = []
data += [ nginx ]
if env == None:
env = {}
if config != None:
data += [ config ]
c = Label(config)
env["TEST_CONFIG"] = "server_config " + "${TEST_SRCDIR}/" + c.package + "/" + c.name + ";"
name = name + '_' + c.name.split("/")[-1].split(".")[0]
# Count existing rules in the BUILD file and assign base port using
# Each rule can use 10 ports in its range
# Rules generated from config_list get separate ranges
port = 9000 + len(native.existing_rules().values()) * 10
l = Label(nginx)
env["TEST_NGINX_BINARY"] = "${TEST_SRCDIR}/" + l.package + "/" + l.name
env["TEST_PORT"] = str(port)
perl_test(name=name, data=data, env=env, **kwargs)
def nginx_suite(tests, deps, nginx, size="small", config_list=[], data=None, tags=[],
timeout="short", env=None):
for test in tests:
if not config_list:
nginx_test(
name = test.split(".")[0],
size = size,
timeout = timeout,
srcs = [test],
deps = deps,
data = data,
nginx = nginx,
config = None,
tags = tags,
env = env,
)
else:
for config in config_list:
nginx_test(
name = test.split(".")[0],
size = size,
timeout = timeout,
srcs = [test],
deps = deps,
data = data,
nginx = nginx,
config = config,
tags = tags,
env = env,
)
|
Python
| 0.000008
|
@@ -2320,17 +2320,33 @@
 ] = 
-str(
+"${TEST_PORT:-%s}" % 
 port
-)
 
 p
|
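Decoded, the hard-coded port string becomes a POSIX shell default-value expansion, so an exported TEST_PORT overrides the computed base port when running tests locally (this presumably relies on the perl_test wrapper expanding its env values through a shell):

```python
env["TEST_PORT"] = "${TEST_PORT:-%s}" % port
```

`${VAR:-default}` expands to `$VAR` when it is set and non-empty, and to the computed default otherwise.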
471c17f31e64effb5a55df1c91a96c4d350ca220
|
remove wilder_scheiss
|
nodedb.py
|
nodedb.py
|
import json
from node import Node
from link import Link
from bs4 import BeautifulSoup
from urllib.request import urlopen
class NodeDB:
def __init__(self):
self._nodes = []
self._links = []
# fetch list of links
def get_links(self):
return [self.map_link(x) for x in self._links]
# fetch list of nodes
def get_nodes(self):
return self._nodes
def add_link(self, a, b, q):
l = tuple(sorted((a,b)))
for link in self._links:
if l == link[0]:
if link[1] != str(q):
link[1] = max(float(link[1]), float(q))
return
self._links.append([l,str(q)])
def maybe_node_by_mac(self, macs):
for node in self._nodes:
for mac in macs:
if mac.lower() in node.macs:
return node
raise
# import_batman(list(fileinput.input(options['batmanjson'])))
def import_batman(self, lines):
for line in lines:
x = json.loads(line)
if 'of' in x:
try:
node = self.maybe_node_by_mac((x['of'], x['secondary']))
except:
node = Node()
node.online = True
self._nodes.append(node)
node.add_mac(x['of'])
node.add_mac(x['secondary'])
for line in lines:
x = json.loads(line)
if 'router' in x:
try:
node = self.maybe_node_by_mac((x['router'], ))
except:
node = Node()
node.online = True
node.add_mac(x['router'])
self._nodes.append(node)
# If it's a TT link and the MAC is very similar
# consider this MAC as one of the routers
# MACs
if 'gateway' in x and x['label'] == "TT":
router = list(int(i, 16) for i in x['router'].split(":"))
gateway = list(int(i, 16) for i in x['gateway'].split(":"))
# first byte must only differ in bit 2
if router[0] == gateway[0] | 2:
# count different bytes
a = [x for x in zip(router[1:], gateway[1:]) if x[0] != x[1]]
# no more than two additional bytes must differ
if len(a) <= 2:
delta = 0
if len(a) > 0:
delta = sum(abs(i[0] -i[1]) for i in a)
if delta < 8:
# This TT link looks like a mac of the router!
node.add_mac(x['gateway'])
# skip processing as regular link
continue
try:
if 'gateway' in x:
x['neighbor'] = x['gateway']
node = self.maybe_node_by_mac((x['neighbor'], ))
except:
node = Node()
node.online = True
if x['label'] == 'TT':
node.group = 3
node.add_mac(x['neighbor'])
self._nodes.append(node)
for line in lines:
x = json.loads(line)
if 'router' in x:
try:
if 'gateway' in x:
x['neighbor'] = x['gateway']
router = self.maybe_node_by_mac((x['router'], ))
neighbor = self.maybe_node_by_mac((x['neighbor'], ))
except:
continue
a = self._nodes.index(router)
b = self._nodes.index(neighbor)
if a != b:
self.add_link(a, b, x['label'])
for line in lines:
x = json.loads(line)
if 'primary' in x:
try:
node = self.maybe_node_by_mac((x['primary'], ))
except:
continue
node.id = x['primary']
def import_aliases(self, aliases):
for mac, alias in aliases.items():
try:
node = self.maybe_node_by_mac((mac, ))
except:
continue
node.name = alias['name']
if 'group' in alias:
node.group = alias['group']
# list of macs
# if options['gateway']:
# mark_gateways(options['gateway'])
def mark_gateways(self, gateways):
for gateway in gateways:
try:
node = self.maybe_node_by_mac((gateway, ))
except:
continue
node.group = 2
def map_link(self, pair):
distance = 80
strength = 0.2
if any(filter(lambda x: self._nodes[x].group == 3, pair[0])):
distance = 10
strength = 1
link = Link()
link.pair = pair[0]
link.distance = distance
link.strength = strength
link.quality = pair[1]
return link
def import_wikigps(self, url):
def fetch_wikitable(url):
f = urlopen(url)
soup = BeautifulSoup(f)
table = soup.find_all("table")[0]
rows = table.find_all("tr")
headers = []
data = []
def maybe_strip(x):
if isinstance(x.string, str):
return x.string.strip()
else:
return ""
for row in rows:
tds = list([maybe_strip(x) for x in row.find_all("td")])
ths = list([maybe_strip(x) for x in row.find_all("th")])
if any(tds):
data.append(tds)
if any(ths):
headers = ths
nodes = []
for d in data:
nodes.append(dict(zip(headers, d)))
return nodes
nodes = fetch_wikitable(url)
for node in nodes:
if not ('MAC' in node and 'GPS' in node):
continue
macs = [s for s in [s.strip() for s in node['MAC'].split(',')] if s]
gps = [s for s in [s.strip() for s in node['GPS'].split(',')] if s]
zipped = zip(macs, gps)
if 'Nick' in node:
names = [s for s in [s.strip() for s in node['Nick'].split(',')] if s]
if names:
zipped = zip(macs, gps, names)
for pair in zipped:
try:
node = self.maybe_node_by_mac((pair[0], ))
except:
node = Node()
node.add_mac(pair[0])
self._nodes.append(node)
if len(pair) > 2:
if pair[2]:
node.name = pair[2]
node.gps = pair[1]
def find_link(self, i):
for link in self._links:
if i in link[0]:
return link
def wilder_scheiss(self):
for node in self._nodes:
if node.group == 3 and node.gps:
i = self._nodes.index(node)
link = self.find_link(i)
if link:
linklist = list(link)
linklist.remove(i)
j = linklist[0]
for mac in node.macs:
self._nodes[j].add_mac(mac)
self._nodes[j].gps = node.gps
node.gps = None
|
Python
| 0.999985
|
@@ -5757,515 +5757,4 @@
 [1]
-
-    def find_link(self, i):
-        for link in self._links:
-            if i in link[0]:
-                return link
-
-    def wilder_scheiss(self):
-        for node in self._nodes:
-            if node.group == 3 and node.gps:
-                i = self._nodes.index(node)
-                link = self.find_link(i)
-                if link:
-                    linklist = list(link)
-                    linklist.remove(i)
-                    j = linklist[0]
-
-                    for mac in node.macs:
-                        self._nodes[j].add_mac(mac)
-
-                    self._nodes[j].gps = node.gps
-
-                    node.gps = None
-
|
f130dcd0e82e23a5c3d060f35372d6ccb6eb7c87
|
add rack to formatter
|
clusto_query/scripts/main.py
|
clusto_query/scripts/main.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
import itertools
import logging
import optparse
import sys
import string
import clusto
import clusto.script_helper
from clusto_query.query.objects import RFC1918
from clusto_query.lexer import lex
from clusto_query.parser import parse_query
from clusto_query.context import Context
__author__ = "James Brown <jbrown@uber.com>"
version_info = (0, 4, 0)
__version__ = ".".join(map(str, version_info))
long_help = """
clusto-query version %(version)s by %(author)s
Perform arbitrary boolean queries against clusto
Infix expression operators are the following:
= equality
!= inequality
<= le
< lt
> gt
>= ge
^ startswith
, endswith
contains substring
in_cidr ipv4 cidr comparisons
Additionally, there are boolean operators and, or, and - (set subtraction)
some keywords (pool, datacenter, clusto_type, and name) can be directly queried
anything that's an "Attribute" must be prefixed with attr
Here's an example query:
clusto_type = server and
(attr system.cpucount >= 15 or system.memory >= 32760)
        and datacenter = peak-mpl1
This query fetches all servers with more than 16 cores or 32768 MB of RAM
located in the "peak-mpl1" datacenter. Neato!
Note that I put in "15" instead of "16" intentionally; clusto's cpu counting
is off-by-one. That was fun. Let's go again:
clusto_type contains "server" and
(attr nagios.disabled = 1 - hostname endswith peak2)
This one finds all servers that are disabled in nagios and do not have a
hostname that ends in peak2.
Quoting and parens work the way you expect them to.
""" % {'version': __version__, 'author': __author__}
log = None
class HostFormatter(object):
option = None
default = False
def __init__(self, host, context):
self.host = host
self.context = context
def name(self):
return self.host.name
def hostname(self):
return self.host.hostname
def role(self):
return self.context.role_for_host(self.host)
def internal_ips(self):
return ",".join(ip for ip in self.host.get_ips() if ip in RFC1918)
def public_ips(self):
return ",".join(ip for ip in self.host.get_ips() if ip not in RFC1918)
def __getitem__(self, item):
if "." in item:
key, subkey = item.split(".")
return ",".join(map(str, (k.value for k in self.host.attrs(key=key, subkey=subkey))))
return getattr(self, item)()
class EasierTemplate(string.Template):
# $ is challenging in shell scripts
delimiter = "%"
idpattern = r'[a-z_][a-zA-Z0-9_.-]*'
def main():
global log
parser = optparse.OptionParser(usage="%prog [options] clusto_query_string", version=__version__)
parser.add_option('-v', '--verbose', action='count', default=0)
parser.add_option('-f', '--formatter', default=r"%name",
help='Formatter to use for printing, default "%default"')
parser.add_option('--list-attributes', default=False, action='store_true',
help='Print all the queryable attributes')
parser.add_option('--clusto-config', default='/etc/clusto/clusto.conf',
help='Path to clusto config file (default %default)')
parser.add_option('--man', action="store_true", help="Show more detailed help")
opts, args = parser.parse_args()
level = logging.WARNING
if opts.verbose == 1:
level = logging.INFO
elif opts.verbose > 1:
level = logging.DEBUG
if opts.man:
print long_help
return 0
conf = clusto.script_helper.load_config(opts.clusto_config)
clusto.connect(conf)
# clusto.connect screws up logging for all of its consumers, so we need
# to init logging *after* calling clusto.connect
log = logging.getLogger("clusto-query-logger")
log.propagate = 0
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s"))
handler.setLevel(level)
log.addHandler(handler)
log.setLevel(level)
if opts.list_attributes:
all_attrs = [it.attrs() for it in clusto.get_entities()]
print "\n".join(sorted(set([".".join(map(str, (at.key, at.subkey)))
for at in itertools.chain.from_iterable(all_attrs)])))
return 0
if not args:
parser.error("No query provided")
raw_query = " ".join(args)
log.info("Going to parse %r", raw_query)
lexed_query = lex(raw_query)
log.info("Lexed into %r", lexed_query)
parsed_query, unparsed = parse_query(lexed_query)
log.info("Parsed into %r", parsed_query)
if unparsed:
log.warning("Unparsed content: %r", unparsed)
return 1
# fetch all the hosts
format_template = EasierTemplate(opts.formatter)
context = Context(clusto)
for result_key in sorted(parsed_query.run(context.entity_map.keys(), context)):
host = context.entity_map[result_key]
print format_template.substitute(HostFormatter(host, context))
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0
|
@@ -2295,32 +2295,209 @@
 ot in RFC1918)
 
+    def rack(self):
+        return ','.join(
+            p.name for p
+            in self.host.parents()
+            if isinstance(p, clusto.drivers.racks.BasicRack)
+        )
+
 def __getite
|
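With `rack()` added, the `%`-delimited `EasierTemplate` can reference `%rack` like any other field, because `HostFormatter.__getitem__` dispatches bare names to methods. A minimal, self-contained sketch of that dispatch using a stub in place of `HostFormatter` (the stub and its rack name are hypothetical):

```python
import string

class EasierTemplate(string.Template):
    delimiter = "%"                      # $ is awkward in shell scripts
    idpattern = r'[a-z_][a-zA-Z0-9_.-]*'

class StubFormatter:
    """Stand-in for HostFormatter: bare field names resolve to methods."""
    def name(self):
        return "web001"
    def rack(self):
        return "rack-a12"                # hypothetical rack name
    def __getitem__(self, item):
        return getattr(self, item)()

print(EasierTemplate("%name %rack").substitute(StubFormatter()))
# -> web001 rack-a12
```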
cb216d82508248c531730c7bdc1bd408b7d7f792
|
fix debug messages
|
ckanext/ldap/plugin.py
|
ckanext/ldap/plugin.py
|
import logging
import uuid
import ldap
from pylons import session
import ckan.plugins as p
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic.schema as schema
t = p.toolkit
log = logging.getLogger(__name__)
def _no_permissions(context, msg):
user = context['user']
return {'success': False, 'msg': msg.format(user=user)}
@t.auth_sysadmins_check
def user_create(context, data_dict):
msg = p.toolkit._('Users cannot be created.')
return _no_permissions(context, msg)
@t.auth_sysadmins_check
def user_update(context, data_dict):
msg = p.toolkit._('Users cannot be edited.')
return _no_permissions(context, msg)
@t.auth_sysadmins_check
def user_reset(context, data_dict):
msg = p.toolkit._('Users cannot reset passwords.')
return _no_permissions(context, msg)
@t.auth_sysadmins_check
def request_reset(context, data_dict):
msg = p.toolkit._('Users cannot reset passwords.')
return _no_permissions(context, msg)
class LdapPlugin(p.SingletonPlugin):
p.implements(p.IAuthenticator, inherit=True)
p.implements(p.IAuthFunctions, inherit=True)
p.implements(p.IConfigurable)
p.implements(p.IConfigurer)
def update_config(self, config):
t.add_template_directory(config, 'templates')
def configure(self, config):
self.ldap_server = config.get('ckanext_ldap.server_url')
self.base_dn = config.get('ckanext_ldap.base_dn')
self.search_attr = config.get('ckanext_ldap.search_attr')
self.user_attr = config.get('ckanext_ldap.user_attr')
self.admin_attr = config.get('ckanext_ldap.admin_attr')
self.no_auth_message = config.get('ckanext_ldap.no_auth_message')
self.force_lower_username = t.asbool(
config.get('ckanext_ldap.force_lower_username', False)
)
self.debug = t.asbool(
config.get('ckanext_ldap.debug', False)
)
self.allow_anon_access = t.asbool(
config.get('ckanext_ldap.allow_anon_access'))
def make_password(self):
# create a hard to guess password
out = ''
for n in xrange(8):
out += str(uuid.uuid4())
return out
def check_ldap(self, password, ldap_user):
user_dn = 'uid=%s,%s' % (ldap_user, self.base_dn)
con = ldap.initialize(self.ldap_server)
try:
con.simple_bind_s(user_dn, password)
except (ldap.INVALID_CREDENTIALS, ldap.NO_SUCH_OBJECT):
if self.debug:
log.info('failed login for username `%s` '
'incorrect password or username')
msg = 'Sorry, your username or password was entered incorrectly.'
return False, msg
except Exception, e:
log.info('failed login for username `%s`\n' + str(e))
msg = ('Sorry, a problem occurred with your account '
'please contact the administrator.')
return False, msg
if self.debug:
log.info('successful login for username `%s`')
filter = '(uid=%s)' % ldap_user
attr = [self.search_attr]
results = con.search_s(self.base_dn, ldap.SCOPE_SUBTREE, filter, attr)
attrs = results[0][1][self.search_attr]
if self.admin_attr in attrs:
sysadmin = True
elif self.user_attr in attrs:
sysadmin = False
else:
msg = self.no_auth_message or \
'Sorry but your account is not authorised to ' + \
'access this CKAN system.'
return False, msg
# get email and full name for user
details = con.search_s(
self.base_dn, ldap.SCOPE_SUBTREE, filter,
['mail', 'displayName']
)[0][1]
email = details['mail'][0]
fullname = details['displayName'][0]
return True, {
'sysadmin': sysadmin,
'email': email,
'fullname': fullname,
}
def ldap(self, password, ldap_user):
if self.force_lower_username:
ldap_user = ldap_user.lower()
result, msg = self.check_ldap(password, ldap_user)
if not result:
return False, msg
userobj = model.User.get(ldap_user)
if userobj:
if userobj.sysadmin != msg.get('sysadmin'):
userobj.sysadmin = msg.get('sysadmin')
model.Session.add(userobj)
model.Session.commit()
else:
# Create the user
data_dict = {
'password': self.make_password(),
'name': ldap_user,
'email': msg.get('email'),
'fullname': msg.get('fullname'),
'sysadmin': msg.get('sysadmin'),
}
# Update the user schema to allow user creation
user_schema = schema.default_user_schema()
user_schema['email'] = []
context = {'schema': user_schema, 'ignore_auth': True}
p.toolkit.get_action('user_create')(context, data_dict)
session['ldap_user'] = ldap_user
session.save()
return True, None
def login(self):
username = t.request.POST.get('login')
password = t.request.POST.get('password')
if password and username:
result, msg = self.ldap(password, username)
if result:
h.redirect_to(controller='user', action='dashboard')
else:
h.flash_error(msg)
def logout(self):
session['ldap_user'] = None
session.delete()
def identify(self):
ldap_user = session.get('ldap_user')
c = t.c
if ldap_user:
c.userobj = model.User.get(ldap_user)
c.user = ldap_user
elif not self.allow_anon_access:
if t.request.environ['PATH_INFO'] != '/user/login':
t.redirect_to(controller='user', action='login')
def get_auth_functions(self):
# we need to prevent some actions being authorized.
return {
'user_create': user_create,
'user_update': user_update,
'user_reset': user_reset,
'request_reset': request_reset,
}
|
Python
| 0.000044
|
@@ -2609,16 +2609,28 @@
 sername'
+ % ldap_user
 )
@@ -2773,32 +2773,33 @@
 log.info(
+(
 'failed login fo
@@ -2816,16 +2816,29 @@
 `%s`\n'
+ % ldap_user)
  + str(e
@@ -3072,16 +3072,28 @@
 me `%s`'
+ % ldap_user
 )
|
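Decoded, the fix supplies the `% ldap_user` argument each format string was missing, so the messages log the actual username instead of a literal `%s` (and parenthesizes the formatted string before `str(e)` is concatenated). Since these are logger calls, lazy `%`-style arguments would be the idiomatic alternative (a variation, not what the commit does):

```python
log.info('failed login for username `%s`: incorrect password or username',
         ldap_user)
log.info('failed login for username `%s`: %s', ldap_user, e)
```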
e31ee6294121de6795697f23a40b579442d6ca70
|
Add full url path to prerender logging statements
|
common/helpers/caching.py
|
common/helpers/caching.py
|
from civictechprojects.sitemaps import all_sitemap_paths
from common.helpers.constants import FrontEndSection
from common.helpers.front_end import section_url
from django_seo_js import settings
from django_seo_js.helpers import update_cache_for_url, request_should_be_ignored
from django_seo_js.backends import SEOBackendBase, SelectedBackend
from django_seo_js.backends.base import RequestsBasedBackend
import re
import logging
logger = logging.getLogger(__name__)
def update_cached_project_url(project_id):
update_cached_url(section_url(FrontEndSection.AboutProject, {'id': project_id}))
# Update url cached with our 3rd party prerender service
def update_cached_url(url):
update_cache_for_url(url)
def is_prerenderable_request(request):
full_path = request.get_full_path()
if not request_should_be_ignored(request):
# Only prerender urls that are listed in the sitemap
for url in all_sitemap_paths:
if url in full_path:
print('Can prerender ' + full_path)
return True
else:
return False
class DebugUserAgentMiddleware(SelectedBackend):
def __init__(self, *args, **kwargs):
super(DebugUserAgentMiddleware, self).__init__(*args, **kwargs)
regex_str = "|".join(settings.USER_AGENTS)
regex_str = ".*?(%s)" % regex_str
self.USER_AGENT_REGEX = re.compile(regex_str, re.IGNORECASE)
def process_request(self, request):
if not settings.ENABLED:
print('Prerender: settings.ENABLED False')
return
if not is_prerenderable_request(request):
print('Prerender: do not prerender ' + request.path)
return
if "HTTP_USER_AGENT" not in request.META:
print('Prerender: HTTP_USER_AGENT not in request.META')
return
if not self.USER_AGENT_REGEX.match(request.META["HTTP_USER_AGENT"]):
print('Prerender: User agent "{agent}" not in list'.format(agent=request.META["HTTP_USER_AGENT"]))
return
url = self.backend.build_absolute_uri(request)
try:
return self.backend.get_response_for_url(url)
except Exception as e:
logger.exception(e)
class DebugPrerenderIO(SEOBackendBase, RequestsBasedBackend):
"""Implements the backend for prerender.io"""
BASE_URL = "https://service.prerender.io/"
RECACHE_URL = "https://api.prerender.io/recache"
def __init__(self, *args, **kwargs):
super(SEOBackendBase, self).__init__(*args, **kwargs)
self.token = self._get_token()
def _get_token(self):
if settings.PRERENDER_TOKEN is None:
raise ValueError("Missing SEO_JS_PRERENDER_TOKEN in settings.")
return settings.PRERENDER_TOKEN
def get_response_for_url(self, url):
"""
Accepts a fully-qualified url.
Returns an HttpResponse, passing through all headers and the status code.
"""
if not url or "//" not in url:
raise ValueError("Missing or invalid url: %s" % url)
render_url = self.BASE_URL + url
headers = {
'X-Prerender-Token': self.token,
}
r = self.session.get(render_url, headers=headers, allow_redirects=False)
assert r.status_code < 500
return self.build_django_response_from_requests_response(r)
def update_url(self, url=None, regex=None):
"""
Accepts a fully-qualified url, or regex.
Returns True if successful, False if not successful.
"""
if not url and not regex:
raise ValueError("Neither a url or regex was provided to update_url.")
headers = {
'Content-Type': 'application/json',
'Accept': '*/*',
'Host': 'api-prerender.io'
}
data = {
'prerenderToken': settings.PRERENDER_TOKEN
}
if url:
data["url"] = url
if regex:
data["regex"] = regex
headers['Content-Length'] = str(len(str(data)))
r = self.session.post(self.RECACHE_URL, headers=headers, data=data)
return r.status_code < 500
|
Python
| 0
|
@@ -971,60 +971,8 @@
 th:
-                print('Can prerender ' + full_path)
@@ -1610,20 +1610,31 @@
 request.
+get_full_
 path
+()
 )
@@ -1985,32 +1985,110 @@
 return
 
+        print('Prerender: Retrieving prerendered ' + request.get_full_path())
 url = se
|
cf2f5b8deb3c93aca80a1067b4ea4190058b188d
|
Document view.
|
annotation/views.py
|
annotation/views.py
|
# Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from iso8601 import parse_date
from iso8601.iso8601 import UTC
from pyramid.view import view_config
logger = logging.getLogger(__package__)
@view_config(route_name='trackers', renderer='json')
def trackers(request):
cur = request.db.cursor()
return {'trackers': fetchTrackers(cur, request.user)}
def fetchTrackers(cur, username):
data = []
cur.execute("SELECT device_info_serial as id FROM gps.uva_device JOIN gps.uva_access_device USING (device_info_serial) WHERE username=%s ORDER BY device_info_serial", (username,))
for row in cur:
row = dict(row)
data.append(row)
return data
def fetchAcceleration(cur, username, trackerId, start, end, freq=20.0):
accels = {}
sql1 = 'SELECT date_time, index, (x_acceleration-x_o)/x_s x_acceleration, '
sql1 += '(y_acceleration-y_o)/y_s y_acceleration, (z_acceleration-z_o)/z_s z_acceleration '
sql1 += 'FROM gps.uva_acceleration101 '
sql1 += 'JOIN gps.uva_device USING (device_info_serial) '
sql1 += 'JOIN gps.uva_access_device USING (device_info_serial)'
sql1 += 'WHERE device_info_serial=%s and date_time BETWEEN %s AND %s AND username=%s '
sql1 += 'ORDER BY date_time, index'
cur.execute(sql1, (trackerId, start, end, username,))
for row in cur:
y = row['date_time']
e = y.replace(tzinfo=UTC)
z = e.isoformat()
row['date_time'] = z
if row['date_time'] not in accels:
accels[row['date_time']] = []
try:
accels[row['date_time']].append({'time': int(row['index'])/freq, # use 20Hz as freq
"xa": row["x_acceleration"],
"ya": row["y_acceleration"],
"za": row["z_acceleration"]})
except ValueError:
continue
return accels
def fetchTrack(cur, username, trackerId, start, end):
sql2 = 'SELECT date_time, s.latitude, s.longitude, s.altitude, s.pressure, '
sql2 += 's.temperature, s.gps_fixtime, s.positiondop, '
sql2 += 's.h_accuracy, s.v_accuracy, s.x_speed, s.y_speed, s.z_speed,s.speed_accuracy, '
sql2 += 's.vnorth, s.veast, s.vdown, s.speed, s.speed3d, s.direction, '
sql2 += 't.speed as tspeed, t.direction as tdirection '
sql2 += 'FROM gps.uva_tracking_speed s '
sql2 += 'JOIN gps.get_uvagps_track_speed(%s, %s, %s) t USING (device_info_serial, date_time) '
sql2 += 'JOIN gps.uva_access_device USING (device_info_serial) '
sql2 += 'WHERE device_info_serial = %s AND '
sql2 += 'date_time BETWEEN %s AND %s AND userflag != %s AND username=%s '
sql2 += 'ORDER BY date_time'
cur.execute(sql2, (trackerId, start, end, trackerId, start, end, "1", username,))
return cur
def fetch(cur, username, trackerId, start, end):
accels = fetchAcceleration(cur, username, trackerId, start, end)
rows = fetchTrack(cur, username, trackerId, start, end)
data = []
for row in rows:
row = dict(row)
row['date_time'] = row['date_time'].replace(tzinfo=UTC).isoformat()
if row['date_time'] in accels:
row['accels'] = accels[row['date_time']]
try:
row['latitude'] = round(float(row['latitude']), 4)
row['longitude'] = round(float(row['longitude']), 4)
for x in ['altitude', 'temperature',
"gps_fixtime", "positiondop",
"h_accuracy", "v_accuracy",
"x_speed", "y_speed", "z_speed",
"speed_accuracy",
"vnorth", "veast", "vdown",
"speed", "speed3d", "direction",
"tspeed", "tdirection",
]:
if row[x] is not None:
row[x] = float(row[x])
data.append(row)
except ValueError:
continue
except TypeError:
continue
return data
@view_config(route_name='tracker', renderer='json')
def tracker(request):
cur = request.db.cursor()
trackerId = int(request.matchdict['id'])
start = parse_date(request.matchdict['start']).isoformat()
end = parse_date(request.matchdict['end']).isoformat()
return fetch(cur, request.user, trackerId, start, end)
|
Python
| 0
|
@@ -4655,32 +4655,100 @@
 acker(request):
+    """Returns gps+accel data of tracker in a certain time range"""
 cur = reques
|
628a98f034360bbe48d5a5a7bdaad90165251755
|
update application
|
Controller/exp_monitorThroughput.py
|
Controller/exp_monitorThroughput.py
|
# Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# HONE application
# monitor the throughputs across hosts of application trafclient
from hone_lib import *
from math import *
import time
K = 0.2
def query():
q = (Select(['app','srcHost', 'srcIP','srcPort','dstIP','dstPort','BytesSentOut','StartTimeSecs','ElapsedSecs','StartTimeMicroSecs','ElapsedMicroSecs']) *
From('HostConnection') *
Where([('app', '==', 'trafclient')]) *
Every(2000))
return q
''' tpData[(srcIP,srcPort,dstIP,dstPort)] = (lastTimestamp, lastAccumulativeBytesSent, lastThroughput) '''
def CalThroughput(newData, oldData):
(hostId, tpData) = oldData
openConn = []
for conn in newData:
[app, newHostId, srcIP, srcPort, dstIP, dstPort, newAccumBS, startSecs, elapsedSecs, startMicrosecs, elapsedMicrosecs] = conn
if hostId is None:
hostId = newHostId
newTS = startSecs + elapsedSecs+startMicrosecs / 1000000.0 + elapsedMicrosecs / 1000000.0
thetuple = (srcIP,srcPort,dstIP,dstPort)
if thetuple in tpData:
(lastTS, lastAccumBS, lastTP) = tpData[thetuple]
if newTS > lastTS:
newTP = float((newAccumBS-lastAccumBS))/(newTS-lastTS)
else:
newTP = 0
else:
newTP = float(newAccumBS)/newTS
tpData[thetuple] = (newTS, newAccumBS, newTP)
openConn.append(thetuple)
closeConn = []
for key in tpData.iterkeys():
if not key in openConn:
closeConn.append(key)
for key in closeConn:
del tpData[key]
return [hostId, tpData]
def LocalSum(data):
(hostId, tpData) = data
sumTP = []
avgTS = []
for (ts, accumBS, tp) in tpData.itervalues():
# tp unit is bytes per second now
sumTP.append(tp)
avgTS.append(ts)
if avgTS:
return [hostId, sum(avgTS)/len(avgTS), sum(sumTP) * 8.0 / 1000.0] # now throughput change to Kbps
def EWMA(newData, lastData):
(newHostId, newTime, newTP) = newData
(lastHostId, lastTime, lastRate) = lastData
timeDiff = newTime - lastTime
if timeDiff > 0:
newRate = (1.0 - exp(-timeDiff / K)) * newTP + exp(-timeDiff / K) * lastRate
else:
newRate = 0.0
return [newHostId, newTime, newRate]
def MonitorThroughput(x):
sumRate = []
for (hostId, timestamp, rate) in x:
sumRate.append(rate)
print 'hostID:{0}. timestamp:{1}. data:{2}'.format(hostId, timestamp, rate)
print 'aggregate {0}'.format(sum(sumRate))
print '******************************************'
def main():
return (query() >>
ReduceStreamSet(CalThroughput, [None, {}]) >>
MapStreamSet(LocalSum) >>
ReduceStreamSet(EWMA, [None, time.time(), 100]) >>
MergeHosts() >>
MapStream(MonitorThroughput))
|
Python
| 0.000001
|
@@ -573,17 +573,17 @@
 Every(
-2
+1
 000))
@@ -2433,16 +2433,66 @@
 put(x):
+    outputFile = open('logs/throughput.txt', 'a')
 sumR
@@ -2496,24 +2496,24 @@
 umRate = []
-
 for (hos
@@ -2587,41 +2587,37 @@
 int 
-'hostID:{0}. timestamp:{1}. data:
+>> outputFile, 'host {0} {1} 
 {2}'
@@ -2658,16 +2658,31 @@
 print
+ >> outputFile,
 'aggreg
@@ -2725,52 +2725,52 @@
 int 
-'******************************************'
+>> outputFile, 'done'
+    outputFile.close()
 
 
 de
|
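Decoded, the commit shortens the query period from 2000 to 1000 and redirects MonitorThroughput's output to logs/throughput.txt via Python 2's `print >> file` chevron syntax. For reference, a Python 3 sketch of the same logic (names adapted; the original is Python 2):

```python
def monitor_throughput(x):
    # append per-host rates and the aggregate to the log file
    with open('logs/throughput.txt', 'a') as output_file:
        rates = []
        for host_id, timestamp, rate in x:
            rates.append(rate)
            print('host {0} {1} {2}'.format(host_id, timestamp, rate),
                  file=output_file)
        print('aggregate {0}'.format(sum(rates)), file=output_file)
        print('done', file=output_file)
```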
19f7840b562555fb085bbafa849ebcee521d89cf
|
Add some comments
|
whos@home.py
|
whos@home.py
|
#!/usr/bin/env python3
print('Loading...')
import subprocess, sys, json, time, os
# Color class used to print colors
class colors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def print_help():
print(colors.YELLOW + 'Usage: ' + sys.argv[0] + ' interface [options | filename]')
print('Options:')
print(' -t | txt file output')
print(' -j | json file output')
print(' -o | txt and json file output (don\'t write file extension in filename)' + colors.END)
# Analyze argv to extract interface
interface = ''
output_file_mode = 'no'
output_filename = ''
if len(sys.argv) != 4 and len(sys.argv) != 2:
print(colors.RED + 'ERROR: wrong arguments' + colors.END)
print_help()
exit()
else:
interface = sys.argv[1]
if len(sys.argv) == 4:
if sys.argv[2] == '-j':
output_file_mode = 'json'
output_filename = sys.argv[3]
if output_filename[-4:] != output_file_mode:
print(colors.RED + 'ERROR: file extension' + colors.END)
exit()
elif sys.argv[2] == '-t':
output_file_mode = 'txt'
output_filename = sys.argv[3]
if output_filename[-3:] != output_file_mode:
print(colors.RED + 'ERROR: file extension' + colors.END)
exit()
elif sys.argv[2] == '-o':
output_file_mode = 'both'
output_filename = sys.argv[3]
if output_filename == "people":
print(colors.RED + 'ERROR: invalid name' + colors.END)
exit()
else:
print(colors.RED + 'ERROR: wrong arguments' + colors.END)
print_help()
exit()
script_path = os.path.dirname(os.path.abspath(__file__)) + '/'
# Open people.json
try:
people_file = open(script_path + 'people.json', 'r')
people_json = json.load(people_file)
except:
print(colors.RED + 'ERROR opening people.json' + colors.END)
exit()
# Make people list
max_cycles = 30
people = []
allowed = '1234567890abcdef:'
for person_dict in people_json['people']:
person_dict['target'] = person_dict['target'].lower()
for c in person_dict['target']:
if c not in allowed:
print(colors.RED + 'ERROR: invalid character found in one or more MAC addresses' + colors.END)
exit()
if len(person_dict['target']) == 17:
person_dict['target'] = person_dict['target'][9:]
people.append(person_dict)
for person_dict in people:
person_dict['lastSeen'] = max_cycles
def execute_process(bash_command):
return subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
def create_json(file):
json_obj = []
for person in people:
temp_dict = {}
temp_dict['name'] = person['name']
temp_dict['target'] = person['target']
if person['lastSeen'] < max_cycles:
temp_dict['home'] = True
else:
temp_dict['home'] = False
json_obj.append(temp_dict)
json.dump(json_obj, file)
# Main cycle
arp_command = 'sudo arp-scan --interface ' + interface + ' --localnet'
while True:
print()
output = execute_process(arp_command)
if output_file_mode != 'no':
if output_file_mode != 'both':
file = open(output_filename, 'w')
else:
file_txt = open(output_filename + '.txt', 'w')
file_json = open(output_filename + '.json', 'w')
for line in output.stdout.readlines():
line = line.decode('utf8')
for split in line.split():
if len(split) == 17: # A MAC address is 17 characters long
                # Only the last 3 bytes of the MAC address are taken into account,
                # to ensure compatibility with some network devices which may change
                # the vendor part of MAC addresses.
                mac = split[9:]
for person in people:
if mac == person['target']:
person['lastSeen'] = -1 # The counter is set to -1 because every counter will be incremented in the next 'for' cycle
for person in people:
if person['lastSeen'] < max_cycles:
person['lastSeen'] += 1
print(colors.GREEN + person['name'] + ' is @ home ' + colors.END)
if output_file_mode == 'txt':
file.write(person['name'] + ' is @ home\n')
elif output_file_mode == 'both':
file_txt.write(person['name'] + ' is @ home\n')
else:
print(colors.PURPLE + person['name'] + ' is away ' + colors.END)
if output_file_mode == 'txt':
file.write(person['name'] + ' is away \n')
elif output_file_mode == 'both':
file_txt.write(person['name'] + ' is away \n')
if output_file_mode != 'no':
if output_file_mode == 'json':
create_json(file)
elif output_file_mode == 'both':
create_json(file_json)
try:
file_txt.close()
file_json.close()
except:
file.close()
time.sleep(30)
|
Python
| 0
|
@@ -616,17 +616,38 @@
nterface
+ and file output mode
%0A
-
interfac
@@ -1421,16 +1421,97 @@
people%22:
+%09%09# Refuse 'people' as filename to avoid conflicts with people.json (config file)
%0A%09%09%09%09pri
@@ -2563,13 +2563,9 @@
PE)%0A
-
%0A
+
def
@@ -2875,19 +2875,16 @@
, file)%0A
-%09%09%09
%0A%0A# Main
|
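The scanner above matches people by the last 3 bytes of each MAC address (split[9:]). A minimal sketch of that normalization, as it is applied to both the configured targets and the arp-scan output:

def mac_suffix(mac):
    # 'AA:BB:CC:DD:EE:FF' -> 'dd:ee:ff'; anything shorter than a full
    # 17-character MAC is assumed to already be a suffix.
    mac = mac.lower()
    return mac[9:] if len(mac) == 17 else mac

assert mac_suffix('AA:BB:CC:DD:EE:FF') == 'dd:ee:ff'
assert mac_suffix('dd:ee:ff') == 'dd:ee:ff'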
1af422fdf5d6ec32e215fecc0caa56331f9a0919
|
Remove version command.
|
numsed.py
|
numsed.py
|
"""
numsed: compiling python to sed
"""
from __future__ import print_function
import argparse
import os
import webbrowser
import common
import transformer
import opcoder
import sedcode
import snippet_test
VERSION = '0.01'
USAGE = '''
%s
Version %s
Usage numsed.py -h | -H
numsed.py <action> <format> <transformation> python-script
''' % (__doc__, VERSION)
def parse_command_line(argstring=None):
parser = argparse.ArgumentParser(description=USAGE, usage=argparse.SUPPRESS, add_help=False, formatter_class=argparse.RawTextHelpFormatter)
agroup = parser.add_argument_group('Information')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument('-h', help='show this help message', action='store_true', dest='help')
xgroup.add_argument('-H', help='open full help page', action='store_true', dest='fullhelp')
agroup = parser.add_argument_group('Actions')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument("--run", help="run generated script (default)", action="store_true")
xgroup.add_argument("--trace", help="trace generated script", action="store_true")
xgroup.add_argument("--coverage", help="run intermediate opcode and display opcode coverage (--opcode only)", action="store_true")
xgroup.add_argument("--test", help="run conversion and compare with original python script", action="store_true")
xgroup.add_argument("--all", help="complete test", action="store_true")
agroup = parser.add_argument_group('Formats')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument("--ast", help="generate abstract syntax tree", action="store_true")
xgroup.add_argument("--script", help="generate python script", action="store_true")
xgroup.add_argument("--disassembly", help="generate disassembly", action="store_true")
xgroup.add_argument("--opcode", help="generate numsed intermediate opcode", action="store_true")
xgroup.add_argument("--sed", help="generate sed script (default)", action="store_true")
agroup = parser.add_argument_group('Transformations')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument("--literal", help="no program transformation", action="store_true")
xgroup.add_argument("--unsigned", help="replace division, modulo and power by functions", action="store_true")
xgroup.add_argument("--signed", help="replace all operators by functions (default)", action="store_true")
parser.add_argument("source", nargs='?', help=argparse.SUPPRESS)
if argstring is None:
args = parser.parse_args()
else:
args = parser.parse_args(argstring.split())
information = (args.help, args.fullhelp)
actions = (args.trace, args.run, args.coverage, args.test, args.all)
if not any(actions):
args.run = True
formats = (args.ast, args.script, args.disassembly, args.opcode, args.sed)
if not any(formats):
args.sed = True
transformations = (args.literal, args.unsigned, args.signed)
if not any(transformations):
args.signed = True
# no action specified nor filename
if not any(information) and not any(actions) and args.source is None:
parser.print_help()
parser.exit(1)
# some action but no filename
if not any(information) and args.source is None:
print('numsed.py: error: filename required')
parser.exit(1)
if args.coverage and not args.opcode:
print('numsed.py: error: argument --coverage requires argument --opcode')
parser.exit(1)
return parser, args
def do_fullhelp():
if os.path.isfile('README.md'):
helpfile = 'README.md'
else:
helpfile = r'https://github.com/GillesArcas/numsed/blob/master/README.md'
webbrowser.open(helpfile, new=2)
def transformation(args):
if args.literal:
return transformer.LITERAL
elif args.unsigned:
return transformer.UNSIGNED
elif args.signed:
return transformer.SIGNED
def numsed_maker(args):
if args.ast:
return transformer.AstConversion
elif args.script:
return transformer.ScriptConversion
elif args.disassembly:
return opcoder.DisassemblyConversion
elif args.opcode:
return opcoder.OpcodeConversion
elif args.sed:
return sedcode.SedConversion
def process_test(args, source):
maker = numsed_maker(args)
target = maker(source, transformation(args))
if args.run:
x = target.run()
elif args.coverage:
x = target.coverage()
elif args.test:
x = target.test()
elif args.trace:
x = target.trace()
if args.run and args.sed:
# already printed
pass
else:
print(x)
return x
def process_suite(args):
status = True
for test in common.testlines(args.source):
print(test[0].rstrip())
with open('tmp.py', 'w') as f:
f.writelines(test)
r = process_test(args, 'tmp.py')
status = status and (not args.test or r)
if not status:
break
if args.test:
print('ALL TESTS OK' if status else 'ONE TEST FAILURE')
return status
def process_all(args):
    # use --trace when --test is not relevant. This completes coverage and may
    # catch some errors
test_args = ('--ast --literal --test ',
'--ast --unsigned --trace',
'--ast --signed --test ',
'--script --literal --test ',
'--script --unsigned --trace',
'--script --signed --test ',
'--dis --literal --trace',
'--dis --unsigned --trace',
'--dis --signed --trace',
'--opcode --literal --test ',
'--opcode --unsigned --trace',
'--opcode --signed --test ',
'--sed --literal --trace',
'--sed --unsigned --trace',
'--sed --signed --test ')
status = all(numsed('%s %s' % (x, args.source)) for x in test_args)
status = status and snippet_test.main()
print('ALL TESTS OK' if status else 'ONE TEST FAILURE')
def numsed(argstring=None):
parser, args = parse_command_line(argstring)
if False: # args.version:
print(__doc__)
print(VERSION)
elif args.help:
parser.print_help()
elif args.fullhelp:
do_fullhelp()
elif args.all:
process_all(args)
else:
if args.source.endswith('.suite.py'):
result = process_suite(args)
else:
result = process_test(args, args.source)
if args.coverage:
opcoder.display_coverage()
return result
if __name__ == "__main__":
numsed()
|
Python
| 0
|
@@ -6539,91 +6539,8 @@
if
-False: # args.version:%0D%0A print(__doc__)%0D%0A print(VERSION)%0D%0A%0D%0A elif
args
|
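numsed's parser groups its flags into mutually exclusive sets and applies a default when none of a group's flags is given. A minimal sketch of that pattern:

import argparse

parser = argparse.ArgumentParser()
xgroup = parser.add_mutually_exclusive_group()
xgroup.add_argument('--run', action='store_true')
xgroup.add_argument('--trace', action='store_true')

args = parser.parse_args([])      # no flags given
if not (args.run or args.trace):  # fall back to the documented default
    args.run = True
print(args.run)  # True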
619b08b6d19769220d7c2d230797473eb504ba24
|
Update trns_validate_SBML_FBAModel.py
|
plugins/scripts/validate/trns_validate_SBML_FBAModel.py
|
plugins/scripts/validate/trns_validate_SBML_FBAModel.py
|
#!/usr/bin/python
# This code is part of KBase project to validate
#the sbml files
#PYTHON USE
import sys, getopt
import os.path
import subprocess
import json
import logging
#KBASE USE
import biokbase.Transform.script_utils as script_utils
desc1 = '''
NAME
      trns_validate_KBaseFBA.SBML.py -- Validate SBML files (1.0)
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
      trns_validate_KBaseFBA.SBML.py validates an SBML file and returns
      a json string
TODO: It will support KBase log format.
'''
desc3 = '''
EXAMPLES
      > trns_validate_KBaseFBA.SBML.py -i <Input SBML file>
AUTHORS
Srividya Ramakrishnan; Sam Seaver
'''
impt = "../bin/validateSBML"
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
class Validate(object):
""" Validate the object and return 0 or exit with an error
"""
def __init__(self,filename):
""" Initialize the validation function to proceed with validation
"""
self.filename=filename
cmd = [impt,filename]
process = subprocess.Popen(cmd,stdout=subprocess.PIPE)
output, unused_err = process.communicate()
retcode = process.poll()
error = ''
status = ''
if retcode:
self.status = 'FAILED'
self.error = output
else:
self.status = 'SUCCESS'
def usage():
print("Usage : trns_validate_KBaseFBA.SBML.py -i <filename> ")
def main(argv):
inputfile = ''
ret = None
logger = script_utils.getStderrLogger(__file__)
logger.info("Validation of SBML")
try:
opts, args = getopt.getopt(argv,"hi:",["help","input_file_name="])
except getopt.GetoptError:
print('trns_validate_KBaseFBA.SBML.py -i <inputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h' or opt == '--help':
print('trns_validate_KBaseFBA.SBML.py -i <inputfile>')
sys.exit()
elif opt == '-i' or opt == '--input_file_name':
if os.path.isfile(arg):
ret = Validate(arg)
else:
logger.warn("File " + arg + " does not exist ")
print("File " + arg + " does not exist ")
sys.exit(1)
else:
logger.warn("Invalid Option "+usage())
print('Invalid Option ' + usage())
return ret
if __name__ == "__main__" :
if len(sys.argv) != 1:
ret = main(sys.argv[1:])
print(to_JSON(ret))
else:
usage()
exit(0);
|
Python
| 0.000001
|
@@ -648,15 +648,8 @@
= %22
-../bin/
vali
|
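The Validate class wraps the external binary with the Popen/communicate/poll idiom. A minimal sketch of that pattern, with 'true' standing in for the validateSBML binary:

import subprocess

process = subprocess.Popen(['true'], stdout=subprocess.PIPE)
output, unused_err = process.communicate()  # capture stdout, wait for exit
retcode = process.poll()                    # 0 on success, non-zero on failure
status = 'FAILED' if retcode else 'SUCCESS'
print(status)  # SUCCESS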
36643e757214ac87a2418c94434e657eaf4adb8c
|
fix a possible bug in anyconfig.schema.validate that it may return success even if validation failed for some cases
|
anyconfig/schema.py
|
anyconfig/schema.py
|
#
# Copyright (C) 2015 Satoru SATOH <ssato redhat.com>
# License: MIT
#
"""anyconfig.schema module.
"""
from __future__ import absolute_import
import anyconfig.compat
try:
import jsonschema
except ImportError:
pass
_SIMPLETYPE_MAP = {list: "array", tuple: "array",
bool: "boolean",
int: "integer", float: "number",
dict: "object",
str: "string"}
_SIMPLE_TYPES = (bool, int, float, str)
if not anyconfig.compat.IS_PYTHON_3:
_SIMPLETYPE_MAP[unicode] = "string"
_SIMPLE_TYPES = (bool, int, float, str, unicode)
def validate(obj, schema, format_checker=None, safe=True):
"""
Validate target object with given schema object, loaded from JSON schema.
See also: https://python-jsonschema.readthedocs.org/en/latest/validate/
    :param obj: Target object (a dict or a dict-like object) to validate
:param schema: Schema object (a dict or a dict-like object)
instantiated from schema JSON file or schema JSON string
:param format_checker: A format property checker object of which class is
inherited from jsonschema.FormatChecker, it's default if None given.
:param safe: Exception (jsonschema.ValidationError or
jsonschema.SchemaError) will be thrown if it's True and any validation
error occurs.
:return: (True if validation succeeded else False, error message)
"""
try:
if format_checker is None:
format_checker = jsonschema.FormatChecker() # :raises: NameError
try:
jsonschema.validate(obj, schema, format_checker=format_checker)
except (jsonschema.ValidationError, jsonschema.SchemaError) as exc:
if safe:
return (False, str(exc))
else:
raise
except NameError:
return (True, "Validation module (jsonschema) is not available")
return (True, '')
def array_to_schema_node(arr, typemap=None):
"""
Generate a node represents JSON schema object with type annotation added
for given object node.
:param arr: Array of dict or MergeableDict objects
:param typemap: Type to JSON schema type mappings
:return: Another MergeableDict instance represents JSON schema of items
"""
if typemap is None:
typemap = _SIMPLETYPE_MAP
if arr:
return gen_schema(arr[0], typemap)
else:
return gen_schema("str", typemap)
def object_to_schema_nodes_iter(obj, typemap=None):
"""
Generate a node represents JSON schema object with type annotation added
for given object node.
:param obj: Dict or MergeableDict object
:param typemap: Type to JSON schema type mappings
:yield: Another MergeableDict instance represents JSON schema of object
"""
if typemap is None:
typemap = _SIMPLETYPE_MAP
for key, val in anyconfig.compat.iteritems(obj):
yield (key, gen_schema(val, typemap=typemap))
def gen_schema(node, typemap=None):
"""
Generate a node represents JSON schema object with type annotation added
for given object node.
:param node: Object node :: MergeableDict
:param typemap: Type to JSON schema type mappings
:return: Another MergeableDict instance represents JSON schema of this node
"""
if typemap is None:
typemap = _SIMPLETYPE_MAP
default = dict(type="null")
if node is None:
return default
_type = type(node)
if _type in _SIMPLE_TYPES:
return dict(type=typemap[_type])
elif isinstance(node, dict):
props = object_to_schema_nodes_iter(node, typemap)
return dict(type=typemap[dict], properties=dict(props))
elif _type in (list, tuple) or hasattr(node, "__iter__"):
return dict(type=typemap[list],
items=array_to_schema_node(node, typemap))
return default # Default.
# vim:sw=4:ts=4:et:
|
Python
| 0.000007
|
@@ -1628,16 +1628,46 @@
hecker)%0A
+ return (True, '')%0A
@@ -1724,16 +1724,43 @@
emaError
+,%0A Exception
) as exc
|
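gen_schema above builds a JSON schema bottom-up from a sample value via a type map. A condensed standalone sketch of the same idea (note that bool is looked up via type(), which keeps it distinct from int even though bool subclasses int):

typemap = {list: "array", tuple: "array", bool: "boolean",
           int: "integer", float: "number", dict: "object", str: "string"}

def simple_schema(value):
    # Recurse through dicts and arrays; leaves map through typemap.
    if value is None:
        return {"type": "null"}
    if isinstance(value, dict):
        return {"type": "object",
                "properties": {k: simple_schema(v) for k, v in value.items()}}
    if isinstance(value, (list, tuple)):
        sample = value[0] if value else ""  # default to string, as above
        return {"type": "array", "items": simple_schema(sample)}
    return {"type": typemap[type(value)]}

print(simple_schema({"name": "a", "ports": [80]}))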
acdd323b330080795425ab44e3d261cbefbdf78a
|
Fix representation of actions
|
ohneio.py
|
ohneio.py
|
import collections
import functools
import inspect
import io
class Buffer:
def __init__(self):
self.queue = collections.deque()
self.position = 0
def write(self, chunk):
self.queue.append(chunk)
def _get_queue(self):
assert len(self.queue) > 0 or self.position == 0, ("We can't have a positive position "
"on an empty queue.")
q = iter(self.queue)
try:
data = next(q)
yield data[self.position:]
except StopIteration:
pass
else:
yield from q
def _get_data(self, nbytes):
if nbytes == 0:
return len(self.queue), 0, b''.join(self._get_queue())
else:
acc = io.BytesIO()
q = self._get_queue()
segments_read = 0
position = 0
while True:
try:
read = acc.write(next(q))
segments_read += 1
except StopIteration:
break
if acc.tell() >= nbytes:
position = acc.tell() - nbytes
if position != 0:
position = read - position
break
acc.seek(0)
return segments_read, position, acc.read(nbytes)
def peek(self, nbytes):
_, _, data = self._get_data(nbytes)
return data
def read(self, nbytes):
segment_read, position, data = self._get_data(nbytes)
if position > 0:
segment_read -= 1
for i in range(segment_read):
self.queue.popleft()
self.position = position
assert len(self.queue) > 0 or self.position == 0, ("We can't have a positive position "
"on an empty queue.")
return data
def __len__(self):
return sum(len(d) for d in self.queue) - self.position
_no_result = object()
_state_ended = object()
class NoResult(RuntimeError):
pass
class Consumer:
def __init__(self, gen):
self.gen = gen
self.input = Buffer()
self.output = Buffer()
self.state = next(gen)
self.res = _no_result
def _wait_before(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
while self.state is _wait:
self.state = next(self.gen)
return meth(self, *args, **kwargs)
return wrapper
def _next_state(self, value=None):
try:
self.state = self.gen.send(value)
except StopIteration as e:
self.state = _state_ended
if len(e.args) > 0:
self.res = e.args[0]
@property
def has_result(self):
return self.res is not _no_result
def get_result(self):
if not self.has_result:
raise NoResult
return self.res
@_wait_before
def read(self, nbytes=0):
while self.state is _get_output:
self._next_state(self.output)
return self.output.read(nbytes)
@_wait_before
def send(self, data):
self.input.write(data)
while self.state is _get_input:
self._next_state(self.input)
def is_consumed(self):
return len(self.input) == 0
del _wait_before
class _Action:
"""Action yielded to the consumer.
Actions yielded to the consumer could be `object()`, but this custom object
    with a custom `repr()` eases debugging.
"""
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Action: {!r}'.format(self)
_get_input = _Action('get_input')
_get_output = _Action('get_output')
_wait = _Action('wait')
def peek(nbytes=0):
input_ = yield _get_input
return input_.peek(nbytes)
def wait():
yield _wait
def read(nbytes=0):
while True:
input_ = yield _get_input
if len(input_) >= nbytes:
return input_.read(nbytes)
yield _wait
def write(data):
output = yield _get_output
output.write(data)
def flush():
while True:
output = yield _get_output
if len(output) == 0:
return
yield _wait
def protocol(func):
if not callable(func):
raise ValueError("A protocol needs to a be a callable")
if not inspect.isgeneratorfunction(func):
raise ValueError("A protocol needs to be a generator function")
@functools.wraps(func)
def wrapper(*args, **kwargs):
return Consumer(func(*args, **kwargs))
return wrapper
|
Python
| 0.001772
|
@@ -3684,16 +3684,17 @@
on: %7B!r%7D
+%3E
'.format
@@ -3698,16 +3698,21 @@
mat(self
+.name
)%0A%0A%0A_get
|
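The diff above fixes an accidental infinite recursion: '{!r}'.format(self) inside __repr__ re-enters __repr__. The corrected pattern formats the name instead:

class Action:
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # Format self.name, not self; formatting self with {!r} would
        # call __repr__ again and recurse until the stack overflows.
        return '<Action: {!r}>'.format(self.name)

print(repr(Action('get_input')))  # <Action: 'get_input'>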
b31217a1a0ed68e0dfee2fdea87aad569c73573f
|
update batch timing
|
clock.py
|
clock.py
|
from apscheduler.schedulers.blocking import BlockingScheduler
from farmsList.imports import every_night_at_1am
from rq import Queue
from worker import conn
import logging
logging.basicConfig()
q = Queue(connection=conn)
sched = BlockingScheduler()
@sched.scheduled_job('cron', hour=21, minute=11)# hour=1)
def scheduled_job():
q.enqueue(every_night_at_1am)
sched.start()
|
Python
| 0
|
@@ -289,17 +289,17 @@
minute=1
-1
+4
)# hour=
|
a991d462e8b29e6a6622f046df4f1abd29f64b28
|
update ZippyshareCom
|
src/pyload/plugins/downloaders/ZippyshareCom.py
|
src/pyload/plugins/downloaders/ZippyshareCom.py
|
# -*- coding: utf-8 -*-
import re
import urllib.parse
from bs4 import BeautifulSoup
from pyload.core.utils.misc import eval_js
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class ZippyshareCom(SimpleDownloader):
__name__ = "ZippyshareCom"
__type__ = "downloader"
__version__ = "0.98"
__status__ = "testing"
__pattern__ = r"https?://(?P<HOST>www\d{0,3}\.zippyshare\.com)/(?:[vd]/|view\.jsp.*key=)(?P<KEY>[\w^_]+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Zippyshare.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("Walter Purcaro", "vuolter@gmail.com"),
("sebdelsol", "seb.morin@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
COOKIES = [("zippyshare.com", "ziplocale", "en")]
URL_REPLACEMENTS = [(__pattern__ + ".*", r"http://\g<HOST>/v/\g<KEY>/file.html")]
NAME_PATTERN = r'(?:<title>Zippyshare.com - |"/)(?P<N>[^/]+)(?:</title>|";)'
SIZE_PATTERN = r'>Size:.+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r"does not exist (anymore )?on this server<"
TEMP_OFFLINE_PATTERN = r"^unmatchable$"
LINK_PATTERN = r"document.location = '(.+?)'"
def setup(self):
self.chunk_limit = -1
self.multi_dl = True
self.resume_download = True
def handle_free(self, pyfile):
self.captcha = ReCaptcha(pyfile)
captcha_key = self.captcha.detect_key()
if captcha_key:
try:
self.link = re.search(self.LINK_PATTERN, self.data)
self.captcha.challenge()
except Exception as exc:
self.error(exc)
else:
self.link = self.fixurl(self.get_link())
if ".com/pd/" in self.link:
self.load(self.link)
self.link = self.link.replace(".com/pd/", ".com/d/")
if self.link and pyfile.name == "file.html":
pyfile.name = urllib.parse.unquote(self.link.split("/")[-1])
def get_link(self):
#: Get all the scripts inside the html body
soup = BeautifulSoup(self.data, 'html.parser')
scripts = [
s.getText()
for s in soup.body.findAll("script", type="text/javascript")
if "('dlbutton').href =" in s.getText()
]
#: Emulate a document in JS
inits = [
"""
var document = {}
document.getElementById = function(x) {
if (!this.hasOwnProperty(x)) {
this[x] = {getAttribute : function(x) { return this[x] } }
}
return this[x]
}
"""
]
#: inits is meant to be populated with the initialization of all the DOM elements found in the scripts
eltRE = r'getElementById\([\'"](.+?)[\'"]\)(\.)?(getAttribute\([\'"])?(\w+)?([\'"]\))?'
for m in re.findall(eltRE, " ".join(scripts)):
JSid, JSattr = m[0], m[3]
values = [
f for f in (elt.get(JSattr, None) for elt in soup.findAll(id=JSid)) if f
]
if values:
inits.append(
'document.getElementById("{}")["{}"] = "{}"'.format(
JSid, JSattr, values[-1]
)
)
#: Add try/catch in JS to handle deliberate errors
scripts = ["\n".join(("try{", script, "} catch(err){}")) for script in scripts]
#: Get the file's url by evaluating all the scripts
scripts = inits + scripts + ["document.dlbutton.href"]
return eval_js("\n".join(scripts))
|
Python
| 0
|
@@ -348,12 +348,12 @@
= %22
-0.98
+1.00
%22%0A
@@ -1293,66 +1293,42 @@
= r'
-(?:%3Ctitle%3EZippyshare.com - %7C%22/)(?P%3CN%3E%5B%5E/%5D+)(?:%3C/title%3E%7C
+%22/d/%5B%5Cw%5E_%5D+/%22.*%22.*/(?P%3CN%3E%5B%5E/%5D+?)
%22;
-)
'%0A
|
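LINK_PATTERN above pulls the download URL out of an inline 'document.location' assignment. A minimal sketch on a hypothetical page snippet:

import re

LINK_PATTERN = r"document.location = '(.+?)'"
html = "<script>document.location = '/d/AbC123/file.zip';</script>"
match = re.search(LINK_PATTERN, html)
print(match.group(1))  # /d/AbC123/file.zip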
ba35ca873069ac149e0db3e0831097a95c6149bb
|
Add docstrings.
|
orders.py
|
orders.py
|
"""
Order matching objects.
"""
import json
class _Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Singleton(_Singleton('SingletonMeta', (object,), {})):
pass
class Book(Singleton):
"""
Yikes, a singleton! Seems to fit this design though. Implements the order
book and matching engine.
"""
def __init__(self):
self.buys = []
self.sells = []
def _clear(self):
"""
Supports unit tests only.
"""
self.__init__()
def _ismatch(self, order, entry):
"""
Conditional boolean value depending on which side of order book.
"""
if isinstance(order, Buy):
return order >= entry
if isinstance(order, Sell):
return order <= entry
def _post(self, order, entry):
"""
        Logic is reverse of _ismatch(). Avoid duplicating code, reverse args.
"""
return self._ismatch(entry, order)
def match(self, order):
"""
Match the order and post unfilled to book.
"""
side = None
if isinstance(order, Buy):
side = self.sells
if isinstance(order, Sell):
side = self.buys
fills = []
index = 0
for entry in [x for x in side]:
if self._ismatch(order, entry):
try:
remainder = entry - order
# order.qty = entry.qty : order complete fill. remove
# entry. stop.
if order.qty == entry.qty:
fills.append(order.dict())
order.complete()
del side[index]
break
# order.qty < entry.qty : order complete fill. update
# entry qty. zero order qty.
if remainder['qty'] > 0:
#entry.qty = entry.qty - order.qty
entry.update(**remainder)
fills.append(order.dict())
order.complete()
except:
# order.qty > entry.qty : order partial fill. remove entry.
# update order. continue.
# No danger of exception on next line since the exception
# that landed us here has demonstrated that order - entry
# is safe.
order.update(**(order - entry))
fills.append(entry.dict())
del side[index]
else:
# Don't look at every entry if we've exhausted the price
# limited possibilities.
break
if order.qty > 0:
self.post(order)
return {"fills": fills}
def post(self, order):
side = None
if isinstance(order, Buy):
side = self.buys
if isinstance(order, Sell):
side = self.sells
index = 0
for entry in side:
if self._post(order, entry):
index += 1
continue
else:
break
side.insert(index, order)
def orders(self):
return {
"buys": [buy.dict() for buy in self.buys],
"sells": [sell.dict() for sell in self.sells],
}
class Order(object):
"""
Base class for order objects.
"""
def __init__(self, qty=0, prc=0): # pragma: no cover
self.qty = qty
self.prc = prc
def __sub__(self, other):
if self.qty >= other.qty:
ret = {"qty": self.qty - other.qty, "prc": self.prc}
return ret
else:
raise Exception("Short of shares to fill.")
def __ge__(self, other): # pragma: no cover
return self.prc >= other.prc
def __str__(self): # pragma: no cover
ret = {"qty": self.qty, "prc": self.prc}
return json.dumps(ret)
def __unicode__(self): # pragma: no cover
return str(self)
def __repr__(self): # pragma: no cover
return str(self)
def dict(self): # pragma: no cover
return {"prc": self.prc, "qty": self.qty}
def complete(self): # pragma: no cover
self.qty = 0
def update(self, **remainder): # pragma: no cover
self.qty = remainder['qty']
def is_valid(self):
"""
Simple validity check.
"""
if self.qty == 0 or self.prc == 0:
return False
return True
class Buy(Order): # pragma: no cover
pass
class Sell(Order): # pragma: no cover
pass
|
Python
| 0
|
@@ -2997,32 +2997,130 @@
t(self, order):%0A
+ %22%22%22%0A Post the order to appropriate side of order book in sorted order.%0A %22%22%22%0A
side = N
@@ -3482,32 +3482,109 @@
f orders(self):%0A
+ %22%22%22%0A Returns a dict containing entire order book.%0A %22%22%22%0A
return %7B
|
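A minimal usage sketch of the matching engine above, assuming the module is importable as orders: a resting sell is partially filled by a smaller buy at the same price.

from orders import Book, Buy, Sell

book = Book()
book._clear()                     # singleton: reset state between runs
book.post(Sell(qty=100, prc=10))  # resting sell
result = book.match(Buy(qty=40, prc=10))
print(result)                     # {'fills': [{'prc': 10, 'qty': 40}]}
print(book.orders())              # remaining sell entry has qty 60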
b7ff53128c9c5b7eb9d5750bcc1bf16661e005dc
|
Check for implemented methods
|
wutu/util.py
|
wutu/util.py
|
import os
import inspect
import tempfile
from flask import Response, request
from flask_restful import Api as FlaskAPI
from logbook import Logger
from contextlib import contextmanager
from wutu.module import Module
modules = set()
from typing import List, Dict, TypeVar, Any, Callable
T = TypeVar("T")
def get_logger(name: str) -> Logger:
"""
Returns a logging provider
:param name: name for logger
:return: logger
"""
return Logger(name)
log = get_logger("util")
def location(directory: str) -> str:
"""
:param directory: Directory in usual unix convention
:return: OS-specialized
"""
return os.path.join(*directory.split("/"))
def get_modules() -> List[Module]:
"""
Returns currently loaded modules
:return:
"""
return modules
def current(*directory: str) -> str:
"""
Locator service
:param directory: what to look for
:return: formed directory
"""
return os.path.join(os.getcwd(), *directory)
def module_locator(module: Module, *directory: str) -> str:
"""
Custom locator for modules
:param module: module itself
:param directory: search directory
:return:
"""
get_module_dir = lambda mod: os.path.dirname(inspect.getmodule(mod.__class__).__file__)
return os.path.join(get_module_dir(module), *directory)
def class_factory(name: str, base: T, **kwargs: Dict[str, Any]) -> object:
"""
Dynamic class generator
:param name: class name
:param base: parent class
:param: kwargs: optional params
:return:
"""
def __init__(self, **options):
for key, val in options.items():
setattr(self, key, val)
self.__name__ = endpoint_name(name)
base.__init__(self)
struct = {"__init__": __init__}
struct.update(kwargs)
ctr = type(name, (base,), struct)
return ctr
def endpoint_name(name: str) -> str:
"""
    Converts string from CamelCase to under_score_case
:param name: regular name
:return:
"""
LState = class_factory("LState", object)
UState = class_factory("UState", object)
state = UState
words = []
cur = []
for l in name:
if state == UState and l.isupper():
cur.append(l.lower())
elif state == UState and l.islower():
state = LState
cur.append(l)
elif state == LState and l.isupper():
words.append("".join(cur))
cur = [l.lower()]
state = UState
else:
cur.append(l)
words.append("".join(cur))
return "_".join(words)
def camel_case_name(name: str) -> str:
"""
Converts string to CamelCase
:param name: input string
:return: CamelCased string
"""
return "".join([words[0].upper() + words[1:] for words in name.split("_")])
def get_identity(inst: Module) -> List[str]:
"""
Returns required positional arguments for module
:param inst: module instance
:return:
"""
return tuple(filter(lambda x: x != "self", inspect.getargspec(inst.get).args))
def setup_endpoint(api: FlaskAPI, inst: Module, name: str) -> None:
"""
Binds module to API
:param api: Flask-Restful
:param inst: module instance
:param name: end-point name
"""
params = "/".join(["<{0}>".format(param) for param in get_identity(inst)])
api.add_resource(inst, "/{0}".format(name), "/{0}/{1}/".format(name, params))
def load_js(file: str, locator: Callable=current) -> str:
"""
Loads JavaScript into memory
:param file: javascript file
:param locator: function which tells where to look for it
:return: javascript as a string
"""
with open(locator(file)) as f:
raw = f.read()
return raw
def get_request_args() -> tuple:
"""
Returns arguments passed to request
:return:
"""
return request.args
def is_stub(method):
"""
Checks if method is stub
:param method:
:return:
"""
return hasattr(method, "__stub__")
@contextmanager
def temp_file():
"""
Creates a temp file and deletes it afterwards
:return:
"""
temp = tempfile.NamedTemporaryFile(delete=False)
try:
yield temp
finally:
temp.close()
os.unlink(temp.name)
@contextmanager
def timer(title: str) -> None:
"""
Measures time elapsed in current block
:param title: Name of block to be visible in output
:return:
"""
from time import perf_counter as pc
start = pc()
try:
yield
finally:
timediff = (pc() - start) * 1000
log.debug("It took {0} ms to execute block '{1}'".format(timediff, title))
|
Python
| 0.000001
|
@@ -3895,23 +3895,337 @@
def
-is_stub(method)
+get_implemented_methods(module: Module) -%3E list:%0A %22%22%22%0A Returns implemented methods in module%0A :param module:%0A :return:%0A %22%22%22%0A return %7Bkey.lower(): val for key, val in dict(inspect.getmembers(module.__class__, predicate=inspect.isfunction))%0A if not is_stub(val)%7D%0A%0A%0Adef is_stub(method: Callable) -%3E bool
:%0A
|
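endpoint_name above converts CamelCase to under_score_case with a small two-state machine. The same conversion, condensed into a standalone sketch:

def to_snake(name):
    words, cur, prev_upper = [], [], True
    for ch in name:
        if not prev_upper and ch.isupper():  # lower -> upper: word boundary
            words.append(''.join(cur))
            cur = []
        cur.append(ch.lower())
        prev_upper = ch.isupper()
    words.append(''.join(cur))
    return '_'.join(words)

print(to_snake('MyModuleName'))  # my_module_name
print(to_snake('HTTPServer'))    # httpserver, matching the original's behavior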
aed52a30f6a3131d0e5401ef3cb4c9e788fe3979
|
Use new pull_requests api element
|
close_pull_requests.py
|
close_pull_requests.py
|
#!/usr/bin/env python
"""
Close PRs on repositories where the master is not on github.
Provide a closing comment, and print the lock URL if desired
"""
import argparse
import logging
import yaml
from client import get_github3_client
DEFAULT_MESSAGE = 'We do not use Pull Requests on this repo. Please see ' \
'CONTRIBUTING or ReadMe file.'
DEFAULT_CONFIG = 'close_pull_requests.yaml'
logger = logging.getLogger(__name__)
exit_code = 0
def update_exit_code(new_code):
""" Update global exit_code, following rules.
Current rule is only update if the new value signifies a
"more severe" error (higher integer value)
"""
global exit_code
exit_code = max(exit_code, new_code)
def close_prs(gh, organization=None, repository=None,
message=None, lock=False, close=False):
if message is None:
message = DEFAULT_MESSAGE
try:
repo = gh.repository(organization, repository)
logger.debug("Checking for PRs in %s", repo.name)
for pr in repo.issues(state='open'):
if pr.pull_request:
logger.debug("Examining PR %s for %s/%s", pr.number,
organization, repository)
if close:
pr.create_comment(message)
pr.close()
logger.info("Closed PR %s for %s/%s", pr.number,
organization, repository)
if lock:
print("Lock PR manually: "
"https://github.com/%s/%s/pull/%s" %
(organization, repository, pr.number))
else:
print("PR %s open for %s/%s at: "
"https://github.com/%s/%s/pull/%s" % (pr.number,
organization,
repository,
organization,
repository,
pr.number))
else:
logger.debug("Skipping issue %s for %s/%s", pr.number,
organization, repository)
else:
logger.debug("no open PR's in %s!", repo.name)
except AttributeError:
logger.error("No access to repository %s/%s", organization, repository)
update_exit_code(1)
def close_configured_prs(gh, config_file, dry_run=False):
config = []
with open(config_file, 'rb') as yaml_file:
config = yaml.safe_load(yaml_file)
for repository in config:
if dry_run:
repository['lock'] = False
repository['close'] = False
close_prs(gh, **repository)
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--only', help="org/repo to use instead of config")
parser.add_argument('--message', help="comment to add (default '%s')" %
DEFAULT_MESSAGE, default=None)
parser.add_argument('--close', action='store_true', help="Close PR")
parser.add_argument('--lock', action='store_true', help="Lock PR")
parser.add_argument('--debug', help="include github3 output",
action='store_true')
parser.add_argument('--config', help="read configs for projects (default "
"'%s')" % DEFAULT_CONFIG, default=DEFAULT_CONFIG)
parser.add_argument('--dry-run', action='store_true',
help='Just show, regardless of config')
return parser.parse_args()
def main():
args = parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
logging.getLogger('github3').setLevel(logging.DEBUG)
gh = get_github3_client()
me = gh.me()
logger.debug("I'm %s (%s)", me.name, me.login)
if args.only:
org, repo = args.only.split('/')
close_prs(gh, organization=org, repository=repo, close=args.close,
lock=args.lock, message=args.message)
else:
close_configured_prs(gh, args.config, args.dry_run)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger('github3').setLevel(logging.ERROR)
main()
raise SystemExit(exit_code)
|
Python
| 0
|
@@ -1043,21 +1043,28 @@
in repo.
-issue
+pull_request
s(state=
@@ -1076,44 +1076,8 @@
'):%0A
- if pr.pull_request:%0A
@@ -1157,33 +1157,32 @@
-
organization, re
@@ -1203,20 +1203,16 @@
-
if close
@@ -1225,28 +1225,24 @@
-
-
pr.create_co
@@ -1268,28 +1268,24 @@
-
pr.close()%0A
@@ -1295,28 +1295,24 @@
-
-
logger.info(
@@ -1368,36 +1368,32 @@
-
organization, re
@@ -1418,20 +1418,16 @@
-
-
if lock:
@@ -1427,20 +1427,16 @@
f lock:%0A
-
@@ -1470,26 +1470,24 @@
manually: %22%0A
-
@@ -1567,18 +1567,16 @@
-
(organiz
@@ -1610,36 +1610,32 @@
r))%0A
-
else:%0A
@@ -1636,28 +1636,24 @@
-
-
print(%22PR %25s
@@ -1670,26 +1670,24 @@
%25s/%25s at: %22%0A
-
@@ -1739,36 +1739,32 @@
%22 %25 (pr.number,%0A
-
@@ -1873,36 +1873,32 @@
-
repository,%0A
@@ -1885,36 +1885,32 @@
repository,%0A
-
@@ -2019,36 +2019,32 @@
-
repository,%0A
@@ -2099,20 +2099,16 @@
-
pr.numbe
@@ -2115,152 +2115,8 @@
r))%0A
- else:%0A logger.debug(%22Skipping issue %25s for %25s/%25s%22, pr.number,%0A organization, repository)%0A
|
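The diff above swaps the issue iteration for github3's pull_requests(), which removes the per-item pr.pull_request check. A minimal sketch of the updated loop (repository name hypothetical):

from client import get_github3_client

gh = get_github3_client()
repo = gh.repository('mozilla', 'some-repo')  # hypothetical org/repo
for pr in repo.pull_requests(state='open'):
    # Every item here is already a pull request; no pr.pull_request check.
    print(pr.number, pr.title)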
a3d99553baf56876e6ad842f0a55568bc13a6db5
|
Remove some debug code
|
LMA/live.py
|
LMA/live.py
|
"""
This code can be used to display a stream of LiveLMA data, read using the
Websocket stream support in lmatools.
Example
-------
# In an IPython notebook, you can then run:
%pylab
server="ws://someuniversity.edu:port/path/to/stream"
import numpy as np
from datetime import datetime
today = datetime.now().date()
basedate = datetime(today.year, today.month, today.day)
print basedate
from brawl4d.brawl4d import B4D_startup
from brawl4d.LMA.controller import LMAController
from brawl4d.LMA.live import LiveLMADataset
panels = B4D_startup(basedate=basedate)
lma_ctrl = LMAController()
d = LiveLMADataset(host=server)
post_filter_brancher, post_transform_branch_to_scatter_artists = lma_ctrl.pipeline_for_dataset(d, panels)
panels.panels['tz'].axis((0, 86400, 0, 20))
lma_ctrl.bounds.stations=(6,99)
lma_ctrl.bounds.chi2=(0,1.0)
This example does not automatically update the time in the plot, but that is
easily accomplished by tapping into matplotlib's timer events.
"""
from datetime import datetime
import threading
from collections import deque
import numpy as np
from numpy.lib.recfunctions import rename_fields
from stormdrain.pubsub import get_exchange
from lmatools.live.liveLMA import LiveLMAController, WebsocketClient
def force_debug(func):
def wrapped(*args, **kwargs):
try:
func(*args,**kwargs)
except:
import sys, traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print(exc_value, traceback.extract_tb(exc_traceback)[-1])
return wrapped
class LiveLMATimeController(object):
def __init__(self, panels, timespan=600.0, track_realtime=True, future_margin=.1, time_name='time'):
""" Contol the real-time display aspects of a live display
timespan is the total duration of the time axis in seconds
future_margin is the fraction of total width of time to be displayed
if track_realtime is set, the view will be updated every timespan * future_margin
"""
self.panels = panels
self.time_name = time_name
self.timespan = timespan
self.future_margin = future_margin
# Timer is in milliseconds
scroll_interval = 1000.0*timespan*future_margin
self.scroll_timer = panels.figure.canvas.new_timer()
self.scroll_timer.add_callback(self.scroll_to_current)
# interval needs to be specified last to work around some sort of bug in
# QT's timer. https://github.com/jonathanrocher/Code-samples/blob/master/matplotlib/animation_demo.py
self.scroll_timer.interval = scroll_interval
self.scroll_timer.start()
self.draw_timer = panels.figure.canvas.new_timer()
self.draw_timer.add_callback(self.draw)
self.draw_timer.interval = 1.0*1000.0
self.draw_timer.start()
self.track_realtime = track_realtime
if self.track_realtime:
self.scroll_to_current()
def draw(self):
self.panels.figure.canvas.draw()
def scroll_to_current(self):
if self.track_realtime:
t_now = (datetime.utcnow() - self.panels.basedate).total_seconds()
margin = self.future_margin * self.timespan
t_min = t_now - self.timespan + margin
t_max = t_now + margin
t_ax = self.panels.panels['tz']
t_ax.set_xlim((t_min, t_max))
class LiveLMADataset(object):
def __init__(self, target=None, host=None, basedate=None):
self.target = target
self.bounds_updated_xchg = get_exchange('SD_bounds_updated')
self.bounds_updated_xchg.attach(self)
self._t_offset = 0.0
if basedate is not None:
# corrects for the meaning of time in the LMA analysis code
self._t_offset += (basedate - datetime(1970, 1, 1)).total_seconds()
self._dataq = deque([])
self.livesource = LiveLMAController()
# New sources are sent as messages to self.show
self.livesource.views.append(self)
self._websocket_client = WebsocketClient(host=host)
# client.connect(on_message=liveDataController.on_message)
sock_thr = threading.Thread(target=self._websocket_client.connect,
kwargs={'on_message':self.livesource.on_message})
sock_thr.daemon=True
sock_thr.start()
def show(self, header, newdata):
# print("{0} new, {1} stations".format(header['num_sources'][0], header['num_stations'][0]))
if newdata.shape[0] > 0:
newdata = rename_fields(newdata, {'t':'time'})
newdata['time'] -= self._t_offset
self._dataq.append(newdata)
self.send("B4D_LMAnewsources_live")
@force_debug
def send(self, msg):
""" SD_bounds_updated messages are sent here """
# do we send the whole events table, or somehow dynamically determine that?
if len(self._dataq) > 0:
data = np.hstack([d for d in self._dataq if (d.shape[0] > 0)])
# print "sending data to {0} with generator frame {1}".format(self.target, self.target.gi_frame)
if self.target is not None:
self.target.send(data)
|
Python
| 0.001032
|
@@ -1235,326 +1235,8 @@
nt%0A%0A
-def force_debug(func):%0A def wrapped(*args, **kwargs):%0A try:%0A func(*args,**kwargs)%0A except:%0A import sys, traceback%0A exc_type, exc_value, exc_traceback = sys.exc_info()%0A print(exc_value, traceback.extract_tb(exc_traceback)%5B-1%5D) %0A return wrapped%0A
@@ -4510,25 +4510,8 @@
%0A
- @force_debug%0A
|
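LiveLMATimeController drives scrolling and redraws with matplotlib canvas timers, setting the interval after add_callback to dodge the QT timer quirk noted in the code. A minimal standalone sketch:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()

def tick():
    print('timer fired')

timer = fig.canvas.new_timer()
timer.add_callback(tick)
timer.interval = 1000  # milliseconds; set last, as the original notes
timer.start()
plt.show()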
a34994bb6ae23f04627ab384fa2c2905997925a9
|
Revert rendering.
|
web/views.py
|
web/views.py
|
import json
from django.http import Http404, HttpResponse
from django.views.generic import TemplateView
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from .models import Game, Player
from .utils import create_new_game, generate_unique_anonymous_username, calculate_stats
class HomeView(TemplateView):
template_name = 'home.html'
class GameView(TemplateView):
template_name = 'game.html'
def get(self, request, game_id, *args, **kwargs):
game = Game.objects.get(id=game_id)
board = [[game.get_field_state(row_index, column_index) for column_index in range(3)] for row_index in range(3)]
game_finished = True if game.get_winner_or_draw() else False
ai_player = game.get_ai_player()
stats = calculate_stats(game)
return render(request, self.template_name, locals())
class Leaderboard(TemplateView):
template_name = 'leaderboard.html'
def get(self, request, *args, **kwargs):
players = Player.objects.all()
return render(request, self.template_name, locals())
def new_game(request, p1_type, p2_type):
"""
    Start a new game. Creates a Game object and redirects to it.
"""
if p1_type == 'anonymous' and p2_type == 'anonymous':
game = create_new_game('anonymous', 'anonymous')
return redirect(game)
if p1_type == 'anonymous' and p2_type == 'ai_random':
player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type)
player2, created = Player.objects.get_or_create(username="AI Random", type=p2_type)
game = Game.objects.create(player1=player1, player2=player2)
return redirect(game)
raise Http404
def new_move(request, game_id):
"""
Save a new game's move to database.
"""
game = Game.objects.get(id=game_id)
player = request.POST.get('player')
x = request.POST.get('x')
y = request.POST.get('y')
m, action = game.add_move_and_get_action(player, x, y)
return render_to_string(request, str(action))
def rematch(request, game_id):
old_game = Game.objects.get(id=game_id)
game = Game.objects.create(
player1=old_game.player2,
player2=old_game.player1,
)
return redirect(game)
def ai_next_move(request, game_id):
game = Game.objects.get(id=game_id)
x, y = game.get_next_random_move()
return HttpResponse(json.dumps({'x': x, 'y': y}), content_type='application/json')
|
Python
| 0
|
@@ -147,60 +147,8 @@
nder
-%0Afrom django.template.loader import render_to_string
%0A%0Afr
@@ -1989,34 +1989,21 @@
urn
-render_to_string(request,
+HttpResponse(
str(
|
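The revert above restores HttpResponse because render_to_string takes a template name as its first argument, not a request, so the replaced call misused the API. The restored pattern:

from django.http import HttpResponse

def action_response(action):
    # Return the action name as a plain-text body instead of trying to
    # render it as a template.
    return HttpResponse(str(action))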
f958ef0179f72adb4b8c7243fc30395de1c31d6b
|
add authentication now required by https://issues.apache.org/jira
|
utils/jira.py
|
utils/jira.py
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import requests
import json
import sys
import pprint
filterid=str(sys.argv[1])
filterurl='https://issues.apache.org/jira/rest/api/2/filter/' + filterid
r=requests.get(filterurl)
rlist=r.json()['searchUrl']
count=requests.get(rlist).json()['total']
n, m = divmod(count, 50)
for i in range(n+1):
issueslist=requests.get(rlist+'&startAt='+str(i*50)).json()['issues']
for issue in issueslist:
        # assignee = issue['fields']['assignee']['displayName']
        # reporter = issue['fields']['reporter']['displayName']
print '`'+ issue['key'] + ' <https://issues.apache.org/jira/browse/' + issue['key'] + '>`_' + ' ' + issue['fields']['summary'][:80] + '...'
|
Python
| 0
|
@@ -801,90 +801,574 @@
se.%0A
- %0Aimport requests%0Aimport json%0Aimport sys%0Aimport pprint%0A %0Afilterid=str(sys.argv%5B1%5D)
+%22%22%22jira.py: Output jira issues from https://issues.apache.org/jira into RST format for Apche CloudStack Release-Notes.%0A%0AUsage:%0A jira.py FILTERID -p USERNAME -u PASSWORD%0A jira.py (-h %7C --help)%0A jira.py --version%0A%0AOptions:%0A -h --help Show this screen.%0A --version Show version.%0A%0A%22%22%22%0Afrom docopt import docopt%0Aimport requests%0Aimport json%0Aimport sys%0Aimport pprint%0A%0A%0Aif __name__ == '__main__':%0A arguments = docopt(__doc__, version='jira.py 2.0')%0A #print(arguments)%0A%0A#print arguments%5B'FILTERID'%5D%0A#print arguments%5B'PASSWORD'%5D%0A#print arguments%5B'USERNAME'%5D%0A
%0Afil
@@ -1432,18 +1432,31 @@
' +
-filterid%0A
+arguments%5B'FILTERID'%5D%0A%0A
%0Ar=r
@@ -1476,16 +1476,68 @@
ilterurl
+, auth=(arguments%5B'USERNAME'%5D,arguments%5B'PASSWORD'%5D)
)%0Arlist=
@@ -1559,21 +1559,23 @@
hUrl'%5D%0A%0A
-count
+get_all
=request
@@ -1589,26 +1589,105 @@
list
-).json()%5B'total'%5D%0A
+, auth=(arguments%5B'USERNAME'%5D,arguments%5B'PASSWORD'%5D)).json()%0Acount=get_all%5B'total'%5D%0A%0A#print count
%0An,
@@ -1750,56 +1750,15 @@
ist=
-requests.get(rlist+'&startAt='+str(i*50)).json()
+get_all
%5B'is
|
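The diff above threads HTTP Basic credentials through the requests and reads the issue list from the first authenticated response. A minimal sketch of both pieces (credentials and filter id hypothetical):

import requests

url = 'https://issues.apache.org/jira/rest/api/2/filter/12345'  # hypothetical filter id
r = requests.get(url, auth=('someuser', 'somepassword'))        # HTTP Basic auth
print(r.status_code)

# The 50-per-page offset arithmetic used above, standalone:
count = 123
n, m = divmod(count, 50)
print([i * 50 for i in range(n + 1)])  # [0, 50, 100]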
aa5b93993d970a1cca9ed060639e40fc72ebfdb8
|
Version bump: 1.1.0 -> 1.1.1
|
iprestrict/__init__.py
|
iprestrict/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .restrictor import IPRestrictor
__all__ = ["IPRestrictor"]
__version__ = "1.1.0"
|
Python
| 0.000001
|
@@ -147,7 +147,7 @@
1.1.
-0
+1
%22%0A
|
afc99f3b6f01a0267c80ebab9f08538da07c9898
|
Allow https urls for Youtube embeds
|
MarkdownPP/Modules/YoutubeEmbed.py
|
MarkdownPP/Modules/YoutubeEmbed.py
|
# Copyright (C) 2012 Alex Nisnevich
# Licensed under the MIT license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
import os
from MarkdownPP.Module import Module
from MarkdownPP.Transform import Transform
youtube_url_re = re.compile('^!VIDEO\s+"http://www\.youtube\.com'
'/embed/([a-zA-Z0-9\-]*)"')
glowfoto_server_re = re.compile("<uploadform>(.*)</uploadform>")
glowfoto_image_re = re.compile("<thumburl>(.*)</thumburl>")
codere = re.compile("^( |\t)")
fencedcodere = re.compile("^```\w*$")
play_button_url = 'http://i.imgur.com/1IHylPh.png'
class YoutubeEmbed(Module):
"""
Converts Youtube embed objects into links with screenshots,
taken from Youtube.
"""
def transform(self, data):
transforms = []
in_fenced_code_block = False
linenum = 0
for line in data:
# Handling fenced code blocks (for Github-flavored markdown)
if fencedcodere.search(line):
if in_fenced_code_block:
in_fenced_code_block = False
else:
in_fenced_code_block = True
# Are we in a code block?
if not in_fenced_code_block and not codere.search(line):
match = youtube_url_re.search(line)
if match:
# find URL of youtube video and screenshot
url = match.group(1)
image_url = 'http://img.youtube.com/vi/%s/0.jpg' % url
video_url = 'http://www.youtube.com/watch?v=%s' % url
processed_image_dir = os.path.join('images', 'youtube')
processed_image_path = os.path.join(processed_image_dir,
'%s.png' % url)
# do we already have a screenshot?
if not os.path.isfile(processed_image_path):
# create directories if needed
if not os.path.exists(processed_image_dir):
os.makedirs(processed_image_dir)
self._add_play_button(image_url, processed_image_path)
image_link = ('[](%s)\n' %
(processed_image_path, video_url))
transforms.append(Transform(linenum, "swap", image_link))
linenum += 1
return transforms
def _add_play_button(self, image_url, image_path):
"""Try to add a play button to the screenshot."""
try:
from PIL import Image
from tempfile import NamedTemporaryFile
import urllib
try:
urlretrieve = urllib.request.urlretrieve
except ImportError:
urlretrieve = urllib.urlretrieve
# create temporary files for image operations
with NamedTemporaryFile(suffix=".jpg") as screenshot_img:
with NamedTemporaryFile(suffix=".jpg") as button_img:
# grab screenshot and button image
urlretrieve(image_url, screenshot_img.name)
urlretrieve(play_button_url, button_img.name)
# layer the images using PIL and save
with Image.open(screenshot_img.name) as background:
with Image.open(button_img.name) as foreground:
background.paste(foreground, (90, 65), foreground)
background.save(image_path)
except ImportError as e:
print(e)
except Exception as e:
print('Unable to add play button to YouTube '
'screenshot (%s). Using the screenshot '
'on its own instead.' % e)
|
Python
| 0
|
@@ -323,24 +323,26 @@
IDEO%5Cs+%22http
+s?
://www%5C.yout
|
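The one-character diff above ('http' -> 'https?') makes the scheme's trailing 's' optional, so both plain and TLS embed URLs match:

import re

youtube_url_re = re.compile(r'^!VIDEO\s+"https?://www\.youtube\.com'
                            r'/embed/([a-zA-Z0-9\-]*)"')
for line in ('!VIDEO "http://www.youtube.com/embed/abc123"',
             '!VIDEO "https://www.youtube.com/embed/abc123"'):
    print(bool(youtube_url_re.search(line)))  # True, True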
b631482e00e59224cb32682f6a9d221748368158
|
Remove os package
|
fabfile.py
|
fabfile.py
|
from datetime import datetime
import os
from fabric.api import (
cd,
env,
local,
put,
run,
sudo,
task
)
PRODUCTION_IP = '54.154.235.243'
PROJECT_DIRECTORY = '/home/ubuntu/ztm/'
BACKUP_DIRECTORY = '/home/ubuntu/backup/'
COMPOSE_FILE = 'compose-production.yml'
@task
def production():
env.run = sudo
env.hosts = [
'ubuntu@' + PRODUCTION_IP + ':22',
]
@task
def create_project_directory():
run('mkdir -p ' + PROJECT_DIRECTORY)
@task
def update_compose_file():
put('./' + COMPOSE_FILE, PROJECT_DIRECTORY)
@task
def backup():
backup_time = datetime.now().strftime('%Y-%m-%d_%H%M')
with cd(BACKUP_DIRECTORY):
command = 'tar -cjvf ztm-' + backup_time + \
'.tar.bz2 ' + PROJECT_DIRECTORY
env.run(command)
command = 's3cmd sync ' + BACKUP_DIRECTORY + ' ' \
's3://zendesk-tickets-machine'
run(command)
@task
def build():
command = 'docker build -t ' \
'133506877714.dkr.ecr.eu-west-1.amazonaws.com/ztm ' \
'-f ./compose/django/Dockerfile .'
local(command)
@task
def push():
local('docker push 133506877714.dkr.ecr.eu-west-1.amazonaws.com/ztm')
@task
def compose_up():
with cd(PROJECT_DIRECTORY):
env.run('docker-compose -f ' + COMPOSE_FILE + ' pull')
env.run('docker-compose -f ' + COMPOSE_FILE + ' up -d')
@task
def deploy():
build()
push()
create_project_directory()
update_compose_file()
compose_up()
|
Python
| 0.000001
|
@@ -26,18 +26,8 @@
time
-%0Aimport os
%0A%0Afr
|
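The backup task above names each archive with a minute-resolution timestamp before syncing to S3. The naming step, standalone:

from datetime import datetime

backup_time = datetime.now().strftime('%Y-%m-%d_%H%M')
command = 'tar -cjvf ztm-' + backup_time + '.tar.bz2 /home/ubuntu/ztm/'
print(command)  # e.g. tar -cjvf ztm-2017-05-02_1415.tar.bz2 /home/ubuntu/ztm/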
ea2b67cd016492f1869f50f899360c3151977e79
|
Fix docstring term
|
csunplugged/topics/management/commands/_GlossaryTermsLoader.py
|
csunplugged/topics/management/commands/_GlossaryTermsLoader.py
|
"""Custom loader for loading glossary terms."""
import os.path
from django.db import transaction
from utils.BaseLoader import BaseLoader
from topics.models import GlossaryTerm
class GlossaryTermsLoader(BaseLoader):
"""Custom loader for loading glossary terms."""
def __init__(self, glossary_folder_path, glossary_terms, structure_file_path, BASE_PATH):
"""Create the loader for loading programming exercises.
Args:
glossary_folder_path: Folder path to definition files (string).
glossary_terms: List of glossary term slugs (list).
structure_file_path: Path to the config file, used for errors.
BASE_PATH: Base file path (string).
"""
super().__init__(BASE_PATH)
self.glossary_terms = glossary_terms
self.structure_file_path = structure_file_path
self.BASE_PATH = os.path.join(self.BASE_PATH, glossary_folder_path)
@transaction.atomic
def load(self):
"""Load the glossary content into the database."""
for glossary_slug in self.glossary_terms:
filename = "{term}.md".format(term=glossary_slug)
definition_file_path = os.path.join(
self.BASE_PATH,
filename
)
glossary_term_content = self.convert_md_file(
definition_file_path,
self.structure_file_path
)
# Create glossary term and save to database
glossary_term = GlossaryTerm(
slug=glossary_slug,
term=glossary_term_content.title,
definition=glossary_term_content.html_string
)
glossary_term.save()
self.log("Added Glossary Term: {}".format(glossary_term.__str__()))
# Print log output
self.print_load_log()
|
Python
| 0.000017
|
@@ -404,28 +404,21 @@
ing
-programming exercise
+glossary term
s.%0A%0A
|
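load() above derives each definition file from its slug. The path construction, standalone (base path hypothetical):

import os.path

BASE_PATH = 'topics/content/glossary'  # hypothetical base path
glossary_slug = 'algorithm'
filename = '{term}.md'.format(term=glossary_slug)
print(os.path.join(BASE_PATH, filename))  # topics/content/glossary/algorithm.md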
f4feca8d4c76806c4c1f088e606fb9e66e188c89
|
Update cloudtrail mq plugin mapping
|
mq/plugins/cloudtrail.py
|
mq/plugins/cloudtrail.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from mozdef_util.utilities.key_exists import key_exists
class message(object):
def __init__(self):
'''
Plugin used to fix object type discretions with cloudtrail messages
'''
self.registration = ['cloudtrail']
self.priority = 10
        # Just add a new entry to this list to
        # automatically convert key mappings
        # which are mixed string and dict
        # into a dict with a raw_value key as the string value
self.modify_keys = [
'details.additionaleventdata',
'details.apiversion',
'details.serviceeventdetails',
'details.requestparameters.attribute',
'details.requestparameters.bucketpolicy.statement.principal.service',
'details.requestparameters.bucketpolicy.statement.principal.aws',
'details.requestparameters.callerreference',
'details.requestparameters.description',
'details.requestparameters.describeegressonlyinternetgatewaysrequest',
'details.requestparameters.describehostsrequest',
'details.requestparameters.describeflowlogsrequest',
'details.requestparameters.describeflowlogsrequest.filter.value',
'details.requestparameters.describenatgatewaysrequest',
'details.requestparameters.describevpcendpointsrequest',
'details.requestparameters.describevpcendpointsrequest.filter',
'details.requestparameters.describevpcendpointsrequest.filter.value',
'details.requestparameters.describevpcendpointsrequest.vpcendpointid',
'details.requestparameters.describevpcendpointserviceconfigurationsrequest',
'details.requestparameters.disableapitermination',
'details.requestparameters.distributionconfig.callerreference',
'details.requestparameters.domainname',
'details.requestparameters.domainnames',
'details.requestparameters.ebsoptimized',
'details.requestparameters.filter',
'details.requestparameters.iaminstanceprofile',
'details.requestparameters.instancetype',
'details.requestparameters.logstreamname',
'details.requestparameters.metrics',
'details.requestparameters.source',
'details.requestparameters.tagging',
'details.requestparameters.logging',
'details.responseelements.role',
'details.responseelements.policy',
'details.requestparameters.rule',
'details.responseelements.createddate',
'details.responseelements.credentials',
'details.responseelements.subnets',
'details.responseelements.endpoint',
'details.responseelements.securitygroups',
'details.responseelements.lastmodified',
'details.responseelements.findings.service.additionalinfo.unusual',
'details.responseelements.distribution.distributionconfig.callerreference'
]
def convert_key_raw_str(self, needle, haystack):
num_levels = needle.split(".")
if len(num_levels) == 0:
return False
current_pointer = haystack
for updated_key in num_levels:
if updated_key == num_levels[-1]:
current_pointer[updated_key] = {
'raw_value': str(current_pointer[updated_key])
}
return haystack
if updated_key in current_pointer:
current_pointer = current_pointer[updated_key]
else:
return haystack
def onMessage(self, message, metadata):
if 'source' not in message:
return (message, metadata)
if not message['source'] == 'cloudtrail':
return (message, metadata)
for modified_key in self.modify_keys:
if key_exists(modified_key, message):
message = self.convert_key_raw_str(modified_key, message)
return (message, metadata)
|
Python
| 0
|
@@ -2337,24 +2337,73 @@
ceprofile',%0A
+ 'details.requestparameters.imageid',%0A
@@ -2932,24 +2932,78 @@
edentials',%0A
+ 'details.responseelements.dbsubnetgroup',%0A
|
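convert_key_raw_str above walks a dotted key path and wraps the leaf value in a {'raw_value': str(...)} dict. A standalone sketch of that walk (it assumes the leaf name does not also appear earlier in the path, and that the path was already confirmed via key_exists, as onMessage does):

def convert_key_raw_str(needle, haystack):
    keys = needle.split('.')
    current = haystack
    for key in keys:
        if key == keys[-1]:
            # At the leaf: replace the value with a raw_value wrapper.
            current[key] = {'raw_value': str(current[key])}
            return haystack
        current = current[key]
    return haystack

doc = {'details': {'apiversion': '2012-10-17'}}
print(convert_key_raw_str('details.apiversion', doc))
# {'details': {'apiversion': {'raw_value': '2012-10-17'}}}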
146fd2b574dddea414c5b3523ec472e5af1f9f44
|
Add note about the fact that Rally won't use testtools' assertions
|
rally/task/functional.py
|
rally/task/functional.py
|
# Copyright 2015: Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import exceptions
class FunctionalMixin(object):
def _concatenate_message(self, default, extended):
if not extended:
return default
if default[-1] != ".":
default += "."
return default + " " + extended.capitalize()
def assertEqual(self, first, second, err_msg=None):
if first != second:
msg = "%s != %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertNotEqual(self, first, second, err_msg=None):
if first == second:
msg = "%s == %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertTrue(self, value, err_msg=None):
if not value:
msg = "%s is not True" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertFalse(self, value, err_msg=None):
if value:
msg = "%s is not False" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIs(self, first, second, err_msg=None):
if first is not second:
msg = "%s is not %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNot(self, first, second, err_msg=None):
if first is second:
msg = "%s is %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNone(self, value, err_msg=None):
if value is not None:
msg = "%s is not None" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNotNone(self, value, err_msg=None):
if value is None:
msg = "%s is None" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIn(self, member, container, err_msg=None):
if member not in container:
msg = "%s not found in %s" % (repr(member),
repr(container))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertNotIn(self, member, container, err_msg=None):
if member in container:
msg = "%s found in %s" % (repr(member),
repr(container))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsInstance(self, first, second, err_msg=None):
if not isinstance(first, second):
msg = "%s is not instance of %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNotInstance(self, first, second, err_msg=None):
if isinstance(first, second):
msg = "%s is instance of %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
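# A minimal usage sketch (editor's illustration, not part of the original
# module): any class mixing this in gains plain assertions that raise
# exceptions.RallyAssertionError instead of unittest/testtools errors.
class _DemoScenario(FunctionalMixin):
    def check(self):
        self.assertEqual(1 + 1, 2)
        self.assertIn("a", "abc", err_msg="expected the letter to be present")
# _DemoScenario().check() passes silently; a failing assertEqual(1, 2,
# err_msg="oops") would raise RallyAssertionError("1 != 2. Oops")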
|
Python
| 0.000051
|
@@ -684,16 +684,971 @@
object):
+%0A %22%22%22Functional assertions.%0A%0A The Rally core team deliberately decided not to use an existing framework%0A for this such a %60testtools%60.%0A%0A Using 'testtools' would introduce the following problems:%0A - Rally production code works with testing tools code that is not designed%0A to be used in production.%0A - Rally code depends on a bunch of new libs introduced by testtools and%0A testtools itself, which means: more code on which Rally is dependent,%0A more time required to install Rally, more disk space required by Rally.%0A - Classes like Scenario & Context are inherited from testtools.TestCase%0A that makes these classes really hard to learn (for instance:%0A running dir(base.Scenario) you cannot see a ton of methods inside it)%0A - It won't be clear for end users what exceptions are raised: unittest%0A exception are going to be raised during production runs instead of%0A Rally assertion exceptions.%0A %22%22%22
%0A%0A de
|
ebbf256bf505bf119c4c961d99c12afd7e6c0ca3
|
Create memory dynamically (#4938)
|
pytorch_lightning/trainer/supporters.py
|
pytorch_lightning/trainer/supporters.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import fsspec
import torch
from pytorch_lightning.utilities.cloud_io import get_filesystem
from torch import Tensor
class TensorRunningAccum(object):
"""Tracks a running accumulation values (min, max, mean) without graph
references.
Examples:
>>> accum = TensorRunningAccum(5)
>>> accum.last(), accum.mean()
(None, None)
>>> accum.append(torch.tensor(1.5))
>>> accum.last(), accum.mean()
(tensor(1.5000), tensor(1.5000))
>>> accum.append(torch.tensor(2.5))
>>> accum.last(), accum.mean()
(tensor(2.5000), tensor(2.))
>>> accum.reset()
>>> _= [accum.append(torch.tensor(i)) for i in range(13)]
>>> accum.last(), accum.mean(), accum.min(), accum.max()
(tensor(12.), tensor(10.), tensor(8.), tensor(12.))
"""
def __init__(self, window_length: int):
self.window_length = window_length
self.memory = torch.zeros(self.window_length)
self.current_idx: int = 0
self.last_idx: Optional[int] = None
self.rotated: bool = False
def reset(self) -> None:
"""Empty the accumulator."""
        # re-initialise in place (rebinding the local name ``self`` is a no-op)
        self.__init__(self.window_length)
def last(self):
"""Get the last added element."""
if self.last_idx is not None:
return self.memory[self.last_idx]
def append(self, x):
"""Add an element to the accumulator."""
# ensure same device and type
if self.memory.device != x.device or self.memory.type() != x.type():
x = x.to(self.memory)
# store without grads
with torch.no_grad():
self.memory[self.current_idx] = x
self.last_idx = self.current_idx
# increase index
self.current_idx += 1
# reset index when hit limit of tensor
self.current_idx = self.current_idx % self.window_length
if self.current_idx == 0:
self.rotated = True
def mean(self):
"""Get mean value from stored elements."""
return self._agg_memory('mean')
def max(self):
"""Get maximal value from stored elements."""
return self._agg_memory('max')
def min(self):
"""Get minimal value from stored elements."""
return self._agg_memory('min')
def _agg_memory(self, how: str):
if self.last_idx is not None:
if self.rotated:
return getattr(self.memory, how)()
else:
return getattr(self.memory[: self.current_idx], how)()
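# The commit's diff (decoded from the URL-encoded hunks in this record) makes
# the buffer allocation lazy so its trailing dims can match the first appended
# tensor:
#     self.memory = None                        # in __init__
#     if self.memory is None:                   # at the top of append()
#         self.memory = torch.zeros(self.window_length, *x.shape)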
class Accumulator(object):
def __init__(self):
self.num_values = 0
self.total = 0
def accumulate(self, x):
with torch.no_grad():
self.total += x
self.num_values += 1
def mean(self):
return self.total / self.num_values
class PredictionCollection(object):
def __init__(self, global_rank: int, world_size: int):
self.global_rank = global_rank
self.world_size = world_size
self.predictions = {}
self.num_predictions = 0
def _add_prediction(self, name, values, filename):
if filename not in self.predictions:
self.predictions[filename] = {name: values}
elif name not in self.predictions[filename]:
self.predictions[filename][name] = values
elif isinstance(values, Tensor):
self.predictions[filename][name] = torch.cat(
(self.predictions[filename][name], values)
)
elif isinstance(values, list):
self.predictions[filename][name].extend(values)
def add(self, predictions):
if predictions is None:
return
for filename, pred_dict in predictions.items():
for feature_name, values in pred_dict.items():
self._add_prediction(feature_name, values, filename)
def to_disk(self) -> None:
"""Write predictions to file(s).
"""
for filepath, predictions in self.predictions.items():
fs = get_filesystem(filepath)
# normalize local filepaths only
if fs.protocol == "file":
filepath = os.path.realpath(filepath)
if self.world_size > 1:
stem, extension = os.path.splitext(filepath)
filepath = f"{stem}_rank_{self.global_rank}{extension}"
dirpath = os.path.split(filepath)[0]
fs.mkdirs(dirpath, exist_ok=True)
# Convert any tensor values to list
predictions = {
k: v if not isinstance(v, Tensor) else v.tolist()
for k, v in predictions.items()
}
# Check if all features for this file add up to same length
feature_lens = {k: len(v) for k, v in predictions.items()}
if len(set(feature_lens.values())) != 1:
raise ValueError(
"Mismatching feature column lengths found in stored EvalResult predictions."
)
# Switch predictions so each entry has its own dict
outputs = []
for values in zip(*predictions.values()):
output_element = {k: v for k, v in zip(predictions.keys(), values)}
outputs.append(output_element)
# Write predictions for current file to disk
with fs.open(filepath, "wb") as fp:
torch.save(outputs, fp)
|
Python
| 0
|
@@ -1562,39 +1562,12 @@
y =
-torch.zeros(self.window_length)
+None
%0A
@@ -2011,32 +2011,133 @@
accumulator.%22%22%22%0A
+ if self.memory is None:%0A self.memory = torch.zeros(self.window_length, *x.shape)%0A%0A
# ensure
|
f21abdbc675d0271419c78a293267289935de65f
|
add user and date to scaninfo
|
irma/common/objects.py
|
irma/common/objects.py
|
import re
import config.dbconfig as dbconfig
from lib.irma.database.objects import DatabaseObject
from lib.irma.machine.libvirt_manager import LibVirtMachineManager
from lib.irma.common.exceptions import IrmaAdminError
class AttributeDictionary(dict):
"""A dictionnary with object-like accessors"""
__getattr__ = lambda obj, key: obj.get(key, None)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class ScanStatus:
init = 10
launched = 11
finished = 20
cancelling = 30
cancelled = 31
label = {
init:"scan created",
launched:"scan launched",
finished:"scan finished",
cancelled:"scan cancelled"
}
class ScanInfo(DatabaseObject):
# TODO add date"
# TODO add accounting
_dbname = dbconfig.DB_NAME
_collection = dbconfig.COLL_SCANINFO
def __init__(self, dbname=None, _id=None):
if dbname:
self._dbname = dbname
self.oids = {}
self.taskid = None
self.probelist = []
self.status = ScanStatus.init
super(ScanInfo, self).__init__(_id=_id)
def get_results(self):
res = {}
for (oid, name) in self.oids.items():
r = ScanResults(_id=oid)
res[name] = dict((k, v) for (k, v) in r.results.iteritems() if k in self.probelist)
return res
class ScanResults(DatabaseObject):
_dbname = dbconfig.DB_NAME
_collection = dbconfig.COLL_SCANRES
def __init__(self, dbname=None, _id=None):
if dbname:
self._dbname = dbname
self.probelist = []
self.results = {}
super(ScanResults, self).__init__(_id=_id)
class Node(LibVirtMachineManager):
""" currently using libvirt to manage vms """
def __init__(self, node_cs):
self._probes = []
self._node_cs = node_cs
super(Node, self).__init__(node_cs)
def get_probes(self):
for label in self.list(self.INACTIVE):
self._probes.append(Probe(label, self._node_cs, self.INACTIVE))
for label in self.list(self.ACTIVE):
self._probes.append(Probe(label, self._node_cs, self.ACTIVE))
return self._probes
class Probe(object):
halted = 0
running = 1
status_str = ["halted", "running"]
def __init__(self, label, node, status):
self.label = label
self.node = node
self.status = status
@property
def pstatus(self):
return self.status_str[self.status]
class System(object):
IRMA_MASTER_PREFIX = "master"
def __init__(self, nodes_cs=[]):
""" init system with nodes connection string list """
self.probes = []
self.nodes_cs = nodes_cs
if self.nodes_cs:
self._refresh()
return
def _refresh(self):
res = []
for node_cs in self.nodes_cs:
n = Node(node_cs)
res += n.get_probes()
self.probes = res
return
def probe_is_master(self, probe):
return re.search(self.IRMA_MASTER_PREFIX, probe.label)
def _master_list(self):
return [p for p in self.probes if self.probe_is_master(p)]
def _probe_list(self):
return [p for p in self.probes if not self.probe_is_master(p)]
def _probe_lookup(self, node, label, master_lookup=False):
# look for probe with label specified
# node is optional if label is unique
self._refresh()
if not master_lookup and label in [p.label for p in self._master_list()]:
raise IrmaAdminError("Can not use {0}: master image (clone it first)".format(label))
if not node:
t = filter(lambda x: x.label == label, self.probes)
else:
t = filter(lambda x: x.label == label and x.node == node, self.probes)
if not t:
raise IrmaAdminError("Probe {0} not found".format(label))
elif len(t) != 1:
raise IrmaAdminError("More than one Probe {0}".format(label))
else:
return t.pop()
def _probe_set_state(self, node, label, state):
p = self._probe_lookup(node, label)
n = Node(driver=p.node)
if state == Probe.halted:
n.stop(p.label)
if state == Probe.running:
n.start(p.label)
p.status = state
return "node %s on %s %s" % (p.label, p.node, p.pstatus)
def probe_list(self):
        # refresh the list and state of each probe by connecting directly to each node
self._refresh()
res = {}
for probe in self._probe_list():
res[probe.node] = res.get(probe.node, []) + [(probe.label, probe.pstatus)]
return res
def probe_master_list(self):
        # refresh the list and state of each probe master by connecting directly to each node
self._refresh()
res = {}
for probe in self._master_list():
res[probe.node] = res.get(probe.node, []) + [(probe.label, probe.pstatus)]
return res
def probe_start(self, node, label):
return self._probe_set_state(node, label, Probe.running)
def probe_stop(self, node, label):
return self._probe_set_state(node, label, Probe.halted)
def probe_clone(self, node, label, dstlabel):
p = self._probe_lookup(node, label, master_lookup=True)
n = Node(driver=p.node)
n.clone(label, dstlabel)
np = Probe(dstlabel, p.node, Probe.halted)
self.probes.append(np)
return "%s clone of %s added on node %s" % (np.label, p.label, np.node)
def node_list(self):
        # return the configured node connection strings
        return self.nodes_cs
|
Python
| 0
|
@@ -3,16 +3,46 @@
port re%0A
+from datetime import datetime%0A
import c
@@ -978,32 +978,92 @@
dbname = dbname%0A
+ self.user = None%0A self.date = datetime.now()%0A
self.oid
|
179308a7061b2e0b1bb10d5c7757a611196608db
|
change fabfile
|
fabfile.py
|
fabfile.py
|
from distutils.util import strtobool
from fabric.api import local, abort, run, sudo
from fabric.context_managers import cd, settings, hide, shell_env
from fabric.contrib.console import confirm
from getpass import getpass
from fabric.utils import puts
from fabric.state import env
env.control_dir = 'pipecontrol'
def with_sudo():
"""
Prompts and sets the sudo password for all following commands.
Use like
fab with_sudo command
"""
env.sudo_password = getpass('Please enter sudo password: ')
env.password = env.sudo_password
def down():
with cd(env.control_dir):
sudo('docker-compose down')
def get_branch(gitdir):
"""
Gets the branch of a git directory.
Args:
gitdir: path of the git directory
Returns: current active branch
"""
return local('git symbolic-ref --short HEAD', capture=True)
def pull():
with cd(env.control_dir):
branch = get_branch(env.control_dir)
sudo('git reset --hard')
sudo('git clean -fd')
sudo('git checkout {}'.format(branch))
sudo('git pull origin ' + branch)
def build():
with cd(env.control_dir):
sudo('docker-compose build pipecontrol')
def start():
with cd(env.control_dir):
sudo('docker-compose up -d pipecontrol')
def sync_files():
local('scp dj_local_conf.json ' + env.host_string + ':' + env.control_dir)
def deploy():
with settings(warn_only=True):
with_sudo()
down()
pull()
sync_files()
build()
start()
|
Python
| 0.000001
|
@@ -1449,25 +1449,20 @@
-with_sudo
+pull
()%0A
@@ -1464,20 +1464,26 @@
-down
+sync_files
()%0A
@@ -1477,36 +1477,41 @@
files()%0A
-pull
+with_sudo
()%0A sync_
@@ -1501,34 +1501,28 @@
o()%0A
-sync_files
+down
()%0A b
|
3f4614720d376a8f8caf008f8a6930d97d65db1a
|
align french
|
SCT/benchmark_aligner.py
|
SCT/benchmark_aligner.py
|
import sys
import shutil, os
sys.path.insert(0, os.path.expanduser('~/Montreal-Forced-Aligner'))
import time
import logging
import platform
import csv
import statistics
from datetime import datetime
from aligner.command_line.train_and_align import align_corpus, align_corpus_no_dict
#corpus_dir = '/media/share/datasets/aligner_benchmarks/LibriSpeech/standard'
corpus_dir = '/media/share/datasets/aligner_benchmarks/sorted_tagalog'
#dict_path = os.path.expanduser('~/Montreal-Forced-Aligner/librispeech-lexicon.txt')
dict_path = None
#output_directory = '/data/michaela/aligned_librispeech'
output_directory = '/data/michaela/aligned_tagalog'
output_model_path = os.path.expanduser('~/Documents/tagalog_models.zip')
num_jobs = 2
def benchmark_align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast,
output_model_path, num_jobs, verbose):
beg = time.time()
align_corpus(corpus_dir, dict_path, output_directory, speaker_characters, fast,
output_model_path, num_jobs, verbose)
end = time.time()
return [(end - beg)]
def benchmark_align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast,
output_model_path, num_jobs, verbose):
beg = time.time()
align_corpus_no_dict(corpus_dir, output_directory, speaker_characters, fast,
output_model_path, num_jobs, verbose)
end = time.time()
return [(end - beg)]
if dict_path == None:
benchmark_align_corpus_no_dict(corpus_dir, output_directory, 0, False, output_model_path, num_jobs, False)
else:
benchmark_align_corpus(corpus_dir, dict_path, output_directory, 0, False, output_model_path, num_jobs, True)
def WriteDictToCSV(csv_file,csv_columns,dict_data):
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
return
csv_columns = ['Computer','Date','Corpus', 'Type of benchmark', 'Total time', 'Num_jobs']
if dict_path == None:
dict_data = [
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': 'tagalog', 'Type of benchmark': 'train and align', 'Total time': benchmark_align_corpus_no_dict[0], 'Num_jobs': num_jobs}
]
else:
dict_data = [
{'Computer': platform.node(), 'Date': str(datetime.now()), 'Corpus': 'dog_cat', 'Type of benchmark': 'train and align', 'Total time': benchmark_align_corpus[0], 'Num_jobs': num_jobs}
]
now = datetime.now()
date = str(now.year)+str(now.month)+str(now.day)
if not os.path.exists('aligner_benchmark'+date+'.csv'):
open('aligner_benchmark'+date+'.csv', 'a')
with open('aligner_benchmark'+date+'.csv', 'a') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=csv_columns)
writer.writeheader()
csv_file = 'aligner_benchmark'+date+'.csv'
with open('aligner_benchmark'+date+'.csv', 'a') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=csv_columns)
writer.writerow(dict_data[0])
|
Python
| 0.999998
|
@@ -419,23 +419,29 @@
/sorted_
-tagalog
+quebec_french
'%0A#dict_
@@ -531,20 +531,124 @@
_path =
-None
+os.path.expanduser('~/Montreal-Forced-Aligner/dist/montreal-forced-aligner/prosodylab.dictionaries/fr.dict')
%0A#output
@@ -740,23 +740,29 @@
aligned_
-tagalog
+quebec_french
'%0Aoutput
@@ -807,23 +807,29 @@
cuments/
-tagalog
+quebec_french
_models.
@@ -1561,24 +1561,33 @@
== None:%0A
+ nodict =
benchmark_a
@@ -1691,16 +1691,26 @@
lse:%0A
+ yesdict =
benchma
@@ -2330,17 +2330,18 @@
s':
-'tagalog'
+corpus_dir
, 'T
@@ -2396,34 +2396,10 @@
e':
-benchmark_align_corpus_
no
-_
dict
@@ -2540,17 +2540,18 @@
s':
-'dog_cat'
+corpus_dir
, 'T
@@ -2602,38 +2602,23 @@
time':
-benchmark_align_corpus
+yesdict
%5B0%5D, 'Nu
|
33fa555cb8657589b47b6533094069e5ddde4000
|
Rename new companies dataset (no geolocation)
|
serenata_toolbox/datasets/downloader.py
|
serenata_toolbox/datasets/downloader.py
|
import asyncio
import os
import aiofiles
import aiohttp
from tqdm import tqdm
MAX_REQUESTS = 4
class Downloader:
LATEST = (
'2016-08-08-ceap-datasets.md',
'2016-08-08-current-year.xz',
'2016-08-08-datasets-format.html',
'2016-08-08-last-year.xz',
'2016-08-08-previous-years.xz',
'2016-09-03-companies.xz',
'2016-11-11-congressperson-relatives.xz',
'2016-11-19-current-year.xz',
'2016-11-19-last-year.xz',
'2016-11-19-previous-years.xz',
'2016-11-28-congressperson-civil-names.xz',
'2016-11-29-yelp-companies.xz',
'2016-12-02-foursquare-companies.xz',
'2016-12-15-speeches.xz',
'2016-12-20-impeded-non-profit-entities.xz',
'2016-12-21-deputies.xz',
'2016-12-21-inident-and-suspended-companies.xz',
'2016-12-21-national-register-punished-companies.xz',
'2016-12-21-presences.xz',
'2016-12-21-sessions.xz',
'2016-12-21-speeches.xz',
'2016-12-22-agreements.xz',
'2016-12-22-amendments.xz',
'2017-02-15-receipts-texts.xz',
'2017-03-15-reimbursements.xz',
'2017-03-20-purchase-suppliers.xz',
'2017-04-27-companies-no-geolocation.xz',
'2017-05-10-tse-candidates.xz',
'2017-05-21-companies.xz'
)
def __init__(self, target, **kwargs):
self.bucket = kwargs.get('bucket')
self.region = kwargs.get('region_name')
if not all((self.bucket, self.region)):
raise RuntimeError('No bucket and/or region_name kwargs provided')
self.target = os.path.abspath(target)
if not all((os.path.exists(self.target), os.path.isdir(self.target))):
msg = '{} does not exist or is not a directory.'
raise FileNotFoundError(msg.format(self.target))
self.semaphore = asyncio.Semaphore(MAX_REQUESTS)
self.total = 0
def download(self, files):
if isinstance(files, str):
files = [files]
files = tuple(filter(bool, files))
if not files:
return
loop = asyncio.get_event_loop()
loop.run_until_complete(self.main(loop, files))
async def main(self, loop, files):
desc = 'Downloading {} files'.format(len(files))
if len(files) == 1:
first_file, *_ = files
desc = 'Downloading {}'.format(first_file)
async with aiohttp.ClientSession(loop=loop) as client:
# fetch total size (all files)
sizes = [self.fetch_size(client, f) for f in files]
await asyncio.gather(*sizes)
# download
args = dict(total=self.total, desc=desc, unit='b', unit_scale=True)
with tqdm(**args) as progress:
self.progress = progress
downloads = [self.fetch_file(client, f) for f in files]
await asyncio.gather(*downloads)
# cleanup
del self.progress
self.total = 0
async def fetch_size(self, client, filename):
with (await self.semaphore):
async with client.head(self.url(filename)) as resp:
size = resp.headers.get('CONTENT-LENGTH', '0')
self.total += int(size)
async def fetch_file(self, client, filename):
filepath = os.path.join(self.target, filename)
with (await self.semaphore):
async with client.get(self.url(filename), timeout=None) as resp:
contents = await resp.read()
async with aiofiles.open(filepath, 'wb') as fh:
await fh.write(contents)
self.progress.update(len(contents))
def url(self, filename):
url = 'https://s3-{}.amazonaws.com/{}/{}'
return url.format(self.region, self.bucket, filename)
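# Hypothetical usage sketch (bucket, region, and paths assumed, not part of
# the original module): MAX_REQUESTS bounds concurrency via the
# asyncio.Semaphore, and a HEAD request per file sizes the shared tqdm
# progress bar up front.
#     downloader = Downloader('/tmp/datasets', bucket='my-bucket',
#                             region_name='us-east-1')
#     downloader.download(['2017-03-15-reimbursements.xz'])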
|
Python
| 0.000081
|
@@ -1311,24 +1311,39 @@
21-companies
+-no-geolocation
.xz'%0A )%0A%0A
|
3d2b08a971ded9fa4bf3a3d7c69c15e589b6adab
|
add v0.8.1 (#21798)
|
var/spack/repos/builtin/packages/py-parso/package.py
|
var/spack/repos/builtin/packages/py-parso/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyParso(PythonPackage):
"""Parso is a Python parser that supports error recovery and round-trip parsing
for different Python versions (in multiple Python versions).
Parso is also able to list multiple syntax errors
in your python file."""
pypi = "parso/parso-0.6.1.tar.gz"
version('0.6.1', sha256='56b2105a80e9c4df49de85e125feb6be69f49920e121406f15e7acde6c9dfc57')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
|
Python
| 0
|
@@ -542,149 +542,503 @@
('0.
-6.1', sha256='56b2105a80e9c4df49de85e125feb6be69f49920e121406f15e7acde6c9dfc57')%0A%0A depends_on('python@2.7:2.8,3.4:', type=('build', 'run')
+8.1', sha256='8519430ad07087d4c997fda3a7918f7cfa27cb58972a8c89c2a0295a1c940e9e')%0A version('0.6.1', sha256='56b2105a80e9c4df49de85e125feb6be69f49920e121406f15e7acde6c9dfc57')%0A version('0.4.0', sha256='2e9574cb12e7112a87253e14e2c380ce312060269d04bd018478a3c92ea9a376')%0A%0A depends_on('python@3.6:', type=('build', 'run'), when='@0.8.1:')%0A depends_on('python@2.7:2.8,3.4:', type=('build', 'run'), when='@0.6.1:')%0A depends_on('python@2.6:2.8,3.3:', type=('build', 'run'), when='@0.4.0:'
)%0A
|
deb2bb30b6f584a7a899d7c161605efadee468cf
|
add optional /geo suffix to /pollingstations
|
polling_stations/api/pollingstations.py
|
polling_stations/api/pollingstations.py
|
from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import GenericViewSet
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from pollingstations.models import PollingStation
class PollingStationSerializer(GeoFeatureModelSerializer):
class Meta:
model = PollingStation
geo_field = 'location'
fields = ('council', 'postcode', 'address', 'location')
class PollingStationViewSet(GenericViewSet, ListModelMixin):
queryset = PollingStation.objects.all()
serializer_class = PollingStationSerializer
|
Python
| 0.000003
|
@@ -1,28 +1,77 @@
+from rest_framework.decorators import list_route%0A
from rest_framework.mixins i
@@ -612,16 +612,137 @@
ationSerializer%0A
+%0A @list_route(url_path='geo')%0A def geo(self, request, format=None):%0A return self.list(request, format=None)%0A
|
68f420099f1efc655757ec18097c6e389fa95cfb
|
fix case where an old subscriber subscribes again; they shouldn't stay flagged as an old subscriber
|
product_subscription/models/invoice.py
|
product_subscription/models/invoice.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from openerp import models, fields, api
class AccountInvoice(models.Model):
_inherit = "account.invoice"
subscription = fields.Boolean(string="Subscription")
product_subscription_request = fields.One2many('product.subscription.request','invoice', string="Product subscription")
def process_subscription(self,effective_date):
# set the subscription request to paid
req_vals = {'state':'paid',
'payment_date':effective_date}
sub_req = self.product_subscription_request
sub_template = sub_req.subscription_template
# check if there is already an ongoing or an old subscription
# tied to the subscriber
subscriber = self.product_subscription_request.subscriber
# allocate the product quantity to the subscriber
if len(subscriber.subscriptions) > 0:
# there is an existing subscription
subscription = subscriber.subscriptions[0]
sub_vals = {'state':'ongoing',
'counter':subscription.counter + sub_template.product_qty}
subscription.write(sub_vals)
req_vals['subscription'] = subscription.id
else:
# no subscription found for this subscriber. We need to create one
prod_sub_seq = self.env.ref('product_subscription.sequence_product_subscription', False)
prod_sub_num = prod_sub_seq.next_by_id()
sub_vals = {'name':prod_sub_num,
'subscriber':subscriber.id,
'subscribed_on':effective_date,
'counter':sub_template.product_qty,
'state':'ongoing'}
subscription = self.env['product.subscription.object'].create(sub_vals)
req_vals['subscription'] = subscription.id
subscriber.write({'subscriber':True,'old_subscriber':False})
sub_req.write(req_vals)
return True
@api.multi
def confirm_paid(self):
for invoice in self:
super(AccountInvoice, invoice).confirm_paid()
if invoice.subscription and invoice.type == 'out_invoice':
effective_date = datetime.now().strftime("%d/%m/%Y")
                # take the effective date from the payment; by default the confirmation date is the payment date
if invoice.payment_move_line_ids :
move_line = invoice.payment_move_line_ids[0]
effective_date = move_line.date
invoice.process_subscription(effective_date)
return True
|
Python
| 0.000001
|
@@ -1925,28 +1925,24 @@
id%0D%0A
-
subscriber.w
|
8ff0c9a996d88b345c15957d7881e61ee8db7014
|
Update gleu_score.py
|
nltk/translate/gleu_score.py
|
nltk/translate/gleu_score.py
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: GLEU Score
#
# Copyright (C) 2001-2016 NLTK Project
# Authors:
# Contributors:
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
""" GLEU score implementation. """
from __future__ import division
from collections import Counter
from nltk.util import ngrams, everygrams
def sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
"""
Calculates the sentence level CHRF (Character n-gram F-score) described in
Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi,
Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey,
Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser,
Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens,
George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith,
Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes,
Jeffrey Dean. (2016) Google’s Neural Machine Translation System:
Bridging the Gap between Human and Machine Translation.
eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf
Retrieved on 27 Oct 2016.
From Wu et al. (2016):
"The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective."
    Note: The GLEU score is designed for sentence-based evaluation, thus there
    are no corpus-based scores implemented in NLTK.
The infamous "the the the ... " example
>>> ref = 'the cat is on the mat'.split()
>>> hyp = 'the the the the the the the'.split()
>>> sentence_gleu(ref, hyp) # doctest: +ELLIPSIS
0.0909...
An example to evaluate normal machine translation outputs
>>> ref1 = str('It is a guide to action that ensures that the military '
... 'will forever heed Party commands').split()
>>> hyp1 = str('It is a guide to action which ensures that the military '
... 'always obeys the commands of the party').split()
>>> hyp2 = str('It is to insure the troops forever hearing the activity '
... 'guidebook that party direct').split()
>>> sentence_gleu(ref1, hyp1) # doctest: +ELLIPSIS
0.4393...
>>> sentence_gleu(ref1, hyp2) # doctest: +ELLIPSIS
0.1206...
:param references: reference sentence
:type references: list(str)
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param min_len: The minimum order of n-gram this function should extract.
:type min_len: int
:param max_len: The maximum order of n-gram this function should extract.
:type max_len: int
    :return: the sentence level GLEU score.
:rtype: float
"""
# For each order of ngram, calculate the no. of ngram matches and
# keep track of no. of ngram in references.
ref_ngrams = Counter(everygrams(reference, min_len, max_len))
hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
overlap_ngrams = ref_ngrams & hyp_ngrams
tp = sum(overlap_ngrams.values()) # True positives.
tpfp = sum(hyp_ngrams.values()) # True positives + False positives.
    tpfn = sum(ref_ngrams.values())  # True positives + False negatives.
    precision = tp / tpfp
    recall = tp / tpfn
return min(precision, recall)
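# Worked example (editor's note) matching the doctest above: for
# ref = 'the cat is on the mat'.split() and hyp = ('the ' * 7).split(),
# the reference yields 6+5+4+3 = 18 n-grams of orders 1-4 and the
# hypothesis 7+6+5+4 = 22. The only overlap is the unigram 'the', counted
# min(2, 7) = 2 times by the Counter intersection, so precision = 2/22
# = 0.0909..., recall = 2/18 = 0.1111..., and min(precision, recall)
# = 0.0909..., as in the doctest.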
|
Python
| 0.000002
|
@@ -446,39 +446,32 @@
vel
-CHRF (Character n-gram F-
+GLEU (Google-BLEU)
score
-)
des
|
f961c1031285c5852e46f880668b963d27245ace
|
Rename _table to table
|
examples/tictactoe-qlearning.py
|
examples/tictactoe-qlearning.py
|
import random
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.player import AlphaBeta, RandPlayer
from capstone.util import ZobristHashing
class TabularQLearning(object):
def __init__(self, env, policy=RandPlayer(), alpha=0.1, gamma=0.99, n_episodes=1000):
self.env = env
self.policy = RandPlayer()
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
self._table = {}
def max_q_value(self, state, actions):
if not actions:
return 0
best_value = -100000
for next_action in actions:
temp_value = self._table.get((state, next_action), random.random() - 0.5)
if temp_value > best_value:
best_value = temp_value
return best_value
def learn(self):
import random
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
while not self.env.is_terminal():
print('Step {}'.format(step))
state = self.env.cur_state()
action = random.choice(self.env.actions())
reward, next_state = self.env.do_action(action)
max_q_value = self.max_q_value(next_state, self.env.actions())
q_value = self._table.get((state, action), 0.1)
update_value = reward + (self.gamma * max_q_value) - q_value
self._table[(state, action)] = q_value + (self.alpha * update_value)
step += 1
print('Results:')
for key, value in self._table.iteritems():
print(key)
print(value)
print('*' * 60)
game = TicTacToe(
'X-O'
'XO-'
'-XO'
)
ab = AlphaBeta()
mdp = GameMDP(game, ab, 1)
env = Environment(mdp)
td0 = TabularQLearning(env)
td0.learn()
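# The learn() loop above implements the standard tabular Q-learning update:
#     Q(s, a) <- Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a') - Q(s, a))
# with random.random() - 0.5 as the default for unseen next-state values and
# 0.1 as the default for an unseen (state, action) pair.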
|
Python
| 0.999999
|
@@ -495,25 +495,24 @@
self.
-_
table = %7B%7D%0A%0A
@@ -686,33 +686,32 @@
mp_value = self.
-_
table.get((state
@@ -1410,25 +1410,24 @@
alue = self.
-_
table.get((s
@@ -1540,25 +1540,24 @@
self.
-_
table%5B(state
@@ -1690,17 +1690,16 @@
in self.
-_
table.it
|
dc40793ad27704c83dbbd2e923bf0cbcd7cb00ed
|
Handle both event instanciation from object and from serialized events
|
polyaxon/event_manager/event_service.py
|
polyaxon/event_manager/event_service.py
|
from libs.services import Service
class EventService(Service):
__all__ = ('record', 'setup')
event_manager = None
def can_handle(self, event_type):
return isinstance(event_type, str) and self.event_manager.knows(event_type)
def get_event(self, event_type, instance, **kwargs):
return self.event_manager.get(
event_type,
).from_instance(instance, **kwargs)
def record(self, event_type, instance=None, **kwargs):
""" Validate and record an event.
>>> record('event.action', object_instance)
"""
if not self.is_setup:
return
if not self.can_handle(event_type=event_type):
return
event = self.get_event(event_type=event_type, instance=instance, **kwargs)
self.record_event(event)
def record_event(self, event):
""" Record an event.
>>> record_event(Event())
"""
pass
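# Minimal concrete subclass sketch (hypothetical, for illustration): the base
# class validates the event type and builds the event; persistence is left to
# record_event(), which is a no-op by default.
class PrintingEventService(EventService):
    def record_event(self, event):
        print('recorded event: {}'.format(event))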
|
Python
| 0.000011
|
@@ -269,32 +269,49 @@
elf, event_type,
+ event_data=None,
instance, **kwa
@@ -295,32 +295,37 @@
a=None, instance
+=None
, **kwargs):%0A
@@ -313,32 +313,75 @@
one, **kwargs):%0A
+ if instance or not event_data:%0A
return s
@@ -411,24 +411,28 @@
+
+
event_type,%0A
@@ -431,16 +431,20 @@
t_type,%0A
+
@@ -478,16 +478,138 @@
*kwargs)
+%0A return self.event_manager.get(%0A event_type,%0A ).from_event_data(event_data=event_data, **kwargs)
%0A%0A de
@@ -626,32 +626,49 @@
elf, event_type,
+ event_data=None,
instance=None,
@@ -966,27 +966,143 @@
ype,
- instance=instance,
+%0A event_data=event_data,%0A instance=instance,%0A
**k
@@ -1140,16 +1140,37 @@
t(event)
+%0A return event
%0A%0A de
|
afc0b8cfb34830154b684732d395e68729e3a42f
|
remove obsolete test
|
corehq/apps/accounting/tests/test_forms.py
|
corehq/apps/accounting/tests/test_forms.py
|
from dateutil.relativedelta import relativedelta
import random
from corehq.apps.accounting.tasks import generate_invoices
from corehq.apps.accounting.forms import AdjustBalanceForm
from corehq.apps.accounting.models import (
CreditAdjustmentReason,
CreditLine,
Invoice,
)
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
class TestAdjustBalanceForm(BaseInvoiceTestCase):
def setUp(self):
super(TestAdjustBalanceForm, self).setUp()
generate_invoices(self.subscription.date_start + relativedelta(months=1))
self.invoice = Invoice.objects.first()
def tearDown(self):
super(TestAdjustBalanceForm, self).tearDown()
def test_manual_adjustment(self):
original_balance = self.invoice.balance
adjustment_amount = random.randint(1, 5)
adjust_balance_form = AdjustBalanceForm(
self.invoice,
{
'adjustment_type': 'credit',
'custom_amount': adjustment_amount,
'method': CreditAdjustmentReason.MANUAL,
'note': 'some text',
'invoice_id': self.invoice.id,
}
)
self.assertTrue(adjust_balance_form.is_valid())
adjust_balance_form.adjust_balance()
self.assertEqual(original_balance - adjustment_amount, self.invoice.balance)
def test_transfer_credit_with_credit(self):
original_credit_balance = random.randint(5, 10)
CreditLine.add_credit(
original_credit_balance,
account=self.subscription.account,
subscription=self.subscription,
)
original_balance = self.invoice.balance
adjustment_amount = random.randint(1, 5)
adjust_balance_form = AdjustBalanceForm(
self.invoice,
{
'adjustment_type': 'credit',
'custom_amount': adjustment_amount,
'method': CreditAdjustmentReason.TRANSFER,
'note': 'some text',
'invoice_id': self.invoice.id,
}
)
self.assertTrue(adjust_balance_form.is_valid())
adjust_balance_form.adjust_balance()
self.assertEqual(original_balance - adjustment_amount, self.invoice.balance)
self.assertEqual(original_credit_balance - adjustment_amount, sum(
credit_line.balance
for credit_line in CreditLine.get_credits_for_invoice(self.invoice)
))
def test_transfer_credit_without_credit(self):
original_credit_balance = 0
CreditLine.add_credit(
original_credit_balance,
account=self.subscription.account,
subscription=self.subscription,
)
original_balance = self.invoice.balance
adjustment_amount = random.randint(1, 5)
adjust_balance_form = AdjustBalanceForm(
self.invoice,
{
'adjustment_type': 'credit',
'custom_amount': adjustment_amount,
'method': CreditAdjustmentReason.TRANSFER,
'note': 'some text',
'invoice_id': self.invoice.id,
}
)
self.assertTrue(adjust_balance_form.is_valid())
adjust_balance_form.adjust_balance()
self.assertEqual(original_balance, self.invoice.balance)
self.assertEqual(original_credit_balance, sum(
credit_line.balance
for credit_line in CreditLine.get_credits_for_invoice(self.invoice)
))
def test_transfer_debit_without_credit(self):
original_credit_balance = 0
CreditLine.add_credit(
original_credit_balance,
account=self.subscription.account,
subscription=self.subscription,
)
original_balance = self.invoice.balance
adjustment_amount = random.randint(1, 5)
adjust_balance_form = AdjustBalanceForm(
self.invoice,
{
'adjustment_type': 'debit',
'custom_amount': adjustment_amount,
'method': CreditAdjustmentReason.TRANSFER,
'note': 'some text',
'invoice_id': self.invoice.id,
}
)
self.assertTrue(adjust_balance_form.is_valid())
adjust_balance_form.adjust_balance()
self.assertEqual(original_balance + adjustment_amount, self.invoice.balance)
self.assertEqual(original_credit_balance + adjustment_amount, sum(
credit_line.balance
for credit_line in CreditLine.get_credits_for_invoice(self.invoice)
))
|
Python
| 0.002319
|
@@ -3529,1095 +3529,4 @@
))%0A
-%0A def test_transfer_debit_without_credit(self):%0A original_credit_balance = 0%0A CreditLine.add_credit(%0A original_credit_balance,%0A account=self.subscription.account,%0A subscription=self.subscription,%0A )%0A original_balance = self.invoice.balance%0A adjustment_amount = random.randint(1, 5)%0A%0A adjust_balance_form = AdjustBalanceForm(%0A self.invoice,%0A %7B%0A 'adjustment_type': 'debit',%0A 'custom_amount': adjustment_amount,%0A 'method': CreditAdjustmentReason.TRANSFER,%0A 'note': 'some text',%0A 'invoice_id': self.invoice.id,%0A %7D%0A )%0A self.assertTrue(adjust_balance_form.is_valid())%0A%0A adjust_balance_form.adjust_balance()%0A self.assertEqual(original_balance + adjustment_amount, self.invoice.balance)%0A self.assertEqual(original_credit_balance + adjustment_amount, sum(%0A credit_line.balance%0A for credit_line in CreditLine.get_credits_for_invoice(self.invoice)%0A ))%0A
|
19e5608b2d8d16e2c80390927658e8f322dd10e6
|
Add new encodings. (#3292)
|
speech/google/cloud/speech/encoding.py
|
speech/google/cloud/speech/encoding.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encodings used by the Google Cloud Speech API."""
class Encoding(object):
"""Audio encoding types.
See:
https://cloud.google.com/speech/reference/rest/v1/RecognitionConfig#AudioEncoding
"""
LINEAR16 = 'LINEAR16'
"""LINEAR16 encoding type."""
FLAC = 'FLAC'
"""FLAC encoding type."""
MULAW = 'MULAW'
"""MULAW encoding type."""
AMR = 'AMR'
"""AMR encoding type."""
AMR_WB = 'AMR_WB'
"""AMR_WB encoding type."""
|
Python
| 0
|
@@ -1028,24 +1028,188 @@
MR_WB encoding type.%22%22%22%0A
+%0A OGG_OPUS = 'OGG_OPUS'%0A %22%22%22OGG_OPUS encoding type.%22%22%22%0A%0A SPEEX_WITH_HEADER_BYTE = 'SPEEX_WITH_HEADER_BYTE'%0A %22%22%22SPEEX_WITH_HEADER_BYTE encoding type.%22%22%22%0A
|
ec33d9bfe95eae4f9eecda86374522291603de70
|
version 0.0.2
|
mystarspilot/__init__.py
|
mystarspilot/__init__.py
|
"""
MyStarsPilot - a CLI tool to search your starred Github repositories.
"""
__author__ = 'wolfg1969'
__version__ = '0.0.1'
__license__ = 'MIT'
|
Python
| 0.000003
|
@@ -115,17 +115,17 @@
= '0.0.
-1
+2
'%0A__lice
|
0b82616ca79c9c1fb33ccec8406720160fe053a2
|
Prepend manga name for starkana
|
src/parsers/starkana.py
|
src/parsers/starkana.py
|
#!/usr/bin/python
import re
from parsers.base import SiteParserBase
from util import getSourceCode
class Starkana(SiteParserBase):
#re_getPage = re.compile("<option.*?value=\"([^']*?)\"[^>]*>\s*(\d*)</option>")
re_getMaxPages = re.compile('</select> of <strong>(\d*)')
re_getImage = re.compile('img.*?class="dyn" src="([^"]*)')
re_getChapters = re.compile('<a class="download-link" href="([^"]*)">([^<]*) <em>chapter</em> <strong>(\d*)</strong></a>')
def fixFormatting(self, s):
p = re.compile( '\s+')
s = p.sub( ' ', s )
s = s.strip().replace(' ', '_')
return s
def parseSite(self):
print('Beginning Starkana check: %s' % self.manga)
url = 'http://starkana.com/manga/%s/%s' % (self.manga[0], self.fixFormatting( self.manga ))
if self.verbose_FLAG:
print(url)
source = getSourceCode(url, self.proxy)
self.chapters = Starkana.re_getChapters.findall(source)
self.chapters.reverse()
if not self.chapters:
raise self.MangaNotFound
lowerRange = 0
for i in range(0, len(self.chapters)):
self.chapters[i] = ('http://starkana.com%s' % self.chapters[i][0], self.chapters[i][2], self.chapters[i][2])
if (not self.auto):
print('(%i) %s' % (i + 1, self.chapters[i][1]))
else:
if (self.lastDownloaded == self.chapters[i][1]):
lowerRange = i + 1
        # range() excludes its upper bound, so len(self.chapters) correctly
        # covers every chapter after lowerRange
upperRange = len(self.chapters)
if (not self.auto):
self.chapters_to_download = self.selectChapters(self.chapters)
else:
if (lowerRange == upperRange):
raise self.NoUpdates
for i in range (lowerRange, upperRange):
self.chapters_to_download .append(i)
self.isPrependMangaName = True
return
def downloadChapter(self, downloadThread, max_pages, url, manga_chapter_prefix, current_chapter):
for page in range(1, max_pages + 1):
if (self.verbose_FLAG):
print(self.chapters[current_chapter][1] + ' | ' + 'Page %s / %i' % (page, max_pages))
self.downloadImage(downloadThread, page, '%s/%s' % (url, page), manga_chapter_prefix)
|
Python
| 0
|
@@ -1951,48 +1951,8 @@
i)%0A%0A
- self.isPrependMangaName = True%0A%0A
|
b2f2fcd9322837ea096a28ae584c8d236232c5a5
|
remove jython2.5 compat code
|
proton-j/src/main/resources/cproton.py
|
proton-j/src/main/resources/cproton.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
The cproton module defines a java implementation of the C interface as
exposed to python via swig. This allows tests defined in python to run
against both the C and Java protocol implementations.
"""
# @todo(kgiusti) dynamically set these via filters in the pom.xml file
PN_VERSION_MAJOR = 0
PN_VERSION_MINOR = 0
from ctypes import *
from cobject import *
from cerror import *
from ccodec import *
from cengine import *
from csasl import *
from cssl import *
from cdriver import *
from cmessenger import *
from cmessage import *
from curl import *
from creactor import *
from chandlers import *
# XXX: this is for compatibility, apparently the version of jython we
# use doesn't have next, we should remove this when we upgrade
_DEF = object()
def next(iter, default=_DEF):
try:
return iter.next()
except StopIteration:
if default is _DEF:
raise
else:
return default
|
Python
| 0.000001
|
@@ -1390,335 +1390,4 @@
t *%0A
-%0A# XXX: this is for compatibility, apparently the version of jython we%0A# use doesn't have next, we should remove this when we upgrade%0A%0A_DEF = object()%0A%0Adef next(iter, default=_DEF):%0A try:%0A return iter.next()%0A except StopIteration:%0A if default is _DEF:%0A raise%0A else:%0A return default%0A
|
511f422d61c97eb4d79b90c1dfe6baee40d1fe39
|
update srxlo unit test with inactive: term testing
|
tests/lib/srxlo_test.py
|
tests/lib/srxlo_test.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for Srxlo rendering module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from capirca.lib import naming
from capirca.lib import policy
from capirca.lib import srxlo
import mock
GOOD_HEADER_1 = """
header {
comment:: "this is a test acl"
target:: srxlo test-filter inet6
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: icmpv6
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
protocol:: icmpv6
icmp-type:: destination-unreachable
action:: accept
}
"""
SUPPORTED_TOKENS = {
'action',
'address',
'comment',
'counter',
'destination_address',
'destination_address_exclude',
'destination_port',
'destination_prefix',
'destination_prefix_except',
'dscp_except',
'dscp_match',
'dscp_set',
'ether_type',
'expiration',
'forwarding_class',
'forwarding_class_except',
'fragment_offset',
'hop_limit',
'icmp_code',
'icmp_type',
'stateless_reply',
'logging',
'loss_priority',
'name',
'next_ip',
'option',
'owner',
'packet_length',
'platform',
'platform_exclude',
'policer',
'port',
'precedence',
'protocol',
'protocol_except',
'qos',
'routing_instance',
'source_address',
'source_address_exclude',
'source_port',
'source_prefix',
'source_prefix_except',
'traffic_class_count',
'traffic_type',
'translated',
'ttl',
'verbatim',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request',
'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request',
'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
'option': {'established',
'first-fragment',
'inactive',
'is-fragment',
'.*', # not actually a lex token!
'sample',
'tcp-established',
'tcp-initial'}
}
# Print a info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
class SRXloTest(unittest.TestCase):
def setUp(self):
self.naming = mock.create_autospec(naming.Naming)
def testIcmpv6(self):
output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1,
self.naming), EXP_INFO))
self.failUnless('next-header icmp6;' in output,
'missing or incorrect ICMPv6 specification')
def testIcmpv6Type(self):
output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2,
self.naming), EXP_INFO))
self.failUnless('next-header icmp6;' in output,
'missing or incorrect ICMPv6 specification')
self.failUnless('icmp-type 1;' in output,
'missing or incorrect ICMPv6 type specification')
def testBuildTokens(self):
# self.naming.GetServiceByProto.side_effect = [['25'], ['26']]
pol1 = srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1,
self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.maxDiff = None
self.assertEquals(st, SUPPORTED_TOKENS)
self.assertEquals(sst, SUPPORTED_SUB_TOKENS)
def testBuildWarningTokens(self):
pol1 = srxlo.SRXlo(policy.ParsePolicy(
GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEquals(st, SUPPORTED_TOKENS)
self.assertEquals(sst, SUPPORTED_SUB_TOKENS)
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -1210,16 +1210,117 @@
pt%0A%7D%0A%22%22%22
+%0AGOOD_TERM_3 = %22%22%22%0Aterm good-term-3 %7B%0A protocol:: icmpv6%0A action:: accept%0A option:: inactive%0A%7D%0A%22%22%22
%0A%0ASUPPOR
@@ -5501,17 +5501,17 @@
OD_TERM_
-1
+3
, self.n
@@ -5656,16 +5656,272 @@
OKENS)%0A%0A
+ def testInactiveTerm(self):%0A output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3,%0A self.naming), EXP_INFO))%0A self.failUnless('inactive: term good-term-3 %7B' in output, output)%0A
%0Aif __na
|
5a71c7b3c98f17da619f630f4fe201da1efbee17
|
fix indentation of section checks (should be outside of the loop)
|
rowprocsv.py
|
rowprocsv.py
|
"""
Module for reading and exporting csv files exported from Concept2 RowPro
"""
import datetime
import tcx
class RowProCSV:
HEADER_SUMMARY = 'Date,TotalTime,TotalDistance,'
FIELDS_SUMMARY = [
'date', 'total_time', 'total_distance', 'avg_pace', 'unit', 'origin', 'total_cals', 'duty_cycle', 'type', 'format',
'slide', 'session_id', 'rowfile_id', 'avg_hr', 'last_hr', 'offset'
]
HEADER_SAMPLES = 'Time,Distance,Pace,Watts,Cals,SPM,HR,DutyCycle,Rowfile_Id'
FIELDS_SAMPLES = ['time', 'distance', 'pace', 'watts', 'cals', 'spm', 'hr', 'duty_cycle', 'rowfile_id']
    def __init__(self, filename):
        # instance attribute, so parsed data is not shared between instances
        self.data = {
            'samples': [],
        }
        lines = []
try:
with open(filename, 'r') as fp:
lines = fp.read().split("\r\n")
except IOError as e:
print 'Could not read file {}: {}'.format(filename, e)
summary_found = False
samples_found = False
while len(lines):
line = lines.pop(0)
if not line:
continue
if line.startswith(self.HEADER_SUMMARY):
line = lines.pop(0)
summary_data = line.split(',')
if len(summary_data) != len(self.FIELDS_SUMMARY):
print 'Warning: summary line only has {} fields, {} expected'.format(len(summary_data),
len(self.FIELDS_SUMMARY))
for field in self.FIELDS_SUMMARY:
if len(summary_data):
self.data[field] = summary_data.pop(0)
summary_found = True
continue
elif line.startswith(self.HEADER_SAMPLES):
while len(lines):
line = lines.pop(0)
sample_data = line.split(',')
sample = {}
for field in self.FIELDS_SAMPLES:
sample[field] = sample_data.pop(0) if len(sample_data) else None
self.data['samples'].append(sample)
samples_found = True
break
if not summary_found:
print 'Warning: summary section not found in file'
if not samples_found:
print 'Warning: samples section not found in file'
def get_data(self):
return self.data
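# Hypothetical usage sketch (filename assumed):
#     parser = RowProCSV('rowpro_export.csv')
#     data = parser.get_data()
#     for sample in data['samples']:
#         print sample['time'], sample['distance']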
|
Python
| 0
|
@@ -2177,36 +2177,32 @@
break%0A%0A
-
if not summary_f
@@ -2199,36 +2199,32 @@
summary_found:%0A
-
prin
@@ -2274,28 +2274,24 @@
le'%0A
-
if not sampl
@@ -2292,36 +2292,32 @@
samples_found:%0A
-
prin
|
f3aa9bc3a58df97d79850cc23011a7b119ddf49f
|
add get_masks() to environment
|
reliefparser/environment/environment.py
|
reliefparser/environment/environment.py
|
__author__ = 'max'
import numpy as np
NEGTIVE_REWARD = -1
class Environment(object):
def __init__(self, heads, marks):
assert heads.ndim == 2
assert marks.ndim == 2
self.__batch_size = heads.shape[0]
self.__length = heads.shape[1]
self.__sizes = marks.sum(axis=1)
self.__marks = np.copy(marks)
self.__rewards = np.zeros((self.__batch_size, self.__length, self.__length), dtype=np.float32)
for b in xrange(self.__batch_size):
self.__rewards[b, 0] = NEGTIVE_REWARD
for i in xrange(1, self.__length):
if self.__marks[b, i]:
self.__rewards[b, i] = (1 - self.__marks[b]) * NEGTIVE_REWARD
self.__rewards[b, i, heads[b, i]] = 1.0
else:
self.__rewards[b, i] = NEGTIVE_REWARD
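        # summary of the reward tensor built above: rewards[b, i, j] is the
        # reward for attaching token i to head j; 1.0 at the gold head, 0 for
        # other real tokens, and NEGTIVE_REWARD for padding columns, row 0,
        # and unmarked rows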
def take_action(self, acts):
# each act is between [0, 2n-1]
assert acts.shape == (self.__batch_size,)
assert (acts >= 0).all()
assert (acts < 2 * self.__length).all()
# [0, n-1] as left acts, [n, 2n-1] as right acts
is_lefts = acts < self.__length
heads = acts % self.__length
children = np.zeros_like(heads)
rewards = np.zeros_like(heads, dtype=np.float32)
for b in xrange(self.__batch_size):
head = heads[b]
if self.__marks[b, head]:
child = self.__find_left(b, head) if is_lefts[b] else self.__find_right(b, head)
children[b] = child
if child >= 0:
rewards[b] = self.__rewards[b, child, head]
self.__marks[b, child] = 0
else:
rewards[b] = NEGTIVE_REWARD
else:
rewards[b] = NEGTIVE_REWARD
heads[b] = -1
children[b] = -1
indexes, lefts, rights = self.get_indexes()
return rewards, heads, children, indexes, lefts, rights
def get_indexes(self):
lefts = np.zeros([self.__batch_size, self.__length], dtype=np.int32)
rights = np.zeros_like(lefts)
indexes = np.zeros_like(lefts)
# initialize lefts and rights and indexes to -1
lefts -= 1
rights -= 1
indexes -= 1
for b in xrange(self.__batch_size):
lefts[b, 0] = -1
indexes[b, 0] = 0
rights[b, self.__sizes[b] - 1] = -1
for j in xrange(1, self.__sizes[b]):
k = self.__sizes[b] - j - 1
indexes[b, j] = j
lefts[b, j] = j - 1 if self.__marks[b, j - 1] else lefts[b, j - 1]
rights[b, k] = k + 1 if self.__marks[b, k + 1] else rights[b, k + 1]
for j in xrange(self.__sizes[b]):
if not self.__marks[b, j]:
indexes[b, j] = -1
lefts[b, j] = -1
rights[b, j] = -1
return indexes, lefts, rights
def __find_left(self, b, pos):
left = -1
if pos >= self.__sizes[b]:
return left
for i in xrange(pos - 1, -1, -1):
if self.__marks[b, i]:
left = i
break
return left
def __find_right(self, b, pos):
right = -1
for i in xrange(pos + 1, self.__sizes[b]):
if self.__marks[b, i]:
right = i
break
return right
def display(self):
print self.__length
print self.__sizes
print self.__marks
print self.__rewards
|
Python
| 0.000002
|
@@ -1976,32 +1976,90 @@
lefts, rights%0A%0A
+ def get_masks(self):%0A return self.__marks%0A %0A
def get_inde
|
f47aa8c478db9d6370fa73ae687bedc0e9974f41
|
Load module data on first access
|
nemubot/modulecontext.py
|
nemubot/modulecontext.py
|
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class ModuleContext:
def __init__(self, context, module):
"""Initialize the module context
arguments:
context -- the bot context
module -- the module
"""
if module is not None:
module_name = module.__spec__.name if hasattr(module, "__spec__") else module.__name__
else:
module_name = ""
# Load module configuration if exists
if (context is not None and
module_name in context.modules_configuration):
self.config = context.modules_configuration[module_name]
else:
from nemubot.config.module import Module
self.config = Module(module_name)
self.hooks = list()
self.events = list()
self.debug = context.verbosity > 0 if context is not None else False
from nemubot.hooks import Abstract as AbstractHook
# Define some callbacks
if context is not None:
# Load module data
self.data = context.datastore.load(module_name)
def add_hook(hook, *triggers):
assert isinstance(hook, AbstractHook), hook
self.hooks.append((triggers, hook))
return context.treater.hm.add_hook(hook, *triggers)
def del_hook(hook, *triggers):
assert isinstance(hook, AbstractHook), hook
self.hooks.remove((triggers, hook))
return context.treater.hm.del_hooks(*triggers, hook=hook)
def subtreat(msg):
yield from context.treater.treat_msg(msg)
def add_event(evt, eid=None):
return context.add_event(evt, eid, module_src=module)
def del_event(evt):
return context.del_event(evt, module_src=module)
def send_response(server, res):
if server in context.servers:
if res.server is not None:
return context.servers[res.server].send_response(res)
else:
return context.servers[server].send_response(res)
else:
module.logger.error("Try to send a message to the unknown server: %s", server)
return False
else: # Used when using outside of nemubot
from nemubot.tools.xmlparser import module_state
self.data = module_state.ModuleState("nemubotstate")
def add_hook(hook, *triggers):
assert isinstance(hook, AbstractHook), hook
self.hooks.append((triggers, hook))
def del_hook(hook, *triggers):
assert isinstance(hook, AbstractHook), hook
self.hooks.remove((triggers, hook))
def subtreat(msg):
return None
def add_event(evt, eid=None):
return context.add_event(evt, eid, module_src=module)
def del_event(evt):
return context.del_event(evt, module_src=module)
def send_response(server, res):
module.logger.info("Send response: %s", res)
def save():
context.datastore.save(module_name, self.data)
self.add_hook = add_hook
self.del_hook = del_hook
self.add_event = add_event
self.del_event = del_event
self.save = save
self.send_response = send_response
self.subtreat = subtreat
def unload(self):
"""Perform actions for unloading the module"""
# Remove registered hooks
for (s, h) in self.hooks:
self.del_hook(h, *s)
# Remove registered events
for e in self.events:
self.del_event(e)
self.save()
|
Python
| 0
|
@@ -1718,26 +1718,24 @@
-# Load module
+def load_
data
+():
%0A
@@ -1743,27 +1743,26 @@
-self.data =
+ return
context
@@ -3066,16 +3066,49 @@
nemubot%0A
+ def load_data():%0A
@@ -3176,19 +3176,18 @@
-self.data =
+ return
mod
@@ -3990,16 +3990,51 @@
.data)%0A%0A
+ self.load_data = load_data%0A
@@ -4229,16 +4229,16 @@
esponse%0A
-
@@ -4264,16 +4264,159 @@
treat%0A%0A%0A
+ @property%0A def data(self):%0A if not hasattr(self, %22_data%22):%0A self._data = self.load_data()%0A return self._data%0A%0A%0A
def
|
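Decoded, this commit replaces the eager `self.data = ...` reads in both branches of `__init__` with a `load_data()` closure and exposes the data through a lazy property, roughly:

```python
# in __init__, each branch now defines a closure instead of loading eagerly
def load_data():
    return context.datastore.load(module_name)
# ... later, registered alongside the other callbacks:
self.load_data = load_data

# and module data is loaded on first attribute access
@property
def data(self):
    if not hasattr(self, "_data"):
        self._data = self.load_data()
    return self._data
```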
27354d34f4f9117c0a3167f4af7d3e3c83d51c59
|
bump version # from 0.6.0 -> 0.7.0
|
cloudfeaster/__init__.py
|
cloudfeaster/__init__.py
|
__version__ = '0.6.0'
|
Python
| 0
|
@@ -14,9 +14,9 @@
'0.
-6
+7
.0'%0A
|
38d298a81aa8fcd85b16b3879c1665085e5450be
|
Add description where student should add logic
|
exercises/control_flow/prime.py
|
exercises/control_flow/prime.py
|
#!/bin/python
def is_prime(integer):
"""Determines weather integer is prime, returns a boolean value"""
for i in range(2, integer):
if integer % i == 0:
return False
return True
print("Should be False (0): %r" % is_prime(0))
print("Should be False (1): %r" % is_prime(1))
print("Should be True (2): %r" % is_prime(2))
print("Should be False (8): %r" % is_prime(8))
print("Should be True (17): %r"% is_prime(17))
# Your code below:
|
Python
| 0.000086
|
@@ -103,16 +103,76 @@
alue%22%22%22%0A
+ # add logic here to make sure number %3C 2 are not prime%0A%0A
for
|
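The hunk inserts only a hint comment after the docstring, but it flags a real bug: for 0 and 1 the `range(2, integer)` loop never runs, so `is_prime` returns True. One possible student completion (hypothetical, not part of this commit) would be:

```python
def is_prime(integer):
    """Determines whether integer is prime, returns a boolean value"""
    # add logic here to make sure number < 2 are not prime
    if integer < 2:  # hypothetical fix: 0 and 1 are not prime
        return False
    for i in range(2, integer):
        if integer % i == 0:
            return False
    return True
```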
016aa4cccc572555d7d3feb7cde974aa8cb2fe25
|
Add docstring to snippet
|
Snippets/compact_gpos.py
|
Snippets/compact_gpos.py
|
import argparse
from collections import defaultdict
import csv
import time
import sys
from pathlib import Path
from typing import Any, Iterable, List, Optional, Sequence, Tuple
from fontTools.ttLib import TTFont
from fontTools.otlLib.optimize import compact
MODES = [str(c) for c in range(1, 10)]
def main(args: Optional[List[str]] = None):
parser = argparse.ArgumentParser()
parser.add_argument("fonts", type=Path, nargs="+", help="Path to TTFs.")
parsed_args = parser.parse_args(args)
runtimes = defaultdict(list)
rows = []
font_path: Path
for font_path in parsed_args.fonts:
font = TTFont(font_path)
if "GPOS" not in font:
print(f"No GPOS in {font_path.name}, skipping.", file=sys.stderr)
continue
size_orig = len(font.getTableData("GPOS")) / 1024
print(f"Measuring {font_path.name}...", file=sys.stderr)
fonts = {}
font_paths = {}
sizes = {}
for mode in MODES:
print(f" Running mode={mode}", file=sys.stderr)
fonts[mode] = TTFont(font_path)
before = time.perf_counter()
compact(fonts[mode], mode=str(mode))
runtimes[mode].append(time.perf_counter() - before)
font_paths[mode] = (
font_path.parent
/ "compact"
/ (font_path.stem + f"_{mode}" + font_path.suffix)
)
font_paths[mode].parent.mkdir(parents=True, exist_ok=True)
fonts[mode].save(font_paths[mode])
fonts[mode] = TTFont(font_paths[mode])
sizes[mode] = len(fonts[mode].getTableData("GPOS")) / 1024
print(f" Runtimes:", file=sys.stderr)
for mode, times in runtimes.items():
print(
f" {mode:10} {' '.join(f'{t:5.2f}' for t in times)}",
file=sys.stderr,
)
# Bonus: measure WOFF2 file sizes.
print(f" Measuring WOFF2 sizes", file=sys.stderr)
size_woff_orig = woff_size(font, font_path) / 1024
sizes_woff = {
mode: woff_size(fonts[mode], font_paths[mode]) / 1024 for mode in MODES
}
rows.append(
(
font_path.name,
size_orig,
size_woff_orig,
*flatten(
(
sizes[mode],
pct(sizes[mode], size_orig),
sizes_woff[mode],
pct(sizes_woff[mode], size_woff_orig),
)
for mode in MODES
),
)
)
write_csv(rows)
def woff_size(font: TTFont, path: Path) -> int:
font.flavor = "woff2"
woff_path = path.with_suffix(".woff2")
font.save(woff_path)
return woff_path.stat().st_size
def write_csv(rows: List[Tuple[Any]]) -> None:
sys.stdout.reconfigure(encoding="utf-8")
sys.stdout.write("\uFEFF")
writer = csv.writer(sys.stdout, lineterminator="\n")
writer.writerow(
[
"File",
"Original GPOS Size",
"Original WOFF2 Size",
*flatten(
(
f"mode={mode}",
f"Change {mode}",
f"mode={mode} WOFF2 Size",
f"Change {mode} WOFF2 Size",
)
for mode in MODES
),
]
)
for row in rows:
writer.writerow(row)
def pct(new: float, old: float) -> float:
return -(1 - (new / old))
def flatten(seq_seq: Iterable[Iterable[Any]]) -> List[Any]:
return [thing for seq in seq_seq for thing in seq]
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -1,12 +1,813 @@
+#! /usr/bin/env python3%0A%0A%22%22%22%0ASample script to use the otlLib.optimize.gpos functions to compact GPOS tables%0Aof existing fonts. This script takes one or more TTF files as arguments and%0Awill create compacted copies of the fonts using all available modes of the GPOS%0Acompaction algorithm. For each copy, it will measure the new size of the GPOS%0Atable and also the new size of the font in WOFF2 format. All results will be%0Aprinted to stdout in CSV format, so the savings provided by the algorithm in%0Aeach mode can be inspected.%0A%0AThis was initially made to debug the algorithm but can also be used to choose%0Aa mode value for a specific font (trade-off between bytes saved in TTF format%0Avs more bytes in WOFF2 format and more subtables).%0A%0ARun:%0A%0Apython Snippets/compact_gpos.py MyFont.ttf %3E results.csv%0A%22%22%22%0A%0A
import argpa
|
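The escaped hunk prepends a shebang and a module docstring; decoded it reads:

```python
#! /usr/bin/env python3

"""
Sample script to use the otlLib.optimize.gpos functions to compact GPOS tables
of existing fonts. This script takes one or more TTF files as arguments and
will create compacted copies of the fonts using all available modes of the GPOS
compaction algorithm. For each copy, it will measure the new size of the GPOS
table and also the new size of the font in WOFF2 format. All results will be
printed to stdout in CSV format, so the savings provided by the algorithm in
each mode can be inspected.

This was initially made to debug the algorithm but can also be used to choose
a mode value for a specific font (trade-off between bytes saved in TTF format
vs more bytes in WOFF2 format and more subtables).

Run:

python Snippets/compact_gpos.py MyFont.ttf > results.csv
"""
```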
d5b326d8d368d2ac75c6e078572df8c28704c163
|
Use the app string version of foreign keying. It prevents a circular import.
|
vcs/models.py
|
vcs/models.py
|
from django.db import models
class Activity(models.Model):
group = models.CharField(max_length=4)
grouptype = models.TextField()
groupdetail = models.TextField()
details = models.TextField()
disabled = models.BooleanField()
time = models.DecimalField(decimal_places=2, max_digits=10)
unique_together = (("group", "grouptype", "disabled", "time"),)
class ActivityEntry(models.Model):
from timetracker.tracker.models import Tbluser
user = models.ManyToManyField(
Tbluser,
related_name="user_foreign"
)
activity = models.ManyToManyField(
Activity,
related_name="activity_foreign"
)
amount = models.BigIntegerField()
def time(self):
return self.activity.time * self.amount
|
Python
| 0
|
@@ -410,59 +410,8 @@
l):%0A
- from timetracker.tracker.models import Tbluser%0A
@@ -453,15 +453,25 @@
+'tracker.
Tbluser
+'
,%0A
|
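Decoded, the commit drops the inline import and switches the relation to Django's lazy string reference, which breaks the circular import named in the subject:

```python
class ActivityEntry(models.Model):
    user = models.ManyToManyField(
        'tracker.Tbluser',          # app-label string instead of the imported class
        related_name="user_foreign"
    )
```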
8a4d9d18f29abacbce5216f8b4dad07fa879162e
|
print explicit << DRY RUN >> in summary
|
redis-migrate/migrate.py
|
redis-migrate/migrate.py
|
#!/usr/bin/env python
import os
import argparse
from urlparse import urlparse
import redis
from termcolor import cprint
DEBUG = os.environ.get("DEBUG")
DRY_RUN = os.environ.get("DRY_RUN")
CLEAN_UP = os.environ.get("CLEAN_UP")
if os.environ.get("REPLACE_DST_KEYS"):
REPLACE_DST_KEYS = True
else:
REPLACE_DST_KEYS = False
def connect_redis(conn_dict):
conn = redis.StrictRedis(host=conn_dict['host'],
port=conn_dict['port'],
db=conn_dict['db'])
return conn
def conn_string_type(string):
format = 'redis://<host>:<port>/<db>'
url = urlparse(string)
if url.scheme != "redis":
raise argparse.ArgumentTypeError('incorrect format, should be: %s' % format)
host = url.hostname
if url.port:
port = url.port
else:
port = "6379"
if url.path:
db = url.path.strip("/")
else:
db = "0"
try:
port = int(port)
db = int(db)
except ValueError:
raise argparse.ArgumentTypeError('incorrect format, should be: %s' % format)
return {'host': host,
'port': port,
'db': db}
def migrate_redis(source, destination):
if DRY_RUN:
cprint("Migrating %s:%s/%s to %s:%s/%s << DRY RUN >>..." % (source['host'], source['port'], source['db'], destination['host'], destination['port'], destination['db']), 'yellow')
else:
cprint("Migrating %s:%s/%s to %s:%s/%s..." % (source['host'], source['port'], source['db'], destination['host'], destination['port'], destination['db']), 'green')
src = connect_redis(source)
dst = connect_redis(destination)
keys = src.keys('*')
errors = 0
for key in keys:
ttl = src.ttl(key)
# we handle TTL command returning -1 (no expire) or -2 (no key)
if ttl < 0:
ttl = 0
if DEBUG:
cprint("Dumping key: %s with TTL %ss" % (key, ttl), 'yellow')
value = src.dump(key)
if DEBUG:
cprint("Restoring key: %s with TTL %sms" % (key, ttl * 1000), 'yellow')
if not DRY_RUN:
try:
# TTL command returns the key's ttl value in seconds but restore expects it in milliseconds!
dst.restore(key, ttl * 1000, value, replace=REPLACE_DST_KEYS)
except (redis.exceptions.ResponseError, redis.exceptions.DataError):
cprint("! Failed to restore key: %s" % key, 'red')
errors += 1
continue # Don't delete the key in src if it failed to restore - move on to the next iteration
if CLEAN_UP:
src.delete(key)
if DRY_RUN:
cprint("Migrated %d keys" % (len(keys) - errors), 'yellow')
else:
cprint("Migrated %d keys" % (len(keys) - errors), 'green')
def run():
parser = argparse.ArgumentParser()
parser.add_argument('source', type=conn_string_type)
parser.add_argument('destination', type=conn_string_type)
options = parser.parse_args()
migrate_redis(options.source, options.destination)
if __name__ == '__main__':
run()
|
Python
| 0.000004
|
@@ -2691,32 +2691,46 @@
Migrated %25d keys
+ %3C%3C DRY RUN %3E%3E
%22 %25 (len(keys) -
|
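Decoded, the hunk appends the marker to the dry-run branch of the summary only:

```python
if DRY_RUN:
    cprint("Migrated %d keys << DRY RUN >>" % (len(keys) - errors), 'yellow')
else:
    cprint("Migrated %d keys" % (len(keys) - errors), 'green')
```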
3b001eac78349e1e3f6235b14def0fa6752f6fba
|
add more dedup special cases
|
models/dedup_special_cases.py
|
models/dedup_special_cases.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
data = [
{
"main_profile": ("Hadley Wickham", "cran:reshape"),
"people_to_merge": [
("Hadley Wickham", "cran:GGally")
]
},
{
"main_profile": ("Barry Rowlingson", "cran:geonames"),
"people_to_merge": [
("B. S. Rowlingson", "cran:lgcp"),
("Barry Rowlingson", "cran:stpp")
]
},
{
"main_profile": ("Thomas Robitaille", "pypi:ATpy"),
"people_to_merge": [
("Thomas Robitaille", "pypi:PyAVM"),
("Tom Robitaille", "pypi:spectral-cube")
]
},
{
"main_profile": (u"Eduard Szöcs", "cran:webchem"),
"people_to_merge": [
("Eduard Szoecs", "cran:fortunes")
]
},
{
"main_profile": (u"Daniel Münch", "cran:webchem"),
"people_to_merge": [
("Daniel Muench", "cran:webchem")
]
}
]
|
Python
| 0
|
@@ -759,15 +759,508 @@
%0A%09%09%5D%09%0A%09%7D
-%09%09%09%09
+,%09%0A%09%7B%0A%09%09%22main_profile%22: (u%22Daniel M%C3%BCnch%22, %22cran:webchem%22),%0A%09%09%22people_to_merge%22: %5B%0A%09%09%09(%22Daniel Muench%22, %22cran:webchem%22)%0A%09%09%5D%09%0A%09%7D,%09%0A%09%7B%0A%09%09%22main_profile%22: (u%22Zhian N. Kamvar%22, %22cran:poppr%22),%0A%09%09%22people_to_merge%22: %5B%0A%09%09%09(%22Zhian Kamvar%22, %22cran:mmod%22)%0A%09%09%5D%09%0A%09%7D,%09%0A%09%7B%0A%09%09%22main_profile%22: (u%22Min RK%22, %22pypi:ggplot%22),%0A%09%09%22people_to_merge%22: %5B%0A%09%09%09(u%22Min Ragan-Kelley%22, %22pypi:pyzmq%22)%0A%09%09%5D%09%0A%09%7D,%09%0A%09%7B%0A%09%09%22main_profile%22: (u%22Benjamin M. Taylor%22, %22cran:cruts%22),%0A%09%09%22people_to_merge%22: %5B%0A%09%09%09(u%22B. M. Taylor%22, %22cran:lgcp%22)%0A%09%09%5D%09%0A%09%7D
%09%0A%5D
|
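Unescaping the tabs (`%09`), newlines (`%0A`) and braces, the entries appended to `data` are the three below; the existing Daniel Münch entry also appears in the hunk only because the trailing whitespace around it changed:

```python
{
    "main_profile": (u"Zhian N. Kamvar", "cran:poppr"),
    "people_to_merge": [
        ("Zhian Kamvar", "cran:mmod")
    ]
},
{
    "main_profile": (u"Min RK", "pypi:ggplot"),
    "people_to_merge": [
        (u"Min Ragan-Kelley", "pypi:pyzmq")
    ]
},
{
    "main_profile": (u"Benjamin M. Taylor", "cran:cruts"),
    "people_to_merge": [
        (u"B. M. Taylor", "cran:lgcp")
    ]
}
```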
7295b55eb2967e87b909891ffee4b8a177a874e6
|
return 404 if no application exists
|
expenditure_application/main.py
|
expenditure_application/main.py
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, print_function, division
import time
import logging
from decimal import Decimal, ROUND_FLOOR
import tornado.web
import db
from collection import DictObject
from utils import convert_timestamp_to_utc_datetime, convert_datetime_to_client_timezone, get_current_timestamp
LOGGER = logging.getLogger(__name__)
class ApplicationsHandler(tornado.web.RequestHandler):
def get(self):
self.render('applications.html', applications=list_applications())
class ApplicationHandler(tornado.web.RequestHandler):
def get(self, application_id):
self.render('application.html', application=get_application(application_id))
class ApplicationApprovalHandler(tornado.web.RequestHandler):
def get(self, application_id):
approve_application(application_id)
self.write('Thanks! :)')
class ApplicationRejectionHandler(tornado.web.RequestHandler):
def get(self, application_id):
reject_application(application_id)
self.write('Thanks! :(')
def new_application(title, freight, items, comment=None):
conn = db.get_connection()
cur = db.get_cursor(conn)
subtotal = sum(item.total for item in items)
for item in items:
item.total = item.price * item.quantity - item.discount
try:
cur.execute('''
INSERT INTO expenditure_application(title, subtotal, freight, comment, created_at)
VALUES (?, ?, ?, ?, ?)
''', (title, subtotal, int(freight), comment, int(time.time())))
application_id = cur.lastrowid
values = tuple((application_id, item.title, item.link, item.price, item.quantity, item.discount, item.total) for item in items)
cur.executemany('''
INSERT INTO expenditure_application_item(application_id, title, link, price, quantity, discount, total)
VALUES(?, ?, ?, ?, ?, ?, ?)
''', values)
except Exception as e:
        LOGGER.exception('Got exception when creating application: {}'.format(e.message))
conn.rollback()
else:
LOGGER.info('Create application successfully!')
conn.commit()
def list_applications():
conn = db.get_connection()
cur = db.get_cursor(conn)
applications = cur.execute('SELECT * FROM expenditure_application').fetchall()
application_items = cur.execute('SELECT * FROM expenditure_application_item').fetchall()
application_id2items = {}
for item in application_items:
application_id2items.setdefault(item.application_id, []).append(item)
for application in applications:
application.line_items = application_id2items.get(application.id, [])
normalize_applications(applications)
return applications
def get_application(application_id):
conn = db.get_connection()
cur = conn.cursor()
application = cur.execute('SELECT * FROM expenditure_application WHERE id=?', (application_id, )).fetchone()
if application:
application.line_items = cur.execute('SELECT * FROM expenditure_application_item WHERE application_id=?', (application_id, )).fetchall()
normalize_applications([application])
return application
def normalize_applications(applications):
for application in applications:
application.subtotal = (Decimal(application.subtotal) / 100).quantize(Decimal('0.01'), rounding=ROUND_FLOOR)
application.freight = (Decimal(application.freight) / 100).quantize(Decimal('0.01'), rounding=ROUND_FLOOR)
application.total = application.subtotal + application.freight
application.created_at = convert_datetime_to_client_timezone(convert_timestamp_to_utc_datetime(application.created_at))
if application.approved_at:
application.approved_at = convert_datetime_to_client_timezone(convert_timestamp_to_utc_datetime(application.approved_at))
if application.rejected_at:
application.rejected_at = convert_datetime_to_client_timezone(convert_timestamp_to_utc_datetime(application.rejected_at))
def approve_application(application_id):
conn = db.get_connection()
cur = conn.cursor()
cur.execute('UPDATE expenditure_application SET approved_at=? WHERE id=? AND approved_at IS NULL AND rejected_at IS NULL', (get_current_timestamp(),
application_id))
conn.commit()
def reject_application(application_id):
conn = db.get_connection()
cur = conn.cursor()
cur.execute('UPDATE expenditure_application SET rejected_at=? WHERE id=? AND approved_at IS NULL AND rejected_at IS NULL', (get_current_timestamp(),
application_id))
conn.commit()
if __name__ == '__main__':
from pprint import pprint
new_application('title', 10000, [
DictObject(title='item-title', link='item-link', price=231000, quantity=1, discount=0, total=231000),
DictObject(title='item-title2', link='item-link2', price=21000, quantity=2, discount=0, total=42000),
])
pprint(list_applications())
|
Python
| 0.002439
|
@@ -598,32 +598,165 @@
pplication_id):%0A
+ application = get_application(application_id)%0A if not application:%0A self.send_error(404)%0A else:%0A
self.ren
@@ -791,20 +791,16 @@
ication=
-get_
applicat
@@ -802,32 +802,16 @@
lication
-(application_id)
)%0A%0A%0Aclas
@@ -3219,24 +3219,28 @@
.fetchall()%0A
+
normaliz
|
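Decoded, the handler now checks the lookup result before rendering; the last hunk also indents `normalize_applications([application])` under the `if application:` guard in `get_application`:

```python
class ApplicationHandler(tornado.web.RequestHandler):
    def get(self, application_id):
        application = get_application(application_id)
        if not application:
            self.send_error(404)
        else:
            self.render('application.html', application=application)
```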
950b99ab173a4e5bf86a6df2dcbcf14e2e28915c
|
Add function parentheses
|
clowder/model/project.py
|
clowder/model/project.py
|
"""Model representation of clowder.yaml project"""
import os
from termcolor import colored
from clowder.utility.git_utilities import (
git_clone_url_at_path,
git_current_sha,
git_litter,
git_status,
git_sync,
git_sync_version,
git_validate_repo_state
)
class Project(object):
"""Model class for clowder.yaml project"""
def __init__(self, rootDirectory, project, defaults, remotes):
self.name = project['name']
self.path = project['path']
self.full_path = os.path.join(rootDirectory, self.path)
if 'ref' in project:
self.ref = project['ref']
else:
self.ref = defaults['ref']
if 'remote' in project:
self.remote_name = project['remote']
else:
self.remote_name = defaults['remote']
for remote in remotes:
if remote.name == self.remote_name:
self.remote = remote
def get_yaml(self):
"""Return python object representation for saving yaml"""
return {'name': self.name,
'path': self.path,
'ref': git_current_sha(self.full_path),
'remote': self.remote_name}
def sync(self):
"""Clone project or update latest from upstream"""
self.print_name()
git_path = os.path.join(self.full_path, '.git')
if not os.path.isdir(git_path):
git_clone_url_at_path(self._get_remote_url(), self.full_path)
else:
git_sync(self.full_path, self.ref)
def sync_version(self, version):
"""Check out fixed version of project"""
self.print_name()
git_path = os.path.join(self.full_path, '.git')
if not os.path.isdir(git_path):
git_clone_url_at_path(self._get_remote_url(), self.full_path)
git_sync_version(self.full_path, version, self.ref)
def status(self):
"""Print git status of project"""
git_status(self.full_path, self.path)
def print_name(self):
"""Project relative project path in green"""
project_output = colored(self.path, 'green')
print(project_output)
def litter(self):
"""Discard changes in project's repository"""
git_litter(self.full_path)
def _get_remote_url(self):
"""Return full remote url for project"""
url_prefix = self.remote.get_url_prefix
remote_url = url_prefix + self.name + ".git"
return remote_url
def validate(self):
"""Validate status of project's repository"""
git_validate_repo_state(self.full_path)
|
Python
| 0.998877
|
@@ -2390,16 +2390,18 @@
l_prefix
+()
%0A
|
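The two-character hunk decodes to a missing-call fix in `_get_remote_url`:

```python
url_prefix = self.remote.get_url_prefix()
```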
7a4819afdb8d4817da59040390c8f8ebc9a2bf3c
|
Replace messy list comprehensions with list function
|
conference_scheduler/tests/conftest.py
|
conference_scheduler/tests/conftest.py
|
import pytest
import numpy as np
from conference_scheduler.resources import (
Person, Slot, EventType, Event, Role, ScheduledItem, Shape
)
from conference_scheduler import scheduler
from conference_scheduler.lp_problem import utils as lpu
@pytest.fixture(scope="module")
def people():
return {
'alice': Person(name='Alice', max_chair_sessions=3),
'bob': Person(name='Bob', max_chair_sessions=3),
'charlie': Person(name='Charlie')
}
@pytest.fixture(scope="module")
def event_types():
return {
'workshop': EventType(name='workshop'),
'talk': EventType(name='talk')
}
@pytest.fixture(scope="module")
def slots():
return (
Slot(venue='Room 1', starts_at='15-Sep-2016 09:30', duration=30,
capacity=50, session="01 Morning A"),
Slot(venue='Room 1', starts_at='15-Sep-2016 10:00', duration=30,
capacity=50, session="01 Morning A"),
Slot(venue='Room 1', starts_at='15-Sep-2016 11:30', duration=30,
capacity=50, session="01 Morning A"),
Slot(venue='Room 1', starts_at='15-Sep-2016 12:00', duration=30,
capacity=10, session="02 Afternoon A"),
Slot(venue='Room 1', starts_at='15-Sep-2016 12:30', duration=30,
capacity=50, session="02 Afternoon A"),
Slot(venue='Room 2', starts_at='15-Sep-2016 09:30', duration=90,
capacity=200, session="03 Morning B"),
Slot(venue='Room 2', starts_at='15-Sep-2016 11:30', duration=90,
capacity=200, session="04 Afternoon B")
)
@pytest.fixture(scope="module")
def roles():
return {
'speaker': Role(name='speaker'),
'leader': Role(name='leader'),
'mentor': Role(name='mentor')
}
@pytest.fixture(scope="module")
def events(event_types, roles, people, slots):
event1 = Event(
name='Talk 1',
event_type=event_types['talk'],
duration=30,
roles={roles['speaker']: people['alice']},
tags=['community'],
unavailability=[slots[0], slots[1]],
demand=30)
event2 = Event(
name='Talk 2',
event_type=event_types['talk'],
duration=30,
roles={roles['speaker']: people['bob']},
tags=['community', 'documentation'],
unavailability=[slots[2], slots[3], event1],
demand=500)
event3 = Event(
name='Workshop 1',
event_type=event_types['workshop'],
duration=60,
roles={roles['leader']: people['charlie']},
tags=['documentation'],
unavailability=[],
demand=20)
return (event1, event2, event3)
@pytest.fixture(scope='module')
def shape(events, slots):
return Shape(len(events), len(slots))
@pytest.fixture(scope='module')
def tag_array(events):
return lpu.tag_array(events)
@pytest.fixture(scope='module')
def session_array(slots):
return lpu.session_array(slots)
@pytest.fixture(scope='module')
def X(shape):
return lpu.variables(shape)
@pytest.fixture(scope='module')
def solution(events, slots):
return [
item for item in scheduler.solution(events, slots)
]
@pytest.fixture(scope='module')
def array(events, slots):
return scheduler.array(events, slots)
@pytest.fixture(scope='module')
def schedule(events, slots):
return [item for item in scheduler.schedule(events, slots)]
@pytest.fixture(scope='module')
def valid_solution():
return (
(0, 2),
(1, 4),
(2, 5)
)
@pytest.fixture(scope='module')
def valid_array():
return np.array([
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0]
])
@pytest.fixture(scope='module')
def valid_schedule(events, slots):
return [
ScheduledItem(event=events[0], slot=slots[2]),
ScheduledItem(event=events[1], slot=slots[4]),
ScheduledItem(event=events[2], slot=slots[5])
]
|
Python
| 0.999868
|
@@ -3070,35 +3070,13 @@
urn
-%5B%0A item for item in
+list(
sche
@@ -3104,22 +3104,17 @@
, slots)
-%0A %5D
+)
%0A%0A%0A@pyte
@@ -3286,26 +3286,13 @@
urn
-%5Bitem for item in
+list(
sche
@@ -3320,17 +3320,17 @@
, slots)
-%5D
+)
%0A%0A%0A@pyte
|
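Decoded, both fixtures collapse to `list()` calls:

```python
@pytest.fixture(scope='module')
def solution(events, slots):
    return list(scheduler.solution(events, slots))


@pytest.fixture(scope='module')
def schedule(events, slots):
    return list(scheduler.schedule(events, slots))
```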
0c8d6b9ef87edbbd9a6d012b5bed94e20d43d722
|
Delete the "stolen" BioPython code
|
vectorPLUS.py
|
vectorPLUS.py
|
import textwrap
######
# START IMPORTANT METHODS
######
# Returns a list (see below) of all matching sequences in the given scaffold
# with a given initial search length.
# [name, (start_index, end_index)]
def tryFindStartCodon(scaffold, sequence, initialSearchLength=10):
searchLength = initialSearchLength - 1
outputList = []
workList = []
while True:
searchLength+=1
outputList = workList
workList = []
for start in substrings(scaffold,sequence[:searchLength]):
workList.append((start,start+searchLength))
if len(workList)==0:
outputList.insert(0,searchLength-1)
return outputList
# Returns a codon sequence for a given RNA sequence
def to_codon(seq):
cDict = codon_dict()
output = ""
s = split_by_n(seq, 3)
for codon in s:
if cDict.get(codon) != None:
if cDict[codon] == "Stop":
break
else:
output += cDict[codon]
return output
# Transcribes DNA into RNA, returning an RNA seq
def transcribe_DNA(seq):
rna = ''
for c in seq:
if c == 'T':
rna += 'U'
else:
rna += c
return rna
# Transcribes RNA back into DNA
def RNA_to_DNA(seq):
dna = ''
for c in seq:
if c == 'U':
dna += 'T'
else:
dna += c
return dna
# Takes a list of (title, nucleotide) tuples and prints as a FASTA file to the given filename
def buildFastaFile(listOfTuples,fileName):
output = ""
if fileName.find(".txt",0) == -1:
fileName += ".txt"
for data in listOfTuples:
output += (">" + data[0] + "\n")
output += textwrap.fill(data[1], 80)
with open(fileName, 'w') as outFile:
outFile.write(output)
######
# START UTILITY METHODS
######
# BioPython's generator for fasta files - returns (name, seq)
# Call using 'with open(file) as f'
def read_fasta(file):
name, seq = None, []
for line in file:
line = line.rstrip()
if line.startswith(">"):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
yield (name, ''.join(seq))
# Generates list of locations of all substrings in seq
def substrings(seq, sub):
start = 0
while True:
start = seq.find(sub, start)
if start == -1:
break
else:
yield start
start += 1
# Generates sequence split by n units
def split_by_n(seq, n):
while seq:
yield seq[:n]
seq = seq[n:]
# Returns a codon dict, used in to_codon
def codon_dict(): return file_to_dict("codons.txt")
# Creates dicts from two-cell tables in txt files
def file_to_dict(file):
mDict = {}
with open(file) as f:
lines = f.readlines()
for l in lines:
mList = l.split()
mDict[mList[0]] = mList[1]
return mDict
######
# START I/O METHODS
######
# Returns a single tuple (head, name) for a given fasta file
# In other words, gets the first fasta entry from a fasta file
def fasta_to_strings(fileName):
with open(fileName) as f:
for name, seq in read_fasta(f):
return (name[1:], seq)
def main():
#
is_DNA = input("Are you entering DNA or RNA? Type 'DNA' or 'RNA': \n")
large_fasta = input("Input scaffold's fasta filename: \n")
small_fasta = input("Input search sequence's filename: \n")
scaffold = fasta_to_strings(large_fasta)
search = fasta_to_strings(small_fasta)
#
if is_DNA == "DNA":
search_seq = transcribe_DNA(search[1].upper())
scaffold_seq = transcribe_DNA(scaffold[1].upper())
elif is_DNA == "RNA":
search_seq = search[1].upper()
scaffold_seq = scaffold[1].upper()
start_codons = tryFindStartCodon(scaffold_seq, search_seq)
# If only ONE result found, fasta outputted automatically
if len(start_codons) == 2:
start_index = start_codons[1][0]
length = start_codons[0]
print("Matching sequence of length " + str(length) +
" found in scaffold at index " + str(start_index+1) + ".")
fileName = input("What would you like to name the output fasta file? \n")
bp = input("How many base pairs upstream would you like to find? \n")
scaffold_seq = RNA_to_DNA(scaffold_seq)
buildFastaFile([(bp + " base pairs upstream of " + search[0]
, scaffold_seq[start_index - int(bp):start_index])], fileName)
print("File " + fileName + " saved.")
#elif len(start_codons) > 2: # If multiple results found, user chooses which fasta file to output
#i = 0
#for start, end in
main()
|
Python
| 0.999484
|
@@ -1836,18 +1836,41 @@
######%0A%0A
-
#
+Poor replacement for the
BioPyth
@@ -1871,18 +1871,16 @@
ioPython
-'s
generat
@@ -1886,82 +1886,23 @@
tor
-for fasta files - returns (name, seq)%0A # Call using 'with open(file) as f'
+written by Jack
%0Adef
@@ -1921,35 +1921,49 @@
file
+Handle
):%0A
-name, seq
+title = None%0A data
= None
-, %5B%5D
%0A
@@ -1979,16 +1979,22 @@
in file
+Handle
:%0A
@@ -1999,142 +1999,140 @@
-line = line.rstrip()%0A if line.startswith(%22%3E%22):%0A if name: yield (name, ''.join(seq))%0A name, seq = line, %5B%5D
+if line%5B0%5D==%22%3E%22:%0A if title != None:%0A yield (title,data)%0A title = line%5B1:%5D%0A data = ''
%0A
@@ -2158,32 +2158,28 @@
-seq.append(
+data +=
line
-)
%0A if
name
@@ -2174,19 +2174,28 @@
%0A if
-nam
+title == Non
e:%0A
@@ -2207,27 +2207,34 @@
eld
-(name, ''.join(seq)
+None%0A yield (title,data
)%0A%0A
|
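Despite the subject, the diff rewrites rather than deletes the parser; decoded verbatim (including its quirks, e.g. yielding None for an empty file), the replacement reads:

```python
# Poor replacement for the BioPython generator written by Jack
def read_fasta(fileHandle):
    title = None
    data = None
    for line in fileHandle:
        if line[0]==">":
            if title != None:
                yield (title,data)
            title = line[1:]
            data = ''
        else:
            data += line
    if title == None:
        yield None
    yield (title,data)
```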
7bf1f8bef973942755cb245d567a34ffac119a3f
|
Revert "demote boot2docker or else bad stuff happens"
|
dusty/preflight.py
|
dusty/preflight.py
|
"""This module contains checks for system dependencies that are run
when Dusty daemon first starts up. Any failed checks should throw an exception
which bubbles up to the daemon and causes it to crash."""
from __future__ import absolute_import
import os
import logging
import subprocess
import warnings
from .config import write_default_config, check_and_load_ssh_auth
from . import constants
from .warnings import daemon_warnings
from .subprocess import check_and_log_output_and_error, check_output_demoted
from . import constants
class PreflightException(Exception):
pass
def returns_exception(f):
def inner():
try:
f()
return None
except Exception as e:
return e
return inner
def _assert_executable_exists(executable_name):
logging.info('Checking for existence of {}'.format(executable_name))
try:
subprocess.check_output('which {}'.format(executable_name), shell=True)
except subprocess.CalledProcessError, OSError:
raise PreflightException('Executable not found: {}'.format(executable_name))
def _maybe_version_warning(executable, installed_version):
if installed_version != constants.SYSTEM_DEPENDENCY_VERSIONS[executable]:
message = 'Your {} version ({}) deviates from the supported version ({}).'.format(executable,
installed_version,
constants.SYSTEM_DEPENDENCY_VERSIONS[executable])
warnings.warn(message)
daemon_warnings.warn('preflight', message)
@returns_exception
def _check_nginx():
_assert_executable_exists('nginx')
installed_version = subprocess.check_output(['nginx', '-v'], stderr=subprocess.STDOUT).strip().split('/')[-1]
_maybe_version_warning('nginx', installed_version)
@returns_exception
def _check_rsync():
_assert_executable_exists('rsync')
@returns_exception
def _check_virtualbox():
_assert_executable_exists('VBoxManage')
installed_version = subprocess.check_output(['VBoxManage', '-v']).split('r')[0]
_maybe_version_warning('virtualbox', installed_version)
@returns_exception
def _check_boot2docker():
_assert_executable_exists('boot2docker')
installed_version = check_output_demoted(['boot2docker', 'version']).splitlines()[0].split(':')[1].split('v')[-1]
_maybe_version_warning('boot2docker', installed_version)
@returns_exception
def _check_docker():
_assert_executable_exists('docker')
installed_version = check_output_demoted(['docker', '-v']).split(',')[0].split(' ')[-1]
_maybe_version_warning('docker', installed_version)
@returns_exception
def _check_docker_compose():
_assert_executable_exists('docker-compose')
installed_version = check_output_demoted(['docker-compose', '--version']).split('\n')[0].split()[-1].strip()
_maybe_version_warning('docker-compose', installed_version)
@returns_exception
def _assert_hosts_file_is_writable():
if not os.access(constants.HOSTS_PATH, os.W_OK):
raise OSError('Hosts file at {} is not writable'.format(constants.HOSTS_PATH))
def _ensure_run_dir_exists():
if not os.path.exists(constants.RUN_DIR):
os.makedirs(constants.RUN_DIR)
def _ensure_config_dir_exists():
if not os.path.exists(constants.CONFIG_DIR):
os.makedirs(constants.CONFIG_DIR)
def _ensure_command_files_dir_exists():
if not os.path.exists(constants.COMMAND_FILES_DIR):
os.makedirs(constants.COMMAND_FILES_DIR)
def _ensure_github_known_host():
known_hosts_path = os.path.expanduser('~root/.ssh/known_hosts')
with open(known_hosts_path, 'w+') as f:
contents = f.read()
if 'github.com' not in contents:
logging.info('Adding github ssh key to roots ssh known_hosts file')
command = ['sh', '-c', 'ssh-keyscan -t rsa github.com >> {}'.format(known_hosts_path)]
check_and_log_output_and_error(command, demote=False)
def preflight_check():
logging.info('Starting preflight check')
errors = [check() for check in [_check_nginx, _check_rsync, _check_virtualbox, _check_boot2docker,
_check_docker, _check_docker_compose, _assert_hosts_file_is_writable]]
str_errors = [str(e) for e in errors if e is not None]
if str_errors:
raise PreflightException("Preflight Errors: \n\t{}".format('\n\t'.join(str_errors)))
_ensure_run_dir_exists()
_ensure_config_dir_exists()
_ensure_command_files_dir_exists()
_ensure_github_known_host()
if not os.path.exists(constants.CONFIG_PATH):
logging.info('Creating default config file at {}'.format(constants.CONFIG_PATH))
write_default_config()
check_and_load_ssh_auth()
logging.info('Completed preflight check successfully')
|
Python
| 0
|
@@ -485,30 +485,8 @@
rror
-, check_output_demoted
%0Afro
@@ -2304,32 +2304,43 @@
alled_version =
+subprocess.
check_output_dem
@@ -2335,24 +2335,16 @@
k_output
-_demoted
(%5B'boot2
@@ -2567,32 +2567,43 @@
alled_version =
+subprocess.
check_output_dem
@@ -2594,32 +2594,24 @@
check_output
-_demoted
(%5B'docker',
@@ -2823,16 +2823,27 @@
rsion =
+subprocess.
check_ou
@@ -2850,16 +2850,8 @@
tput
-_demoted
(%5B'd
|
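Decoded, the revert drops the demoted helper from the import line and restores plain `subprocess.check_output` in the boot2docker, docker and docker-compose checks, e.g.:

```python
from .subprocess import check_and_log_output_and_error

@returns_exception
def _check_boot2docker():
    _assert_executable_exists('boot2docker')
    installed_version = subprocess.check_output(['boot2docker', 'version']).splitlines()[0].split(':')[1].split('v')[-1]
    _maybe_version_warning('boot2docker', installed_version)
```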
1abbca6200fa3da0a3216b18b1385f3575edb49a
|
Move import of Django's get_version into django-registration's get_version, to avoid dependency-order problems.
|
registration/__init__.py
|
registration/__init__.py
|
from django.utils.version import get_version as django_get_version
VERSION = (0, 9, 0, 'beta', 1)
def get_version():
return django_get_version(VERSION) # pragma: no cover
|
Python
| 0
|
@@ -1,8 +1,64 @@
+VERSION = (0, 9, 0, 'beta', 1)%0A%0A%0Adef get_version():%0A
from dja
@@ -120,62 +120,8 @@
ion%0A
-%0A%0AVERSION = (0, 9, 0, 'beta', 1)%0A%0A%0Adef get_version():%0A
|
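Decoded, the whole module becomes:

```python
VERSION = (0, 9, 0, 'beta', 1)


def get_version():
    from django.utils.version import get_version as django_get_version
    return django_get_version(VERSION)  # pragma: no cover
```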
db5cb125fb9b0dd4d3b781d0b85250bdc7a52cba
|
fix error msg
|
dvc/command/add.py
|
dvc/command/add.py
|
from dvc.exceptions import DvcException
from dvc.command.common.base import CmdBase
class CmdAdd(CmdBase):
def run(self):
for target in self.args.targets:
try:
self.project.add(target)
except DvcException as ex:
self.project.logger.error('Failed to add {}', ex)
return 1
return 0
|
Python
| 0
|
@@ -320,11 +320,30 @@
add
-%7B%7D'
+%5C'%7B%7D%5C''.format(target)
, ex
|
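Decoded (`%5C` is a backslash), the message now names the failing target:

```python
self.project.logger.error('Failed to add \'{}\''.format(target), ex)
```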
3dd67c1d4fbda2d5006c49a5b49345c550c66091
|
extend towline backward instead
|
lib/void/ship.py
|
lib/void/ship.py
|
# Copyright (c) 2008 Mikael Lind
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import math, random
from void.agent import Agent
import void.box2d as box2d
from void.shot import Shot
class Ship(Agent):
def __init__(self, world, shots):
self.world = world
self.shots = shots
self.color = (1.0, 1.0, 1.0)
self.thrusting = False
self.firing = False
self.turning = 0.0
self.cooldown = 0.0
self.max_angular_velocity = 2.0 * math.pi
self.body = self.create_body(world)
def create_body(self, world):
body_def = box2d.b2BodyDef()
body_def.position.Set(0.0, 0.0)
body_def.angle = 2.0 * math.pi * random.random()
shape_def = box2d.b2PolygonDef()
shape_def.setVertices_tuple([(-1.0, -1.0), (1.0, -1.0), (0.0, 2.0)])
shape_def.density = 2.0
shape_def.restitution = 1.0
shape_def.filter.categoryBits = 0x0001
shape_def.filter.maskBits = 0x0002
body = world.CreateBody(body_def)
body.CreateShape(shape_def)
body.SetMassFromShapes()
body.SetUserData(self)
return body
def step(self, dt):
if self.thrusting:
angle = self.body.GetAngle()
force = 200.0 * box2d.b2Vec2(-math.sin(angle), math.cos(angle))
point = self.body.GetPosition()
self.body.ApplyForce(force, point)
self.cooldown -= dt
if self.firing and self.cooldown <= 0.0:
self.shots.append(Shot(self.world, self))
self.cooldown = 0.2
self.body.SetAngularVelocity(self.turning *
self.max_angular_velocity)
def toggle_towline(self):
joint_edge = self.body.GetJointList()
if joint_edge is not None:
self.world.DestroyJoint(joint_edge.joint)
return
angle = self.body.GetAngle()
unit = box2d.b2Vec2(-math.sin(angle), math.cos(angle))
segment = box2d.b2Segment()
segment.p1 = self.body.GetPosition()
segment.p2 = self.body.GetPosition() + 15.0 * unit
fraction, normal, shape = self.world.RaycastOne(segment, False, None)
if shape is not None:
agent = shape.GetBody().GetUserData()
joint_def = box2d.b2DistanceJointDef()
joint_def.Initialize(self.body, agent.body, self.body.GetPosition(),
agent.body.GetPosition())
joint_def.collideConnected = True
self.world.CreateJoint(joint_def)
|
Python
| 0
|
@@ -2931,24 +2931,25 @@
unit =
+-
box2d.b2Vec2
|
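The one-character hunk negates the direction vector, so the towline ray cast in `toggle_towline` now extends behind the ship instead of ahead of it:

```python
unit = -box2d.b2Vec2(-math.sin(angle), math.cos(angle))
```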
969b99f02f7db10103eb426e0a31e569998a82ae
|
fix #2394
|
module/plugins/hoster/Http.py
|
module/plugins/hoster/Http.py
|
# -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.Hoster import Hoster
class Http(Hoster):
__name__ = "Http"
__type__ = "hoster"
__version__ = "0.06"
__status__ = "testing"
__pattern__ = r'(?:jd|pys?)://.+'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Download simple http link"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def setup(self):
self.chunk_limit = -1
self.resume_download = True
def process(self, pyfile):
url = re.sub(r'^(jd|py)', "http", pyfile.url)
netloc = urlparse.urlparse(url).netloc
link = self.isresource(url)
if not link:
return
for _i in xrange(2):
self.download(link, ref=False, disposition=True)
if self.req.code in (404, 410):
self.offline()
elif self.req.code in (401, 403):
self.log_debug("Auth required", "Received HTTP status code: %d" % e.code)
#@TODO: Recheck in 0.4.10
if self.account:
servers = [x['login'] for x in self.account.getAllAccounts()]
else:
servers = []
if netloc in servers:
self.log_debug("Logging on to %s" % netloc)
self.req.addAuth(self.account.get_login('password'))
else:
pwd = self.get_password()
if ':' in pwd:
self.req.addAuth(pwd)
else:
self.fail(_("Authorization required"))
else:
break
self.check_download()
def check_download(self):
errmsg = self.scan_download({
'Html error' : re.compile(r'\A(?:\s*<.+>)?((?:[\w\s]*(?:[Ee]rror|ERROR)\s*\:?)?\s*\d{3})(?:\Z|\s+)'),
'Html file' : re.compile(r'\A\s*<!DOCTYPE html'),
'Request error': re.compile(r'([Aa]n error occured while processing your request)')
})
if not errmsg:
return
try:
errmsg += " | " + self.last_check.group(1).strip()
except Exception:
pass
self.log_warning(_("Check result: ") + errmsg, _("Waiting 1 minute and retry"))
self.retry(3, 60, errmsg)
|
Python
| 0
|
@@ -706,86 +706,8 @@
oc%0A%0A
- link = self.isresource(url)%0A%0A if not link:%0A return%0A%0A
@@ -757,20 +757,19 @@
ownload(
-link
+url
, ref=Fa
|
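Decoded, the fix removes the `isresource()` pre-check that broke downloads and passes the URL straight through:

```python
def process(self, pyfile):
    url = re.sub(r'^(jd|py)', "http", pyfile.url)
    netloc = urlparse.urlparse(url).netloc

    for _i in xrange(2):
        self.download(url, ref=False, disposition=True)
```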
f91f2c0f355e2d9ae413b0cef30fe80e35dc6be8
|
fix for issue #1
|
remote/client/connect.py
|
remote/client/connect.py
|
"""
connect.py: Authenticate and register on remote server
"""
__author__ = "Daniel Mazzer"
__copyright__ = "Copyright 2016, NORS Project"
__credits__ = ""
__license__ = "MIT"
__maintainer__ = "Daniel Mazzer"
__email__ = "dmazzer@gmail.com"
import json
import requests
from norsutils.logmsgs.logger import Logger
logger = Logger()
class Nors_Connect():
def __init__(self, server_ip,
server_port,
server_api_path,
server_token_path,
client_auth,
client_information):
self.server_ip = server_ip
self.server_port = server_port
self.server_api_path = server_api_path
self.server_token_path = server_token_path
self.client_auth = client_auth
self.client_id = client_information.get_remote_property('remote_id')
self.server_address = 'http://' + server_ip + ':' + server_port + server_api_path
self.token = ""
def check_connection(self):
logger.log('Sending GET command to server:', 'debug')
headers = {'content-type': 'application/json'}
rv, rt = self.get_resource('/server-info/')
if rv == 200:
return True
else:
return False
def get_token(self):
logger.log('----------------------------------------------------------', 'debug')
logger.log('Requesting access token', 'debug')
data = {"user": self.client_id, "pass": self.client_auth}
request_string = 'http://' + self.server_ip + ':' + self.server_port + self.server_token_path
logger.log('Request String: ' + request_string, 'debug')
logger.log('Data String: ' + str(data), 'debug')
try:
r = requests.post(request_string,
headers={'content-type': 'application/json'},
data=json.dumps(data)
)
except requests.exceptions.RequestException as e:
logger.log('Error connection to Server: ' + str(e), 'error')
return None
else:
logger.log(r.text, 'debug')
logger.log(r.headers, 'debug')
if r.status_code == 200:
return r.json()["access_token"]
#@staticmethod
def _token_manager(self, renew=False):
if renew is True:
self.token = self.get_token()
return self.token
else:
return self.token
def get_resource(self, resource, data=None, headers={}):
logger.log('----------------------------------------------------------', 'debug')
logger.log('GET at resource ' + resource, 'debug')
headers = headers.copy()
headers['Authorization'] = 'JWT ' + self._token_manager(renew=False)
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
# TODO:
if data is not None:
logger.log('GET WITH DATA NOT IMPLEMENTED', 'debug')
request_string = self.server_address + str(resource)
logger.log('Request String: ' + request_string, 'debug')
try:
r = requests.get(request_string, headers=headers)
except requests.exceptions.RequestException as e:
logger.log('Error connection to Server: ' + str(e), 'error')
return requests.Response.raise_for_status() , None
else:
logger.log('Response: ' + str(r.text), 'debug')
logger.log('Headers: ' + str(r.headers), 'debug')
logger.log('Return Code: ' + str(r.status_code), 'debug')
if r.status_code >= 400 and r.status_code < 500:
self.token = self._token_manager(renew=True)
if r.status_code == 200:
if r.headers.get('content-type') == 'application/json':
return r.status_code, r.json()
else:
return r.status_code, r.text
else:
return r.status_code, None
def _post_resource(self, resource, data_json, headers={}):
logger.log('----------------------------------------------------------', 'debug')
logger.log('POST at resource ' + resource, 'debug')
headers = headers.copy()
headers['Authorization'] = 'JWT ' + self._token_manager(renew=False)
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
request_string = self.server_address + str(resource)
logger.log('Request String: ' + request_string, 'debug')
try:
r = requests.post(request_string, data=data_json, headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
print "And you get an HTTPError:", e.message
print r.status_code
else:
logger.log('Response: ' + str(r.text), 'debug')
logger.log('Headers: ' + str(r.headers), 'debug')
logger.log('Return Code: ' + str(r.status_code), 'debug')
if r.status_code >= 400 and r.status_code < 500:
logger.log('AUTH Response: ' + str(r.headers), 'debug')
self.token = self._token_manager(renew=True)
if r.status_code == 200:
if r.headers.get('content-type') == 'application/json':
return r.status_code, r.json()
else:
return r.status_code, r.text
else:
return r.status_code, None
except requests.exceptions.RequestException as e:
logger.log('Error when connecting to Server: ' + str(e), 'error')
return None , None
def post_resource(self, resource, data):
if type(data) is dict:
data_json = json.dumps(data, encoding='utf8')
else:
# logger.log('POST ERROR: data is not dict, sending as is...', 'debug')
data_json = data
return self._post_resource(resource, data_json)
|
Python
| 0
|
@@ -4991,16 +4991,17 @@
TPError:
+
%22, e.mes
@@ -5025,26 +5025,25 @@
-print r.status_cod
+return None , Non
e%0A
@@ -5977,32 +5977,63 @@
, None%0A
+%0A return None , None
%0A %0A de
@@ -6305,29 +6305,30 @@
%0A r
-eturn
+v, r =
self._post_
@@ -6356,16 +6356,49 @@
ta_json)
+ %0A return rv, r
%0A %0A
|
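Decoded, the fix makes `_post_resource` return a (status, body) pair on every path instead of printing and falling off the end, and `post_resource` forwards that pair; a fall-through `return None , None` is also added after the status-code handling:

```python
# in _post_resource: the HTTPError branch now returns instead of printing the code
except requests.exceptions.HTTPError as e:
    print "And you get an HTTPError: ", e.message
    return None , None

# in post_resource:
rv, r = self._post_resource(resource, data_json)
return rv, r
```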
b5d32a3b1b8e85222497c4736c0c6707003dc848
|
Fix broken database IO tests.
|
pybtex/tests/database_test/__init__.py
|
pybtex/tests/database_test/__init__.py
|
# Copyright (C) 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pkgutil
from unittest import TestCase
import yaml
from io import BytesIO, TextIOWrapper, BufferedWriter
from pybtex.plugin import find_plugin
class DatabaseIOTest(TestCase):
def setUp(self):
reference_data = pkgutil.get_data('pybtex', 'tests/database_test/reference_data.yaml')
self.reference_data = yaml.load(reference_data)
def _test_input(self, plugin):
parser = find_plugin('pybtex.database.input', plugin).Parser(encoding='UTF-8')
writer = find_plugin('pybtex.database.output', plugin).Writer(encoding='UTF-8')
stream = BytesIO()
writer_stream = TextIOWrapper(stream, 'UTF-8') if writer.unicode_io else stream
parser_stream = TextIOWrapper(stream, 'UTF-8') if parser.unicode_io else stream
writer.write_stream(self.reference_data, writer_stream)
writer_stream.flush()
stream.seek(0)
parser.parse_stream(parser_stream)
loaded_data = parser.data
self.assertEqual(loaded_data, self.reference_data)
def test_bibtex_input(self):
self._test_input('bibtex')
def test_bibyaml_input(self):
self._test_input('bibyaml')
def test_bibtexml_input(self):
# BibTeXML does not support TeX preambles AFAIK
self.reference_data._preamble = []
self._test_input('bibtexml')
|
Python
| 0
|
@@ -1128,15 +1128,8 @@
gin)
-.Parser
(enc
@@ -1209,15 +1209,8 @@
gin)
-.Writer
(enc
|
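Decoded, the test now instantiates the plugin class returned by `find_plugin` directly, dropping the stale `.Parser`/`.Writer` attribute lookups:

```python
parser = find_plugin('pybtex.database.input', plugin)(encoding='UTF-8')
writer = find_plugin('pybtex.database.output', plugin)(encoding='UTF-8')
```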
37d01f6088b1cf5673f66f4532dd51c73a0156f1
|
Fix grammar in login error message
|
rest_framework/authtoken/serializers.py
|
rest_framework/authtoken/serializers.py
|
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class AuthTokenSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
if username and password:
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg)
attrs['user'] = user
return attrs
else:
msg = _('Unable to login with provided credentials.')
raise serializers.ValidationError(msg)
else:
msg = _('Must include "username" and "password"')
raise serializers.ValidationError(msg)
|
Python
| 0.000763
|
@@ -783,16 +783,17 @@
e to log
+
in with
|
059090cd945f51ed0281a967e1ba9502d2dc0a40
|
Fix unittest
|
pymatgen/apps/borg/tests/test_queen.py
|
pymatgen/apps/borg/tests/test_queen.py
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest2 as unittest
import os
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
def test_get_data(self):
data = self.queen.get_data()
self.assertEqual(len(data), 6)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Python
| 0.99804
|
@@ -910,9 +910,9 @@
a),
-6
+7
)%0A%0A
|
c750435cb79610cce1aa1855b8317858ae2e96bf
|
Change field name to reflect naming standard
|
oahapi/ratechecker/models.py
|
oahapi/ratechecker/models.py
|
from django.db import models
# I'm not fond of how these fields are named, but I tried to balance
# Python naming conventions with how the fields are actually referred to
# outside this software.
class Product(models.Model):
""" Loan Product. """
FIXED = 'FIXED'
ARM = 'ARM'
PAYMENT_TYPE_CHOICES = (
(FIXED, 'Fixed Rate Mortgage'),
(ARM, 'Adjustable Rate Mortgage'))
JUMBO = 'JUMBO'
CONF = 'CONF'
AGENCY = 'AGENCY'
FHA = 'FHA'
VA = 'VA'
VA_HB = 'VA-HB'
FHA_HB = 'FHA-HB'
LOAN_TYPE_CHOICES = (
(JUMBO, 'Jumbo Mortgage'),
(CONF, 'Conforming Loan'),
(AGENCY, 'Agency Loan'),
(FHA, 'Federal Housing Administration Loan'),
(VA, 'Veterans Affairs Loan'),
(VA_HB, 'VA-HB Loan'),
(FHA_HB, 'FHA-HB Loan'),
)
REFI = 'REFI'
PURCH = 'PURCH'
LOAN_PURPOSE_CHOICES = (
(REFI, 'Refinance'),
(PURCH, 'Purchase')
)
plan_id = models.IntegerField(primary_key=True)
institution = models.CharField(max_length=16)
loan_purpose = models.CharField(
max_length=12, choices=LOAN_PURPOSE_CHOICES)
pmt_type = models.CharField(
max_length=12, choices=PAYMENT_TYPE_CHOICES, default=FIXED)
loan_type = models.CharField(max_length=12, choices=LOAN_TYPE_CHOICES)
loan_term = models.IntegerField()
int_adj_term = models.IntegerField(
null=True,
help_text='1st part of the ARM definition. E.g. 5 in 5/1 ARM')
adj_period = models.PositiveSmallIntegerField(null=True)
io = models.BooleanField()
arm_index = models.CharField(max_length=96, null=True)
iac_help = 'Max percentage points the rate can adjust at first adjustment.'
int_adj_cap = models.IntegerField(null=True, help_text=iac_help)
cap_text = 'Max percentage points adjustable at each subsequent adjustment'
annual_cap = models.IntegerField(null=True, help_text=cap_text)
loan_cap = models.IntegerField(
null=True,
help_text='Total lifetime maximum change that the ARM rate can have.')
arm_margin = models.DecimalField(max_digits=6, decimal_places=4, null=True)
ai_value = models.DecimalField(max_digits=6, decimal_places=4, null=True)
min_ltv = models.FloatField()
max_ltv = models.FloatField()
min_fico = models.IntegerField()
max_fico = models.IntegerField()
min_loan_amt = models.DecimalField(max_digits=12, decimal_places=2)
max_loan_amt = models.DecimalField(max_digits=12, decimal_places=2)
singlefamily = models.BooleanField()
condo = models.BooleanField()
coop = models.BooleanField()
data_timestamp = models.DateTimeField()
class Adjustment(models.Model):
POINTS = 'P'
RATE = 'R'
AFFECT_RATE_TYPE_CHOICES = (
(POINTS, 'Points'),
(RATE, 'Rate'))
rule_id = models.IntegerField(primary_key=True)
product = models.ForeignKey(Product)
affect_rate_type = models.CharField(
max_length=1, choices=AFFECT_RATE_TYPE_CHOICES)
adj_value = models.DecimalField(max_digits=6, decimal_places=3, null=True)
min_loan_amt = models.DecimalField(
max_digits=12, decimal_places=2, null=True)
max_loan_amt = models.DecimalField(
max_digits=12, decimal_places=2, null=True)
prop_type = models.CharField(max_length=10, null=True)
min_fico = models.IntegerField(null=True)
max_fico = models.IntegerField(null=True)
min_ltv = models.FloatField(null=True)
max_ltv = models.FloatField(null=True)
state = models.CharField(max_length=2, null=True)
data_timestamp = models.DateTimeField()
class Region(models.Model):
""" This table maps regions to states. """
region_id = models.IntegerField()
state_id = models.CharField(max_length=2)
data_timestamp = models.DateTimeField()
class Rate(models.Model):
rates_id = models.IntegerField(primary_key=True)
product = models.ForeignKey(Product)
regions = models.ManyToManyField(Region)
lock = models.PositiveSmallIntegerField()
base_rate = models.DecimalField(max_digits=6, decimal_places=3)
total_points = models.DecimalField(max_digits=6, decimal_places=3)
data_timestamp = models.DateTimeField()
|
Python
| 0
|
@@ -2531,16 +2531,17 @@
single
+_
family =
|
b25fc29766d5df3567dc92f8d6078a0178e67bd4
|
Fix unittest.
|
pymatgen/symmetry/tests/test_groups.py
|
pymatgen/symmetry/tests/test_groups.py
|
#!/usr/bin/env python
"""
TODO: Modify unittest doc.
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "4/10/14"
import unittest
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.groups import PointGroup, SpaceGroup
class PointGroupTest(unittest.TestCase):
def test_order(self):
order = {"mmm": 8, "432": 24, "-6m2": 12}
for k, v in order.items():
pg = PointGroup(k)
self.assertEqual(order[k], len(pg.symmetry_ops))
def test_get_orbit(self):
pg = PointGroup("mmm")
self.assertEqual(len(pg.get_orbit([0.1, 0.1, 0.1])), 8)
self.assertEqual(len(pg.get_orbit([0, 0, 0.1])), 2)
self.assertEqual(len(pg.get_orbit([1.2, 1.2, 1])), 8)
def test_is_sub_super_group(self):
pgmmm = PointGroup("mmm")
pgmm2 = PointGroup("mm2")
pg222 = PointGroup("222")
pg4 = PointGroup("4")
self.assertTrue(pgmmm.is_supergroup(pgmm2))
self.assertTrue(pgmm2.is_subgroup(pgmmm))
self.assertTrue(pgmmm.is_supergroup(pg222))
self.assertFalse(pgmmm.is_supergroup(pg4))
pgm3m = PointGroup("m-3m")
pg6mmm = PointGroup("6/mmm")
pg3m = PointGroup("-3m")
# TODO: Fix the test below.
# self.assertTrue(pg3m.is_subgroup(pgm3m))
self.assertTrue(pg3m.is_subgroup(pg6mmm))
self.assertFalse(pgm3m.is_supergroup(pg6mmm))
class SpaceGroupTest(unittest.TestCase):
def test_abbrev_symbols(self):
sg = SpaceGroup("P2/c")
self.assertEqual(sg.int_number, 13)
sg = SpaceGroup("R-3mH")
self.assertEqual(sg.int_number, 166)
def test_attr(self):
sg = SpaceGroup("Fm-3m")
self.assertEqual(sg.full_symbol, "F4/m-32/m")
self.assertEqual(sg.patterson_symmetry, "Fm-3m")
self.assertEqual(sg.point_group, "m-3m")
def test_full_symbols(self):
sg = SpaceGroup("P2/m2/m2/m")
self.assertEqual(sg.symbol, "Pmmm")
def test_order_symm_ops(self):
for name in SpaceGroup.SG_SYMBOLS:
sg = SpaceGroup(name)
self.assertEqual(len(sg.symmetry_ops), sg.order)
def test_crystal_system(self):
sg = SpaceGroup("R-3c")
self.assertEqual(sg.crystal_system, "trigonal")
sg = SpaceGroup("R-3cH")
self.assertEqual(sg.crystal_system, "trigonal")
def test_get_orbit(self):
sg = SpaceGroup("Fm-3m")
p = np.random.random_integers(0, 100, size=(3,))
p /= 100
self.assertLessEqual(len(sg.get_orbit(p)), sg.order)
def test_is_compatible(self):
cubic = Lattice.cubic(1)
hexagonal = Lattice.hexagonal(1, 2)
rhom = Lattice.rhombohedral(3, 80)
tet = Lattice.tetragonal(1, 2)
ortho = Lattice.orthorhombic(1, 2, 3)
sg = SpaceGroup("Fm-3m")
self.assertTrue(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3mH")
self.assertFalse(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pnma")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P12/c1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P-1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertTrue(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
def test_subgroup_supergroup(self):
self.assertTrue(SpaceGroup('Pma2').is_subgroup(SpaceGroup('Pccm')))
self.assertFalse(SpaceGroup.from_int_number(229).is_subgroup(
SpaceGroup.from_int_number(230)))
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -2665,21 +2665,10 @@
3,))
-%0A p
/
-=
100
|
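The diff above is URL-encoded (%0A is a newline, %25 a literal percent sign, %5B/%5D square brackets). Decoded, it collapses the failing two-step division in test_get_orbit into a single expression; indentation and spacing are approximate, since this dump collapses leading whitespace:

    # before
    p = np.random.random_integers(0, 100, size=(3,))
    p /= 100

    # after
    p = np.random.random_integers(0, 100, size=(3,)) / 100

With from __future__ import division in effect, the in-place p /= 100 asks numpy to true-divide an integer array into itself, which fails or misbehaves depending on the numpy version; computing the quotient out of place, into a fresh float array, is the likely motivation for the "Fix unittest." message.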
098219137bdf30d7ac1c321f7973e14bfc82bda4
|
Add configuration for PyBullet Ant.
|
agents/scripts/configs.py
|
agents/scripts/configs.py
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example configurations using the PPO algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-variable
import tensorflow as tf
from agents import algorithms
from agents.scripts import networks
def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
policy_layers = 200, 100
value_layers = 200, 100
init_mean_factor = 0.1
init_std = 0.35
# Optimization
update_every = 30
update_epochs = 25
optimizer = tf.train.AdamOptimizer
learning_rate = 1e-4
# Losses
discount = 0.995
kl_target = 1e-2
kl_cutoff_factor = 2
kl_cutoff_coef = 1000
kl_init_penalty = 1
return locals()
def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 2e6 # 2M
return locals()
def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v1'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals()
def cheetah():
"""Configuration for MuJoCo's half cheetah task."""
locals().update(default())
# Environment
env = 'HalfCheetah-v1'
max_length = 1000
steps = 1e7 # 10M
discount = 0.99
return locals()
def walker():
"""Configuration for MuJoCo's walker task."""
locals().update(default())
# Environment
env = 'Walker2d-v1'
max_length = 1000
steps = 1e7 # 10M
return locals()
def hopper():
"""Configuration for MuJoCo's hopper task."""
locals().update(default())
# Environment
env = 'Hopper-v1'
max_length = 1000
steps = 1e7 # 10M
update_every = 60
return locals()
def ant():
"""Configuration for MuJoCo's ant task."""
locals().update(default())
# Environment
env = 'Ant-v1'
max_length = 1000
steps = 2e7 # 20M
return locals()
def humanoid():
"""Configuration for MuJoCo's humanoid task."""
locals().update(default())
# Environment
env = 'Humanoid-v1'
max_length = 1000
steps = 5e7 # 50M
update_every = 60
return locals()
|
Python
| 0
|
@@ -2950,28 +2950,259 @@
very = 60%0A return locals()%0A
+%0A%0Adef bullet_ant():%0A locals().update(default())%0A # Environment%0A import pybullet_envs # noqa pylint: disable=unused-import%0A env = 'AntBulletEnv-v0'%0A max_length = 1000%0A steps = 3e7 # 30M%0A update_every = 60%0A return locals()%0A
|
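Decoded, the hunk above appends one more configuration function after humanoid(); the two-space indentation is an assumption, matching the style of the rest of configs.py:

    def bullet_ant():
      locals().update(default())
      # Environment
      import pybullet_envs  # noqa pylint: disable=unused-import
      env = 'AntBulletEnv-v0'
      max_length = 1000
      steps = 3e7  # 30M
      update_every = 60
      return locals()

The otherwise-unused import registers the Bullet environments with Gym as a side effect, which is why it carries the noqa and pylint suppressions.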
e7530150120a542f264acc4c7ba74418c5e2ecdb
|
Add download_text and extractor train views.
|
extractor_train/public/views.py
|
extractor_train/public/views.py
|
# -*- coding: utf-8 -*-
'''Public section, including homepage and signup.'''
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_user, login_required, logout_user
from extractor_train.extensions import login_manager
from extractor_train.user.models import User
from extractor_train.public.forms import LoginForm
from extractor_train.user.forms import RegisterForm
from extractor_train.utils import flash_errors
from extractor_train.database import db
blueprint = Blueprint('public', __name__, static_folder="../static")
@login_manager.user_loader
def load_user(id):
return User.get_by_id(int(id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route('/logout/')
@login_required
def logout():
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route("/register/", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
password=form.password.data,
active=True)
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route("/about/")
def about():
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
|
Python
| 0
|
@@ -181,16 +181,31 @@
ession)%0A
+%0Aimport flask%0A%0A
from fla
@@ -2071,24 +2071,1314 @@
/about.html%22, form=form)
+%0A%0A@blueprint.route(%22/download_text/%3Cint:downloads_id%3E%22)%0Adef download_text( downloads_id ):%0A print 'downloads_id', downloads_id%0A download = get_download( downloads_id )%0A%0A raw_content = download%5B'raw_content'%5D%0A%0A print raw_content%5B:200%5D%0A%0A response = flask.make_response( raw_content );%0A%0A return response%0A%0A print download.keys()%0A %0A #form = LoginForm(request.form)%0A #return render_template(%22public/about.html%22, form=form)%0A #return download%5B'raw_content'%5D%0A return ''%0A%0A@blueprint.route(%22/extractor_train/%3Cint:downloads_id%3E%22)%0Adef extractor_train( downloads_id ):%0A print 'downloads_id', downloads_id%0A%0A form = LoginForm(request.form)%0A return render_template(%22public/extractor_train.html%22, form=form, downloads_id=downloads_id )%0A return download%5B'raw_content'%5D%0A return ''%0A%0A%0Aimport cPickle%0Aimport os.path%0A%0Aapi_key = cPickle.load( file( os.path.expanduser( '~/mediacloud_api_key.pickle' ), 'r' ) )%0A%0Aloc_key = 'f66a50230d54afaf18822808aed649f1d6ca72b08fb06d5efb6247afe9fbae52'%0A%0Aimport mediacloud, requests, csv, sys, os, json, cPickle%0A%0Adef get_download( downloads_id ):%0A download = requests.get('https://api.mediacloud.org/api/v2/downloads/single/'+str(downloads_id)+'?key='+api_key)%0A%0A download.raise_for_status()%0A%0A return download.json()%5B0%5D%0A%0A
|
8843e1e1d648b387db3197c0f8fec54d6c2d5507
|
Fix docstrings in vsc.utils.missing to epydoc format
|
lib/vsc/utils/missing.py
|
lib/vsc/utils/missing.py
|
#!/usr/bin/env python
##
# Copyright 2012 Ghent University
# Copyright 2012 Andy Georges
#
# This file is part of VSC-tools,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/VSC-tools
#
# VSC-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# VSC-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VSC-tools. If not, see <http://www.gnu.org/licenses/>.
##
"""Various functions that are missing from the default Python library.
- nub(list): keep the unique elements in the list
- find_sublist_index(list, sublist): find the index of the first occurence of the sublist in the list
- Monoid: implementation of the monoid concept
- MonoidDict: dictionary that combines values upon insertiong according to the given monoid
"""
def nub(list_):
"""Returns the unique items of a list, while preserving order of the original list, i.e. the first unique element
encoutered is retained.
Code is taken from
http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
Supposedly, this is one of the fastest ways to determine the unique elements of a list.
@type list_: a list :-)
@returns: a new list with each element from `list` appearing only once (cfr. Michelle Dubois).
"""
seen = set()
seen_add = seen.add
return [x for x in list_ if x not in seen and not seen_add(x)]
def find_sublist_index(ls, sub_ls):
"""Find the index at which the sublist sub_ls can be found in ls.
@type ls: list
@type sub_ls: list
@return: index of the matching location or None if no match can be made.
"""
sub_length = len(sub_ls)
for i in xrange(len(ls)):
if ls[i:(i + sub_length)] == sub_ls:
return i
return None
class Monoid (object):
"""A monoid is a mathematical object with a default element (mempty or null) and a default operation to combine
two elements of a given data type.
Taken from http://fmota.eu/2011/10/09/monoids-in-python.html under the do whatever you want license.
"""
def __init__(self, null, lift, op):
"""Initialise.
@type null: default element of some data type, e.g., [] for list or 0 for int (identity element in an Abelian group)
@type lift: operation that injects an element into the target datatype (for duck typing)
@type op: mappend operation to combine two elements of the target datatype
"""
self.null = null
self.lift = lift
self.op = op
def fold(self, xs):
"""fold over the elements of the list, combining them into a single element of the target datatype."""
if hasattr(xs, "__fold__"):
return xs.__fold__(self)
else:
return reduce(
self.op,
map(self.lift, xs),
self.null
)
def __call__(self, *args):
"""When the monoid is called, the values are folded over and the resulting value is returned."""
return self.fold(args)
def star(self):
"""Return a new similar monoid."""
return Monoid(self.null, self.fold, self.op)
class MonoidDict(dict):
"""A dictionary with a monoid operation, that allows combining values in the dictionary according to the mappend
operation in the monoid.
"""
def __init__(self, monoid, *args, **kwargs):
"""Initialise.
@type monoid: Monoid instance
"""
super(MonoidDict, self).__init__(*args, **kwargs)
self.monoid = monoid
def __setitem__(self, key, value):
"""Combine the value the dict has for the key with the new value using the mappend operation."""
if super(MonoidDict, self).__contains__(key):
current = super(MonoidDict, self).__getitem__(key)
super(MonoidDict, self).__setitem__(key, self.monoid(current, value))
else:
super(MonoidDict, self).__setitem__(key, value)
def __getitem__(self, key):
""" Obtain the dictionary value for the given key. If no value is present,
we return the monoid's mempty (null).
"""
if not super(MonoidDict, self).__contains__(key):
return self.monoid.null
else:
return super(MonoidDict, self).__getitem__(key)
|
Python
| 0.000014
|
@@ -1099,16 +1099,17 @@
.%0A##%0A%22%22%22
+%0A
Various
@@ -1169,16 +1169,18 @@
brary.%0A%0A
+
- nub(li
@@ -1221,16 +1221,18 @@
he list%0A
+
- find_s
@@ -1287,16 +1287,20 @@
he first
+%0A
occuren
@@ -1329,16 +1329,18 @@
he list%0A
+
- Monoid
@@ -1378,16 +1378,18 @@
concept%0A
+
- Monoid
@@ -1441,16 +1441,20 @@
sertiong
+%0A
accordi
@@ -1566,16 +1566,20 @@
order of
+%0A
the ori
@@ -1619,20 +1619,16 @@
element
-%0A
encoute
@@ -1633,16 +1633,20 @@
tered is
+%0A
retaine
@@ -1854,24 +1854,28 @@
etermine the
+%0A
unique elem
|
f9f76e3889e351071eff98374b8419777d474cad
|
use urlunparse
|
oauth10a/signing_requests.py
|
oauth10a/signing_requests.py
|
"""
All Token requests and Protected Resources requests MUST be signed by the
Consumer and verified by the Service Provider. The purpose of signing requests
is to prevent unauthorized parties from using the Consumer Key and Tokens when
making Token requests or Protected Resources requests. The signature process
encodes the Consumer Secret and Token Secret into a verifiable value which is
included with the request.
OAuth does not mandate a particular signature method, as each implementation
can have its own unique requirements. The protocol defines three signature
methods: HMAC-SHA1, RSA-SHA1, and PLAINTEXT, but Service Providers are free to
implement and document their own methods. Recommending any particular method is
beyond the scope of this specification.
The Consumer declares a signature method in the oauth_signature_method
parameter, generates a signature, and stores it in the oauth_signature
parameter. The Service Provider verifies the signature as specified in each
method. When verifying a Consumer signature, the Service Provider SHOULD check
the request nonce to ensure it has not been used in a previous Consumer
request.
The signature process MUST NOT change the request parameter names or values,
with the exception of the oauth_signature parameter.
"""
import urlparse
import requests
def request_url(url):
"""9.1.2: Construct Request URL
The Signature Base String includes the request absolute URL, tying the
signature to a specific endpoint. The URL used in the Signature Base
String MUST include the scheme, authority, and path, and MUST exclude
the query and fragment as defined by [RFC3986] section 3.
If the absolute request URL is not available to the Service Provider
(it is always available to the Consumer), it can be constructed by
combining the scheme being used, the HTTP Host header, and the relative
HTTP request URL. If the Host header is not available, the Service
Provider SHOULD use the host name communicated to the Consumer in the
documentation or other means.
The Service Provider SHOULD document the form of URL used in the
Signature Base String to avoid ambiguity due to URL normalization.
Unless specified, URL scheme and authority MUST be lowercase and
include the port number; http default port 80 and https default port
443 MUST be excluded.
For example, the request:
HTTP://Example.com:80/resource?id=123
Is included in the Signature Base String as:
http://example.com/resource
"""
# urlparse correctly handles case insensitivity here
parsed = urlparse.urlparse(url)
# canonicalize the port if redundant with scheme
netloc = [parsed.hostname]
port = parsed.port
if parsed.scheme == 'http' and port == 80:
port = None
if parsed.scheme == 'https' and port == 443:
port = None
if port is not None:
netloc.append(str(port))
netloc = ':'.join(netloc)
return '%s://%s%s' % (parsed.scheme, netloc, parsed.path)
class AuthBase(requests.auth.AuthBase):
@property
def signature_base_string(self):
"""Consistent reproducible concatenation of the request elements.
The string is used as an input in hashing or signing algorithms.
"""
pass
@property
def normalized_request_parameters(self):
"""The request parameters are collected, sorted and concatenated into a
normalized string:
- Parameters in the OAuth HTTP Authorization header excluding the realm
parameter.
- Parameters in the HTTP POST request body (with a content-type of
application/x-www-form-urlencoded).
- HTTP GET parameters added to the URLs in the query part (as defined
by [RFC3986] section 3).
The oauth_signature parameter MUST be excluded.
The parameters are normalized into a single string as follows:
Parameters are sorted by name, using lexicographical byte value
ordering. If two or more parameters share the same name, they are
sorted by their value. For example:
a=1, c=hi%20there, f=25, f=50, f=a, z=p, z=t
Parameters are concatenated in their sorted order into a single string.
For each parameter, the name is separated from the corresponding value
by an '=' character (ASCII code 61), even if the value is empty. Each
name-value pair is separated by an '&' character (ASCII code 38). For
example:
a=1&c=hi%20there&f=25&f=50&f=a&z=p&z=t
"""
pass
@property
def request_elements(self):
"""9.1.3: Concatenate Request Elements
The following items MUST be concatenated in order into a single string.
Each item is encoded and separated by an '&' character (ASCII code 38),
even if empty.
- The HTTP request method used to send the request. Value MUST be
uppercase, for example: HEAD, GET , POST, etc.
- The request URL from Section 9.1.2.
- The normalized request parameters string from Section 9.1.1.
See Signature Base String example in Appendix A.5.1.
"""
pass
class HMACSHA1Auth(AuthBase):
"""9.2: HMAC-SHA1
The HMAC-SHA1 signature method uses the HMAC-SHA1 signature algorithm as
defined in [RFC2104] where the Signature Base String is the text and the
key is the concatenated values (each first encoded per Parameter Encoding)
of the Consumer Secret and Token Secret, separated by an '&' character
(ASCII code 38) even if empty.
"""
def __init__(self):
pass
def __call__(self, r):
"""9.2.1: Generating Signature
`oauth_signature` is set to the calculated digest octet string, first
base64-encoded per [RFC2045] section 6.8, then URL-encoded per
Parameter Encoding.
"""
oauth_signature = self.generate_signature()
return r
class RSASHA1Auth(AuthBase):
def __init__(self):
pass
def __call__(self, r):
"""9.3.1: Generating Signature
The Signature Base String is signed using the Consumer's RSA private
key per [RFC3447] section 8.2.1, where K is the Consumer's RSA private
key, M the Signature Base String, and S is the result signature octet
string:
S = RSASSA-PKCS1-V1_5-SIGN (K, M)
oauth_signature is set to S, first base64-encoded per [RFC2045] section
6.8, then URL-encoded per Parameter Encoding.
"""
return r
class PlaintextAuth(AuthBase):
"""9.4: Plaintext
The PLAINTEXT method does not provide any security protection and SHOULD
only be used over a secure channel such as HTTPS. It does not use the
Signature Base String.
"""
def __init__(self):
pass
def __call__(self, r):
"""9.4.1: Generating Signature
`oauth_signature` is set to the concatenated encoded values of the
Consumer Secret and Token Secret, separated by a '&' character (ASCII
code 38), even if either secret is empty. The result MUST be encoded
again.
These examples show the value of `oauth_signature` for Consumer Secret
`djr9rjt0jd78jf88` and 3 different Token Secrets:
jjd999tj88uiths3:
oauth_signature=djr9rjt0jd78jf88%26jjd999tj88uiths3
jjd99$tj88uiths3:
oauth_signature=djr9rjt0jd78jf88%26jjd99%2524tj88uiths3
Empty:
oauth_signature=djr9rjt0jd78jf88%26
"""
return r
|
Python
| 0.000024
|
@@ -2981,21 +2981,36 @@
urn
-'%25s://%25s%25s' %25
+urlparse.urlunparse(%0A
(pa
@@ -3041,16 +3041,35 @@
sed.path
+, None, None, None)
)%0A%0A%0Aclas
|
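Decoded, the two hunks replace the hand-rolled string formatting at the end of request_url() with the standard-library helper; urlparse.urlunparse assembles a URL from a 6-tuple of (scheme, netloc, path, params, query, fragment):

    # before
    return '%s://%s%s' % (parsed.scheme, netloc, parsed.path)

    # after
    return urlparse.urlunparse(
        (parsed.scheme, netloc, parsed.path, None, None, None))

Passing None for the last three components keeps the params, query, and fragment out of the result, as section 9.1.2 requires.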
215439b43c27271c95fc208bf683a19619c81b8d
|
Add 3D slic tests (gray not working yet)
|
skimage/segmentation/tests/test_slic.py
|
skimage/segmentation/tests/test_slic.py
|
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from skimage.segmentation import slic
def test_color():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4)
# we expect 4 segments
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
def test_gray():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21))
img[:10, :10] = 0.33
img[10:, :10] = 0.67
img[10:, 10:] = 1.00
img += 0.0033 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4, ratio=20.0, multichannel=False)
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
Python
| 0
|
@@ -1,12 +1,35 @@
+import itertools as it%0A
import numpy
@@ -35,16 +35,16 @@
y as np%0A
-
from num
@@ -148,16 +148,19 @@
st_color
+_2d
():%0A
@@ -661,16 +661,17 @@
:%5D, 3)%0A%0A
+%0A
def test
@@ -675,16 +675,19 @@
est_gray
+_2d
():%0A
@@ -1142,32 +1142,32 @@
g%5B:10, 10:%5D, 1)%0A
-
assert_array
@@ -1192,16 +1192,1325 @@
:%5D, 3)%0A%0A
+%0Adef test_color_3d():%0A rnd = np.random.RandomState(0)%0A img = np.zeros((20, 21, 22, 3))%0A slices = %5B%5D%0A for dim_size in img.shape%5B:-1%5D:%0A midpoint = dim_size // 2%0A slices.append((slice(None, midpoint), slice(midpoint, None)))%0A slices = list(it.product(*slices))%0A colors = list(it.product(*((%5B0, 1%5D,) * 3)))%0A for s, c in zip(slices, colors):%0A img%5Bs%5D = c%0A img += 0.01 * rnd.normal(size=img.shape)%0A img%5Bimg %3E 1%5D = 1%0A img%5Bimg %3C 0%5D = 0%0A seg = slic(img, sigma=0, n_segments=8)%0A%0A assert_equal(len(np.unique(seg)), 8)%0A for s, c in zip(slices, range(8)):%0A assert_array_equal(seg%5Bs%5D, c)%0A%0A%0Adef test_gray_3d():%0A rnd = np.random.RandomState(0)%0A img = np.zeros((20, 21, 22))%0A slices = %5B%5D%0A for dim_size in img.shape%5B:-1%5D:%0A midpoint = dim_size // 2%0A slices.append((slice(None, midpoint), slice(midpoint, None)))%0A slices = list(it.product(*slices))%0A shades = np.arange(0, 1.000001, 1.0/7)%0A for s, sh in zip(slices, shades):%0A img%5Bs%5D = sh%0A img += 0.001 * rnd.normal(size=img.shape)%0A img%5Bimg %3E 1%5D = 1%0A img%5Bimg %3C 0%5D = 0%0A seg = slic(img, sigma=0, n_segments=8, ratio=40.0, multichannel=False)%0A%0A assert_equal(len(np.unique(seg)), 8)%0A for s, c in zip(slices, range(8)):%0A assert_array_equal(seg%5Bs%5D, c)%0A%0A%0A
if __nam
|
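Decoded, the patch adds import itertools as it at the top of the file, renames the existing tests to test_color_2d and test_gray_2d, and appends the two 3D tests below (indentation approximate). Note that both new tests build their slices from img.shape[:-1]; for the single-channel image that drops one of the three spatial axes, which is consistent with the commit message's note that gray is not working yet:

    def test_color_3d():
        rnd = np.random.RandomState(0)
        img = np.zeros((20, 21, 22, 3))
        slices = []
        for dim_size in img.shape[:-1]:
            midpoint = dim_size // 2
            slices.append((slice(None, midpoint), slice(midpoint, None)))
        slices = list(it.product(*slices))
        colors = list(it.product(*(([0, 1],) * 3)))
        for s, c in zip(slices, colors):
            img[s] = c
        img += 0.01 * rnd.normal(size=img.shape)
        img[img > 1] = 1
        img[img < 0] = 0
        seg = slic(img, sigma=0, n_segments=8)

        assert_equal(len(np.unique(seg)), 8)
        for s, c in zip(slices, range(8)):
            assert_array_equal(seg[s], c)


    def test_gray_3d():
        rnd = np.random.RandomState(0)
        img = np.zeros((20, 21, 22))
        slices = []
        for dim_size in img.shape[:-1]:
            midpoint = dim_size // 2
            slices.append((slice(None, midpoint), slice(midpoint, None)))
        slices = list(it.product(*slices))
        shades = np.arange(0, 1.000001, 1.0/7)
        for s, sh in zip(slices, shades):
            img[s] = sh
        img += 0.001 * rnd.normal(size=img.shape)
        img[img > 1] = 1
        img[img < 0] = 0
        seg = slic(img, sigma=0, n_segments=8, ratio=40.0, multichannel=False)

        assert_equal(len(np.unique(seg)), 8)
        for s, c in zip(slices, range(8)):
            assert_array_equal(seg[s], c)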
52eb6a3d8188c5e8fbabbe4f4822d0c4ececf48b
|
Add option to see all available ldap attributes via ldapsearch command
|
pucas/management/commands/ldapsearch.py
|
pucas/management/commands/ldapsearch.py
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from pucas.ldap import LDAPSearch, LDAPSearchException
class Command(BaseCommand):
help = 'Look up one or more users in LDAP by netid'
def add_arguments(self, parser):
parser.add_argument('netid', nargs='+')
def handle(self, *args, **options):
ldap_search = LDAPSearch()
for netid in options['netid']:
print('\nLooking for %s...' % netid)
try:
info = ldap_search.find_user(netid)
# display attributes configured in settings
for attr in settings.PUCAS_LDAP['ATTRIBUTES']:
print('%-15s %s' % (attr, getattr(info, attr)))
except LDAPSearchException as err:
print(err)
|
Python
| 0
|
@@ -319,16 +319,139 @@
rgs='+')
+%0A parser.add_argument('--all', '-a', action='store_true',%0A help='Retrieve all available LDAP attributes')
%0A%0A de
@@ -519,16 +519,46 @@
earch()%0A
+ print(options%5B'all'%5D)%0A
@@ -708,27 +708,249 @@
etid
-)%0A #
+, all_attributes=options%5B'all'%5D)%0A # if all attributes were requested, just print the returned%0A # ldap search object%0A if options%5B'all'%5D:%0A print(info)%0A # otherwise,
dis
@@ -988,16 +988,42 @@
ettings%0A
+ else:%0A
@@ -1073,24 +1073,28 @@
TRIBUTES'%5D:%0A
+
|
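Decoded and merged into the file above, the command gains an --all/-a flag and handle() branches on it (indentation approximate; the bare print(options['all']) after the search object is created looks like a debug statement that the commit itself introduces):

    class Command(BaseCommand):
        ...
        def add_arguments(self, parser):
            parser.add_argument('netid', nargs='+')
            parser.add_argument('--all', '-a', action='store_true',
                help='Retrieve all available LDAP attributes')

        def handle(self, *args, **options):
            ldap_search = LDAPSearch()
            print(options['all'])
            for netid in options['netid']:
                print('\nLooking for %s...' % netid)
                try:
                    info = ldap_search.find_user(netid, all_attributes=options['all'])
                    # if all attributes were requested, just print the returned
                    # ldap search object
                    if options['all']:
                        print(info)
                    # otherwise, display attributes configured in settings
                    else:
                        for attr in settings.PUCAS_LDAP['ATTRIBUTES']:
                            print('%-15s %s' % (attr, getattr(info, attr)))
                except LDAPSearchException as err:
                    print(err)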
ea3a390a88c63a566df567d245e22e505f17dcfe
|
Remove test_TextFile_bad_extension
|
tests/test_artefacts.py
|
tests/test_artefacts.py
|
import pytest
from pirec import artefacts
def test_Artefact_basename():
"""basename() should strip the extension from an artefact filename."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img.basename == 'foo'
def test_Artefact_dirname():
"""dirname() should return the path components up to the filename."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.dirname == 'dir'
img2 = artefacts.Artefact('/dir1/dir2/foo.nii.gz', '.nii.gz', exists=False)
assert img2.dirname == '/dir1/dir2'
img3 = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img3.dirname == ''
def test_Artefact_basename_with_dir():
"""basename() should still work in a subdirectory."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.basename == 'dir/foo'
def test_Artefact_justname():
"""justname() should work like basename() with no directory components."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img.justname == 'foo'
def test_Artefact_justname_with_dir():
"""justname() should strip extension and directory components."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.justname == 'foo'
def test_Artefact_repr():
"""Make sure __repr__() looks correct."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert repr(img) == "Artefact('foo.nii.gz')"
def test_NiiGzImage_bad_extension():
"""__init__() should raise a ValueError if filename doesn't have the expected extension."""
with pytest.raises(ValueError):
img = artefacts.NiiGzImage('foo.nii.gx', exists=False)
def test_TextFile_bad_extension():
with pytest.raises(ValueError):
img = artefacts.NiiGzImage('foo.txx', exists=False)
def test_exists(tmpdir):
"""If the file is present and exists=True __init__ should work."""
f = tmpdir.join('foo.txt')
f.write('foo')
filename = str(f)
art = artefacts.Artefact(filename, '.txt')
def test_not_exists(tmpdir):
"""If the file is not present and exists=True __init__ should raise IOError."""
f = tmpdir.join('foo.txt')
filename = str(f)
with pytest.raises(IOError):
art = artefacts.Artefact(filename, '.txt')
def test_not_exists_ok(tmpdir):
"""If the file is not present and exists=False __init__ should work."""
filename = str(tmpdir.join('foo.txt'))
art = artefacts.Artefact(filename, '.txt', exists=False)
|
Python
| 0.000005
|
@@ -1738,141 +1738,8 @@
)%0A%0A%0A
-def test_TextFile_bad_extension():%0A with pytest.raises(ValueError):%0A img = artefacts.NiiGzImage('foo.txx', exists=False)%0A%0A%0A
def
|
5799a3b5127e09a0c705f9551deece75e6de321e
|
Fix docstrings in neurom.geom.transform
|
neurom/geom/transform.py
|
neurom/geom/transform.py
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Transformation functions for morphology objects'''
import numpy as np
_TRANSFDOC = '''
The transformation can be applied to [x, y, z] points via a call operator
with the following properties:
Parameters:
points: 2D numpy array of points, where the 3 columns
are the x, y, z coordinates respectively
Returns:
2D numpy array of transformed points
'''
class Transform3D(object):
'''Class representing a generic 3D transformation'''
__doc__ += _TRANSFDOC
def __call__(self, points):
'''Apply a 3D transformation to a set of points'''
raise NotImplementedError
class Translation(Transform3D):
'''class representing a 3D translation'''
__doc__ += _TRANSFDOC
def __init__(self, translation):
'''
Parameters:
translation: 3-vector of x, y, z
'''
self._trans = np.array(translation)
def __call__(self, points):
'''Apply a 3D translation to a set of points'''
return points + self._trans
class Rotation(Transform3D):
'''Class representing a 3D rotation'''
__doc__ += _TRANSFDOC
def __init__(self, dcm):
'''
Parameters:
dcm: a 3x3 direction cosine matrix
'''
self._dcm = np.array(dcm)
def __call__(self, points):
'''Apply a 3D rotation to a set of points'''
return np.dot(self._dcm, np.array(points).T).T
class PivotRotation(Rotation):
'''Class representing a 3D rotation about a pivot point'''
__doc__ += _TRANSFDOC
def __init__(self, dcm, pivot=None):
'''
Parameters:
dcm: a 3x3 direction cosine matrix
pivot: a 3-vector specifying the origin of rotation
'''
super(PivotRotation, self).__init__(dcm)
self._origin = np.zeros(3) if pivot is None else np.array(pivot)
def __call__(self, points):
'''Apply a 3D pivoted rotation to a set of points'''
points = points - self._origin
points = np.dot(self._dcm, np.array(points).T).T
points += self._origin
return points
def translate(obj, t):
'''
Translate object of supported type.
Parameters :
obj : object with one of the following types:
'NeuriteType', 'Neuron', 'fst.Neuron'
Returns:
copy of the object with the applied translation
'''
try:
return obj.transform(Translation(t))
except AttributeError:
raise NotImplementedError
def rotate(obj, axis, angle, origin=None):
'''
Rotation around unit vector following the right hand rule
Parameters:
obj : obj to be rotated (e.g. tree, neuron)
axis : unit vector for the axis of rotation
angle : rotation angle in rads
Returns:
A copy of the object with the applied translation.
'''
R = _rodrigues_to_dcm(axis, angle)
try:
return obj.transform(PivotRotation(R, origin))
except AttributeError:
raise NotImplementedError
def _sin(x):
'''sine with case for pi multiples'''
return 0. if np.isclose(np.mod(x, np.pi), 0.) else np.sin(x)
def _rodrigues_to_dcm(axis, angle):
'''
Generates transformation matrix from unit vector
and rotation angle. The rotation is applied in the direction
of the axis which is a unit vector following the right hand rule.
Inputs :
axis : unit vector of the direction of the rotation
angle : angle of rotation in rads
Returns : 3x3 Rotation matrix
'''
ux, uy, uz = axis / np.linalg.norm(axis)
uxx = ux * ux
uyy = uy * uy
uzz = uz * uz
uxy = ux * uy
uxz = ux * uz
uyz = uy * uz
sn = _sin(angle)
cs = _sin(np.pi / 2. - angle)
cs1 = 1. - cs
R = np.zeros([3, 3])
R[0, 0] = cs + uxx * cs1
R[0, 1] = uxy * cs1 - uz * sn
R[0, 2] = uxz * cs1 + uy * sn
R[1, 0] = uxy * cs1 + uz * sn
R[1, 1] = cs + uyy * cs1
R[1, 2] = uyz * cs1 - ux * sn
R[2, 0] = uxz * cs1 - uy * sn
R[2, 1] = uyz * cs1 + ux * sn
R[2, 2] = cs + uzz * cs1
return R
|
Python
| 0.000592
|
@@ -3925,91 +3925,60 @@
ect
-with one of the following types:%0A 'NeuriteType', 'Neuron', 'fst.Neuron'
+to be translated. Must implement a transform method.
%0A%0A
@@ -4342,11 +4342,14 @@
.g.
-tre
+neurit
e, n
@@ -4354,16 +4354,64 @@
neuron)
+.%0A Must implement a transform method.
%0A
|
7b27c6aa2c30317c4cbc1510ddf05b6b305209b0
|
Update objax/functional/core/ops.py
|
objax/functional/core/ops.py
|
objax/functional/core/ops.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['dynamic_slice', 'pad', 'rsqrt', 'stop_gradient', 'top_k',
'flatten', 'one_hot', 'upsample_2d', 'upscale_nn']
import jax.nn
from jax import numpy as jn, lax
from typing import Union, Tuple
from objax import util
from objax.constants import UpSample
from objax.typing import JaxArray
dynamic_slice = lax.dynamic_slice
one_hot = jax.nn.one_hot
pad = jn.pad
stop_gradient = lax.stop_gradient
top_k = lax.top_k # Current code doesn't work with gradient.
rsqrt = lax.rsqrt
def flatten(x: JaxArray) -> JaxArray:
"""Flattens input tensor to a 2D tensor.
Args:
x: input tensor with dimensions (n_1, n_2, ..., n_k)
Returns:
The input tensor reshaped to two dimensions (n_1, n_prod),
where n_prod is equal to the product of n_2 to n_k.
"""
return x.reshape([x.shape[0], -1])
def upsample_2d(x: JaxArray,
scale: Union[Tuple[int, int], int],
method: Union[UpSample, str] = UpSample.BILINEAR) -> JaxArray:
"""Function to upscale 2D images.
Args:
x: input tensor.
scale: imt or tuple scaling factor
method: str or UpSample interpolation methods e.g. ['bilinear', 'nearest'].
Returns:
upscaled 2d image tensor
"""
s = x.shape
scale = util.to_tuple(scale, 2)
y = jax.image.resize(x.transpose([0, 2, 3, 1]),
shape=(s[0], s[2] * scale[0], s[3] * scale[1], s[1]),
method=util.to_upsample(method))
return y.transpose([0, 3, 1, 2])
def upscale_nn(x: JaxArray, scale: int = 2) -> JaxArray:
"""Nearest neighbor upscale for image batches of shape (N, C, H, W).
Args:
x: input tensor of shape (N, C, H, W).
scale: integer scaling factor.
Returns:
Output tensor of shape (N, C, H * scale, W * scale).
"""
s = x.shape
x = x.reshape(s[:2] + (s[2], 1, s[3], 1))
x = jn.tile(x, (1, 1, 1, scale, 1, scale))
return x.reshape(s[:2] + (scale * s[2], scale * s[3]))
|
Python
| 0
|
@@ -1660,17 +1660,17 @@
scale: i
-m
+n
t or tup
|
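Decoded, this one-character hunk fixes a typo in the upsample_2d docstring:

    # before:  scale: imt or tuple scaling factor
    # after:   scale: int or tuple scaling factor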
1834dff2b4af497ea70ad03dd3806086b20bbe82
|
Introduce a method to help us create VMs (not clones)
|
virtualbox.py
|
virtualbox.py
|
"""
A salt cloud provider that lets you use virtualbox on your machine
and act as a cloud.
For now this will only clone existing VMs. It's best to create a template
from which we will clone.
Followed
https://docs.saltstack.com/en/latest/topics/cloud/cloud.html#non-libcloud-based-modules
to create this.
Dicts provided by salt:
__opts__ : contains the options used to run Salt Cloud,
as well as a set of configuration and environment variables
"""
# Import python libs
import copy
import logging
import pprint
import time
import yaml
# Import salt libs
import salt.config as config
from salt.exceptions import SaltCloudSystemExit
import salt.utils.cloud
log = logging.getLogger(__name__)
# Import virtualbox libs
HAS_LIBS = False
try:
# This code assumes vboxapi.py from VirtualBox distribution
# being in PYTHONPATH, or installed system-wide
from vboxapi import VirtualBoxManager
HAS_LIBS = True
except ImportError:
log.error("Couldn't import VirtualBox API")
__virtualname__ = 'virtualbox'
_virtualboxManager = None
def __virtual__():
"""
This function determines whether or not
to make this cloud module available upon execution.
Most often, it uses get_configured_provider() to determine
if the necessary configuration has been set up.
It may also check for necessary imports to decide whether to load the module.
In most cases, it will return a True or False value.
If the name of the driver used does not match the filename,
then that name should be returned instead of True.
@return True|False|str
"""
if not HAS_LIBS:
return False
if get_configured_provider() is False:
return False
# If the name of the driver used does not match the filename,
# then that name should be returned instead of True.
# return __virtualname__
return True
def get_configured_provider():
'''
Return the first configured instance.
'''
configured = config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
() # keys we need from the provider configuration
)
log.debug("First virtualbox configuration %s" % configured)
return configured
def create(vm_info):
"""
Creates a virtual machine from the given VM information.
This is what is used to request a virtual machine to be created by the
cloud provider, wait for it to become available,
and then (optionally) log in and install Salt on it.
Fires:
"starting create" : This event is tagged salt/cloud/<vm name>/creating.
The payload contains the names of the VM, profile and provider.
@param vm_info {dict}
{
name: <str>
profile: <dict>
provider: <provider>
clone_from: <vm_name>
}
@return dict of resulting vm. !!!Passwords can and should be included!!!
"""
log.debug("Creating virtualbox with %s" % vm_info)
try:
# Check for required profile parameters before sending any API calls.
if vm_info['profile'] and config.is_profile_configured(
__opts__,
__active_provider_name__ or 'virtualbox',
vm_info['profile']
) is False:
return False
except AttributeError:
pass
# For now we can only clone
if 'clone_from' not in vm_info:
log.error('"clone_from" not in profile configuration!')
return False
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_info['name']),
{
'name': vm_info['name'],
'profile': vm_info['profile'],
'provider': vm_info['provider'],
},
transport=__opts__['transport']
)
# TODO Calculate kwargs with parameters required by virtualbox
# to create the virtual machine.
request_kwargs = {
'name': vm_info['name'],
'profile': vm_info['profile']
}
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_info['name']),
request_kwargs,
transport=__opts__['transport']
)
# TODO request a new VM!
vm_result = vb_clone_vm(**request_kwargs)
# TODO Prepare deployment of salt on the vm
# Any private data, including passwords and keys (including public keys)
# should be stripped from the deploy kwargs before the event is fired.
deploy_kwargs = {
}
salt.utils.cloud.fire_event(
'event',
'deploying salt',
'salt/cloud/{0}/deploying'.format(vm_info['name']),
deploy_kwargs,
transport=__opts__['transport']
)
deploy_kwargs.update({
# TODO Add private data
})
# TODO wait for target machine to become available
# TODO deploy!
# Do we have to call this?
salt.utils.cloud.deploy_script(**deploy_kwargs)
salt.utils.cloud.fire_event(
'event',
'created machine',
'salt/cloud/{0}/created'.format(vm_info['name']),
vm_result,
transport=__opts__['transport']
)
# Passwords should be included in this object!!
return vm_result
# -----------------------------
# Virtualbox methods
# -----------------------------
def vb_get_manager():
# This code initializes VirtualBox manager with default style
# and parameters
global _virtualboxManager
if _virtualboxManager is None:
_virtualboxManager = VirtualBoxManager(None, None)
vbox = _virtualboxManager.vbox
return vbox
def vb_clone_vm(
name=None,
clone_from=None,
timeout=10000):
"""
Tells virtualbox to clone a VM
@return dict of resulting VM
"""
vbox = vb_get_manager()
log.info("Clone virtualbox machine %s from %s" % (name, clone_from))
source_machine = vbox.findMachine(clone_from)
groups = None
osTypeId = "Other"
new_machine = vbox.createMachine(
None, # Settings file
name,
groups,
osTypeId,
None # flags
)
progress = source_machine.cloneTo(
new_machine,
0, # CloneMode
None # CloneOptions : None = Full?
)
progress.waitForCompletion(timeout)
log.info("Finished cloning %s from %s" % (name, clone_from))
vbox.registerMachine(new_machine)
def vb_start_vm(**kwargs):
"""
Tells Virtualbox to start up a VM.
Blocking function!
@return dict of started VM, contains IP addresses and what not
"""
pass
def vb_destroy_machine(name=None, timeout=10000):
"""
@param timeout int timeout in milliseconds
"""
vbox = vb_get_manager()
log.info("Destroying machine %s" % name)
machine = vbox.findMachine(name)
files = machine.unregister(2)
progress = machine.deleteConfig(files)
progress.waitForCompletion(timeout)
log.info("Finished destroying machine %s" % name)
|
Python
| 0
|
@@ -5677,16 +5677,404 @@
vbox%0A%0A%0A
+def vb_create_machine(name=None):%0A vbox = vb_get_manager()%0A log.info(%22Create virtualbox machine %25s %22 %25 (name,))%0A groups = None%0A osTypeId = %22Other%22%0A new_machine = vbox.createMachine(%0A None, # Settings file%0A name,%0A groups,%0A osTypeId,%0A None # flags%0A )%0A vbox.registerMachine(new_machine)%0A log.info(%22Finished creating %25s%22 %25 name)%0A%0A%0A
def vb_c
|
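Decoded, the hunk inserts the new helper between vb_get_manager() and vb_clone_vm(); unlike the clone path, it registers a bare machine built from scratch:

    def vb_create_machine(name=None):
        vbox = vb_get_manager()
        log.info("Create virtualbox machine %s " % (name,))
        groups = None
        osTypeId = "Other"
        new_machine = vbox.createMachine(
            None,  # Settings file
            name,
            groups,
            osTypeId,
            None  # flags
        )
        vbox.registerMachine(new_machine)
        log.info("Finished creating %s" % name)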
547afc8237b31d8dec9eb4a15fff5d69936b3ed1
|
Use load_ui from glue
|
glue_exp/importers/vizier/qt_widget.py
|
glue_exp/importers/vizier/qt_widget.py
|
import os
from glue.external.qt import QtGui
from glue.external.qt.QtCore import Qt
# TODO: update to use glue.qt once load_ui has been generalized
from PyQt4.uic import loadUi
from .vizier_helpers import query_vizier, fetch_vizier_catalog
__all__ = ["QtVizierImporter"]
UI_MAIN = os.path.join(os.path.dirname(__file__), 'vizier.ui')
class QtVizierImporter(QtGui.QDialog):
def __init__(self):
super(QtVizierImporter, self).__init__()
self.ui = loadUi(UI_MAIN, self)
self.cancel.clicked.connect(self.reject)
self.ok.clicked.connect(self.finalize)
self.search_button.clicked.connect(self.search)
self._checkboxes = {}
self.datasets = []
def clear(self):
self._checkboxes.clear()
self.tree.clear()
def search(self):
self.search_button.setEnabled(False)
self.search_button.setText("Searching")
QtGui.qApp.processEvents()
self.clear()
results = query_vizier(self.query.text())
for catalog_set in results:
main = QtGui.QTreeWidgetItem(self.tree.invisibleRootItem(),
[catalog_set['description'], ""])
main.setFlags(main.flags() | Qt.ItemIsTristate)
main.setCheckState(2, Qt.Unchecked)
for catalog in catalog_set['tables']:
sub = QtGui.QTreeWidgetItem(main)
sub.setFlags(sub.flags() | Qt.ItemIsUserCheckable)
sub.setCheckState(2, Qt.Unchecked)
sub.setText(0, catalog['description'])
sub.setText(1, catalog['nrows'])
sub.setText(2, "")
self._checkboxes[catalog['name']] = sub
self.tree.resizeColumnToContents(0)
self.tree.resizeColumnToContents(1)
self.tree.resizeColumnToContents(2)
self.search_button.setEnabled(True)
self.search_button.setText("Search")
def finalize(self):
retrieve = []
for name in self._checkboxes:
if self._checkboxes[name].checkState(2) > 0:
retrieve.append(name)
self.datasets = []
for iname, name in enumerate(retrieve):
self.progress.setValue(iname / float(len(retrieve)) * 100.)
QtGui.qApp.processEvents() # update progress bar
self.datasets.append(fetch_vizier_catalog(name))
self.progress.setValue(100)
self.accept()
|
Python
| 0
|
@@ -82,87 +82,27 @@
Qt%0A
-%0A# TODO: update to use glue.qt once load_ui has been generalized%0Afrom PyQt4.uic
+from glue.qt.qtutil
imp
@@ -109,17 +109,18 @@
ort load
-U
+_u
i%0A%0Afrom
@@ -412,17 +412,18 @@
i = load
-U
+_u
i(UI_MAI
|
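Decoded, the two hunks drop the temporary PyQt4 import (and its TODO) in favour of glue's own helper, renaming the call to match:

    # before
    # TODO: update to use glue.qt once load_ui has been generalized
    from PyQt4.uic import loadUi
    ...
    self.ui = loadUi(UI_MAIN, self)

    # after
    from glue.qt.qtutil import load_ui
    ...
    self.ui = load_ui(UI_MAIN, self)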
12b1da21a5c738f28125f130daf11173520bbc16
|
Add support for kramdown/Jekyll as a flavor for MarkdownTableWriter class
|
pytablewriter/writer/text/_markdown.py
|
pytablewriter/writer/text/_markdown.py
|
import copy
from typing import List
import dataproperty as dp
import typepy
from dataproperty import ColumnDataProperty, DataProperty
from mbstrdecoder import MultiByteStrDecoder
from ...error import EmptyTableDataError
from ...style import Align, GFMarkdownStyler, MarkdownStyler, StylerInterface
from .._table_writer import AbstractTableWriter
from ._text_writer import IndentationTextTableWriter
class MarkdownTableWriter(IndentationTextTableWriter):
"""
A table writer class for Markdown format.
:Example:
:ref:`example-markdown-table-writer`
"""
FORMAT_NAME = "markdown"
DEFAULT_FLAVOR = "CommonMark"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
@property
def support_split_write(self) -> bool:
return True
def __init__(self, **kwargs) -> None:
self.__flavor = kwargs.pop("flavor", self.DEFAULT_FLAVOR).casefold()
super().__init__(**kwargs)
self.indent_string = ""
self.column_delimiter = "|"
self.char_left_side_row = "|"
self.char_right_side_row = "|"
self.char_cross_point = "|"
self.char_header_row_cross_point = "|"
self.char_header_row_left_cross_point = "|"
self.char_header_row_right_cross_point = "|"
self.is_write_opening_row = True
self._use_default_header = True
self._is_require_header = True
self._quoting_flags = copy.deepcopy(dp.NOT_QUOTING_FLAGS)
self._dp_extractor.min_column_width = 3
self._init_cross_point_maps()
def _to_header_item(self, col_dp: ColumnDataProperty, value_dp: DataProperty) -> str:
return self.__escape_vertical_bar_char(super()._to_header_item(col_dp, value_dp))
def _to_row_item(self, row_idx: int, col_dp: ColumnDataProperty, value_dp: DataProperty) -> str:
return self.__escape_vertical_bar_char(super()._to_row_item(row_idx, col_dp, value_dp))
def _get_opening_row_items(self) -> List[str]:
return []
def _get_header_row_separator_items(self) -> List[str]:
header_separator_list = []
margin = " " * self.margin
for col_dp in self._column_dp_list:
padding_len = self._get_padding_len(col_dp)
align = self._get_align(col_dp.column_index, col_dp.align)
if align == Align.RIGHT:
separator_item = "-" * (padding_len - 1) + ":"
elif align == Align.CENTER:
separator_item = ":" + "-" * (padding_len - 2) + ":"
else:
separator_item = "-" * padding_len
header_separator_list.append(
"{margin}{item}{margin}".format(margin=margin, item=separator_item)
)
return header_separator_list
def _get_value_row_separator_items(self) -> List[str]:
return []
def _get_closing_row_items(self) -> List[str]:
return []
def write_table(self, **kwargs) -> None:
"""
|write_table| with Markdown table format.
Args:
flavor (Optional[str]):
possible flavors are:
- ``"CommonMark"``
- ``"github"``
- ``"gfm"`` (alias for ``"github"``)
Defaults to |None|.
Example:
:ref:`example-markdown-table-writer`
.. note::
- |None| values are written as an empty string
- Vertical bar characters (``'|'``) in table items are escaped
"""
if "flavor" in kwargs:
new_flavor = kwargs["flavor"].casefold()
if new_flavor != self.__flavor:
self._clear_preprocess()
self.__flavor = new_flavor
if self.__flavor:
self._styler = self._create_styler(self)
with self._logger:
try:
self._verify_property()
except EmptyTableDataError:
self._logger.logger.debug("no tabular data found")
return
self.__write_chapter()
self._write_table(**kwargs)
if self.is_write_null_line_after_table:
self.write_null_line()
def _write_table_iter(self, **kwargs) -> None:
self.__write_chapter()
super()._write_table_iter()
def __write_chapter(self) -> None:
if typepy.is_null_string(self.table_name):
return
self._write_line(
"{:s} {:s}".format(
"#" * (self._indent_level + 1), MultiByteStrDecoder(self.table_name).unicode_str
)
)
def _create_styler(self, writer: AbstractTableWriter) -> StylerInterface:
if self.__flavor in ("gfm", "github"):
return GFMarkdownStyler(writer)
return MarkdownStyler(writer)
@staticmethod
def __escape_vertical_bar_char(value: str) -> str:
return value.replace("|", r"\|")
|
Python
| 0
|
@@ -2920,32 +2920,186 @@
return %5B%5D%0A%0A
+ def _write_header(self) -%3E None:%0A super()._write_header()%0A%0A if self.__flavor in (%22kramdown%22, %22jekyll%22):%0A self._write_line()%0A%0A
def write_ta
@@ -3392,19 +3392,18 @@
(alias
+o
f
-or
%60%60%22gith
@@ -3408,16 +3408,110 @@
thub%22%60%60)
+%0A - %60%60kramdown%60%60%0A - %60%60Jekyll%60%60 (alias of %60%60%22kramdown%22%60%60)
%0A%0A
@@ -3532,22 +3532,32 @@
ults to
-%7CNone%7C
+%60%60%22CommonMark%22%60%60
.%0A%0A
@@ -4863,24 +4863,108 @@
%0A )%0A%0A
+ if self.__flavor in (%22kramdown%22, %22jekyll%22):%0A self._write_line()%0A%0A
def _cre
|
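Decoded, the patch documents the two new flavors in the write_table() docstring (also correcting the stated default from |None| to "CommonMark"), adds a _write_header override, and appends a matching branch at the end of __write_chapter(); kramdown-based renderers such as Jekyll expect a blank line between block-level elements, which is presumably what the extra _write_line() calls provide:

    def _write_header(self) -> None:
        super()._write_header()

        if self.__flavor in ("kramdown", "jekyll"):
            self._write_line()

    # ...and at the end of __write_chapter():
        if self.__flavor in ("kramdown", "jekyll"):
            self._write_line()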
0226d6dff5bd46c3de1ba55e8b7bb01013be3d2f
|
Add additional contributions to savings goal
|
gnu_reporting/reports/savings_goals.py
|
gnu_reporting/reports/savings_goals.py
|
from datetime import datetime
import time
from dateutil.rrule import rrule, MONTHLY
from dateutil.relativedelta import relativedelta
from gnu_reporting.reports.base import Report
from gnu_reporting.wrapper import get_decimal, get_account, get_balance_on_date, account_walker
from gnu_reporting.periods import PeriodStart
from gnu_reporting.configuration.currency import get_currency
import simplejson as json
from decimal import Decimal
class SavingsGoal(Report):
report_type = 'savings_goal'
def __init__(self, name, account, goal, as_of=PeriodStart.today):
super(SavingsGoal, self).__init__(name)
if isinstance(account, basestring):
account = [account]
self.accounts = account
self.goal_amount = Decimal(goal)
self.as_of = PeriodStart(as_of)
def __call__(self):
total_balance = Decimal('0.0')
currency = get_currency()
for account_description in self.accounts:
multiplier = Decimal('1.0')
if isinstance(account_description, basestring):
account = account_description
else:
account = account_description[0]
multiplier = Decimal(account_description[1])
for account_name in account_walker([account]):
balance = get_balance_on_date(account_name, self.as_of.date, currency)
total_balance += (balance * multiplier)
payload = self._generate_result()
payload['data']['balance'] = total_balance
payload['data']['goal'] = self.goal_amount
return payload
class SavingsGoalTrend(Report):
report_type = 'savings_goal_trend'
def __init__(self, name, account_name, goal_amount, past_trend=12, future_trend=6):
super(SavingsGoalTrend, self).__init__(name)
self.account_name = account_name
self.goal_amount = goal_amount
self.past_trend = past_trend
self.future_trend = future_trend
def __call__(self):
account = get_account(self.account_name)
todays_date = datetime.today()
beginning_of_month = datetime(todays_date.year, todays_date.month, 1)
start_of_trend = beginning_of_month - relativedelta(months=self.past_trend)
end_of_trend = start_of_trend + relativedelta(months=self.past_trend + self.future_trend)
payload = self._generate_result()
payload['data']['trend'] = []
for dt in rrule(MONTHLY, dtstart=start_of_trend, until=end_of_trend):
time_value = time.mktime(dt.timetuple())
balance = account.GetBalanceAsOfDate(time_value)
payload['data']['trend'].append(dict(date=dt.strftime('%Y-%m-%d'),
balance=get_decimal(balance)))
return payload
if __name__ == '__main__':
from gnu_reporting.wrapper import initialize
from decimal import Decimal
session = initialize('data/Accounts.gnucash')
goal_amount = Decimal('25904.12')
report = SavingsGoalTrend('Estimated Taxes', 'Assets.Savings Goals.Estimated Taxes 2015', goal_amount)
payload = report()
other_report = SavingsGoal('Estimated Taxes', 'Assets.Savings Goals.Estimated Taxes 2015', goal_amount)
other_payload = other_report()
session.end()
print json.dumps(payload)
print ''
print json.dumps(other_payload)
|
Python
| 0
|
@@ -560,16 +560,36 @@
rt.today
+, contributions=None
):%0A
@@ -824,16 +824,208 @@
as_of)%0A%0A
+ if not contributions:%0A contributions = %5B%5D%0A elif type(contributions) != list:%0A contributions = %5Bcontributions%5D%0A%0A self.contributions = contributions%0A%0A
def
@@ -1643,16 +1643,107 @@
plier)%0A%0A
+ for contribution in self.contributions:%0A total_balance += contribution%0A%0A
|
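Decoded, the three hunks give SavingsGoal an optional contributions argument, normalize it to a list in __init__, and fold the extra amounts into the computed balance in __call__ (indentation approximate):

    def __init__(self, name, account, goal, as_of=PeriodStart.today, contributions=None):
        ...
        self.as_of = PeriodStart(as_of)

        if not contributions:
            contributions = []
        elif type(contributions) != list:
            contributions = [contributions]

        self.contributions = contributions

    # ...and in __call__, after the account-walking loop:
        for contribution in self.contributions:
            total_balance += contribution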
f4dee9c7ef082d17e215f311dadb27fa4c44f6e7
|
fix regression: typo
|
src/adhocracy/adhocracy/sheets/pool.py
|
src/adhocracy/adhocracy/sheets/pool.py
|
"""Pool Sheet."""
from collections.abc import Iterable
from pyramid.traversal import resource_path
from substanced.util import find_catalog
import colander
from adhocracy.interfaces import ISheet
from adhocracy.interfaces import SheetToSheet
from adhocracy.sheets import GenericResourceSheet
from adhocracy.sheets import sheet_metadata_defaults
from adhocracy.sheets import add_sheet_to_registry
from adhocracy.schema import UniqueReferences
from adhocracy.utils import append_if_not_none
from adhocracy.utils import FormList
from adhocracy.utils import remove_keys_from_dict
filtering_pool_default_filter = ['depth', 'content_type', 'sheet', 'elements',
'count', 'aggregateby']
class PoolSheet(GenericResourceSheet):
"""Generic pool resource sheet."""
def _get_reference_appstruct(self, params):
appstruct = {'elements': []}
reftype = self._reference_nodes['elements'].reftype
target_isheet = reftype.getTaggedValue('target_isheet')
for child in self.context.values():
if target_isheet.providedBy(child):
appstruct['elements'].append(child)
return appstruct
class FilteringPoolSheet(PoolSheet):
"""Resource sheet that allows filtering and aggregating pools."""
def _get_reference_appstruct(self, params):
if not params or not self._custom_filtering_necessary(params):
return super()._get_reference_appstruct(params)
appstruct = {}
depth = self._build_depth(params)
iface_filter = self._build_iface_filter(params)
arbitrary_filters = self._get_arbitrary_filters(params)
elements = self._build_elements_form_list(params)
elements.extend(self._filter_elements(depth,
iface_filter,
arbitrary_filters,
))
appstruct['elements'] = elements
if self._count_matching_elements(params):
appstruct['count'] = len(elements)
# FIXME implement aggregateby
return appstruct
def _custom_filtering_necessary(self, params: dict) -> bool:
params_copy = params.copy()
return params_copy.pop('depth', '1') != '1' or\
params_copy.pope('elements', 'path') != 'path' or\
params_copy != {}
def _get_arbitrary_filters(self, params):
return remove_keys_from_dict(params, filtering_pool_default_filter)
def _build_iface_filter(self, params: dict) -> dict:
iface_filter = []
append_if_not_none(iface_filter, params.get('content_type', None))
append_if_not_none(iface_filter, params.get('sheet', None))
return iface_filter
def _build_elements_form_list(self, param) -> FormList:
elements_serialization_form = param.get('elements', 'path')
return FormList(form=elements_serialization_form)
def _build_depth(self, params) -> int:
raw_depth = params.get('depth', '1')
return None if raw_depth == 'all' else int(raw_depth)
def _count_matching_elements(self, params) -> bool:
return params.get('count', False)
def _filter_elements(self, depth=1, ifaces: Iterable=None,
arbitrary_filters: dict=None) -> Iterable:
"""See interface for docstring."""
system_catalog = find_catalog(self.context, 'system')
path_index = system_catalog['path']
query = path_index.eq(resource_path(self.context), depth=depth,
include_origin=False)
if ifaces:
interface_index = system_catalog['interfaces']
query &= interface_index.all(ifaces)
if arbitrary_filters:
adhocracy_catalog = find_catalog(self.context, 'adhocracy')
for name, value in valuefilters.items():
# FIXME This will raise a KeyError if no such index exists.
# Better validate first whether all remaining parameters
# indicate existing catalogs and raise colander.Invalid
# otherwise.
for name, value in arbitrary_filters.items():
index = adhocracy_catalog[name]
query &= index.eq(value)
resultset = query.execute()
for result in resultset:
yield result
class IPool(ISheet):
"""Marker interface for the pool sheet."""
class PoolElementsReference(SheetToSheet):
"""Pool sheet elements reference."""
source_isheet = IPool
source_isheet_field = 'elements'
target_isheet = ISheet
class PoolSchema(colander.MappingSchema):
"""Pool sheet data structure.
`elements`: children of this resource (object hierarchy).
"""
elements = UniqueReferences(reftype=PoolElementsReference,
readonly=True)
count = colander.SchemaNode(colander.Integer(), default=colander.drop)
pool_metadata = sheet_metadata_defaults._replace(
isheet=IPool,
schema_class=PoolSchema,
sheet_class=FilteringPoolSheet,
editable=False,
creatable=False,
)
def includeme(config):
"""Register adapter."""
add_sheet_to_registry(pool_metadata, config.registry)
|
Python
| 0.999734
|
@@ -2315,17 +2315,16 @@
copy.pop
-e
('elemen
@@ -3860,21 +3860,26 @@
alue in
-value
+arbitrary_
filters.
@@ -4141,66 +4141,8 @@
se.%0A
- for name, value in arbitrary_filters.items():%0A
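For readability, the three hunks above decode (with %0A expanded to newlines and indentation inferred from the listing) to renaming the mistyped pope call to pop, pointing the filter loop at arbitrary_filters instead of the undefined valuefilters, and dropping the duplicated inner for line. The affected code would then read roughly as follows; this is a sketch, with the FIXME comment abbreviated:
    def _custom_filtering_necessary(self, params: dict) -> bool:
        params_copy = params.copy()
        return params_copy.pop('depth', '1') != '1' or\
            params_copy.pop('elements', 'path') != 'path' or\
            params_copy != {}
and, inside _filter_elements:
        if arbitrary_filters:
            adhocracy_catalog = find_catalog(self.context, 'adhocracy')
            for name, value in arbitrary_filters.items():
                # FIXME Raises KeyError if no such index exists; better to
                # validate the remaining parameters and raise colander.Invalid.
                index = adhocracy_catalog[name]
                query &= index.eq(value)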
|
4da3425eb0dbb5d95439584ee485a2fda16f095e
|
Print maximum work group size for opencl kernel
|
kernel_tuner/opencl.py
|
kernel_tuner/opencl.py
|
"""This module contains all OpenCL specific kernel_tuner functions"""
from __future__ import print_function
import numpy
#embedded in try block to be able to generate documentation
try:
import pyopencl as cl
except ImportError:
cl = None
class OpenCLFunctions(object):
"""Class that groups the OpenCL functions on maintains some state about the device"""
def __init__(self, device=0, platform=0, iterations=7, compiler_options=None):
"""Creates OpenCL device context and reads device properties
:param device: The ID of the OpenCL device to use for benchmarking
:type device: int
:param iterations: The number of iterations to run the kernel during benchmarking, 7 by default.
:type iterations: int
"""
if not cl:
raise ImportError("Error: pyopencl not installed, please install e.g. using 'pip install pyopencl'.")
self.iterations = iterations
#setup context and queue
platforms = cl.get_platforms()
self.ctx = cl.Context(devices=[platforms[platform].get_devices()[device]])
self.queue = cl.CommandQueue(self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
self.mf = cl.mem_flags
#inspect device properties
self.max_threads = self.ctx.devices[0].get_info(cl.device_info.MAX_WORK_GROUP_SIZE)
self.compiler_options = compiler_options or []
#collect environment information
dev = self.ctx.devices[0]
env = dict()
env["platform_name"] = dev.platform.name
env["platform_version"] = dev.platform.version
env["device_name"] = dev.name
env["device_version"] = dev.version
env["opencl_c_version"] = dev.opencl_c_version
env["driver_version"] = dev.driver_version
env["iterations"] = self.iterations
env["compiler_options"] = compiler_options
self.env = env
self.name = dev.name
def ready_argument_list(self, arguments):
"""ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the OpenCL kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to an OpenCL kernel.
:rtype: list( pyopencl.Buffer, numpy.int32, ... )
"""
gpu_args = []
for arg in arguments:
            # if arg is a numpy array, copy it to the device
if isinstance(arg, numpy.ndarray):
gpu_args.append(cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf=arg))
else: # if not an array, just pass argument along
gpu_args.append(arg)
return gpu_args
def compile(self, kernel_name, kernel_string):
"""call the OpenCL compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The OpenCL kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: An OpenCL kernel that can be called directly.
:rtype: pyopencl.Kernel
"""
prg = cl.Program(self.ctx, kernel_string).build(options=self.compiler_options)
func = getattr(prg, kernel_name)
return func
def benchmark(self, func, gpu_args, threads, grid):
"""runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly, number of
iterations is set during the creation of OpenCLFunctions. Benchmark returns
a robust average, from all measurements the fastest and slowest runs are
discarded and the rest is included in the returned average. The reason for
this is to be robust against initialization artifacts and other exceptional
cases.
:param func: A PyOpenCL kernel compiled for this specific kernel configuration
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int)
:returns: A robust average for the kernel execution time.
:rtype: float
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
times = []
for _ in range(self.iterations):
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait()
times.append((event.profile.end - event.profile.start)*1e-6)
times = sorted(times)
return numpy.mean(times[1:-1])
def run_kernel(self, func, gpu_args, threads, grid):
"""runs the OpenCL kernel passed as 'func'
:param func: An OpenCL Kernel
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int)
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait()
def memset(self, buffer, value, size):
"""set the memory in allocation to the value in value
        :param buffer: An OpenCL Buffer to fill
        :type buffer: pyopencl.Buffer
:param value: The value to set the memory to
:type value: a single 32-bit int
        :param size: The size of the allocation unit in bytes
:type size: int
"""
if isinstance(buffer, cl.Buffer):
cl.enqueue_fill_buffer(self.queue, buffer, numpy.uint32(value), 0, size)
def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: An OpenCL Buffer to copy data from
:type src: pyopencl.Buffer
"""
if isinstance(src, cl.Buffer):
cl.enqueue_copy(self.queue, dest, src)
|
Python
| 0
|
@@ -3558,16 +3558,178 @@
l_name)%0A
+ print(func.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE,%0A cl.get_platforms()%5B0%5D.get_devices()%5B0%5D))%0A
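Decoded, this hunk inserts a diagnostic print into compile() right after the kernel lookup; pyopencl's Kernel.get_work_group_info(param, device) queries a per-kernel, per-device limit. A sketch of the patched tail of compile(), with indentation inferred from the %0A escapes:
        prg = cl.Program(self.ctx, kernel_string).build(options=self.compiler_options)
        func = getattr(prg, kernel_name)
        print(func.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE,
                    cl.get_platforms()[0].get_devices()[0]))
        return func
Note that the patch queries platform 0, device 0 directly rather than the platform and device selected in __init__.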
|
8b4981a7cae2c4c916d8671b420e29647bfeaaa9
|
Remove duplicate import of Decimal
|
gnu_reporting/reports/savings_goals.py
|
gnu_reporting/reports/savings_goals.py
|
from datetime import datetime
import time
from dateutil.rrule import rrule, MONTHLY
from dateutil.relativedelta import relativedelta
from gnu_reporting.reports.base import Report
from gnu_reporting.wrapper import get_decimal, get_account, get_balance_on_date, account_walker
from gnu_reporting.periods import PeriodStart
from gnu_reporting.configuration.currency import get_currency
import simplejson as json
from decimal import Decimal
class SavingsGoal(Report):
report_type = 'savings_goal'
def __init__(self, name, account, goal, as_of=PeriodStart.today, contributions=None):
super(SavingsGoal, self).__init__(name)
if isinstance(account, basestring):
account = [account]
self.accounts = account
self.goal_amount = Decimal(goal)
self.as_of = PeriodStart(as_of)
if not contributions:
contributions = []
elif type(contributions) != list:
contributions = [contributions]
self.contributions = contributions
def __call__(self):
total_balance = Decimal('0.0')
currency = get_currency()
for account_description in self.accounts:
multiplier = Decimal('1.0')
if isinstance(account_description, basestring):
account = account_description
else:
account = account_description[0]
multiplier = Decimal(account_description[1])
for account_name in account_walker([account]):
balance = get_balance_on_date(account_name, self.as_of.date, currency)
total_balance += (balance * multiplier)
for contribution in self.contributions:
total_balance += contribution
payload = self._generate_result()
payload['data']['balance'] = total_balance
payload['data']['goal'] = self.goal_amount
return payload
class SavingsGoalTrend(Report):
report_type = 'savings_goal_trend'
def __init__(self, name, account_name, goal_amount, past_trend=12, future_trend=6):
super(SavingsGoalTrend, self).__init__(name)
self.account_name = account_name
self.goal_amount = goal_amount
self.past_trend = past_trend
self.future_trend = future_trend
def __call__(self):
account = get_account(self.account_name)
todays_date = datetime.today()
beginning_of_month = datetime(todays_date.year, todays_date.month, 1)
start_of_trend = beginning_of_month - relativedelta(months=self.past_trend)
end_of_trend = start_of_trend + relativedelta(months=self.past_trend + self.future_trend)
payload = self._generate_result()
payload['data']['trend'] = []
for dt in rrule(MONTHLY, dtstart=start_of_trend, until=end_of_trend):
time_value = time.mktime(dt.timetuple())
balance = account.GetBalanceAsOfDate(time_value)
payload['data']['trend'].append(dict(date=dt.strftime('%Y-%m-%d'),
balance=get_decimal(balance)))
return payload
if __name__ == '__main__':
from gnu_reporting.wrapper import initialize
from decimal import Decimal
session = initialize('data/Accounts.gnucash')
goal_amount = Decimal('25904.12')
report = SavingsGoalTrend('Estimated Taxes', 'Assets.Savings Goals.Estimated Taxes 2015', goal_amount)
payload = report()
other_report = SavingsGoal('Estimated Taxes', 'Assets.Savings Goals.Estimated Taxes 2015', goal_amount)
other_payload = other_report()
session.end()
print json.dumps(payload)
print ''
print json.dumps(other_payload)
|
Python
| 0.000287
|
@@ -3191,40 +3191,8 @@
lize
-%0A from decimal import Decimal
%0A%0A
|
cd9a51ab2fe6b99c0665b8f499363a4d557b4a4d
|
Modify script which splits your region into smaller samples
|
DataWrangling/CaseStudy/sample_file.py
|
DataWrangling/CaseStudy/sample_file.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET # Use cElementTree or lxml if too slow
import os
OSM_FILE = "san-francisco-bay_california.osm" # Replace this with your osm file
SAMPLE_FILE = "sample_sfb.osm"
k = 20 # Parameter: take every k-th top level element
def get_element(osm_file, tags=('node', 'way', 'relation')):
"""Yield element if it is the right type of tag
Reference:
http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
"""
context = iter(ET.iterparse(osm_file, events=('start', 'end')))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def main():
os.chdir('./data')
with open(SAMPLE_FILE, 'wb') as output:
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
output.write('<osm>\n ')
# Write every kth top level element
for i, element in enumerate(get_element(OSM_FILE)):
if i % k == 0:
output.write(ET.tostring(element, encoding='utf-8'))
output.write('</osm>')
|
Python
| 0
|
@@ -1190,8 +1190,47 @@
%3C/osm%3E')
+%0A%0Aif __name__ == '__main__':%0A main()
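Decoded, the hunk simply appends the standard entry-point guard after the final output.write call, so main() actually runs when the script is invoked (a sketch; blank lines restored from the %0A escapes):
        output.write('</osm>')

if __name__ == '__main__':
    main()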
|
cd79441f0c11fbc36f2f0b0098196373006718e4
|
Update binstar-push.py
|
continuous-integration/binstar-push.py
|
continuous-integration/binstar-push.py
|
import os
import glob
# import subprocess
# import traceback
from binstar_client.scripts import cli
def get_token():
token = None
if os.environ.get('TRAVIS_BRANCH', None) == 'master' or os.environ.get('APPVEYOR_REPO_BRANCH', None) == 'master':
token = os.environ.get('BINSTAR_TOKEN', None)
return token
token = get_token()
if token is not None:
cmd = ['-t', token, 'upload', '--force', '-u', 'ingeotec']
cmd.extend(glob.glob('*.tar.bz2'))
print(cmd)
cli.main(args=cmd)
# try:
# print('*', cmd, platform.system())
# subprocess.check_call(cmd)
# except subprocess.CalledProcessError:
# traceback.print_exc()
|
Python
| 0.000001
|
@@ -467,16 +467,18 @@
2'))%0A
+ #
print(c
|
b7777486ef36a20e148e3a3d81846f2b330e8622
|
Enable fullpath to be used in get_filenames
|
octotribble/Get_filenames.py
|
octotribble/Get_filenames.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""Get names of files that match regular expression.
Possibly better to use the glob module.
"""
import fnmatch
import os
from typing import List
# TODO: Try glob.glob
def get_filenames(path, regexp, regexp2=None):
# type: (str, str, str) -> List[str]
"""Regexp must be a regular expression as a string.
eg '*.ms.*', '*_2.*', '*.ms.norm.fits*'
regexp2 is if want to match two expressions such as
'*_1*' and '*.ms.fits*'
"""
os.chdir(path)
filelist = []
for file in os.listdir('.'):
if regexp2 is not None: # Match two regular expressions
if fnmatch.fnmatch(file, regexp) and fnmatch.fnmatch(file, regexp2):
filelist.append(file)
else:
if fnmatch.fnmatch(file, regexp):
filelist.append(file)
filelist.sort()
return filelist
def main():
# type: () -> None
"""Some test examples."""
path = "/home/jneal/data/BrownDwarfs-PedrosCode/HD30501-1/"
list1 = get_filenames(path, "*.ms.*")
for file in list1:
pass # print file
list2 = get_filenames(path, "*.norm.*", "*_1.*")
for file in list2:
pass # print file
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -255,19 +255,35 @@
xp2=None
+, fullpath=False
):%0A
-
# ty
@@ -505,24 +505,54 @@
s*'%0A %22%22%22%0A
+ current_path= os.getcwd()%0A
os.chdir
@@ -887,24 +887,24 @@
ppend(file)%0A
-
filelist
@@ -911,16 +911,122 @@
.sort()%0A
+ os.chdir(current_path)%0A if fullpath:%0A filelist = %5Bos.path.join(path, f) for f in filelist%5D%0A%0A
retu
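Stitched into the listing above, the four hunks decode to roughly the following patched function (a sketch: the docstring is abbreviated and the whitespace-only hunk around filelist.sort() is folded in). The working directory is saved on entry and restored after the scan, and full paths are joined in when fullpath=True; the patch leaves the # type: comment unchanged, so it no longer matches the new signature:
def get_filenames(path, regexp, regexp2=None, fullpath=False):
    # type: (str, str, str) -> List[str]
    """Regexp must be a regular expression as a string."""
    current_path = os.getcwd()
    os.chdir(path)
    filelist = []
    for file in os.listdir('.'):
        if regexp2 is not None:  # Match two regular expressions
            if fnmatch.fnmatch(file, regexp) and fnmatch.fnmatch(file, regexp2):
                filelist.append(file)
        else:
            if fnmatch.fnmatch(file, regexp):
                filelist.append(file)
    filelist.sort()
    os.chdir(current_path)
    if fullpath:
        filelist = [os.path.join(path, f) for f in filelist]

    return filelist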
|
0f0ef471e6bb9d873c890df2f538a00bbcae9637
|
print removed
|
viewer/templatetags/custom_tags.py
|
viewer/templatetags/custom_tags.py
|
from django import template
import markdown
import datetime
register = template.Library()
@register.filter()
def custom_date(value):
date = datetime.datetime.strptime(value, '%a, %d %b %Y %H:%M:%S %z')
return date.strftime('%d, %b %Y')
@register.filter()
def markdown_data(value):
return markdown.markdown(value)
@register.filter()
def url_replace(value):
value = value.replace("http://", "https://")
print(value)
return value
|
Python
| 0.000001
|
@@ -421,25 +421,8 @@
/%22)%0A
- print(value)%0A
@@ -429,12 +429,13 @@
return value
+%0A
|
0bac442df6fec974aec8cf6d9e4147a2e75cf139
|
Switch from VERSION to $VERSION in model migration.
|
go/vumitools/conversation/migrators.py
|
go/vumitools/conversation/migrators.py
|
from vumi.persist.model import ModelMigrator
class ConversationMigrator(ModelMigrator):
def migrate_from_unversioned(self, mdata):
# Copy stuff that hasn't changed between versions
mdata.copy_values(
'conversation_type',
'start_timestamp', 'end_timestamp', 'created_at',
'delivery_class', 'delivery_tag_pool', 'delivery_tag')
mdata.copy_indexes('user_account_bin', 'groups_bin', 'batches_bin')
# Add stuff that's new in this version
mdata.set_value('VERSION', 1)
mdata.set_value('name', mdata.old_data['subject'])
config = (mdata.old_data['metadata'] or {}).copy()
config['content'] = mdata.old_data['message']
mdata.set_value('config', config)
# We don't use the constants here because they may change or disappear
# underneath us in the future.
status = u'draft'
if mdata.new_index['batches_bin']:
# ^^^ This kind of hackery is part of the reason for the migration.
status = u'running'
if mdata.new_data['end_timestamp'] is not None:
status = u'finished'
mdata.set_value('status', status, index='status_bin')
# Add indexes for fields with new (or updated) indexes
mdata.add_index('end_timestamp_bin', mdata.new_data['end_timestamp'])
mdata.add_index(
'start_timestamp_bin', mdata.new_data['start_timestamp'])
mdata.add_index('created_at_bin', mdata.new_data['created_at'])
return mdata
|
Python
| 0
|
@@ -527,16 +527,17 @@
_value('
+$
VERSION'
|
fd7348951e46763dcd06cb673a6b01f6894efe4e
|
Set version as 0.8.8.1
|
alignak_webui/__init__.py
|
alignak_webui/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=global-statement
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
#
"""
Alignak - Web User Interface
"""
# Package name
__pkg_name__ = u"alignak_webui"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"demo"
# Application manifest
__application__ = u"Alignak-WebUI"
VERSION = (0, 8, 8)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__author__ = u"Frédéric Mohier"
__author_email__ = u"frederic.mohier@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-webui"
__doc_url__ = "http://alignak-web-ui.readthedocs.io/?badge=latest"
__description__ = u"Alignak - Web User Interface"
__releasenotes__ = u"""Alignak monitoring framework Web User Interface"""
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
__manifest__ = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'url': __git_url__,
'doc': __doc_url__
}
# Application configuration object
# Global variable to be used with accessor functions ...
# ... to make it package/module global!
# pylint: disable=invalid-name
app_config = None
def get_app_config():
# pylint: disable=global-variable-not-assigned
"""Return global application configuration"""
global app_config
return app_config
def set_app_config(config):
# pylint: disable=global-statement
"""Update global application configuration"""
global app_config
app_config = config
|
Python
| 0.001032
|
@@ -506,16 +506,19 @@
(0, 8, 8
+, 1
)%0A__vers
|
2ed5c92aabd349337579792b20613854370aa2ac
|
add log test
|
kfdda/views/general.py
|
kfdda/views/general.py
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask.views import MethodView
from ..core import db, logger
from ..exceptions import NoError, FormValidationError
from ..forms.login import LoginForm
from ..models.user import User
from ..tasks import add
bp = Blueprint('general', __name__)
class IndexView(MethodView):
def get(self):
users = User.query.all()
return ''.join(x.phone for x in users)
class AddView(MethodView):
def get(self):
phone = '13800138000'
email = 'me@codeif.com'
password = '123456'
user = User(phone=phone, email=email, password=password)
db.session.add(user)
db.session.commit()
return 'ok'
class FormErrorView(MethodView):
def get(self):
form = LoginForm()
if not form.validate():
raise FormValidationError(form)
raise NoError()
class CeleryTestView(MethodView):
def get(self):
add.delay(1, 3)
return 'ok'
class ExceptionView(MethodView):
def get(self):
logger.error('this is error')
assert 1 == 2
return '1 == 2'
bp.add_url_rule('/', view_func=IndexView.as_view('index'))
bp.add_url_rule('/add', view_func=AddView.as_view('error'))
bp.add_url_rule('/form-error', view_func=FormErrorView.as_view('form_error'))
bp.add_url_rule('/celery-test',
view_func=CeleryTestView.as_view('celery_test'))
bp.add_url_rule('/exception', view_func=ExceptionView.as_view('excepiton'))
|
Python
| 0.000001
|
@@ -1124,16 +1124,241 @@
== 2'%0A%0A
+class LoggerView(MethodView):%0A def get(self):%0A logger.debug('log level debug')%0A logger.info('log level info')%0A logger.warn('log level warn')%0A logger.error('log level error')%0A return 'ok'%0A
%0Abp.add_
@@ -1716,12 +1716,70 @@
xcepiton'))%0A
+bp.add_url_rule('/log', view_func=LogView.as_view('log'))%0A
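Decoded, the first hunk adds a LoggerView that emits one message per log level, and the second registers a /log route (sketch below, %0A expanded). Note the mismatch: the class is named LoggerView but the route references LogView, so the patched module would raise a NameError at import time as written.
class LoggerView(MethodView):
    def get(self):
        logger.debug('log level debug')
        logger.info('log level info')
        logger.warn('log level warn')
        logger.error('log level error')
        return 'ok'

bp.add_url_rule('/log', view_func=LogView.as_view('log'))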
|
83a62e80d1b7551f0ccebf4bc95bba27c6bf94bc
|
Add compound nouns tests
|
tests/test_compounds.py
|
tests/test_compounds.py
|
import inflect
p = inflect.engine()
def test_compound_1():
assert p.singular_noun("hello-out-there") == "hello-out-there"
def test_compound_2():
assert p.singular_noun("hello out there") == "hello out there"
def test_compound_3():
assert p.singular_noun("continue-to-operate") == "continue-to-operate"
def test_compound_4():
assert p.singular_noun("case of diapers") == "case of diapers"
def test_unit_handling_degree():
test_cases = {
"degree celsius": "degrees celsius",
# 'degree Celsius': 'degrees Celsius',
"degree fahrenheit": "degrees fahrenheit",
"degree rankine": "degrees rankine",
"degree fahrenheit second": "degree fahrenheit seconds",
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_handling_fractional():
test_cases = {
"pound per square inch": "pounds per square inch",
"metre per second": "metres per second",
"kilometre per hour": "kilometres per hour",
"cubic metre per second": "cubic metres per second",
"dollar a year": "dollars a year",
# Correct pluralization of denominator
"foot per square second": "feet per square second",
"mother-in-law per lifetime": "mothers-in-law per lifetime",
"pound-force per square inch": "pounds-force per square inch",
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_handling_combined():
test_cases = {
# Heat transfer coefficient unit
"watt per square meter degree celsius": "watts per square meter degree celsius",
"degree celsius per hour": "degrees celsius per hour",
"degree fahrenheit hour square foot per btuit inch": (
"degree fahrenheit hour square feet per btuit inch"
),
# 'degree Celsius per hour': 'degrees Celsius per hour',
# 'degree Fahrenheit hour square foot per BtuIT inch':
# 'degree Fahrenheit hour square feet per BtuIT inch'
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
|
Python
| 0.999995
|
@@ -2136,28 +2136,842 @@
.plural(singular) == plural%0A
+%0A%0Adef test_unit_open_compound_nouns():%0A test_cases = %7B%0A %22high school%22: %22high schools%22,%0A %22master genie%22: %22master genies%22,%0A %22MASTER genie%22: %22MASTER genies%22,%0A %22Blood brother%22: %22Blood brothers%22,%0A %22prima donna%22: %22prima donnas%22,%0A %22prima DONNA%22: %22prima DONNAS%22%0A %7D%0A for singular, plural in test_cases.items():%0A assert p.plural(singular) == plural%0A%0A%0Adef test_unit_open_compound_nouns_classical():%0A p.classical(all=True)%0A test_cases = %7B%0A %22master genie%22: %22master genii%22,%0A %22MASTER genie%22: %22MASTER genii%22,%0A %22Blood brother%22: %22Blood brethren%22,%0A %22prima donna%22: %22prime donne%22,%0A %22prima DONNA%22: %22prime DONNE%22%0A %7D%0A for singular, plural in test_cases.items():%0A assert p.plural(singular) == plural%0A p.classical(all=False)%0A
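Decoded from the single hunk (%0A expanded, %22 as double quotes), the commit appends two tests: one for open compound nouns in inflect's modern mode, and one that switches on classical mode and switches it back off at the end so later tests are unaffected. A sketch:
def test_unit_open_compound_nouns():
    test_cases = {
        "high school": "high schools",
        "master genie": "master genies",
        "MASTER genie": "MASTER genies",
        "Blood brother": "Blood brothers",
        "prima donna": "prima donnas",
        "prima DONNA": "prima DONNAS"
    }
    for singular, plural in test_cases.items():
        assert p.plural(singular) == plural


def test_unit_open_compound_nouns_classical():
    p.classical(all=True)
    test_cases = {
        "master genie": "master genii",
        "MASTER genie": "MASTER genii",
        "Blood brother": "Blood brethren",
        "prima donna": "prime donne",
        "prima DONNA": "prime DONNE"
    }
    for singular, plural in test_cases.items():
        assert p.plural(singular) == plural
    p.classical(all=False)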
|
828e75919bd71912baf75a64010efcfcd93d07f1
|
Update library magic to be recursive
|
library_magic.py
|
library_magic.py
|
import sys
import subprocess
import shutil
executable = sys.argv[1]
execfolder = sys.argv[1].rsplit("/",1)[0]
libdir = execfolder+"/lib"
otool_cmd = ["otool", "-L",executable]
# Run otool
otool_out = subprocess.check_output(otool_cmd).split("\n\t")
# Find all the dylib files
for l in otool_out:
s = l.split(".dylib")
if len(s) > 1:
lib = s[0]+".dylib"
libname = lib.rsplit("/",1)[1]
shutil.copyfile(lib, libdir+"/"+libname)
install_name_tool = ["install_name_tool", "-change", lib, "@executable_path/lib/"+libname, executable]
subprocess.call(install_name_tool)
|
Python
| 0
|
@@ -41,154 +41,251 @@
il%0A%0A
-executable = sys.argv%5B1%5D%0Aexecfolder = sys.argv%5B1%5D.rsplit(%22/%22,1)%5B0%5D%0Alibdir = execfolder+%22/lib%22%0Aotool_cmd = %5B%22otool%22, %22-L%22,executable%5D%0A%0A# Run otool%0A
+copied = %5B%5D%0A%0Adef update_libraries(executable):%0A%09%0A%09# Find all the dylib files and recursively add dependencies%0A%09print %22%5CnChecking dependencies of %22 + executable%0A%09otool_cmd = %5B%22otool%22, %22-L%22,executable%5D%0A%09execfolder = executable.rsplit(%22/%22,1)%5B0%5D%0A%09
otoo
@@ -345,36 +345,50 @@
t%22)%0A
-%0A# Find all the dylib files%0A
+%09execname = executable.rsplit(%22/%22,1)%5B1%5D%0A%0A%09
for
@@ -403,16 +403,17 @@
ol_out:%0A
+%09
%09s = l.s
@@ -427,16 +427,17 @@
dylib%22)%0A
+%09
%09if len(
@@ -444,16 +444,17 @@
s) %3E 1:%0A
+%09
%09%09lib =
@@ -469,16 +469,17 @@
ylib%22%0A%09%09
+%09
libname
@@ -507,40 +507,181 @@
%5D%0A%09%09
-shutil.copyfile(lib, libdir+%22/%22+
+%09if libname not in copied: %0A%09%09%09%09print %22Requires: %22 + lib%0A%09%09%09%09new_lib = execfolder+%22/%22+libname%0A%09%09%09%09if (lib != new_lib):%0A%09%09%09%09%09shutil.copyfile(lib, new_lib)%0A%09%09%09%09%09copied.append(
libn
@@ -685,16 +685,18 @@
ibname)%0A
+%09%09
%09%09instal
@@ -752,28 +752,9 @@
b, %22
-@executable_path/lib
+.
/%22+l
@@ -773,16 +773,46 @@
utable%5D%0A
+%09%09%09%09print %22Installing %22+lib%0A%09%09
%09%09subpro
@@ -840,8 +840,202 @@
e_tool)%0A
+%09%09%09%09new_library = execfolder+%22/%22+libname%0A%09%09%09%09print %22Calling on %22 + new_library%0A%09%09%09%09update_libraries(new_library)%0A%0A%09%09%09%0A# Update libraries on the default executable %0Aupdate_libraries(sys.argv%5B1%5D)%0A
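Because this patch rewrites most of the script across ten hunks, a full best-effort reconstruction helps (tabs restored from the %09 escapes and rendered here as four spaces, %22 as double quotes; still Python 2, as in the original). The flat top-level loop becomes a recursive update_libraries() with a module-level copied list to avoid re-copying, libraries are copied next to the executable instead of into lib/, and the install name is rewritten to "./"+libname:
import sys
import subprocess
import shutil

copied = []

def update_libraries(executable):

    # Find all the dylib files and recursively add dependencies
    print "\nChecking dependencies of " + executable
    otool_cmd = ["otool", "-L", executable]
    execfolder = executable.rsplit("/", 1)[0]
    otool_out = subprocess.check_output(otool_cmd).split("\n\t")
    execname = executable.rsplit("/", 1)[1]

    for l in otool_out:
        s = l.split(".dylib")
        if len(s) > 1:
            lib = s[0] + ".dylib"
            libname = lib.rsplit("/", 1)[1]
            if libname not in copied:
                print "Requires: " + lib
                new_lib = execfolder + "/" + libname
                if (lib != new_lib):
                    shutil.copyfile(lib, new_lib)
                    copied.append(libname)
                install_name_tool = ["install_name_tool", "-change", lib, "./" + libname, executable]
                print "Installing " + lib
                subprocess.call(install_name_tool)
                new_library = execfolder + "/" + libname
                print "Calling on " + new_library
                update_libraries(new_library)

# Update libraries on the default executable
update_libraries(sys.argv[1])
(The execname variable is added but never used; that is faithful to the patch, not an editorial choice.)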
|
5558e4af4e0e341b9d92e4f433e2f5cc3d305f11
|
Fix space in trans string
|
VocaBot/contentparser.py
|
VocaBot/contentparser.py
|
import math
from collections import defaultdict
from telegram import Emoji
from constants import Context, VOCADB_BASE_URL
from i18n import _
from util import non_phone
from vocadb import voca_db
# I'm not exactly proud of this module's code.. but it does the job.
def names_text(song):
if len(song['names']) > 1:
names = _('<b>Additional names:</b>\n')
for name in song['names']:
if name['value'] != song['name']:
names += name['value'] + '\n'
return names
return _('No additional names found\n')
def artists_text(entry, inline):
if len(entry['artists']) > 0:
artists = _('<b>Artists:</b>\n')
for artist in entry['artists']:
roles = []
for role in artist['effectiveRoles'].split(', '):
if role == 'Default':
roles.append(artist['categories'][:2])
else:
roles.append(role[:2])
artists += _('[<code>{roles}</code>] '
'{artist_name}').format(roles=','.join(roles), artist_name=artist['name'])
if not inline:
try:
artists += ' /ar_{}'.format(artist['artist']['id'])
except KeyError:
pass
artists += '\n'
return artists
return _('No artists found\n')
def vocadb_url(entry, song=False, artist=False, album=False):
return '{base_url}{type}/{id}'.format(base_url=VOCADB_BASE_URL,
type='S' if song else 'Ar' if artist else 'Al',
id=entry['id'])
def content_parser(entries, info=False, inline=False, context=None, bot_name='', counts=None):
text = ''
if entries and len(entries) > 0:
if info:
entries = [entries]
for i, entry in enumerate(entries):
# Check if part of a disc listing
track_number = None
if 'song' in entry:
track_number = entry['trackNumber']
entry = entry['song']
song, album, artist = False, False, False
if 'songType' in entry:
song = True
if 'artistType' in entry:
artist = True
if 'discType' in entry:
album = True
if track_number is None or i != 0:
text += '\n\n'
try:
if context == Context.related:
if i == 0:
text += _('<i>Matching artist</i>')
elif i == 1:
text += _('<i>Matching likes</i>')
elif i == 2:
text += _('<i>Matching tags</i>')
text += '\n'
if song:
if track_number is None:
text += _('{emoji} <b>{name}</b>\n'
'{artist}\n{type}').format(emoji=Emoji.MUSICAL_NOTE, name=entry['name'],
artist=entry['artistString'],
type=voca_db.trans(entry['songType'], song=True))
if 'favoritedTimes' in entry:
text += ' ' + _('with {num} favourites').format(num=entry['favoritedTimes'])
else:
text += _('<code>{track_number})</code> <b>{name}</b>\n{artist}').format(
track_number=track_number,
name=entry['name'],
artist=entry['artistString'])
if artist:
text += _('{emoji} <b>{name}</b>\n'
'{type}').format(emoji=Emoji.MICROPHONE, name=entry['name'],
type=voca_db.trans(entry['artistType'], artist=True))
if album:
text += _('{emoji} <b>{name}</b>\n'
'{artist}\n{type}').format(emoji=Emoji.OPTICAL_DISC, name=entry['name'],
artist=entry['artistString'],
type=voca_db.trans(entry['discType'], album=True))
link = ''
if song:
link = '/info_{}'.format(entry['id'])
elif artist:
link = '/ar_{}'.format(entry['id'])
elif album:
link = '/al_{}'.format(entry['id'])
if info:
text += '\n\n'
text += names_text(entry)
text += '\n'
if song:
if not inline:
text += _('<b>Derived songs:</b>') + ' /dev_{}\n'.format(entry['id'])
text += _('<b>Related songs:</b>') + ' /rel_{}\n'.format(entry['id'])
text += _('<b>Featured on albums:</b>') + ' /albys_{}\n'.format(entry['id'])
if 'originalVersionId' in entry:
text += '\n'
text += _('<b>Original song:</b>') + ' /info_{}\n'.format(entry['originalVersionId'])
text += '\n'
text += artists_text(entry, inline)
if 'pvServices' in entry:
if entry['pvServices'] == 'Nothing':
text += _('\nNo promotional videos found')
if artist:
if not inline:
if 'baseVoicebank' in entry:
text += _('<b>Base voicebank:</b>') + ' /a_{}\n\n'.format(entry['baseVoicebank']['id'])
if album:
if 'releaseDate' in entry:
if not entry['releaseDate']['isEmpty']:
# i18n? .-.
text += _('Release date: {date}\n\n').format(date=entry['releaseDate']['formatted'])
else:
if not inline:
text += _('\nInfo:') + ' ' + link
if inline and bot_name:
text += _('<a href="https://telegram.me/{bot_name}?start=cmd%20{link}">'
'Click for more features.</a>').format(bot_name=bot_name, link=link)
except OSError:
pass
if counts:
text += _("\n\nFound {found_num} total. "
"Viewing page {cur_page}/{max_page}").format(found_num=non_phone(counts[1]),
cur_page=non_phone(math.ceil((counts[0] + 3) / 3)),
max_page=non_phone(math.ceil(counts[1] / 3)))
else:
if context == Context.search:
text += _("I couldn't find what you were looking for. Did you perhaps misspell it? "
"(tip: you can edit your message.)")
elif context == Context.derived:
text += _('No derived songs found.')
elif context == Context.related:
text += _('No related songs found.')
elif context == Context.albums_by_song:
text += _('Not featured on any albums.')
else:
text += _('Not found.')
return text
def album_tracks(album, inline):
text = _('<b>Tracks')
if not inline:
text += _(' on {album_name} by {album_artist}</b>\n').format(album_name=album['name'],
album_artist=album['artistString'])
else:
text += ':</b>\n'
discs = defaultdict(list)
for track in album['tracks']:
discs[track['discNumber']].append(track)
for i, (disc_number, tracks) in enumerate(discs.items()):
if len(discs) > 1:
if not i == 0:
text += '\n\n'
text += _('<i>Disc {disc_number}').format(disc_number=disc_number)
if 'discs' in album and album['discs']:
disc = [disc for disc in album['discs'] if disc['discNumber'] == disc_number]
# Can't find an album to test this on:
# if 'name' in disc:
# text += ' ' + disc['name']
text += ':</i>\n'
text += content_parser(tracks, inline=inline)
return text
|
Python
| 1
|
@@ -7663,13 +7663,18 @@
t +=
+ ' ' +
_('
-
on %7B
@@ -7736,32 +7736,37 @@
=album%5B'name'%5D,%0A
+
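Decoded, the two hunks move the leading space out of the translated string, so the gettext msgid no longer starts with whitespace, and re-align the continuation line to match. A sketch of the patched statement:
        text += ' ' + _('on {album_name} by {album_artist}</b>\n').format(album_name=album['name'],
                                                                          album_artist=album['artistString'])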
|
b535ad5c396539eb90d30b750e92380fa2a5d688
|
version inc
|
src/pymake2/__init__.py
|
src/pymake2/__init__.py
|
#---------------------------------------
# DUNDERS
#---------------------------------------
__author__ = "Philip Arvidsson <contact@philiparvidsson.com>"
__license__ = "MIT (see LICENSE.md)"
__version__ = "0.4.2"
#---------------------------------------
# IMPORTS
#---------------------------------------
from pymake2 import template
from pymake2.cli.main import pymake2
from pymake2.core import makeconf
from pymake2.core.decorators import (after_target, before_target, default_conf,
default_target, depends_on, target)
from pymake2.core.maker import make
from pymake2.makeutils.fs import (copy, create_dir, delete_dir, delete_file,
find_files, watch_files)
from pymake2.makeutils.proc import run_program
|
Python
| 0
|
@@ -205,17 +205,17 @@
= %220.4.
-2
+7
%22%0A%0A#----
|