commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
eb828764ddbe3988f71b98082e1560e594c3f65d | Add a bot message to display TeamCity test results | modin-project/modin,modin-project/modin | ci/teamcity/comment_on_pr.py | ci/teamcity/comment_on_pr.py | """
Post the comment like the following to the PR:
```
:robot: TeamCity test results bot :robot:
<Logs from pytest>
```
"""
from github import Github
import os
import sys
# Only PR builds define GITHUB_PR_NUMBER; for any other build type, exit
# quietly with status 0 so TeamCity does not flag the step as failed.
try:
    pr_id = int(os.environ["GITHUB_PR_NUMBER"].split("/")[-1])
except Exception:
    sys.exit(0)

# Header placed at the top of the bot comment.
header = """
<h2 align="center">:robot: TeamCity test results bot :robot:</h2>
"""

# Log files produced by each pytest run; one collapsible section is
# rendered per file.
pytest_outputs = ["ray_tests.log", "dask_tests.log", "python_tests.log"]
full_comment = header
for out in pytest_outputs:
    # Section title, e.g. "ray_tests.log" -> "Ray Tests".
    full_comment += "<details><summary>{} Tests</summary>\n".format(
        out.split("_")[0].title()
    )
    full_comment += "\n\n```\n"
    # NOTE(review): the file handle is never closed; harmless in this
    # short-lived CI script but worth a `with open(...)` if revisited.
    full_comment += open(out, "r").read()
    full_comment += "\n```\n\n</details>\n"

token = os.environ["GITHUB_TOKEN"]
g = Github(token)
repo = g.get_repo("modin-project/modin")
pr = repo.get_pull(pr_id)
# Edit the bot's existing comment when one exists; otherwise post a new one,
# so the PR only ever carries a single results comment.
if any(i.user.login == "modin-bot" for i in pr.get_issue_comments()):
    pr_comment_list = [
        i for i in list(pr.get_issue_comments()) if i.user.login == "modin-bot"
    ]
    assert len(pr_comment_list) == 1, "Too many comments from modin-bot already"
    pr_comment_list[0].edit(full_comment)
else:
    pr.create_issue_comment(full_comment)
| apache-2.0 | Python | |
b0006bf92ae221558d47a0b3c9010cfaacde2bfe | add checkmein.py with __init__ function | bahmanh/Auto-Flight-Check-In | autocheckin/checkmein.py | autocheckin/checkmein.py | from selenium import webdriver
# Fixed typo: the package is spelled "selenium" (see the webdriver import
# on the preceding line); "selinium" raises ImportError.
from selenium.webdriver.common.keys import Keys
class CheckMeIn(object):
    """Hold the passenger details needed for an automated flight check-in.

    Keeps the traveller's first and last name together with the booking's
    confirmation number; other components read these attributes directly.
    """

    def __init__(self, firstName, lastName, confNum):
        # Store the caller-supplied identification details verbatim.
        self.firstName, self.lastName, self.confNum = (
            firstName, lastName, confNum)
| mit | Python | |
528401b2c5cab29e301814da1754f0c0c41bdcd1 | Update shortest-distance-to-a-character.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/shortest-distance-to-a-character.py | Python/shortest-distance-to-a-character.py | # Time: O(n)
# Space: O(1)
# Given a string S and a character C,
# return an array of integers representing the shortest distance
# from the character C in the string.
#
# Example 1:
#
# Input: S = "loveleetcode", C = 'e'
# Output: [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]
#
# Note:
# - S string length is in [1, 10000].
# - C is a single character, and guaranteed to be in string S.
# - All letters in S and C are lowercase.
import itertools
# Compatibility shim: Python 3 removed xrange, so alias it to range
# (which is already lazy there); on Python 2 the name resolves natively.
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3
class Solution(object):
    def shortestToChar(self, S, C):
        """
        Return, for every index of ``S``, the distance to the nearest
        occurrence of the character ``C``.

        :type S: str
        :type C: str
        :rtype: List[int]
        """
        n = len(S)
        dist = [n] * n          # upper bound, tightened by the two sweeps
        last_seen = -n          # sentinel: "previous C" far to the left
        # One left-to-right sweep followed by one right-to-left sweep;
        # each pass tightens the distance to the closest C on that side.
        for idx in itertools.chain(xrange(n), reversed(xrange(n))):
            if S[idx] == C:
                last_seen = idx
            dist[idx] = min(dist[idx], abs(idx - last_seen))
        return dist
| # Time: O(n)
# Space: O(n)
# Given a string S and a character C,
# return an array of integers representing the shortest distance
# from the character C in the string.
#
# Example 1:
#
# Input: S = "loveleetcode", C = 'e'
# Output: [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]
#
# Note:
# - S string length is in [1, 10000].
# - C is a single character, and guaranteed to be in string S.
# - All letters in S and C are lowercase.
import itertools
# Compatibility shim: Python 3 removed xrange, so alias it to range
# (which is already lazy there); on Python 2 the name resolves natively.
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3
class Solution(object):
    def shortestToChar(self, S, C):
        """
        Return, for every index of ``S``, the distance to the nearest
        occurrence of the character ``C``.

        :type S: str
        :type C: str
        :rtype: List[int]
        """
        # Worst-case upper bound for each position; tightened below.
        result = [len(S)] * len(S)
        # Sentinel "previous C" far enough left that it never wins initially.
        prev = -len(S)
        # Forward pass then backward pass: after both, result[i] holds the
        # distance to the closest C on either side of i.
        for i in itertools.chain(xrange(len(S)),
                                 reversed(xrange(len(S)))):
            if S[i] == C:
                prev = i
            result[i] = min(result[i], abs(i-prev))
        return result
| mit | Python |
231bc7bb7bd7e373d4b4c9a3e33d6539d0637828 | Add xfailing test for #3345 | spacy-io/spaCy,honnibal/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy | spacy/tests/regression/test_issue3345.py | spacy/tests/regression/test_issue3345.py | """Test interaction between preset entities and sentence boundaries in NER."""
import pytest

import spacy
from spacy.tokens import Doc
from spacy.pipeline import EntityRuler, EntityRecognizer
@pytest.mark.xfail
def test_issue3345():
    """Test case where preset entity crosses sentence boundary."""
    nlp = spacy.blank("en")
    doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"])
    # Force a sentence boundary inside the "New York" span.
    doc[4].is_sent_start = True
    ruler = EntityRuler(nlp, patterns=[{"label": "GPE", "pattern": "New York"}])
    ner = EntityRecognizer(doc.vocab)
    # Add the OUT action. I wouldn't have thought this would be necessary...
    ner.moves.add_action(5, "")
    ner.add_label("GPE")
    # Preset the entity via the ruler before the NER sees the doc.
    doc = ruler(doc)
    # Get into the state just before "New"
    state = ner.moves.init_batch([doc])[0]
    ner.moves.apply_transition(state, "O")
    ner.moves.apply_transition(state, "O")
    ner.moves.apply_transition(state, "O")
    # Check that B-GPE is valid, i.e. the preset entity may still be opened
    # even though a sentence boundary was forced inside it.
    assert ner.moves.is_valid(state, "B-GPE")
| mit | Python | |
d358a799ce726706543ac1d440f5b60112125a52 | Add psf building directory | larrybradley/photutils,astropy/photutils | photutils/psf/building/__init__.py | photutils/psf/building/__init__.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains modules and packages to build point spread
functions.
"""
| bsd-3-clause | Python | |
0c74512159641fa63cf6292439a20c6af9698a02 | add a template program so that we can base others off it :) | akrherz/pyWWA,akrherz/pyWWA | template.py | template.py | # Copyright (c) 2005 Iowa State University
# http://mesonet.agron.iastate.edu/ -- mailto:akrherz@iastate.edu
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Template Example ingestor """
__revision__ = '$Id: template.py 4513 2009-01-06 16:57:49Z akrherz $'
# Twisted Python imports
from twisted.words.protocols.jabber import client, jid, xmlstream
from twisted.internet import reactor
from twisted.python import log
from twisted.enterprise import adbapi
from twisted.mail import smtp
# Standard Python modules
import os, re, traceback, StringIO, smtplib
from email.MIMEText import MIMEText
# Python 3rd Party Add-Ons
import mx.DateTime, pg
# pyWWA stuff
from support import ldmbridge, TextProduct, reference
import secret
import common
# Append-mode log file plus a timestamp format for the Twisted logger.
log.startLogging(open('logs/template.log','a'))
log.FileLogObserver.timeFormat = "%Y/%m/%d %H:%M:%S %Z"

# Blocking PostgreSQL connection (pg) plus a Twisted async pool (psycopg2).
POSTGIS = pg.connect(secret.dbname, secret.dbhost, user=secret.dbuser,
                     passwd=secret.dbpass)
DBPOOL = adbapi.ConnectionPool("psycopg2", database=secret.dbname,
                               host=secret.dbhost, password=secret.dbpass)

# Remaining budget of error emails; prevents mail floods on repeated failure.
EMAILS = 10
def email_error(message, product_text):
    """
    Log an error and email it (with the raw offending product attached)
    to the maintainer, sending at most ``EMAILS`` messages per process run.

    message: exception or error text to report
    product_text: the raw product that triggered the failure
    """
    global EMAILS
    log.msg( message )
    EMAILS -= 1
    # Go silent (but keep logging) once the per-run email budget is spent.
    if (EMAILS < 0):
        return
    msg = MIMEText("Exception:\n%s\n\nRaw Product:\n%s" \
        % (message, product_text))
    msg['subject'] = 'template.py Traceback'
    msg['From'] = secret.parser_user
    msg['To'] = secret.error_user
    # NOTE(review): twisted.mail.smtp.sendmail expects a message string or
    # file-like object; passing the MIMEText object directly may need
    # msg.as_string() -- verify against the Twisted version in use.
    smtp.sendmail("localhost", msg["From"], msg["To"], msg)
# LDM Ingestor
class MyProductIngestor(ldmbridge.LDMProductReceiver):
    """ I receive products from ldmbridge and process them 1 by 1 :) """

    def connectionLost(self, reason):
        # Upstream LDM pipe closed: allow 5 seconds for in-flight work,
        # then stop the reactor cleanly.
        print 'connectionLost', reason
        reactor.callLater(5, self.shutdown)

    def shutdown(self):
        reactor.callWhenRunning(reactor.stop)

    def process_data(self, buf):
        """ Process the product """
        try:
            real_parser(buf)
        except Exception, myexp:
            # Never let one bad product kill the daemon; report it instead.
            email_error(myexp, buf)
def real_parser(buf):
    """Relay the raw product text to the jabber chat room unchanged."""
    jabber.sendMessage(buf)

# Unique per-process JID resource (timestamped) so that concurrently
# running ingestors do not clash on the chat server.
myJid = jid.JID('%s@%s/template_%s' % \
    (secret.iembot_ingest_user, secret.chatserver, \
    mx.DateTime.gmt().strftime("%Y%m%d%H%M%S") ) )
factory = client.basicClientFactory(myJid, secret.iembot_ingest_password)
jabber = common.JabberClient(myJid)
# Wire up the XMPP lifecycle callbacks before connecting.
factory.addBootstrap('//event/stream/authd', jabber.authd)
factory.addBootstrap("//event/client/basicauth/invaliduser", jabber.debug)
factory.addBootstrap("//event/client/basicauth/authfailed", jabber.debug)
factory.addBootstrap("//event/stream/error", jabber.debug)
factory.addBootstrap(xmlstream.STREAM_END_EVENT, jabber._disconnect )
reactor.connectTCP(secret.connect_chatserver, 5222, factory)

# Start consuming products from LDM and run the event loop forever.
ldm = ldmbridge.LDMProductFactory( MyProductIngestor() )
reactor.run()
| mit | Python | |
e19c8d52719d1bc00023406842e9a445580d98d3 | add wrappers.py just a dispatch wrapper for linear models | jstoxrocky/statsmodels,saketkc/statsmodels,pprett/statsmodels,astocko/statsmodels,rgommers/statsmodels,hlin117/statsmodels,musically-ut/statsmodels,detrout/debian-statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,Averroes/statsmodels,saketkc/statsmodels,yarikoptic/pystatsmodels,YihaoLu/statsmodels,DonBeo/statsmodels,alekz112/statsmodels,wzbozon/statsmodels,wwf5067/statsmodels,waynenilsen/statsmodels,cbmoore/statsmodels,musically-ut/statsmodels,jseabold/statsmodels,phobson/statsmodels,wdurhamh/statsmodels,josef-pkt/statsmodels,rgommers/statsmodels,hainm/statsmodels,bzero/statsmodels,bashtage/statsmodels,nvoron23/statsmodels,alekz112/statsmodels,josef-pkt/statsmodels,musically-ut/statsmodels,cbmoore/statsmodels,Averroes/statsmodels,josef-pkt/statsmodels,wesm/statsmodels,phobson/statsmodels,nguyentu1602/statsmodels,josef-pkt/statsmodels,edhuckle/statsmodels,gef756/statsmodels,jseabold/statsmodels,wkfwkf/statsmodels,bsipocz/statsmodels,bavardage/statsmodels,adammenges/statsmodels,YihaoLu/statsmodels,bavardage/statsmodels,YihaoLu/statsmodels,statsmodels/statsmodels,saketkc/statsmodels,waynenilsen/statsmodels,YihaoLu/statsmodels,bashtage/statsmodels,nvoron23/statsmodels,pprett/statsmodels,pprett/statsmodels,wzbozon/statsmodels,alekz112/statsmodels,nvoron23/statsmodels,huongttlan/statsmodels,jstoxrocky/statsmodels,hainm/statsmodels,phobson/statsmodels,DonBeo/statsmodels,cbmoore/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,wwf5067/statsmodels,nguyentu1602/statsmodels,edhuckle/statsmodels,YihaoLu/statsmodels,ChadFulton/statsmodels,nguyentu1602/statsmodels,kiyoto/statsmodels,huongttlan/statsmodels,kiyoto/statsmodels,statsmodels/statsmodels,detrout/debian-statsmodels,huongttlan/statsmodels,bert9bert/statsmodels,wdurhamh/statsmodels,Averroes/statsmodels,ChadFulton/statsmodels,adammenges/statsmodels,wzbozon/statsmodels,hainm/statsmodels,kiyoto/statsmodels,way
nenilsen/statsmodels,bsipocz/statsmodels,bsipocz/statsmodels,cbmoore/statsmodels,rgommers/statsmodels,adammenges/statsmodels,wwf5067/statsmodels,rgommers/statsmodels,bzero/statsmodels,wkfwkf/statsmodels,bert9bert/statsmodels,hlin117/statsmodels,jseabold/statsmodels,statsmodels/statsmodels,wesm/statsmodels,kiyoto/statsmodels,jseabold/statsmodels,detrout/debian-statsmodels,statsmodels/statsmodels,edhuckle/statsmodels,bashtage/statsmodels,alekz112/statsmodels,yl565/statsmodels,bzero/statsmodels,wzbozon/statsmodels,Averroes/statsmodels,wdurhamh/statsmodels,hainm/statsmodels,wdurhamh/statsmodels,bashtage/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,gef756/statsmodels,yl565/statsmodels,bashtage/statsmodels,wzbozon/statsmodels,yl565/statsmodels,bert9bert/statsmodels,DonBeo/statsmodels,musically-ut/statsmodels,DonBeo/statsmodels,yarikoptic/pystatsmodels,edhuckle/statsmodels,bert9bert/statsmodels,bsipocz/statsmodels,astocko/statsmodels,jstoxrocky/statsmodels,hlin117/statsmodels,rgommers/statsmodels,gef756/statsmodels,wesm/statsmodels,nvoron23/statsmodels,wkfwkf/statsmodels,ChadFulton/statsmodels,nguyentu1602/statsmodels,wwf5067/statsmodels,gef756/statsmodels,detrout/debian-statsmodels,kiyoto/statsmodels,wkfwkf/statsmodels,jstoxrocky/statsmodels,huongttlan/statsmodels,DonBeo/statsmodels,astocko/statsmodels,wkfwkf/statsmodels,saketkc/statsmodels,bashtage/statsmodels,saketkc/statsmodels,phobson/statsmodels,pprett/statsmodels,bzero/statsmodels,yarikoptic/pystatsmodels,bavardage/statsmodels,adammenges/statsmodels,ChadFulton/statsmodels,hlin117/statsmodels,bavardage/statsmodels,statsmodels/statsmodels,astocko/statsmodels,bavardage/statsmodels,bzero/statsmodels,phobson/statsmodels,waynenilsen/statsmodels,nvoron23/statsmodels,wdurhamh/statsmodels,yl565/statsmodels,cbmoore/statsmodels,gef756/statsmodels,yl565/statsmodels,edhuckle/statsmodels,bert9bert/statsmodels | scikits/statsmodels/wrappers.py | scikits/statsmodels/wrappers.py | # -*- coding: utf-8 -*-
"""Convenience Wrappers
Created on Sat Oct 30 14:56:35 2010
Author: josef-pktd
License: BSD
"""
import numpy as np
import scikits.statsmodels as sm
from scikits.statsmodels import GLS, WLS, OLS
def remove_nanrows(y, x):
    '''Drop every observation whose ``y`` value or any ``x`` entry is NaN.

    Builds a keep-mask from a finite-``y`` test combined with a row-wise
    finite-``x`` test, and applies it to both arrays.

    TODO: this should be made more flexible,
    arbitrary number of arrays and 1d or 2d arrays
    '''
    keep = ~np.isnan(y) & ~np.isnan(x).any(axis=-1)
    return y[keep], x[keep]
def linmod(y, x, weights=None, sigma=None, add_const=True, filter_missing=True,
           **kwds):
    '''get linear model with extra options for entry

    dispatches to regular model class and does not wrap the output

    If several options are exclusive, for example sigma and weights, then the
    chosen class depends on the implementation sequence: ``sigma`` (GLS)
    takes precedence over ``weights`` (WLS); otherwise OLS is used.

    Parameters
    ----------
    y, x : array_like
        endog and exog; rows containing NaNs are dropped when
        ``filter_missing`` is true
    weights : array_like, optional
        observation weights, selects WLS
    sigma : array_like, optional
        error covariance, selects GLS (takes precedence over weights)
    add_const : bool
        if true, a constant column is prepended to ``x``
    filter_missing : bool
        if true, rows with NaNs in ``y`` or ``x`` are removed first
    **kwds
        passed through unchanged to the chosen model class
    '''
    if filter_missing:
        y, x = remove_nanrows(y, x)
        # TODO: do the same for masked arrays
    if add_const:
        x = sm.add_constant(x, prepend=True)
    # Dispatch order defines the precedence among exclusive options.
    # (Idiom fix: "x is not None" instead of "not x is None".)
    if sigma is not None:
        return GLS(y, x, sigma=sigma, **kwds)
    elif weights is not None:
        return WLS(y, x, weights=weights, **kwds)
    else:
        return OLS(y, x, **kwds)
| bsd-3-clause | Python | |
c63ad26327f294393434dcfe4d5454656a0c1b4b | Add initial generate movie | opcon/plutokore,opcon/plutokore | scripts/generate-movie-plots.py | scripts/generate-movie-plots.py | #!/usr/bin/env python3
import os
import argparse
def main():
    """Parse the command-line options for the movie-plot generator.

    Expects a simulation directory, an output directory, and an optional
    skip pattern; currently just echoes the parsed namespace.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('sim_dir', help='Simulation directory')
    arg_parser.add_argument('output_dir', help='Output directory')
    arg_parser.add_argument('-s', '--skip',
                            help='Skip pattern for outputs',
                            default=1, type=int)
    parsed = arg_parser.parse_args()
    print(parsed)


if __name__ == '__main__':
    main()
| mit | Python | |
dbdb247ad03ca6b9168f193eadaf28638d718072 | Change docstring for NamedEntity filth | deanmalmgren/scrubadub,datascopeanalytics/scrubadub,datascopeanalytics/scrubadub,deanmalmgren/scrubadub | scrubadub/filth/named_entity.py | scrubadub/filth/named_entity.py | from .base import Filth
class NamedEntityFilth(Filth):
    """
    Default filth type, for named entities (e.g. the ones in https://nightly.spacy.io/models/en#en_core_web_lg-labels),
    except the ones represented in any other filth.
    """
    type = 'named_entity'

    def __init__(self, *args, label: str, **kwargs):
        super().__init__(*args, **kwargs)
        # Normalise the entity label, then derive the placeholder text
        # from it, e.g. "named_entity_person".
        self.label = label.lower()
        self.replacement_string = "_".join((self.type, self.label))
| from .base import Filth
class NamedEntityFilth(Filth):
    """
    Named entity filth. Upon initialisation provide a label for named entity (e.g. name, org)
    """
    # Filth-type identifier; also the prefix of the replacement string.
    type = 'named_entity'

    def __init__(self, *args, label: str, **kwargs):
        super(NamedEntityFilth, self).__init__(*args, **kwargs)
        # Lower-case the label so replacements are case-insensitive, then
        # derive the placeholder text, e.g. "named_entity_name".
        self.label = label.lower()
        self.replacement_string = "{}_{}".format(self.type, self.label)
| mit | Python |
11f4add6873c7c089b5674415276a71b4c03cb42 | add example mbsubmit plugin | pkess/beets,artemutin/beets,Freso/beets,xsteadfastx/beets,diego-plan9/beets,MyTunesFreeMusic/privacy-policy,Freso/beets,SusannaMaria/beets,lengtche/beets,SusannaMaria/beets,sampsyo/beets,shamangeorge/beets,MyTunesFreeMusic/privacy-policy,swt30/beets,artemutin/beets,lengtche/beets,beetbox/beets,jcoady9/beets,shamangeorge/beets,ibmibmibm/beets,Freso/beets,xsteadfastx/beets,swt30/beets,jcoady9/beets,mosesfistos1/beetbox,beetbox/beets,mried/beets,jackwilsdon/beets,artemutin/beets,mosesfistos1/beetbox,Kraymer/beets,Freso/beets,ibmibmibm/beets,mosesfistos1/beetbox,beetbox/beets,parapente/beets,jackwilsdon/beets,diego-plan9/beets,sampsyo/beets,mried/beets,shamangeorge/beets,pkess/beets,jackwilsdon/beets,parapente/beets,beetbox/beets,LordSputnik/beets,madmouser1/beets,LordSputnik/beets,sampsyo/beets,lengtche/beets,madmouser1/beets,LordSputnik/beets,mried/beets,pkess/beets,diego-plan9/beets,Kraymer/beets,madmouser1/beets,lengtche/beets,jackwilsdon/beets,SusannaMaria/beets,swt30/beets,pkess/beets,Kraymer/beets,sampsyo/beets,MyTunesFreeMusic/privacy-policy,LordSputnik/beets,ibmibmibm/beets,SusannaMaria/beets,mried/beets,xsteadfastx/beets,MyTunesFreeMusic/privacy-policy,ibmibmibm/beets,parapente/beets,jcoady9/beets,swt30/beets,parapente/beets,diego-plan9/beets,artemutin/beets,Kraymer/beets,xsteadfastx/beets,jcoady9/beets,mosesfistos1/beetbox,madmouser1/beets,shamangeorge/beets | beetsplug/mbsubmit.py | beetsplug/mbsubmit.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson and Diego Moreda.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
parseable by the MusicBrainz track parser. Programmatic submitting is not
implemented by MusicBrainz yet.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.autotag import Recommendation
from beets.importer import action
from beets.plugins import BeetsPlugin
from beets.ui.commands import ExtraChoice
from beetsplug.info import print_data
class MBSubmitPlugin(BeetsPlugin):
    """Importer plugin that offers prompt choices to print track listings
    in a MusicBrainz-track-parser-friendly format."""

    def __init__(self):
        super(MBSubmitPlugin, self).__init__()
        # Hook into the importer prompt so extra choices can be offered.
        self.register_listener('before_choose_candidate',
                               self.before_choose_candidate_event)

    def before_choose_candidate_event(self, session, task):
        # This intends to illustrate a simple plugin that adds choices
        # depending on conditions.
        # Plugins should return a list of ExtraChoices (basically, the
        # "cosmetic" values and a callback function). This list is received and
        # flattened on plugins.send('before_choose_candidate').
        # Only offer the print choices when matching failed or is uncertain.
        if not task.candidates or task.rec == Recommendation.none:
            return [ExtraChoice(self, 'PRINT', 'Print tracks',
                                self.print_tracks),
                    ExtraChoice(self, 'PRINT_SKIP', 'print tracks and sKip',
                                self.print_tracks_and_skip)]

    # Callbacks for choices.
    def print_tracks(self, session, task):
        # One line per track: number, artist, title and length.
        for i in task.items:
            print_data(None, i, '$track. $artist - $title ($length)')

    def print_tracks_and_skip(self, session, task):
        # Example of a function that automatically sets the next action,
        # avoiding the user to be prompted again. It has some drawbacks (for
        # example, actions such as action.MANUAL are not handled properly, as
        # they do not exit the main TerminalImportSession.choose_match loop).
        #
        # The idea is that if a callback function returns an action.X value,
        # task.action is set to that value after the callback is processed.
        for i in task.items:
            print_data(None, i, '$track. $artist - $title ($length)')
        return action.SKIP
| mit | Python | |
27bec5bc3dab9798d4ddbfcd84563b3c5056e8c8 | delete again | CCallahanIV/PyChart,CCallahanIV/PyChart,CCallahanIV/PyChart | pychart/pychart_datarender/migrations/0001_initial.py | pychart/pychart_datarender/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-15 00:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Data (uploaded data sets) and Render (saved charts)."""

    initial = True

    dependencies = [
        # The profile app must be migrated first: both models below have a
        # foreign key to PyChartProfile.
        ('pychart_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('description', models.CharField(blank=True, max_length=255, null=True)),
                ('data', models.FileField(blank=True, null=True, upload_to='data')),
                ('date_uploaded', models.DateField(auto_now=True)),
                ('date_modified', models.DateField(auto_now=True)),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='data_sets', to='pychart_profile.PyChartProfile')),
            ],
        ),
        migrations.CreateModel(
            name='Render',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('description', models.CharField(blank=True, max_length=255, null=True)),
                ('render_type', models.CharField(blank=True, choices=[('Scatter', 'Scatter'), ('Bar', 'Bar'), ('Histogram', 'Histogram')], max_length=255, null=True)),
                ('render', models.TextField(blank=True, null=True)),
                ('date_uploaded', models.DateField(auto_now=True)),
                ('date_modified', models.DateField(auto_now=True)),
                # A render can visualise several data sets, and a data set
                # can feed several renders.
                ('data_sets', models.ManyToManyField(related_name='renders', to='pychart_datarender.Data')),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='renders', to='pychart_profile.PyChartProfile')),
            ],
        ),
    ]
| mit | Python | |
7791ed9269b3d074b84a27f7f72b88a69c9ebd52 | add chaining classes | waliens/sldc | sldc/chaining.py | sldc/chaining.py | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
__author__ = "Romain Mormont <r.mormont@student.ulg.ac.be>"
class ImageProvider(object):
    """
    An interface for any component that generates the images to be fed
    into the first workflow of a processing chain.
    """
    # NOTE(review): ``__metaclass__`` is Python-2 syntax; under Python 3
    # this has no effect and the class should use ``metaclass=ABCMeta``.
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_images(self):
        """Return the images to be processed by the first workflow."""
        pass
class WorkflowLinker(object):
    """
    An interface that links two different workflows. It is given the outputs of an execution of a workflow
    instance and generates images to be processed by a second workflow instance
    """
    # NOTE(review): ``__metaclass__`` is Python-2 syntax; under Python 3
    # this has no effect and the class should use ``metaclass=ABCMeta``.
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_images(self, image, polygons_classes):
        """Given result of the application of an instance of the sldc workflow, produces images objects for the next
        steps

        Parameters
        ----------
        image: Image
            The image processed by the previous step
        polygons_classes: Array of tuples
            The polygons and their predicted classes as produced by the previous class. Tuples are structured
            as (polygon, class) when polygon is an instance of shapely.geometry.Polygon and class is an integer
            code representing the actual class
        """
        pass
class PostProcessor(object):
    """
    A post processor is a class encapsulating the processing of the results of several SLDCWorkflow
    """
    # Default implementation is a no-op; subclasses override this hook.
    def post_process(self, image, polygons_classes):
        """Actually process the results

        Parameters
        ----------
        image: Image
            The image processed by the previous step
        polygons_classes: Array of tuples
            The polygons and their predicted classes as produced by the previous class. Tuples are structured
            as (polygon, class) when polygon is an instance of shapely.geometry.Polygon and class is an integer
            code representing the actual class
        """
        pass
class WorkflowChain(object):
    """
    This class encapsulates the sequential execution of several instances of the sldc workflow on the same image.
    A processing chain might look like this :

    {ImageProvider} --images--> {Workflow}
        [ --polygons_classes--> {WorkflowLinker} --images--> {Workflow2} [...] ]

    All the generated polygons_classes are then post_processed by the PostProcessor.
    """
    def __init__(self, image_provider, workflow, post_processor, n_jobs=1):
        """Constructor for WorkflowChain objects

        Parameters
        ----------
        image_provider: ImageProvider
            An image provider that will provide the images to be processed by the first workflow
        workflow: SLDCWorkflow
            The first instance of the workflow to be applied
        post_processor: PostProcessor
            The post-processor to execute when an image has gone through the whole processing chain
        n_jobs: int, optional (default: 1)
            The number of jobs that can be used to process the images in parallel, -1 for using the number of available
            cores
        """
        self._post_processor = post_processor
        self._image_provider = image_provider
        self._first_workflow = workflow
        # Additional (workflow, linker) stages appended by append_workflow().
        self._workflows = list()
        self._linkers = list()
        self._n_jobs = n_jobs

    def append_workflow(self, workflow, workflow_linker):
        """Append a workflow to apply after the current registered sequence

        Parameters
        ----------
        workflow: SLDCWorkflow
            The workflow to append at the end of the chain
        workflow_linker: WorkflowLinker
            A linker to produce images from the prediction produced by the last workflow in the chain
        """
        self._workflows.append(workflow)
        self._linkers.append(workflow_linker)

    # TODO implement parallel implementation
    def execute(self):
        """
        Execute the processing
        """
        # Bug fix: the ImageProvider interface defines get_images(), not
        # get_image(); the previous call raised AttributeError at runtime.
        images = self._image_provider.get_images()
        for image in images:
            self._process_image(image)

    def _process_image(self, image):
        """
        Execute one image's processing

        Parameters
        ----------
        image: Image
            The image to process
        """
        # Accumulate one prediction list per stage of the chain.
        polygons_classes = list()
        prev = self._first_workflow.process(image)
        polygons_classes.append(prev)
        for workflow, linker in zip(self._workflows, self._linkers):
            # The linker derives the next stage's inputs from the previous
            # stage's predictions.
            sub_images = linker.get_images(image, prev)
            curr = list()
            for sub_image in sub_images:
                curr.append(workflow.process(sub_image))
            polygons_classes.append(curr)
            prev = curr
        self._post_processor.post_process(image, polygons_classes)
| mit | Python | |
27896063f7632afa327c4933248435c874b91b7a | Create __init__.py | crhaithcock/RushHour,crhaithcock/RushHour,crhaithcock/RushHour | tests/__init__.py | tests/__init__.py | cc0-1.0 | Python | ||
24c85bf4550c4560e2a192fd8513f3788ea2148e | add tools calcuate microseconds between two times | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | skills/time-tool/microsecond.py | skills/time-tool/microsecond.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from datetime import datetime
import time
def main():
    """Sleep 0.1 s and print the measured elapsed time in milliseconds."""
    started = datetime.now()
    time.sleep(0.1)
    finished = datetime.now()
    # datetime subtraction yields a timedelta; convert it to whole ms.
    elapsed_ms = int((finished - started).total_seconds() * 1000)
    print('step time is : %d' % elapsed_ms)


if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
cb3158cbb116153d516a01ba1f63e26301338bbb | Create sigmoid_upper_bounds.py (#629) | probml/pyprobml,probml/pyprobml,probml/pyprobml,probml/pyprobml | scripts/sigmoid_upper_bounds.py | scripts/sigmoid_upper_bounds.py | # Upper bounds for sigmoid function
import numpy as np
import math
import matplotlib.pyplot as plt
import pyprobml_utils as pml
# Logistic sigmoid.
sigmoid = lambda x: np.exp(x) / (1 + np.exp(x))
# Binary entropy of eta; used as the conjugate term in the bound below.
fstar = lambda eta: -eta * math.log(eta) - (1 - eta) * math.log(1 - eta)
# Linear-in-x upper bound on the sigmoid, parameterised by eta in (0, 1):
# sigmoid(x) <= exp(eta * x - fstar(eta)).
sigmoid_upper = lambda eta, x: np.exp(eta * x - fstar(eta))

# Two example values of the variational parameter.
eta1, eta2 = 0.2, 0.7
start, stop, step = -6, 6, 1 / 10
xs = np.arange(start, stop + step, step)

# Sigmoid in red, the two upper bounds in blue.
plt.plot(xs, sigmoid(xs), 'r', linewidth=3)
plt.plot(xs, sigmoid_upper(eta1, xs), 'b', linewidth=3)
plt.plot(xs, sigmoid_upper(eta2, xs), 'b', linewidth=3)
# Label each bound next to its curve.
plt.text(1 / 2 + 1 / 2, sigmoid_upper(eta1, 1 / 2), 'eta=0.2')
plt.text(0 + 1 / 2, sigmoid_upper(eta2, 0), 'eta=0.7')
plt.xlim([start, stop])
plt.ylim([0, 1])
pml.savefig('sigmoid_upper_bound.pdf', dpi=300)
plt.show()
| mit | Python | |
9bfb182f92b8ac82ddb1b35c886b4a3f79708696 | Add script for train/test split | YerevaNN/mimic3-benchmarks | scripts/split_train_and_test.py | scripts/split_train_and_test.py | import os
import shutil
import argparse
import random
random.seed(47297)
# Command-line interface: the single argument is the root directory that
# contains one sub-directory per subject.
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
args, _ = parser.parse_known_args()
def move_to_partition(patients, partition):
    """Move each patient directory into ``<root>/<partition>/``.

    Creates the partition directory on first use.  Relies on the
    module-level parsed ``args`` for the root path.
    """
    root = args.subjects_root_path
    partition_dir = os.path.join(root, partition)
    if not os.path.exists(partition_dir):
        os.mkdir(partition_dir)
    for patient in patients:
        shutil.move(os.path.join(root, patient),
                    os.path.join(partition_dir, patient))
folders = os.listdir(args.subjects_root_path)
# Keep only all-digit directory names (patient ids).  Wrapped in list()
# because on Python 3 filter() returns a lazy iterator, which cannot be
# shuffled or sliced; list() keeps Python 2 behavior unchanged.
folders = list(filter(str.isdigit, folders))
random.shuffle(folders)

# 85/15 train/test split, performed per subject so no patient appears in
# both partitions.
train_cnt = int(0.85 * len(folders))
train_patients = sorted(folders[:train_cnt])
test_patients = sorted(folders[train_cnt:])

assert len(set(train_patients) & set(test_patients)) == 0

move_to_partition(train_patients, "train")
move_to_partition(test_patients, "test")
| mit | Python | |
284da54cc9fc322c32e44706716b548bcd652dc4 | Test axe app. | soasme/axe | tests/test_axe.py | tests/test_axe.py | # -*- coding: utf-8 -*-
import pytest
from axe import Axe, errors
@pytest.fixture
def axe():
    """Provide a fresh Axe application instance for each test."""
    return Axe()
def test_build_from_urls(axe):
    """build() registers each URL rule with its view function."""
    func = lambda: ''
    axe.build({'/': func})
    assert '/' in axe.urls
    assert axe.urls['/'] == func
def test_register_ext_success(axe):
    """register_ext exposes the extension under its function name."""
    @axe.register_ext
    def test(request):
        pass
    assert axe.exts['test'] == test
def test_register_ext_duplicated(axe):
    """Registering a second extension under an existing name must raise."""
    # 'query' is presumably an extension Axe registers by default, so this
    # registration collides with it -- TODO confirm against Axe internals.
    with pytest.raises(errors.DuplicatedExtension):
        @axe.register_ext
        def query(request):
            pass
| mit | Python | |
f9d7612dfbad8d5f394bd7c0a9ed6db5f6234eb5 | add imf_tools | zpace/stellarmass_pca | imf_tools.py | imf_tools.py | '''
Define several stellar initial mass functions,
with some tools for working with them
'''
class IMF(object):
    '''
    Stellar initial mass function.

    Masses and luminosities are implicitly in solar units, times in Gyr.
    NOTE(review): several IMF choices below look like unfinished stubs --
    `np` is never imported in this module, `miller_scalo_pdf_u` ignores its
    branch data, `kroupa_u` returns nothing, and `chabrier_u` is empty.
    Confirm before relying on anything but `salpeter_pdf_u`.
    '''
    __version__ = '0.1'

    def __init__(self, imftype='salpeter', ml=0.1, mh=150., mf=1., dm=.005):
        '''
        set up an IMF with some probability distribution, lower mass limit,
        and upper mass limit, that formed some mass

        all masses & luminosities are implicitly in solar units, and times
        are in Gyr

        I've provided several choices of IMF
        '''
        # NOTE(review): the `mf` (mass formed) argument is accepted but
        # never stored -- confirm whether it should be kept.
        self.imftype = imftype
        self.ml = ml # low mass limit
        self.mh = mh # high mass limit
        self.dm = dm # standard mass differential for computations

    @staticmethod
    def salpeter_pdf_u(m):
        '''straight up power law'''
        return 1./2.28707 * m**-2.35

    @staticmethod
    def miller_scalo_pdf_u(m):
        # NOTE(review): stub -- `bdy`/`inds` are unused, `np` is not
        # imported, and the return value ignores the broken-power-law
        # branches entirely.
        bdy = 1.
        inds = [0., -2.3]
        branch = np.argmax(np.stack([]))
        return m**-2.35

    @staticmethod
    def kroupa_u(m):
        # NOTE(review): stub -- defines the broken-power-law boundaries and
        # indices but returns None.
        bdys = [.08, .5]
        inds = [-0.3, -1.3, -2.3]

    @staticmethod
    def chabrier_u(m):
        '''Not implemented.'''
def mass_at_age(t):
    # NOTE(review): placeholder -- presumably meant to return the
    # main-sequence turnoff mass at age t (Gyr); confirm intended semantics.
    raise NotImplementedError
| mit | Python | |
9efa97198f81f5afce03e30c3bce5f5fc23a8d28 | add test for Row | esjeon/eatable | tests/test_row.py | tests/test_row.py |
import unittest
from eatable import Table, Row
class RowTestCase(unittest.TestCase):
def setUp(self):
self.header = ('A', 'B', 'C')
self.table = Table(self.header)
def test_init(self):
Row(self.table, 0, ('a1', 'b2', 'c2'))
def test_getitem(self):
row = Row(self.table, 0, ('a1', 'b1', 'c1'))
self.assertEqual(row['A'], 'a1')
self.assertEqual(row['B'], 'b1')
self.assertEqual(row['C'], 'c1')
if __name__ == '__main__':
unittest.main()
| mit | Python | |
e0bfc2bdff3d44c8839e4c04948e8da824f7b260 | Write requests-like get() | Pringley/spyglass | spyglass/util.py | spyglass/util.py | from urllib2 import urlopen
from collections import namedtuple
Response = namedtuple('Response', ['text'])
def get(url):
    """GET *url* and return a Response whose .text is the raw body.

    The underlying connection is closed explicitly instead of being
    leaked once the body has been read.
    """
    conn = urlopen(url)
    try:
        return Response(text=conn.read())
    finally:
        conn.close()
| mit | Python | |
96c08b94d40850b5dd703b052943de2827ebf9f9 | create command.py and abstract command template | 6180/foxybot | foxybot/command.py | foxybot/command.py | """Provide a template for making commands and a decorator to register them."""
from abc import abstractmethod, abstractclassmethod, ABCMeta
from enum import Enum
from registrar import CommandRegistrar
def bot_command(cls):
    """Class decorator: instantiate *cls* and register its aliases.

    Aliases are stored lowercased so lookups are case-insensitive;
    duplicates are reported and skipped.  Returns *cls* unchanged so the
    decorated name still refers to the class.
    """
    command = cls()
    if not issubclass(command.__class__, AbstractCommand):
        print(f'[ERROR] {command.__module__} is not a subclass of AbstractCommand and wont be loaded.')
        return cls

    command_registrar = CommandRegistrar.instance()
    for alias in command.aliases:
        # BUG FIX: the duplicate check used alias.lower() but the original-
        # case alias was stored, so mixed-case aliases silently bypassed
        # deduplication and case-insensitive lookup.  Store lowercased.
        alias = alias.lower()
        if alias not in command_registrar.command_table:
            command_registrar.command_table[alias] = command
        else:
            print(f'Error: duplicate alias {alias} in {command.__module__}.py...')
            print(f'Duplicate is in {command_registrar.command_table[alias].__module__}')
    return cls
class AbstractCommand(metaclass=ABCMeta):
    """Ensure all commands have a consistent interface"""

    # NOTE(review): stacking @staticmethod on top of @abstractclassmethod
    # is contradictory (static vs. class method) -- confirm which binding
    # is actually intended before subclassing.
    @staticmethod
    @abstractclassmethod
    def execute(shards, shard, msg):
        """Executes this instances command"""
        raise NotImplementedError

    @property
    @abstractmethod
    def aliases(self):
        """The aliases that can be used to call this command"""
        raise NotImplementedError
| bsd-2-clause | Python | |
e85d1f0e9b198184103973f198bf1ceddbca6a65 | declare the federica rspec schemas | onelab-eu/sfa,onelab-eu/sfa,yippeecw/sfa,onelab-eu/sfa,yippeecw/sfa,yippeecw/sfa | sfa/rspecs/versions/federica.py | sfa/rspecs/versions/federica.py | from sfa.rspecs.versions.pgv2 import PGv2Ad, PGv2Request, PGv2Manifest
class FedericaAd (PGv2Ad):
    # PGv2 advertisement RSpec bound to the Federica (NETMODE) schema.
    enabled = True
    schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd'
    namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
class FedericaRequest (PGv2Request):
    # PGv2 request RSpec bound to the Federica (NETMODE) schema.
    enabled = True
    schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/request.xsd'
    namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
class FedericaManifest (PGv2Manifest):
    # PGv2 manifest RSpec bound to the Federica (NETMODE) schema.
    enabled = True
    schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/manifest.xsd'
    namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
| mit | Python | |
c055009077546b22090897f79f4facce8bdb97d5 | change module names in hvc/__init__.py | NickleDave/hybrid-vocal-classifier | hvc/__init__.py | hvc/__init__.py | """
__init__.py imports key functions from modules to package level
"""
from .utils.features import load_feature_file
from .extract import extract
from .predict import predict
from .select import select
from .parseconfig import parse_config
from . import metrics
from . import plot | """
__init__.py imports key functions from modules to package level
"""
from .utils.features import load_feature_file
from .featureextract import extract
from .labelpredict import predict
from .modelselect import select
from .parseconfig import parse_config
from . import metrics
from . import plot | bsd-3-clause | Python |
fa375d06128e493f86524e82fa93c892f4d925b7 | Add script to find forms missing in ES | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/data_pipeline_audit/management/commands/find_sql_forms_not_in_es.py | corehq/apps/data_pipeline_audit/management/commands/find_sql_forms_not_in_es.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from datetime import datetime
from django.core.management.base import BaseCommand
import sys
from django.db.models import Q, F
from django.db.models.functions import Greatest
from corehq.form_processor.models import XFormInstanceSQL
from corehq.apps.es import FormES
import argparse
from dimagi.utils.chunked import chunked
DATE_FORMAT = "%Y-%m-%d"


def valid_date(s):
    """argparse type: parse *s* as YYYY-MM-DD, raising ArgumentTypeError
    (so argparse reports a proper usage error) on malformed input."""
    try:
        return datetime.strptime(s, DATE_FORMAT)
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Not a valid date: '{0}'.".format(s))
class Command(BaseCommand):
    help = "Print IDs of sql forms that are in the primary DB but not in ES."

    def add_arguments(self, parser):
        # Optional date window; both bounds are parsed with valid_date.
        parser.add_argument(
            '-s',
            '--startdate',
            dest='start',
            type=valid_date,
            help="The start date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
        )
        parser.add_argument(
            '-e',
            '--enddate',
            dest='end',
            type=valid_date,
            help="The end date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
        )

    def handle(self, **options):
        startdate = options.get('start')
        enddate = options.get('end')
        # Progress messages go to stderr so stdout carries only the IDs.
        print("Fetching all form ids...", file=sys.stderr)
        all_ids = list(iter_form_ids_by_last_modified(startdate, enddate))
        print("Woo! Done fetching. Here we go", file=sys.stderr)
        # Check ES in batches of 100 ids to keep request sizes bounded.
        for doc_ids in chunked(all_ids, 100):
            es_ids = (FormES()
                      .remove_default_filter('is_xform_instance')
                      .doc_id(doc_ids).values_list('_id', flat=True))
            missing_ids = set(doc_ids) - set(es_ids)
            for form_id in missing_ids:
                print(form_id)
def iter_form_ids_by_last_modified(start_datetime, end_datetime):
    """Return form ids (across all partitioned form DBs) whose last-modified
    timestamp lies strictly inside (start_datetime, end_datetime).

    last_modified is the greatest of received_on / edited_on / deleted_on.
    """
    from corehq.sql_db.util import run_query_across_partitioned_databases

    annotate = {
        'last_modified': Greatest('received_on', 'edited_on', 'deleted_on'),
    }

    # NOTE(review): the second Q compares `state` to a sum of
    # state-bitand terms -- presumably a state-mask filter on
    # deleted/deprecated/duplicate/error/submission-error forms, but the
    # expression is hard to follow; confirm it matches the intended mask.
    return run_query_across_partitioned_databases(
        XFormInstanceSQL,
        (Q(last_modified__gt=start_datetime, last_modified__lt=end_datetime) &
         Q(state=F('state').bitand(XFormInstanceSQL.DELETED) +
           F('state').bitand(XFormInstanceSQL.DEPRECATED) +
           F('state').bitand(XFormInstanceSQL.DUPLICATE) +
           F('state').bitand(XFormInstanceSQL.ERROR) +
           F('state').bitand(XFormInstanceSQL.SUBMISSION_ERROR_LOG) +
           F('state'))),
        annotate=annotate,
        values=['form_id'],
    )
| bsd-3-clause | Python | |
51372b15e9abe4c0ae35294ec51694751fe2ae32 | Add a py2exe configuration setup. | robotframework/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,robotframework/RIDE | src/bin/setup.py | src/bin/setup.py | from distutils.core import setup
import py2exe, sys
from glob import glob
# NOTE(review): presumably added so py2exe can locate the bundled
# MSVC++ 2008 (VC90) runtime assembly -- confirm on a build machine.
sys.path.append("C:\\Temp\\Microsoft.VC90.CRT")

# Ship the VC90 CRT DLLs/manifest next to the generated executable.
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Temp\Microsoft.VC90.CRT\*.*'))]

setup(
    data_files=data_files,
    console=['ride.py'])
| apache-2.0 | Python | |
d8521011d5be28812c222b58901a07e8f30e87ac | Add testing code for memory leak. | abhiskk/fast-neural-style,onai/fast-neural-style,abhiskk/fast-neural-style,darkstar112358/fast-neural-style,darkstar112358/fast-neural-style | neuralstyle/testing-train.py | neuralstyle/testing-train.py | from __future__ import print_function
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from transformernet import TransformerNet
from vgg16 import Vgg16
import utils
import os
def main():
    """Iterate over the image dataset without doing any training.

    Used to exercise the DataLoader in isolation (memory-leak testing):
    any memory growth observed here comes from data loading alone.
    """
    parser = argparse.ArgumentParser(description="parser for fast-neural-style")
    parser.add_argument("--batch-size", "-b", type=int, default=4)
    parser.add_argument("--epochs", "-e", type=int, default=2)
    parser.add_argument("--cuda", type=int, default=0)
    parser.add_argument("--dataset", type=str, default="MSCOCO")
    parser.add_argument("--image-size", type=int, default=256)
    args = parser.parse_args()

    if args.cuda and not torch.cuda.is_available():
        print("WARNING: torch.cuda not available, using CPU.")
        args.cuda = 0

    if args.cuda:
        # Single worker, no pinned memory, to keep the loader behavior simple.
        kwargs = {'num_workers': 1, 'pin_memory': False}
    else:
        kwargs = {}

    print("=====================")
    print("TEST MODE")
    print("using 1 worker")
    print("=====================")

    print("=====================")
    print("TORCH VERSION:", torch.__version__)
    print("BATCH SIZE:", args.batch_size)
    print("EPOCHS:", args.epochs)
    print("CUDA:", args.cuda)
    print("DATASET:", args.dataset)
    print("IMAGE SIZE:", args.image_size)
    print("=====================\n")

    # Scale, center-crop, convert to a [0, 255]-range tensor.
    transform = transforms.Compose([transforms.Scale(args.image_size),
                                    transforms.CenterCrop(args.image_size),
                                    transforms.ToTensor(),
                                    transforms.Lambda(lambda x: x.mul(255))])
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)

    for e in range(args.epochs):
        batch_id = 0
        # Only iterate the loader; the batches themselves are discarded.
        for x in train_loader:
            if batch_id < 10 or batch_id % 500 == 0:
                print("Processing batch:", batch_id)
            batch_id += 1

    print("\nDone :)")


if __name__ == "__main__":
    main()
| mit | Python | |
5a2308cc98a99e9c74c14611fdb45adf7601d390 | prepare bruteforce for http basic authentication; do not forget to create the b64 encoder in zap payload processor; | DarkLighting/OWASP-ZAP-Scripts | payload_generator/bruteforce.py | payload_generator/bruteforce.py | # Auxiliary variables/constants for payload generation.
INITIAL_VALUE = 0;
count = INITIAL_VALUE;  # index of the next password to emit
user = str('admin');  # username prepended to every payload
# NOTE(review): hard-coded local wordlist path -- adjust per machine.
passfile_path = 'C:\\Users\\user\\Documents\\wordlists\\test.txt';
NUMBER_OF_PAYLOADS = sum(1 for line in open(passfile_path));
passwd = list();
for line in open(passfile_path): # initializing passwords into list
    passwd.append(line.rstrip());

# Debug output so ZAP's script console shows what was loaded.
print('NUMBER_OF_PAYLOADS = ' + str(NUMBER_OF_PAYLOADS));
print('len(passwd) = ' + str(len(passwd)));
print('count = '+str(count));
# The number of generated payloads, zero to indicate unknown number.
# The number is used as a hint for progress calculations.
def getNumberOfPayloads():
    return NUMBER_OF_PAYLOADS;

# Returns true if there are still payloads to generate, false otherwise.
# Called before each call to next().
def hasNext():
    # 'count' is the module-level cursor advanced by next().
    return (count < NUMBER_OF_PAYLOADS);

# Returns the next generated payload.
# This method is called while hasNext() returns true.
def next():
    global count;
    # Debug tracing; remove once the script is stable.
    print('next_count = ' + str(count));
    payload = count;
    print('payload = ' + str(payload));
    count+=1;
    print('incremented next_count = ' + str(count));
    # Payload shape is "user:password"; the base64 encoding for HTTP Basic
    # auth is expected to happen in a separate ZAP payload processor.
    print(user+':'+passwd[payload]);
    return user+':'+passwd[payload];
# Resets the internal state of the payload generator, as if no calls to
# hasNext() or next() have been previously made.
# Normally called once the method hasNext() returns false and while payloads
# are still needed.
def reset():
    # BUG FIX: without the 'global' declaration the assignment only bound a
    # local variable, so calling reset() never touched the module-level
    # counter and the generator was never actually reset.
    global count
    count = INITIAL_VALUE;
# Releases any resources used for generation of payloads (for example, a file).
# Called once the payload generator is no longer needed.
def close():
    # Nothing to release: the wordlist was read entirely at import time.
    pass;
| mit | Python | |
0ced2a66affd65a3dda90dc49bac8bd43e1c6fa7 | Remove index on LogRecord.message. | fairview/django-peavy | peavy/migrations/0004_drop_message_index.py | peavy/migrations/0004_drop_message_index.py | # encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """Removes the index on LogRecord.message (backwards re-adds it)."""

    def forwards(self, orm):
        # Removing index on 'LogRecord', fields ['message']
        db.delete_index('peavy_logrecord', ['message'])

    def backwards(self, orm):
        # Adding index on 'LogRecord', fields ['message']
        db.create_index('peavy_logrecord', ['message'])

    # South model-freeze snapshot; auto-generated -- do not edit by hand.
    models = {
        'peavy.logrecord': {
            'Meta': {'ordering': "('-timestamp',)", 'object_name': 'LogRecord'},
            'application': ('django.db.models.fields.CharField', [], {'default': "'sandbox'", 'max_length': '256', 'db_index': 'True'}),
            'client_ip': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'blank': 'True'}),
            'debug_page': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'origin_server': ('django.db.models.fields.CharField', [], {'default': "'kaze.jkcl.local'", 'max_length': '256', 'db_index': 'True'}),
            'stack_trace': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'user_pk': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'blank': 'True'})
        }
    }

    complete_apps = ['peavy']
| mit | Python | |
f3e91020f0426fedfe229e94bf1ddc69dd64a136 | Add new example plot for `match_template`. | SamHames/scikit-image,warmspringwinds/scikit-image,ClinicalGraphics/scikit-image,paalge/scikit-image,emmanuelle/scikits.image,newville/scikit-image,almarklein/scikit-image,chintak/scikit-image,warmspringwinds/scikit-image,rjeli/scikit-image,chriscrosscutler/scikit-image,Britefury/scikit-image,GaZ3ll3/scikit-image,Britefury/scikit-image,vighneshbirodkar/scikit-image,emon10005/scikit-image,youprofit/scikit-image,michaelaye/scikit-image,robintw/scikit-image,jwiggins/scikit-image,youprofit/scikit-image,paalge/scikit-image,chriscrosscutler/scikit-image,almarklein/scikit-image,ofgulban/scikit-image,bennlich/scikit-image,robintw/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,keflavich/scikit-image,keflavich/scikit-image,blink1073/scikit-image,bennlich/scikit-image,dpshelio/scikit-image,ajaybhat/scikit-image,almarklein/scikit-image,michaelpacer/scikit-image,almarklein/scikit-image,vighneshbirodkar/scikit-image,Hiyorimi/scikit-image,vighneshbirodkar/scikit-image,emmanuelle/scikits.image,michaelpacer/scikit-image,newville/scikit-image,ajaybhat/scikit-image,GaZ3ll3/scikit-image,michaelaye/scikit-image,rjeli/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,chintak/scikit-image,SamHames/scikit-image,oew1v07/scikit-image,juliusbierk/scikit-image,Hiyorimi/scikit-image,jwiggins/scikit-image,emmanuelle/scikits.image,emon10005/scikit-image,emmanuelle/scikits.image,ClinicalGraphics/scikit-image,bsipocz/scikit-image,juliusbierk/scikit-image,bsipocz/scikit-image,blink1073/scikit-image,ofgulban/scikit-image,chintak/scikit-image,rjeli/scikit-image,Midafi/scikit-image,SamHames/scikit-image,Midafi/scikit-image,WarrenWeckesser/scikits-image,pratapvardhan/scikit-image,oew1v07/scikit-image,dpshelio/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image | doc/examples/plot_template_alt.py | doc/examples/plot_template_alt.py | """
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
# Template: a sub-image centered on a single coin.
coin = image[170:220, 75:130]

result = match_template(image, coin)
# Peak of the correlation surface corresponds to the top-left corner
# of the best-matching region.
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]

fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))

ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')

ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)

ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)

plt.show()
| bsd-3-clause | Python | |
fc1c0a563f8bd4fd33e63285ab6af79825b8b927 | Add a modified terminalcolors.py | joeyates/vim-jgy-256-theme,joeyates/vim-jgy-256-theme | bin/terminalcolors.py | bin/terminalcolors.py | #!/usr/bin/env python
# Copyright (C) 2006 by Johannes Zellner, <johannes@zellner.org>
# modified by mac@calmar.ws to fit my output needs
# modified by crncosta@carloscosta.org to fit my output needs
# modified by joeyates, 2014
from os import system
def foreground(n):
    # Set terminal foreground to 256-color index n.
    system('tput setaf %u' % n)

def background(n):
    # Set terminal background to 256-color index n.
    system('tput setab %u' % n)

def out(n):
    # Print the color number as a swatch on its own background,
    # then restore background 0.
    background(n)
    system('echo -n "% 4d"' % n)
    background(0)

def table(start, end, width):
    # Print color swatches start..end (inclusive), 'width' per row.
    def is_end_of_row(n):
        return (n - start + 1) % width == 0

    for n in range(start, end + 1):
        out(n)
        if is_end_of_row(n):
            print

foreground(16)

# normal colors
table(0, 15, 8)

print

# other colors
table(16, 231, 6)

print

# greyscale
table(232, 255, 6)

# Restore a sane default (light-grey on black).
foreground(7)
background(0)
| mit | Python | |
1de610b2460b3b3bff24b79398d214001097e562 | Implement Gmail OAuth 2.0. | blukat29/notifyhere,blukat29/notifyhere | notifyhere/dash/api/gmail.py | notifyhere/dash/api/gmail.py | from httplib import HTTPSConnection
import json
import base
import tools
import secrets
class GmailApi(base.ApiBase):
    """OAuth 2.0 client for the Gmail notification source."""

    def __init__(self):
        base.ApiBase.__init__(self, "gmail")
        self.token = ""  # OAuth access token; empty until authenticated

    def icon_url(self):
        return "https://mail.google.com/favicon.ico"

    def oauth_link(self):
        """Return the URL the user visits to grant mailbox access."""
        url = "https://accounts.google.com/o/oauth2/auth"
        args = {
            "response_type": "code",
            "client_id": secrets.GMAIL_CLIENT_ID,
            "redirect_uri": secrets.BASE_REDIRECT_URL + "gmail",
            "scope": "https://mail.google.com/",
        }
        return url + "?" + tools.encode_params(args)

    def oauth_callback(self, params):
        """Exchange the authorization code in *params* for an access token.

        Returns None when no code is present or the token exchange fails;
        on success sets self.token and self.is_auth.
        """
        if 'code' not in params:
            return None
        conn = HTTPSConnection("accounts.google.com")
        body = tools.encode_params({
            "grant_type": "authorization_code",
            "code": params['code'],
            "client_id": secrets.GMAIL_CLIENT_ID,
            "client_secret": secrets.GMAIL_CLIENT_SECRET,
            "redirect_uri": secrets.BASE_REDIRECT_URL + "gmail",
        })
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
        }
        conn.request("POST", "/o/oauth2/token", body, headers)
        resp = conn.getresponse()

        try:
            self.token = json.loads(resp.read())['access_token']
            self.is_auth = True
        except (KeyError, ValueError):
            return None

    def logout(self):
        # BUG FIX: 'self' was missing from the signature, which made the
        # method raise TypeError whenever it was called on an instance.
        self.is_auth = False
        self.token = ""
| mit | Python | |
5f81d53c16816289cf52a5b4118e482b7650defe | Add MaintenanceMiddleware | SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange | app/soc/middleware/maintenance.py | app/soc/middleware/maintenance.py | #!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Middleware to handle exceptions.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from django import http
from django.utils.translation import ugettext
from soc.views.helper import responses
DEF_DOWN_FOR_MAINTENANCE_MSG = ugettext("Down for maintenance")
DEF_IN_UNEXPECTED_MAINTENANCE_MSG = ugettext(
"Down for unexpected maintenance.")
class MaintenanceMiddleware(object):
  """Middleware to handle maintenance mode.

  Non-admin requests get a 'down for maintenance' page while the site is
  in maintenance mode, and a datastore CapabilityDisabledError is
  translated into the same page.
  """

  def maintenance(self, request):
    """Returns a 'down for maintenance' view.
    """

    context = responses.getUniversalContext(request)
    context['page_name'] = ugettext('Maintenance')

    # Use the configured site notice as the page body when one is set.
    notice = context.pop('site_notice')

    if not notice:
      context['body_content'] = DEF_IN_UNEXPECTED_MAINTENANCE_MSG
    else:
      context['body_content'] = notice

    context['header_title'] = DEF_DOWN_FOR_MAINTENANCE_MSG
    context['sidebar_menu_items'] = [
        {'heading': DEF_DOWN_FOR_MAINTENANCE_MSG,
         'group': ''},
        ]

    template = 'soc/base.html'

    return responses.respond(request, template, context=context)

  def process_request(self, request):
    """Serves the maintenance page to non-admins while in maintenance."""

    context = responses.getUniversalContext(request)

    if not context['is_admin'] and context['in_maintenance']:
      return self.maintenance(request)

  def process_exception(self, request, exception):
    """Renders the maintenance page when the datastore is disabled."""

    if isinstance(exception, CapabilityDisabledError):
      # assume the site is in maintenance if we get CDE
      # BUG FIX: maintenance is a method; the bare call raised NameError.
      return self.maintenance(request)

    # let the exception handling middleware handle it
    return None
| apache-2.0 | Python | |
4f1bb01bba0c2241a190bbf7fb21683be630abfa | Create Glyph3D.py | lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples | src/Python/Filtering/Glyph3D.py | src/Python/Filtering/Glyph3D.py | #!/usr/bin/env python
import vtk
def main():
    colors = vtk.vtkNamedColors()

    # Three points along the main diagonal; one glyph is placed at each.
    points = vtk.vtkPoints()
    points.InsertNextPoint(0,0,0)
    points.InsertNextPoint(1,1,1)
    points.InsertNextPoint(2,2,2)

    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)

    # Create anything you want here, we will use a cube for the demo.
    cubeSource = vtk.vtkCubeSource()

    # Copy the cube (source) onto every input point of the polydata.
    glyph3D = vtk.vtkGlyph3D()
    glyph3D.SetSourceConnection(cubeSource.GetOutputPort())
    glyph3D.SetInputData(polydata)
    glyph3D.Update()

    # Visualize
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(glyph3D.GetOutputPort())

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)

    renderer.AddActor(actor)
    renderer.SetBackground(colors.GetColor3d("SlateGray")) # Background Slate Gray

    renderWindow.Render()
    # Blocks until the interactor window is closed.
    renderWindowInteractor.Start()


if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
89262fbd2375724ff9120fe01799a036b1c34f6f | add new package at v1.1.6 (#20598) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-mercantile/package.py | var/spack/repos/builtin/packages/py-mercantile/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMercantile(PythonPackage):
    """Web mercator XYZ tile utilities."""

    homepage = "https://github.com/mapbox/mercantile"
    url = "https://pypi.io/packages/source/m/mercantile/mercantile-1.1.6.tar.gz"

    maintainers = ['adamjstewart']

    version('1.1.6', sha256='0dff4cbc2c92ceca0e0dfbb3dc74392a96d33cfa29afb1bdfcc80283d3ef4207')

    # Build and runtime requirements.
    depends_on('py-setuptools', type='build')
    depends_on('py-click@3.0:', type=('build', 'run'))
| lgpl-2.1 | Python | |
41bc3c33cc1442105f019e06c40d189c27f65758 | add save_json helper | undertherain/vsmlib | vsmlib/misc/data.py | vsmlib/misc/data.py | import json
def save_json(data, path):
    """Serialize *data* to *path* as pretty-printed, key-sorted JSON.

    Non-ASCII characters are written as-is (ensure_ascii=False).
    """
    s = json.dumps(data, ensure_ascii=False, indent=4, sort_keys=True)
    # 'with' guarantees the handle is closed even if the write raises.
    with open(path, 'w') as f:
        f.write(s)
| apache-2.0 | Python | |
8483a311f75a3d3682e66fba2f805ea20ebf6870 | add memory usage beacon | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/beacons/memusage.py | salt/beacons/memusage.py | # -*- coding: utf-8 -*-
'''
Beacon to monitor memory usage.
.. versionadded::
:depends: python-psutil
'''
# Import Python libs
from __future__ import absolute_import
import logging
import re
# Import Salt libs
import salt.utils
# Import Third Party Libs
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
log = logging.getLogger(__name__)
__virtualname__ = 'memusage'
def __virtual__():
    # The beacon is unsupported on Windows and requires psutil.
    if salt.utils.is_windows():
        return False
    elif HAS_PSUTIL is False:
        return False
    else:
        return __virtualname__
def validate(config):
    '''
    Validate the beacon configuration.

    The memusage beacon expects its configuration to be a dictionary.
    '''
    # BUG FIX: docstring/comment/log message referred to the 'diskusage'
    # beacon -- leftover copy/paste from that module.
    if not isinstance(config, dict):
        log.info('Configuration for memusage beacon must be a dictionary.')
        return False
    return True
def beacon(config):
    '''
    Monitor the memory usage of the minion

    Specify thresholds for percent used and only emit a beacon if it is exceeded.

    .. code-block:: yaml

        beacons:
          memusage:
            - percent: 63%
    '''
    # NOTE(review): validate() above requires a dict, but this loop treats
    # each entry as a single-key mapping (list-of-dicts style) -- confirm
    # the expected config shape.
    ret = []

    for memusage in config:
        # BUG FIX: dict.keys() is not indexable on Python 3; take the
        # first (only) key via an iterator instead of keys()[0].
        mount = next(iter(memusage))

        _current_usage = psutil.virtual_memory()
        current_usage = _current_usage.percent

        monitor_usage = memusage[mount]
        # Thresholds may be given as '63%'; strip the percent sign.
        if '%' in monitor_usage:
            monitor_usage = re.sub('%', '', monitor_usage)
        monitor_usage = float(monitor_usage)
        if current_usage >= monitor_usage:
            ret.append({'memusage': current_usage})
    return ret
16193b302bb07429c604af2a9637850c2e751d1f | Add script to be run as cron job | mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge | stoneridge_cronjob.py | stoneridge_cronjob.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import ConfigParser
import os
import subprocess
import sys
import tempfile
import time
import stoneridge
class StoneRidgeException(Exception):
    # Raised when a pipeline stage exits non-zero; aborts the cron run.
    pass
class StoneRidgeCronJob(object):
    """Runs the full stone ridge pipeline (download -> run -> upload),
    logging each stage and archiving/cleaning up on failure.

    (Python 2 code: uses old-style raise/except syntax and file().)
    """

    def __init__(self, conffile, srroot, srwork):
        # conffile: ini config with a [download] section (server, root)
        # srroot:   installation directory of the stoneridge_* scripts
        # srwork:   scratch working directory for this run
        self.srroot = srroot
        self.srwork = srwork
        self.logfile = None
        self.log = None
        self.archive_on_failure = False
        self.cleaner_called = False
        cp = ConfigParser.SafeConfigParser()
        cp.read([conffile])
        self.dl_server = cp.get('download', 'server')
        self.dl_rootdir = cp.get('download', 'root')

    def do_error(self, stage):
        """Record a stage failure in the log and abort the run."""
        self.log.write('Error running %s: see %s\n' % (stage, self.logfile))
        raise StoneRidgeException, 'Error exit during %s' % (stage,)

    def run_process(self, stage, *args):
        """Run stoneridge_<stage>.py, appending its output to the run log.

        On failure, best-effort archive and cleanup are attempted (each at
        most once per run) before do_error() raises.
        """
        script = os.path.join(self.srroot, 'stoneridge_%s.py' % (stage,))
        command = [sys.executable,
                   script,
                   '--root', self.srroot,
                   '--workdir', self.srwork]
        command.extend(args)
        self.log.write('### Running %s@%s\n' % (stage, int(time.time())))
        self.log.write('    %s\n' % (' '.join(command),))
        rval = subprocess.call(command, stdout=self.log,
                               stderr=subprocess.STDOUT)
        if rval:
            self.log.write('### FAILED: %s@%s\n' % (stage, int(time.time())))

            if self.archive_on_failure:
                # Reset the flag first so a failing archiver cannot recurse.
                self.archive_on_failure = False
                try:
                    self.run_process('archiver')
                except StoneRidgeException, e:
                    pass
            if not self.cleaner_called:
                self.cleaner_called = True
                try:
                    self.run_process('cleaner')
                except StoneRidgeException, e:
                    pass
            self.do_error(stage)
        else:
            self.log.write('### SUCCEEDED: %s@%s\n' % (stage, int(time.time())))

    def run(self):
        """Execute all pipeline stages in order."""
        stoneridge.ArgumentParser.setup_dirnames(self.srroot, self.srwork)

        for d in (stoneridge.outdir, stoneridge.downloaddir):
            os.mkdir(d)

        for d in (stoneridge.archivedir, stoneridge.logdir):
            if not os.path.exists(d):
                os.mkdir(d)

        self.logfile = os.path.join(stoneridge.logdir,
                'stoneridge_%s.log' % (int(time.time()),))

        with file(self.logfile, 'w') as f:
            self.log = f

            self.run_process('downloader', '--server', self.dl_server,
                    '--downloaddir', self.dl_rootdir)

            self.run_process('unpacker')

            self.run_process('info_gatherer')

            # From here on, a failure should archive the partial results.
            self.archive_on_failure = True

            self.run_process('runner')

            self.run_process('collater')

            self.run_process('uploader')

            self.archive_on_failure = False

            self.run_process('archiver')

            self.cleaner_called = True
            self.run_process('cleaner')

        self.log = None
@stoneridge.main
def main():
    """Entry point: parse args, self-update (unless --no-update), re-exec,
    then run the cron job."""
    parser = argparse.ArgumentParser()
    # BUG FIX: argparse uses add_argument/parse_args; the optparse-style
    # add_option/parse_arguments calls raised AttributeError.
    parser.add_argument('--config', dest='config',
                        default='/etc/stoneridge.ini')
    parser.add_argument('--no-update', dest='update', default=True,
                        action='store_false')
    args = parser.parse_args()

    if args.update:
        # BUG FIX: stray ':' after the update call was a syntax error, the
        # config was read from 'parser' instead of 'args', and argv
        # duplicated sys.executable.  After updating, re-exec ourselves
        # with --no-update so we don't loop.
        stoneridge.update(args.config)
        return subprocess.call([sys.executable, __file__, '--no-update'])

    srroot = os.path.split(__file__)[0]
    srwork = tempfile.mkdtemp()

    cronjob = StoneRidgeCronJob(args.config, srroot, srwork)
    cronjob.run()
| mpl-2.0 | Python | |
9e6f8768d60d38e69074c5275637deaa62e6fc9e | check how often URL matching would match the right documents in the test corpus | ModernMT/DataCollection,ModernMT/DataCollection,ModernMT/DataCollection,ModernMT/DataCollection,ModernMT/DataCollection | baseline/url_matching.py | baseline/url_matching.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from strip_language_from_uri import LanguageStripper
import chardet
from collections import defaultdict
import re
import urlparse
def has_prefix(prefixes, s):
    "Returns true if s starts with one of the prefixes"
    # str.startswith natively accepts a tuple of candidate prefixes,
    # replacing the manual loop.
    return s.startswith(tuple(prefixes))
def original_url(html):
    """Extract the source URL recorded by HTTrack in a mirrored page.

    Returns "unknown_url" when the HTTrack marker comment is absent.
    """
    match = re.search(
        r"<!-- Mirrored from ([^>]+) by HTTrack Website Copier", html)
    if match:
        return match.group(1)
    return "unknown_url"
def clean_whitespace(s):
    """Drop blank lines, trim each line, and collapse runs of whitespace
    within a line to single spaces."""
    cleaned = []
    for raw_line in s.split("\n"):
        stripped = raw_line.strip()
        if stripped:
            cleaned.append(re.sub("\s+", " ", stripped))
    return "\n".join(cleaned)
def read_file(filename):
    """Read *filename* and return its contents as unicode.

    Tries UTF-8 first, then the encoding guessed by chardet, and finally
    falls back to UTF-8 with errors ignored.  (Python 2 str/unicode code.)
    """
    # sys.stderr.write("reading: %s\n" % filename)
    f = open(filename, 'r')
    html = f.read()
    try:
        html = html.decode("utf-8")
    except:
        encoding = chardet.detect(html)
        try:
            html = html.decode(encoding["encoding"])
        except:
            sys.stderr.write(
                "Fallback: ignoring errors for file%s\n" % filename)
            return html.decode("utf-8", errors='ignore')
    return html
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('outfile', type=argparse.FileType('w'),
                        help='output file')
    parser.add_argument('-prefix', help='prefix added to make filenames',
                        default="/fs/syn0/pkoehn/crawl/data/site-crawls")
    parser.add_argument('-slang', help='source language', default='en')
    parser.add_argument('-tlang', help='target language', default='fr')
    args = parser.parse_args(sys.argv[1:])

    correct = 0
    stripper = LanguageStripper()
    # stdin: one "domain<TAB>file_a<TAB>file_b" triple per line; the two
    # files are known translations of each other (the test corpus).
    for line in sys.stdin:
        was_stripped = 0
        domain, a, b = line.strip().split("\t")
        urls = defaultdict(list)
        for s in (a, b):
            filename = os.path.join(args.prefix, domain, s)
            html = read_file(filename)
            url = original_url(html)
            url = "http://" + url
            # print url
            parsed_url = urlparse.urlparse(url)
            # Strip language markers from path and query so the two
            # language versions collapse onto the same key when they
            # differ only by language.
            stripped_path = stripper.strip(parsed_url.path).replace("//", "/")
            stripped_query = stripper.strip(
                parsed_url.query).replace("//", "/")
            stripped_url = urlparse.ParseResult(parsed_url.scheme,
                                                parsed_url.netloc,
                                                stripped_path,
                                                parsed_url.params,
                                                stripped_query,
                                                parsed_url.fragment).geturl()
            urls[stripped_url].append(url)

            if stripped_url != url:
                was_stripped += 1

        # Both documents mapped to a single stripped URL -> URL matching
        # would have paired them correctly.
        if len(urls) == 1:
            correct += 1
        print len(urls), was_stripped, correct, urls.items()

    print "correct: ", correct
| apache-2.0 | Python | |
d1ffd984bae034076244ac4449632a1aa04d5ffe | Refactor to Linter v2 API | yashtrivedi96/coala-bears,horczech/coala-bears,Vamshi99/coala-bears,coala-analyzer/coala-bears,seblat/coala-bears,yash-nisar/coala-bears,sounak98/coala-bears,ku3o/coala-bears,shreyans800755/coala-bears,yash-nisar/coala-bears,seblat/coala-bears,meetmangukiya/coala-bears,mr-karan/coala-bears,damngamerz/coala-bears,shreyans800755/coala-bears,ankit01ojha/coala-bears,dosarudaniel/coala-bears,shreyans800755/coala-bears,kaustubhhiware/coala-bears,horczech/coala-bears,dosarudaniel/coala-bears,aptrishu/coala-bears,Shade5/coala-bears,madhukar01/coala-bears,kaustubhhiware/coala-bears,damngamerz/coala-bears,damngamerz/coala-bears,gs0510/coala-bears,shreyans800755/coala-bears,Vamshi99/coala-bears,Shade5/coala-bears,naveentata/coala-bears,yash-nisar/coala-bears,aptrishu/coala-bears,ankit01ojha/coala-bears,arjunsinghy96/coala-bears,yash-nisar/coala-bears,incorrectusername/coala-bears,coala-analyzer/coala-bears,arjunsinghy96/coala-bears,gs0510/coala-bears,yashtrivedi96/coala-bears,madhukar01/coala-bears,coala/coala-bears,aptrishu/coala-bears,coala/coala-bears,ankit01ojha/coala-bears,Vamshi99/coala-bears,naveentata/coala-bears,Vamshi99/coala-bears,Asnelchristian/coala-bears,gs0510/coala-bears,madhukar01/coala-bears,coala-analyzer/coala-bears,yashtrivedi96/coala-bears,dosarudaniel/coala-bears,vijeth-aradhya/coala-bears,kaustubhhiware/coala-bears,Shade5/coala-bears,madhukar01/coala-bears,mr-karan/coala-bears,Asnelchristian/coala-bears,ku3o/coala-bears,chriscoyfish/coala-bears,arjunsinghy96/coala-bears,ku3o/coala-bears,srisankethu/coala-bears,LWJensen/coala-bears,ankit01ojha/coala-bears,srisankethu/coala-bears,incorrectusername/coala-bears,vijeth-aradhya/coala-bears,arjunsinghy96/coala-bears,horczech/coala-bears,mr-karan/coala-bears,vijeth-aradhya/coala-bears,SanketDG/coala-bears,ku3o/coala-bears,kaustubhhiware/coala-bears,SanketDG/coala-bears,madhukar01/coala-bears,LWJensen/coala-bears,Asnelchristian/coala-bears,ho
rczech/coala-bears,damngamerz/coala-bears,horczech/coala-bears,seblat/coala-bears,chriscoyfish/coala-bears,coala/coala-bears,meetmangukiya/coala-bears,arjunsinghy96/coala-bears,yashtrivedi96/coala-bears,aptrishu/coala-bears,coala/coala-bears,shreyans800755/coala-bears,shreyans800755/coala-bears,Vamshi99/coala-bears,gs0510/coala-bears,ankit01ojha/coala-bears,kaustubhhiware/coala-bears,incorrectusername/coala-bears,horczech/coala-bears,mr-karan/coala-bears,SanketDG/coala-bears,kaustubhhiware/coala-bears,mr-karan/coala-bears,ankit01ojha/coala-bears,incorrectusername/coala-bears,madhukar01/coala-bears,sounak98/coala-bears,refeed/coala-bears,meetmangukiya/coala-bears,ku3o/coala-bears,Asnelchristian/coala-bears,sounak98/coala-bears,srisankethu/coala-bears,SanketDG/coala-bears,aptrishu/coala-bears,ankit01ojha/coala-bears,naveentata/coala-bears,meetmangukiya/coala-bears,horczech/coala-bears,refeed/coala-bears,refeed/coala-bears,naveentata/coala-bears,yashtrivedi96/coala-bears,coala-analyzer/coala-bears,SanketDG/coala-bears,meetmangukiya/coala-bears,yash-nisar/coala-bears,incorrectusername/coala-bears,chriscoyfish/coala-bears,LWJensen/coala-bears,seblat/coala-bears,LWJensen/coala-bears,naveentata/coala-bears,vijeth-aradhya/coala-bears,Vamshi99/coala-bears,coala/coala-bears,incorrectusername/coala-bears,srisankethu/coala-bears,refeed/coala-bears,chriscoyfish/coala-bears,aptrishu/coala-bears,madhukar01/coala-bears,Shade5/coala-bears,LWJensen/coala-bears,yash-nisar/coala-bears,vijeth-aradhya/coala-bears,Asnelchristian/coala-bears,damngamerz/coala-bears,coala/coala-bears,arjunsinghy96/coala-bears,ku3o/coala-bears,LWJensen/coala-bears,madhukar01/coala-bears,Shade5/coala-bears,SanketDG/coala-bears,shreyans800755/coala-bears,seblat/coala-bears,sounak98/coala-bears,kaustubhhiware/coala-bears,Vamshi99/coala-bears,yash-nisar/coala-bears,meetmangukiya/coala-bears,yash-nisar/coala-bears,gs0510/coala-bears,yash-nisar/coala-bears,mr-karan/coala-bears,ankit01ojha/coala-bears,arjunsinghy96/
coala-bears,shreyans800755/coala-bears,vijeth-aradhya/coala-bears,yashtrivedi96/coala-bears,incorrectusername/coala-bears,LWJensen/coala-bears,damngamerz/coala-bears,ankit01ojha/coala-bears,dosarudaniel/coala-bears,gs0510/coala-bears,vijeth-aradhya/coala-bears,arjunsinghy96/coala-bears,refeed/coala-bears,Shade5/coala-bears,yashtrivedi96/coala-bears,Vamshi99/coala-bears,naveentata/coala-bears,mr-karan/coala-bears,meetmangukiya/coala-bears,shreyans800755/coala-bears,sounak98/coala-bears,coala/coala-bears,dosarudaniel/coala-bears,yashtrivedi96/coala-bears,dosarudaniel/coala-bears,Shade5/coala-bears,horczech/coala-bears,chriscoyfish/coala-bears,sounak98/coala-bears,incorrectusername/coala-bears,srisankethu/coala-bears,SanketDG/coala-bears,gs0510/coala-bears,Asnelchristian/coala-bears,Vamshi99/coala-bears,refeed/coala-bears,coala/coala-bears,damngamerz/coala-bears,srisankethu/coala-bears,dosarudaniel/coala-bears,srisankethu/coala-bears,gs0510/coala-bears,Vamshi99/coala-bears,Shade5/coala-bears,dosarudaniel/coala-bears,coala-analyzer/coala-bears,yash-nisar/coala-bears,sounak98/coala-bears,Vamshi99/coala-bears,coala-analyzer/coala-bears,horczech/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,refeed/coala-bears,vijeth-aradhya/coala-bears,naveentata/coala-bears,aptrishu/coala-bears,naveentata/coala-bears,chriscoyfish/coala-bears,LWJensen/coala-bears,ku3o/coala-bears,refeed/coala-bears,srisankethu/coala-bears,sounak98/coala-bears,meetmangukiya/coala-bears,ku3o/coala-bears,srisankethu/coala-bears,coala-analyzer/coala-bears,chriscoyfish/coala-bears,srisankethu/coala-bears,damngamerz/coala-bears,arjunsinghy96/coala-bears,incorrectusername/coala-bears,gs0510/coala-bears,coala/coala-bears,refeed/coala-bears,madhukar01/coala-bears,coala/coala-bears,kaustubhhiware/coala-bears,srisankethu/coala-bears,SanketDG/coala-bears,mr-karan/coala-bears,seblat/coala-bears,ankit01ojha/coala-bears,horczech/coala-bears,refeed/coala-bears,Asnelchristian/coala-bears,coala/coala-bears,shreya
ns800755/coala-bears,aptrishu/coala-bears,damngamerz/coala-bears,naveentata/coala-bears,SanketDG/coala-bears,dosarudaniel/coala-bears,Shade5/coala-bears,Asnelchristian/coala-bears,kaustubhhiware/coala-bears,aptrishu/coala-bears,ku3o/coala-bears,aptrishu/coala-bears,vijeth-aradhya/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,refeed/coala-bears,chriscoyfish/coala-bears,meetmangukiya/coala-bears,damngamerz/coala-bears,LWJensen/coala-bears,sounak98/coala-bears,horczech/coala-bears,seblat/coala-bears,shreyans800755/coala-bears,damngamerz/coala-bears,yashtrivedi96/coala-bears,ankit01ojha/coala-bears,yash-nisar/coala-bears,Asnelchristian/coala-bears,seblat/coala-bears | bears/php/PHPLintBear.py | bears/php/PHPLintBear.py | from coalib.bearlib.abstractions.Linter import linter
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
@linter(executable='php',
        output_format='regex',
        # Matches e.g. "Parse error: syntax error, unexpected '}' in - on line 3"
        output_regex=r'(?P<severity>Parse|Fatal) error: (?P<message>.*) in '
                     r'.* on line (?P<line>\d+)',
        # Both parse and fatal errors are surfaced as MAJOR results.
        severity_map={'Parse': RESULT_SEVERITY.MAJOR,
                      'Fatal': RESULT_SEVERITY.MAJOR})
class PHPLintBear:
    """
    Checks the code with ``php -l``. This runs it on each file separately.
    """
    LANGUAGES = "PHP"
    @staticmethod
    def create_arguments(filename, file, config_file):
        # -l: lint only (no execution); -n: ignore php.ini.  The two -d
        # overrides make PHP display errors (where the regex can read them)
        # instead of writing them to the error log.
        return ('-l', '-n', '-d', 'display_errors=On', '-d', 'log_errors=Off',
                filename)
| import re
from coalib.bearlib.abstractions.Lint import Lint
from coalib.bears.LocalBear import LocalBear
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class PHPLintBear(LocalBear, Lint):
    # Legacy configuration for the pre-Linter-v2 ``Lint`` mixin.
    executable = 'php'
    # NOTE(review): '(unknown)' looks like a mangled placeholder where the
    # Lint mixin normally substitutes the file path -- verify against the
    # Lint base class before relying on this string.
    arguments = '-l -n -d display_errors=On -d log_errors=Off (unknown)'
    output_regex = re.compile(
        r'(?P<severity>\S+) error: '
        r'(?P<message>.*) in (?P<file_name>.*) on line (?P<line>\d+)')
    # Both PHP parse and fatal errors map to MAJOR results.
    severity_map = {
        "Parse": RESULT_SEVERITY.MAJOR,
        "Fatal": RESULT_SEVERITY.MAJOR}
    LANGUAGES = "PHP"
    def run(self, filename, file):
        '''
        Checks the code with `php -l`. This runs it on each file separately.
        '''
        return self.lint(filename)
| agpl-3.0 | Python |
34560978ee8f33ab8ddc60a1a3525979119a952e | Add run script | thomasgibson/firedrake-hybridization | profile_compressible_solver/run_profiler.py | profile_compressible_solver/run_profiler.py | from firedrake.petsc import PETSc
from argparse import ArgumentParser
from driver import run_profliler
import sys
# Start PETSc logging before any solver work so all stages are captured.
PETSc.Log.begin()
# Custom -h handling below, hence add_help=False (PETSc also consumes -h).
parser = ArgumentParser(description=("""
Profile of 3D compressible solver for the Euler equations (dry atmosphere).
"""), add_help=False)
parser.add_argument("--hybridization",
                    action="store_true",
                    help="Use a hybridized compressible solver.")
parser.add_argument("--model_degree",
                    default=1,
                    type=int,
                    action="store",
                    help="Model degree")
parser.add_argument("--model_family",
                    default="RTCF",
                    choices=["RTCF"],
                    help="Family of finite element spaces")
parser.add_argument("--mesh_degree",
                    default=3,
                    type=int,
                    action="store",
                    help="Coordinate space degree")
# If --dt is left at 0.0 the driver derives dt from the CFL number.
parser.add_argument("--cfl",
                    default=1.,
                    type=float,
                    action="store",
                    help="CFL number to run at (determines dt).")
parser.add_argument("--dt",
                    default=0.0,
                    type=float,
                    action="store",
                    help="Manually set dt")
parser.add_argument("--refinements",
                    default=4,
                    type=int,
                    action="store",
                    help="Resolution scaling parameter.")
parser.add_argument("--richardson_scale",
                    default=1.0,
                    type=float,
                    action="store",
                    help="Set the Richardson parameter for the trace system.")
parser.add_argument("--flexsolver",
                    action="store_true",
                    help="Switch to flex-GMRES and AMG.")
parser.add_argument("--gmres_ilu_only",
                    action="store_true",
                    help="Switch to only gmres+bilu on traces")
parser.add_argument("--layers",
                    default=16,
                    type=int,
                    action="store",
                    help="Number of vertical layers.")
parser.add_argument("--debug",
                    action="store_true",
                    help="Turn on KSP monitors")
parser.add_argument("--rtol",
                    default=1.0e-6,
                    type=float,
                    help="Rtolerance for the linear solve.")
parser.add_argument("--help",
                    action="store_true",
                    help="Show help.")
# parse_known_args: unrecognised options are passed through to PETSc.
args, _ = parser.parse_known_args()
if args.help:
    help = parser.format_help()
    PETSc.Sys.Print("%s\n" % help)
    sys.exit(1)
run_profliler(args, suppress_data_output=True)
| mit | Python | |
ee2a4c1edb6d2f1273bb08080e8fc00b0a0e9074 | add pack1/mymodule1.py | weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016 | python/18-package/parent/pack1/mymodule1.py | python/18-package/parent/pack1/mymodule1.py | #!/usr/bin/env python
#-*- coding=utf-8 -*-
def function1():
    """Demo function; announces that it is running."""
    # print() with a single argument behaves identically under Python 2
    # (parenthesised expression) and Python 3; the original bare
    # ``print "..."`` statement is a SyntaxError on Python 3.
    print("function1 running")
if __name__ == "__main__":
    # Executed only when run directly: ``python mymodule1.py``.
    # print() with one argument works on both Python 2 and 3; the original
    # print statements were Python-2-only.
    print("mymodule1 running as main program")
else:
    # Executed on import, e.g. ``import pack1.mymodule1``.
    print("mymodule1 initializing")
| bsd-2-clause | Python | |
a29e340efa60ecb05d85e9c6d87ec709ba26822f | Add new extractor(closes #14361) | rg3/youtube-dl,ozburo/youtube-dl,rg3/youtube-dl,Tatsh/youtube-dl,ozburo/youtube-dl,Tatsh/youtube-dl | youtube_dl/extractor/bibeltv.py | youtube_dl/extractor/bibeltv.py | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class BibelTVIE(InfoExtractor):
    # Matches both pretty URLs (/videos/<id>-<slug>) and /videos/crn/<id>;
    # the numeric CRN is the only capture that matters.
    _VALID_URL = r'https?://(?:www\.)?bibeltv\.de/mediathek/videos/(?:crn/)?(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.bibeltv.de/mediathek/videos/329703-sprachkurs-in-malaiisch',
        'md5': '252f908192d611de038b8504b08bf97f',
        'info_dict': {
            'id': 'ref:329703',
            'ext': 'mp4',
            'title': 'Sprachkurs in Malaiisch',
            'description': 'md5:3e9f197d29ee164714e67351cf737dfe',
            'timestamp': 1608316701,
            'uploader_id': '5840105145001',
            'upload_date': '20201218',
        }
    }, {
        'url': 'https://www.bibeltv.de/mediathek/videos/crn/326374',
        'only_matching': True,
    }]
    # Brightcove player URL; the CRN is used as a Brightcove reference id
    # ("ref:<crn>"), matching the 'ref:...' ids in _TESTS above.
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5840105145001/default_default/index.html?videoId=ref:%s'
    def _real_extract(self, url):
        # Extraction is fully delegated to the BrightcoveNew extractor.
        crn_id = self._match_id(url)
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % crn_id, 'BrightcoveNew')
| unlicense | Python | |
76cce82d65868619b096d74a5adb3a616cfe771d | Create new package. (#5810) | mfherbst/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,matthiasdiener/spack,matthiasdiener/spack,matthiasdiener/spack,matthiasdiener/spack,krafczyk/spack,krafczyk/spack,skosukhin/spack,iulian787/spack,tmerrick1/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,lgarren/spack,lgarren/spack,LLNL/spack,lgarren/spack,EmreAtes/spack,EmreAtes/spack,LLNL/spack,tmerrick1/spack,lgarren/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,iulian787/spack,skosukhin/spack,skosukhin/spack,skosukhin/spack,krafczyk/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,LLNL/spack,iulian787/spack,LLNL/spack,mfherbst/spack,lgarren/spack,LLNL/spack | var/spack/repos/builtin/packages/r-affyilm/package.py | var/spack/repos/builtin/packages/r-affyilm/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyilm(RPackage):
    """affyILM is a preprocessing tool which estimates gene
    expression levels for Affymetrix Gene Chips. Input from
    physical chemistry is employed to first background subtract
    intensities before calculating concentrations on behalf
    of the Langmuir model."""
    homepage = "https://www.bioconductor.org/packages/affyILM/"
    # Bioconductor packages are fetched from the upstream git mirror.
    url = "https://git.bioconductor.org/packages/affyILM"
    # Commit corresponding to the 1.28.0 release.
    version('1.28.0', git='https://git.bioconductor.org/packages/affyILM', commit='307bee3ebc599e0ea4a1d6fa8d5511ccf8bef7de')
    # This release is constrained to the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.28.0')
    depends_on('r-gcrma', type=('build', 'run'))
    depends_on('r-affxparser', type=('build', 'run'))
    depends_on('r-affy', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
| lgpl-2.1 | Python | |
d7017acef8ed540bb2f3c00d268cd417d75f09e3 | add import script for Fareham (closes #858) | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_fareham.py | polling_stations/apps/data_collection/management/commands/import_fareham.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    # ONS/GSS identifier for Fareham (per commit message).
    council_id = 'E07000087'
    # A single Xpress export file supplies both addresses and stations.
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (1).tsv'
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (1).tsv'
    elections = ['parl.2017-06-08']
    # The export is tab-separated despite the importer's CSV default.
    csv_delimiter = '\t'
| bsd-3-clause | Python | |
675b87d5bc072d5b6fbd1f9a54ec61d98b1139ac | Add lab2 file. | greggy/pylessons | lab2.py | lab2.py | # -*- coding: utf-8 -*-
from math import pow
def mymap1(fun, l):
    """Return [fun(x) for x in l] -- a minimal re-implementation of map().

    Rewritten from a manual append loop to a list comprehension (idiomatic
    and marginally faster); behaviour is unchanged.
    """
    return [fun(item) for item in l]
#print(mymap1(str, [3, 1, 7, 4, 6, 9]))
def mymap2(fun, *l):
    """Apply ``fun`` across corresponding elements of several sequences.

    Each zipped argument tuple is echoed to stdout (debug aid) before
    ``fun`` is applied; iteration stops with the shortest input.
    """
    results = []
    for combined in zip(*l):
        print(combined)
        results.append(fun(*combined))
    return results
#print(mymap2(lambda a, b, c: a+b+c, [3, 1, 7, 4, 6], [6, 3, 8, 5, 9], [2, 5, 2, 4, 5]))
def mymap3(fun, *l):
    """Lazy variant of mymap2: yield ``fun(*args)`` for each zipped tuple.

    Each argument tuple is printed (debug aid) as the generator is consumed.
    """
    for combined in zip(*l):
        print(combined)
        yield fun(*combined)
# mymap3 is lazy: this first print shows only the generator object's repr;
# nothing is computed (or printed by mymap3) until it is consumed.
print(mymap3(pow, [3, 1, 7, 4, 6, 9], [1, 5, 7, 3, 2, 1]))
# list() drives the generator: each pairing is echoed, then the results are
# printed.  Note ``pow`` is math.pow (imported above), so results are floats.
print(list(mymap3(pow, [3, 1, 7, 4, 6, 9], [1, 5, 7, 3, 2, 1])))
| lgpl-2.1 | Python | |
bea495bb58146fc3795d2217ef3b27ce0325014b | create the Spider for Israel of McDonalds | iandees/all-the-places,iandees/all-the-places,iandees/all-the-places | locations/spiders/mcdonalds_il.py | locations/spiders/mcdonalds_il.py | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class McDonalsILSpider(scrapy.Spider):
    """Spider for McDonald's Israel store locations (www.mcdonalds.co.il)."""
    name = "mcdonalds_il"
    allowed_domains = ["www.mcdonalds.co.il"]
    # Store-locator landing page (URL-encoded Hebrew path).
    start_urls = (
        'https://www.mcdonalds.co.il/%D7%90%D7%99%D7%AA%D7%95%D7%A8_%D7%9E%D7%A1%D7%A2%D7%93%D7%94',
    )
    def store_hours(self, data):
        """Collapse per-day hours into an opening_hours string.

        ``data`` items carry a 1-based ``idx`` (1 == Sunday, the first day
        of the Israeli week -- TODO confirm against the site payload) and a
        ``value`` like ``"08:00 - 23:00"``.  Runs of consecutive days with
        identical hours are grouped, e.g. ``"Su-Th 08:00-23:00"``.
        """
        day_groups = []
        this_day_group = {}
        # BUG FIX: 'Tu' and 'Th' were swapped in the original list
        # (['Su', 'Mo', 'Th', 'We', 'Tu', 'Fr', 'Sa']), mislabelling
        # Tuesday and Thursday in the emitted opening hours.
        weekdays = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
        for day_hour in data:
            if day_hour['idx'] > 7:
                continue
            # Split "HH:MM - HH:MM" once instead of twice.
            parts = day_hour['value'].split("-")
            start, end = parts[0].strip(), parts[1].strip()
            short_day = weekdays[day_hour['idx'] - 1]
            hours = '{}:{}-{}:{}'.format(start[:2], start[3:], end[:2], end[3:])
            if not this_day_group:
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            elif hours == this_day_group['hours']:
                this_day_group['to_day'] = short_day
            else:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
        # The trailing group is flushed unconditionally; for empty ``data``
        # this appends {} and the emptiness check below returns None.
        day_groups.append(this_day_group)
        if not day_groups:
            return None
        opening_hours = ''
        if len(day_groups) == 1 and not day_groups[0]:
            return None
        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            opening_hours = '24/7'
        else:
            for day_group in day_groups:
                if day_group['from_day'] == day_group['to_day']:
                    opening_hours += '{from_day} {hours}; '.format(**day_group)
                else:
                    opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
            # Drop the trailing "; " separator.
            opening_hours = opening_hours[:-2]
        return opening_hours
    def parse_Ref(self, data):
        """Extract the numeric store id from a store URL."""
        match = re.search(r'store_id=(.*\d)', data)
        return match.groups()[0]
    def parse_name(self, name):
        """Store name is the page's <h1> text."""
        name = name.xpath('//h1/text()').extract_first()
        return name.strip()
    def parse_latlon(self, data):
        """Coordinates are carried as data attributes on the #map element."""
        data = data.xpath('//div[@id="map"]')
        lat = data.xpath('//@data-lat').extract_first()
        lon = data.xpath('//@data-lng').extract_first()
        return lat, lon
    def parse_phone(self, phone):
        """Return the store phone number, or "" when none is listed."""
        phone = phone.xpath('//div[@class="padding_hf_v sp_padding_qt_v"]/a/text()').extract_first()
        if not phone:
            return ""
        return phone.strip()
    def parse_address(self, address):
        """Street address from the page's <h2><strong> element."""
        address = address.xpath('//h2/strong/text()').extract_first()
        return address.strip()
    def parse_store(self, response):
        """Yield a GeojsonPointItem for a single store page."""
        name = self.parse_name(response)
        address = self.parse_address(response)
        phone = self.parse_phone(response)
        lat, lon = self.parse_latlon(response)
        properties = {
            'ref': response.meta['ref'],
            'phone': phone,
            'lon': lon,
            'lat': lat,
            'name': name,
            'addr_full': address
        }
        yield GeojsonPointItem(**properties)
    def parse(self, response):
        """Follow every store link found on the locator page."""
        stores = response.xpath('//div[@class="store_wrap link"]/a/@href').extract()
        for store in stores:
            ref = self.parse_Ref(store)
            # Hrefs are protocol-relative ("//www..."), hence the prefix.
            yield scrapy.Request('https:' + store, meta={'ref': ref}, callback=self.parse_store)
| mit | Python | |
252925fa998412ac868eb63790fbd515c429ac67 | add main entry point (untested, but should be complete now) | paultopia/minitrue | main.py | main.py | """
Core namespace. Handles:
1. Call out to hashio to check hashes, save log, and return results
2. Load tweetlog and tweet creds
3. Generate and log tweets for changed files
4. Generate and log tweets for new files
5. Save tweetlog
"""
import hash, hashio, twitter, json
from copy import deepcopy
def load_tweetlog():
    """Return the saved tweet log, or an empty list if none exists yet."""
    try:
        with open("tweetlog.json", 'r') as log_file:
            return json.load(log_file)
    except FileNotFoundError:
        return []
def load_tweetcreds():
    """Load Twitter credentials from ``twittercreds.json``.

    Deliberately raises if the file is missing -- there is no tweetless
    fallback mode yet (a future version might dump hashes to disk instead).
    """
    with open("twittercreds.json") as creds_file:
        return json.load(creds_file)
## NEED TO CREATE TARGET ID 'uuid' IN ADDITIONS
## just use (str(uuid.uuid4())) which is 36 chars.
## put creation into hashio
## and remove length limitation from name since it shouldn't
## get tweeted.
## uuid = 36 + hash = 64 = 100, leaving 40 char for words etc.
def tweet_new_targets(newlist, tweetfn, tweetlog):
    """Tweet a "Watching" notice per new target; return an extended log copy.

    The original ``tweetlog`` is not mutated (deep copy); each tweetfn
    response is tagged with the target's uuid and hash before logging.
    """
    updated_log = deepcopy(tweetlog)
    for target in newlist:
        status = tweetfn("Watching: " + target["uuid"] + " hash: " + target["hash"] + ".")
        status["uuid"] = target["uuid"]
        status["hash"] = target["hash"]  # just to facilitate searching
        updated_log.append(status)
    return updated_log
def tweet_changed_targets(changed, tweetfn, tweetlog):
    """Tweet a "CHANGED!" notice per changed target; return an extended log copy.

    Mirrors tweet_new_targets: the input log is deep-copied, and each
    tweetfn response is tagged with the target's uuid and hash.

    BUG FIX: the loop variable is ``c`` but the body referenced ``n``,
    which raised NameError (or silently reused a stale binding).
    """
    log = deepcopy(tweetlog)
    for c in changed:
        tweet = "CHANGED! " + c["uuid"] + " new hash: " + c["hash"] + "."
        response = tweetfn(tweet)
        response["uuid"] = c["uuid"]
        response["hash"] = c["hash"]  # just to facilitate searching
        log.append(response)
    return log
if __name__ == "__main__":
    # Re-hash all targets and diff against the stored state (see module
    # docstring); ``checked`` carries "additions" and "changes" lists.
    checked = hashio.check_from_file("targets.json")
    tweetlog = load_tweetlog()
    creds = load_tweetcreds()
    post_tweet = twitter.twitter_poster(creds)
    # Announce brand-new targets first, then any changed hashes.
    tweetlog = tweet_new_targets(checked["additions"], post_tweet, tweetlog)
    tweetlog = tweet_changed_targets(checked["changes"], post_tweet, tweetlog)
    # Persist the accumulated tweet log for the next run.
    with open('tweetlog.json', "w") as tl:
        json.dump(tweetlog, tl, sort_keys = True, indent = 4)
21028c13585fbcd5315efd74ab55f5d03d69c500 | add probe nsrl | hirokihamasaki/irma,quarkslab/irma,hirokihamasaki/irma,quarkslab/irma,quarkslab/irma,hirokihamasaki/irma,hirokihamasaki/irma,quarkslab/irma,hirokihamasaki/irma | nsrl.py | nsrl.py | import hashlib
from pymongo import MongoClient
from lib.irma.common.exceptions import IrmaDatabaseError
class NsrlInfo(object):
    """Lazy-connecting lookup helper for the NSRL hashset in MongoDB."""
    _uri = "mongodb://localhost:27017/"
    _dbname = "nsrl"
    _collection = "hashset"
    def __init__(self):
        # The collection handle is created lazily on first lookup.
        self._dbh = None
    def _connect(self):
        """Open the MongoDB collection handle if not already connected."""
        try:
            # BUG FIX: was ``if not self._dbh`` -- PyMongo collection objects
            # do not support truth-value testing, so that check raised on
            # every call after the first.  Compare against None explicitly.
            if self._dbh is None:
                print("DEBUG: mongo connection")
                client = MongoClient(self._uri)
                dbh = client[self._dbname]
                self._dbh = dbh[self._collection]
        except Exception as e:
            raise IrmaDatabaseError("{0}".format(e))
    def get_info(self, sha1):
        """Return the NSRL record for an upper-case SHA-1, or 'Not found'."""
        try:
            self._connect()
            # BUG FIX: was ``self._collection.find_one`` -- ``_collection``
            # is the collection *name* (a str); the handle lives in _dbh.
            res = self._dbh.find_one({'SHA-1': sha1}, {'_id': False})
            if not res:
                return 'Not found'
            return res
        except Exception as e:
            raise IrmaDatabaseError("{0}".format(e))
nsrlinfo = NsrlInfo()
def scan(sfile):
    """Look up the file's (upper-cased) SHA-1 digest in the NSRL hashset."""
    digest = hashlib.sha1(sfile.data).hexdigest()
    return {'result': nsrlinfo.get_info(digest.upper())}
| apache-2.0 | Python | |
f151d1cc5ddb3b60c6410153e147ccd5c0378904 | Add Sequence object | totallyhuman/py-oeis | oeis.py | oeis.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
py-oeis
A Python library to access the OEIS.
Sumant Bhaskaruni
v0.1
"""
import requests
class Sequence(object):
    """An object to represent a single OEIS sequence.

    Initializer arguments:
        seq_id (int): the numeric OEIS sequence ID (e.g. 45 for A000045)
    """
    def __init__(self, seq_id):
        """See class docstring for details.  Performs network requests."""
        self.seq_id = seq_id
        # OEIS A-numbers and b-file names are zero-padded to six digits
        # (A000045/b000045.txt); the unpadded form 404s for short IDs.
        self.val_url = 'https://oeis.org/A{0:06d}/b{0:06d}.txt'.format(seq_id)
        self.info = requests.get(
            'https://oeis.org/search?fmt=json&q=id:A{0:06d}'.format(
                seq_id)).json()['results'][0]
        self.name = self.info['name']
        # 'author' and 'created' are not present for every sequence in the
        # search results; default to None rather than raising KeyError.
        self.author = self.info.get('author')
        self.created = self.info.get('created')
| mit | Python | |
eb56d833efad16e9a84724d18121528177f37adb | add 41 | ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler | p041.py | p041.py | import utils
primes = utils.primes(7654321)
def p(n):
    """Return True if n's digits are exactly {1..len(n)} (1-to-n pandigital)."""
    digits = str(n)
    if len(digits) > 10:
        return False
    return set(map(int, digits)) == set(range(1, len(digits) + 1))
# Scan from the largest prime down (primes presumably arrive in ascending
# order); the first pandigital hit is therefore the maximum -- the answer.
primes.reverse()
for prime in primes:
    if p(prime):
        print prime
        break
| bsd-3-clause | Python | |
025103ad59d389981532dbb42332dd2a26e475c5 | add torperf2.py script to measure hidden service performance | aaronsw/torperf,aaronsw/torperf,aaronsw/torperf | torperf2.py | torperf2.py | import socket, sys, time, subprocess, threading, signal
import TorCtl.TorCtl
debug = sys.stderr
HOST = '127.0.0.1'
# SOCKS port; the control port is PORT+1 (see the TORRC template below).
PORT = 10951
# Cross-thread state: the tor child process handle, plus a lock used as a
# "bootstrapped" gate (held by start_tor until GOT_TOR is observed).
shared = dict(
    torprocess = None,
    torlock = threading.Lock()
)
# Minimal torrc template, %-filled with (HOST, PORT, PORT+1) below.
TORRC = """\
SocksListenAddress %s
SocksPort %d
ControlPort %d
CookieAuthentication 1
RunAsDaemon 0
Log info file logfile
DataDir .tor
""" % (HOST, PORT, PORT+1)
def start_tor():
    """Write the torrc and launch a tor child process.

    Leaves shared['torlock'] held; msg_event releases it once tor logs
    that bootstrapping finished (GOT_TOR).

    Fixes: removed the dead ``global TORPROCESS`` statement (TORPROCESS is
    never defined or read anywhere; the handle lives in shared); closed the
    torrc file handle, which ``file(...).write(...)`` leaked.
    """
    torrc_file = open('torrc', 'w')
    try:
        torrc_file.write(TORRC)
    finally:
        torrc_file.close()
    shared['torprocess'] = subprocess.Popen(['tor', '-f', 'torrc'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    shared['torlock'].acquire()
def end_tor(signum=None, frame=None):
    # Signal-handler-compatible signature; signum/frame are ignored so the
    # same function can also be called directly at shutdown.
    shared['torprocess'].kill()
signal.signal(signal.SIGTERM, end_tor)
# Substrings of tor log lines that mark the milestones of a hidden-service
# fetch; keys become the event names printed by EventHandler.log().
triggers = dict(
    GOT_TOR = 'Bootstrapped 100%: Done.',
    GOT_REQUEST = 'Got a hidden service request for ID',
    START_FETCH = 'Sending fetch request for v2 descriptor',
    END_FETCH = 'Successfully fetched v2 rendezvous descriptor.',
    START_RENDCIRC = 'Sending an ESTABLISH_RENDEZVOUS cell',
    GOT_RENDCIRC = 'Got rendezvous ack. This circuit is now ready for rendezvous.',
    GOT_INTROCIRC = 'introcirc is open',
    # SEND_INTRODUCE1
    GOT_RENDEZVOUS2 = 'Got RENDEZVOUS2 cell from hidden service'
)
class EventHandler(TorCtl.TorCtl.DebugEventHandler):
    """Control-port event handler that timestamps milestone log lines."""
    def __init__(self, host, port):
        self.host = host
        self.port = port
        # Timestamp of the previous milestone, for delta reporting in log().
        self.last_event = None
        TorCtl.TorCtl.DebugEventHandler.__init__(self)
    def log(self, event):
        # Prints "<unix-time> <event> (<delta> seconds)"; the trailing comma
        # keeps the cursor on the same line for the delta (Python 2 print).
        now = time.time()
        print now, event,
        if self.last_event:
            print '(%.2f seconds)' % (now-self.last_event)
        else:
            print
        self.last_event = now
    def msg_event(self, log_event):
        # Map an incoming tor log message to at most one milestone.
        for k in triggers:
            if triggers[k] in log_event.msg:
                self.log(k)
                # Bootstrapping finished: unblock main() (see start_tor).
                if k == 'GOT_TOR': shared['torlock'].release()
                break
def grab_page(h):
    """Fetch the hidden-service test page via the SOCKS proxy, timing it."""
    h.log('START_REQUEST')
    # h.port is the control port, so h.port-1 is the SOCKS port.
    p = subprocess.Popen(['curl', '-sN', '--socks4a', h.host + ':%d' % (h.port-1),
        'http://duskgytldkxiuqc6.onion/'], bufsize=0, stdout=subprocess.PIPE)
    # Unbuffered 1-byte reads so GOT_FIRST_BYTE is logged with minimal skew.
    b = ''
    while not b: b = p.stdout.read(1)
    h.log('GOT_FIRST_BYTE')
    # Drain the rest of the body in large reads; read() returns '' at EOF.
    while b: b = p.stdout.read()
    h.log('GOT_LAST_BYTE')
    h.log('END_REQUEST')
def main(host, port):
    """Boot tor, subscribe to its log events, then time one page fetch.

    ``port`` is the SOCKS port; the control port is port+1.
    """
    handler = EventHandler(host, port+1)
    handler.log('START_TOR')
    start_tor()
    # Give the control port a moment to come up before connecting.
    time.sleep(2)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port+1))
    c = TorCtl.TorCtl.Connection(s)
    c.set_event_handler(handler)
    c.authenticate()
    EVT = TorCtl.TorCtl.EVENT_TYPE
    # INFO-level events carry the milestone strings in ``triggers``.
    c.set_events([EVT.INFO, EVT.NOTICE])
    # start_tor() left the lock held; this blocks until msg_event() sees
    # GOT_TOR and releases it, i.e. until tor is fully bootstrapped.
    shared['torlock'].acquire()
    grab_page(handler)
if __name__ == "__main__":
    try:
        main(HOST, PORT)
    except KeyboardInterrupt:
        # Swallow Ctrl-C; cleanup happens exactly once below.  (The original
        # called end_tor() here AND in finally, killing tor twice.)
        pass
    finally:
        # Guard against main() failing before start_tor() ran, in which case
        # shared['torprocess'] is still None and .kill() would raise.
        if shared['torprocess'] is not None:
            end_tor()
| bsd-3-clause | Python | |
d433f9926ea14d35a8be9cd258300671051547a5 | Add refine_multiple_shards_joint.py | rstebbing/shards,rstebbing/shards | refine_multiple_shards_joint.py | refine_multiple_shards_joint.py | # refine_multiple_shards_joint.py
# Imports
import argparse
import os
from functools import partial
from operator import itemgetter
from time import time

import matplotlib.pyplot as plt
import numpy as np

import visualise_progress as vis
from pickle_ import dump
from solve import fit_and_colour_shards
# main
def main():
    """Jointly refine all shards (geometry + colour) and save the results."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input_all_iterations_Xy_path')
    parser.add_argument('output_dir')
    parser.add_argument('--visualise-progress',
                        action='store_true',
                        default=False)
    parser.add_argument('--ftol', type=float, default=1e-8)
    parser.add_argument('--xtol', type=float, default=1e-8)
    parser.add_argument('--maxfev', type=int, default=0)
    parser.add_argument('--epsilon', type=float, default=1e-6)
    args = parser.parse_args()
    ensure_output_path = partial(vis.ensure_path, args.output_dir)
    # Saved pair from the previous run: per-shard iteration history plus
    # that run's argument dict.
    all_iterations_Xy, orig_args = np.load(args.input_all_iterations_Xy_path)
    print '<-', orig_args['input_path']
    # Keep RGB only ([..., :3] drops any alpha channel); work in float64.
    I = plt.imread(orig_args['input_path']).astype(np.float64)[..., :3]
    # Reconstruct the base image the previous run painted on.
    if orig_args['base'] == 'white':
        J0 = np.ones_like(I)
    elif orig_args['base'] == 'black':
        J0 = np.zeros_like(I)
    else:
        head, tail = os.path.split(orig_args['base'])
        root, ext = os.path.splitext(tail)
        if ext == '.dat':
            J0 = np.load(orig_args['base'])
        else:
            J0 = plt.imread(orig_args['base']).astype(np.float64)[..., :3]
    # Seed the joint solve from each shard's final (X, y) iterate.
    Xs0, ys0 = zip(*map(itemgetter(-1), all_iterations_Xy))
    print 'Solving with `fit_and_colour_shards` ...'
    # Silence transient overflow warnings during the solve (restored below).
    np.seterr(over='ignore')
    t0 = time()
    (Xs, ys, all_Xs_ys), (exit_code, E0, E1, J, J1) = fit_and_colour_shards(
        I, J0, orig_args['alpha'],
        Xs0, ys0,
        k=orig_args['k'],
        epsilon=args.epsilon,
        ftol=args.ftol,
        xtol=args.xtol,
        maxfev=args.maxfev,
        return_info=True,
        verbose=True)
    t1 = time()
    np.seterr(over='warn')
    print 'E0:', E0
    print 'E1:', E1
    print 'Exit code: %d' % exit_code
    print 'Time taken: %.3fs' % (t1 - t0)
    # Persist the solve history and both reconstructed images.
    output_path = ensure_output_path('all_Xs_ys.dat')
    print '->', output_path
    dump(output_path, (all_Xs_ys, args.__dict__), raise_on_failure=False)
    output_path = ensure_output_path('J.dat')
    print '->', output_path
    dump(output_path, (J, args.__dict__), raise_on_failure=False)
    output_path = ensure_output_path('J1.dat')
    print '->', output_path
    dump(output_path, (J1, args.__dict__), raise_on_failure=False)
    if args.visualise_progress:
        output_path = ensure_output_path('J.png')
        print '->', output_path
        f, ax = vis.make_image_figure(J)
        vis.save_image_figure(output_path, f, J.shape)
        output_path = ensure_output_path('J1.png')
        print '->', output_path
        f, ax = vis.make_image_figure(J1)
        vis.save_image_figure(output_path, f, J1.shape)
# Script entry point.
if __name__ == '__main__':
    main()
| mit | Python | |
9efa33b28b86feaa204ebb84955022b7716a98ba | resolve conflicts | ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/xbrowse,macarthur-lab/xbrowse,ssadedin/seqr,macarthur-lab/seqr,ssadedin/seqr,macarthur-lab/seqr,macarthur-lab/seqr,macarthur-lab/xbrowse,macarthur-lab/seqr,macarthur-lab/seqr,macarthur-lab/xbrowse,ssadedin/seqr,ssadedin/seqr,macarthur-lab/xbrowse | seqr/migrations/0057_merge_20190513_2009.py | seqr/migrations/0057_merge_20190513_2009.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-13 20:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: unifies the two divergent 0056_* branches listed
    # below; intentionally performs no schema operations.
    dependencies = [
        ('seqr', '0056_auto_20190513_1621'),
        ('seqr', '0056_auto_20190424_2059'),
    ]
    operations = [
    ]
| agpl-3.0 | Python | |
6af2adf3257e9cb9130909fed531cc2f6bae8945 | Add a Mac-specifc snapshot build archive bisecting tool. | wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser | build/build-bisect.py | build/build-bisect.py | #!/usr/bin/python2.5
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects the Mac snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
Currently this only works on Mac, but with some effort it could be ported to
other platforms.
"""
# NOTE(review): these chromium.org URLs are from 2009 and may no longer
# resolve -- verify before relying on this script.
# Base URL to download snapshots from.
BUILD_BASE_URL = \
    "http://build.chromium.org/buildbot/snapshots/chromium-rel-mac"
# Location of the latest build revision number
BUILD_LATEST_URL = "%s/LATEST" % BUILD_BASE_URL
# The location of the builds.
BUILD_ARCHIVE_URL = "/%d/"
# Name of the build archive.
BUILD_ZIP_NAME = "chrome-mac.zip"
# Directory name inside the archive.
BUILD_DIR_NAME = "chrome-mac"
# Name of the executable.
BUILD_EXE_NAME = "Chromium.app"
# URL to the ViewVC commit page.
BUILD_VIEWVC_URL = "http://src.chromium.org/viewvc/chrome?view=rev&revision=%d"
###############################################################################
import math
import os
import re
import shutil
import sys
import urllib
def ParseDirectoryIndex(url):
  """Parses the HTML directory listing into a list of revision numbers.

  Fix: the URL handle is now closed even if read() raises (it previously
  leaked on any exception between urlopen() and close()).
  """
  handle = urllib.urlopen(url)
  try:
    dirindex = handle.read()
  finally:
    handle.close()
  # Each revision directory is linked as <a href="NNN/">NNN/</a>.
  return re.findall(r'<a href="([0-9]*)/">\1/</a>', dirindex)
def GetRevList(good, bad):
  """Gets the sorted list of revision numbers in [good, bad)."""
  # Download the main revlist.
  revlist = [int(r) for r in ParseDirectoryIndex(BUILD_BASE_URL)]
  # Keep revisions in [good, bad).  Direct comparison replaces the old
  # ``range(good, bad).__contains__(int(r))`` filter, which rebuilt and
  # scanned a full range list for every element (O(n) per test).
  revlist = [r for r in revlist if good <= r < bad]
  revlist.sort()
  return revlist
def TryRevision(rev):
  """Downloads revision |rev|, unzips it, and opens it for the user to test."""
  # Clear anything that's currently there.
  try:
    os.remove(BUILD_ZIP_NAME)
    shutil.rmtree(BUILD_DIR_NAME, True)
  except Exception, e:
    # Best-effort cleanup: nothing to remove on the first run.
    pass
  # Download the file.
  download_url = BUILD_BASE_URL + (BUILD_ARCHIVE_URL % rev) + BUILD_ZIP_NAME
  try:
    urllib.urlretrieve(download_url, BUILD_ZIP_NAME)
  except Exception, e:
    print("Could not retrieve the download. Sorry.")
    print("Tried to get: %s" % download_url)
    sys.exit(-1)
  # Unzip the file.  (Mac-only tooling, consistent with the module docstring;
  # names are fixed constants, so shell interpolation here is benign.)
  os.system("unzip -q %s" % BUILD_ZIP_NAME)
  # Tell Finder to open the app.
  os.system("open %s/%s" % (BUILD_DIR_NAME, BUILD_EXE_NAME))
def AskIsGoodBuild(rev):
  """Annoyingly ask the user whether build |rev| is good or bad."""
  while True:
    response = raw_input("Build %d [g/b]: " % int(rev))
    # Guard against empty input: the previous code indexed [0] directly and
    # raised IndexError when the user just pressed Enter.
    if response and response[0] in ("g", "b"):
      return response[0] == "g"
    print("Just answer the question...")
def main():
print("chrome-bisect: Perform binary search on the snapshot builds")
# Pick a starting point, try to get HEAD for this.
bad_rev = 0
try:
nh = urllib.urlopen(BUILD_LATEST_URL)
latest = int(nh.read())
nh.close()
bad_rev = raw_input("Bad revision [HEAD:%d]: " % latest)
if (bad_rev == ""):
bad_rev = latest
bad_rev = int(bad_rev)
except Exception, e:
print("Could not determine latest revision. This could be bad...")
bad_rev = int(raw_input("Bad revision: "))
# Find out when we were good.
good_rev = 0
try:
good_rev = int(raw_input("Last known good [0]: "))
except Exception, e:
pass
# Get a list of revisions to bisect across.
revlist = GetRevList(good_rev, bad_rev)
# If we don't have a |good_rev|, set it to be the first revision possible.
if good_rev == 0:
good_rev = revlist[0]
# These are indexes of |revlist|.
good = 0
bad = len(revlist) - 1
# Binary search time!
while good < bad:
candidates = revlist[good:bad]
num_poss = len(candidates)
if num_poss > 10:
print("%d candidates. %d tries left." %
(num_poss, round(math.log(num_poss, 2))))
else:
print("Candidates: %s" % revlist[good:bad])
# Cut the problem in half...
test = int((bad - good) / 2) + good
test_rev = revlist[test]
# Let the user give this revision a spin.
TryRevision(test_rev)
if AskIsGoodBuild(test_rev):
good = test + 1
else:
bad = test
# We're done. Let the user know the results in an official manner.
print("You are probably looking for build %d." % revlist[bad])
print("This is the ViewVC URL for the potential bustage:")
print(BUILD_VIEWVC_URL % revlist[bad])
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
ba8d38f278169b5d71e85e4d74a43fcd4a3892ae | Test decorator | zzz0072/Python_Exercises,zzz0072/Python_Exercises | 99_misc/decorator.py | 99_misc/decorator.py | #/usr/bin/env python
def my_func1(callback):
def func_wrapper(x):
print("my_func1: {0} ".format(callback(x)))
return func_wrapper
@my_func1
def my_func2(x):
return x
# Actuall call sequence is similar to:
# deco = my_func1(my_func2)
# deco("test") => func_wrapper("test")
my_func2("test")
#-------------------------------------------
# Test decorator with parameter
def dec_param(param):
def my_func3(callback):
def func_wrapper(x):
print("my_func3: {0} {1} ".format(param, callback(x)))
return func_wrapper
return my_func3
@dec_param("tag")
def my_func4(x):
return x
# Actuall call sequence is similar to:
# deco = dec_pram("tag", my_func3(my_func4))
# deco("test") => func_wrapper("test")
my_func4("test")
| bsd-2-clause | Python | |
82d5856b09c42b09f857976075d40b6c6568a7c8 | Create gate_chk_aws.py | EnyaKitakagaya/Tonis,EnyaKitakagaya/Tonis | gate_chk/gate_chk_aws.py | gate_chk/gate_chk_aws.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import nfc
import spidev
import smbus
import re
import mysql.connector
import time
def getid(tag):
global id
a = '%s' % tag
id = re.findall("ID=([0-9A-F]*)",a)[0]
con = mysql.connector.connect(user=‘xxxxxxxxxx', password=‘xxxxxxxxxx', host=‘xxxxxxxxxx-xxxxx-xxx.xxx.amazonaws.com', database='fablabkitakagaya', charset='utf8', ssl_ca='/home/pi/xxxxxxxxx.pem')
cursor = con.cursor()
clf = nfc.ContactlessFrontend('usb')
while (True):
print "会員カードをかざして下さい。"
clf.connect(rdwr={'on-connect': getid})
sql = "select now()"
cursor.execute(sql)
now = cursor.fetchone()[0]
cardid = id
sql = "select userid,validity from card where cardid = '%s'" % cardid
cursor.execute(sql)
ans = cursor.fetchone()
try:
ans != None
userid = ans[0]
validity = ans[1]
if (validity ==0):
print "Not a valid card !!"
else:
print userid
sql = "select start_at, end_at from riyou where userid = '%s' and end_at is NULL" % userid
cursor.execute(sql)
ans = cursor.fetchone()
if (ans == None):
print "Hello !"
sql = "insert into riyou(userid,start_at) values ('%s','%s')" % (userid, now)
cursor.execute(sql)
con.commit()
else:
print "Bye !"
sql = "update riyou set end_at = '%s' where userid = '%s' and end_at is NULL" % (now, userid)
cursor.execute(sql)
con.commit()
except:
print "Invalid card !!"
time.sleep(2)
cursor.close()
con.close()
| apache-2.0 | Python | |
a8266c9ff0526b1ada6f48c849892d1d29907710 | Add the workers which compute who receive which notification and where | jeremycline/fmn,jeremycline/fmn,jeremycline/fmn | fmn/consumer/worker.py | fmn/consumer/worker.py | # FMN worker figuring out for a fedmsg message the list of recipient and
# contexts
import json
import logging
import time
import random
import fmn.lib
import fmn.rules.utils
import fedmsg
import fedmsg.meta
from fmn.consumer.util import load_preferences
from fedmsg_meta_fedora_infrastructure import fasshim
import pika
log = logging.getLogger("fmn")
log.setLevel('DEBUG')
CONFIG = fedmsg.config.load_config()
fedmsg.meta.make_processors(**CONFIG)
from dogpile.cache import make_region
_cache = make_region(
key_mangler=lambda key: "fmn.consumer:dogpile:" + key
).configure(**CONFIG['fmn.rules.cache'])
fasshim.make_fas_cache(**CONFIG)
CNT = 0
connection = pika.BlockingConnection()
channel = connection.channel()
ch = channel.queue_declare('workers', durable=True)
print 'started at', ch.method.message_count
valid_paths = fmn.lib.load_rules(root="fmn.rules")
def callback(ch, method, properties, body):
start = time.time()
global CNT
CNT += 1
raw_msg = json.loads(body)
#print body
topic, msg = raw_msg['topic'], raw_msg['body']
print topic
# First, make a thread-local copy of our shared cached prefs
session = fmn.lib.models.init(CONFIG.get('fmn.sqlalchemy.uri', None))
preferences = _cache.get_or_create(
'preferences',
load_preferences,
(session, CONFIG, valid_paths)
)
session.close()
# Shuffle it so that not all threads step through the list in the same
# order. This should cut down on competition for the dogpile lock when
# getting pkgdb info at startup.
random.shuffle(preferences)
# And do the real work of comparing every rule against the message.
results = fmn.lib.recipients(preferences, msg, valid_paths, CONFIG)
log.debug("Recipients found %i dt %0.2fs %s %s",
len(results), time.time() - start,
msg['msg_id'], msg['topic'])
# Let's look at the results of our matching operation and send stuff
# where we need to.
for context, recipients in results.items():
if not recipients:
continue
print context, recipients
backend_chan = connection.channel()
backend_chan.queue_declare('backends', durable=True)
backend_chan.basic_publish(
exchange='',
routing_key='backends',
body=json.dumps({
'context': context,
'recipients': recipients,
'raw_msg': raw_msg,
}),
properties=pika.BasicProperties(
delivery_mode=2
)
)
backend_chan.close()
log.debug("Done. %0.2fs %s %s",
time.time() - start, msg['msg_id'], msg['topic'])
channel.basic_ack(delivery_tag=method.delivery_tag)
chan = channel.queue_declare('workers', durable=True)
print chan.method.message_count
# Make sure we leave any other messages in the queue
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue='workers')
try:
print 'Starting consuming'
channel.start_consuming()
except KeyboardInterrupt:
channel.cancel()
connection.close()
finally:
print '%s tasks proceeded' % CNT
| lgpl-2.1 | Python | |
870d30a0cb7788055cfc9c22854cdbe6293036fa | create class to list preset and metapreset | CaptainDesAstres/Simple-Blender-Render-Manager,CaptainDesAstres/Blender-Render-Manager | settingMod/PresetList.py | settingMod/PresetList.py | #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage preset list'''
import xml.etree.ElementTree as xmlMod
import os
class PresetList:
'''class to manage preset list'''
def __init__(self, xml= None):
'''initialize preset list with default value or values extracted from an xml object'''
if xml is None:
self.defaultInit()
else:
self.fromXml(xml)
def defaultInit(self):
'''initialize preset list with default value'''
def fromXml(self, xml):
'''initialize preset list with values extracted from an xml object'''
def toXml(self):
'''export preset list into xml syntaxed string'''
txt = '<presetList>\n'
txt += '</presetList>\n'
return txt
def see(self, log, versions):
'''menu to explore and edit preset list settings'''
change = False
log.menuIn('preset list')
while True:
os.system('clear')
log.print()
self.print()
print('''\n\n Menu :
0- Save and quit
''')
choice = input('Action?').strip().lower()
if choice in ['0', 'q', 'quit', 'cancel']:
log.menuOut()
return change
else:
log.error('Unvalid menu choice', False)
def print(self):
'''a method to print preset list'''
| mit | Python | |
de743ccb3d4b6556c66765a8cab93729abc22fa5 | Add a script for batch provisioning of SecurityMonkey role | Dklotz-Circle/security_monkey,firebitsbr/security_monkey,kevgliss/security_monkey,odin1314/security_monkey,gene1wood/security_monkey,airbnb/security_monkey,daichenge/security_monkey,gene1wood/security_monkey,pradeep-aradhya/security_monkey,firebitsbr/security_monkey,odin1314/security_monkey,Fsero/security_monkey,markofu/security_monkey,pradeep-aradhya/security_monkey,odin1314/security_monkey,stackArmor/security_monkey,stackArmor/security_monkey,bunjiboys/security_monkey,gorcz/security_monkey,monkeysecurity/security_monkey,firebitsbr/security_monkey,stackArmor/security_monkey,Dklotz-Circle/security_monkey,Netflix/security_monkey,kevgliss/security_monkey,Netflix/security_monkey,vijaykumar0690/security_monkey,kevgliss/security_monkey,bunjiboys/security_monkey,monkeysecurity/security_monkey,lucab/security_monkey,Fsero/security_monkey,gene1wood/security_monkey,Yelp/security_monkey,Yelp/security_monkey,Fsero/security_monkey,markofu/security_monkey,markofu/security_monkey,markofu/security_monkey,vijaykumar0690/security_monkey,monkeysecurity/security_monkey,lucab/security_monkey,bunjiboys/security_monkey,Dklotz-Circle/security_monkey,gorcz/security_monkey,daichenge/security_monkey,lucab/security_monkey,pradeep-aradhya/security_monkey,Netflix/security_monkey,Netflix/security_monkey,Dklotz-Circle/security_monkey,Netflix/security_monkey,vijaykumar0690/security_monkey,kevgliss/security_monkey,Fsero/security_monkey,daichenge/security_monkey,airbnb/security_monkey,vijaykumar0690/security_monkey,kevgliss/security_monkey,Yelp/security_monkey,gorcz/security_monkey,gorcz/security_monkey,bunjiboys/security_monkey,gorcz/security_monkey,gene1wood/security_monkey,pradeep-aradhya/security_monkey,gene1wood/security_monkey,Yelp/security_monkey,stackArmor/security_monkey,odin1314/security_monkey,firebitsbr/security_monkey,Fsero/security_monkey,lucab/security_monkey,daichenge/security_monke
y,airbnb/security_monkey,stackArmor/security_monkey,vijaykumar0690/security_monkey,daichenge/security_monkey,lucab/security_monkey,firebitsbr/security_monkey,Yelp/security_monkey,bunjiboys/security_monkey,odin1314/security_monkey,airbnb/security_monkey,Dklotz-Circle/security_monkey,lucab/security_monkey,airbnb/security_monkey,monkeysecurity/security_monkey,pradeep-aradhya/security_monkey,monkeysecurity/security_monkey,markofu/security_monkey | scripts/secmonkey_role_setup.py | scripts/secmonkey_role_setup.py | #!/usr/bin/env python
# Copyright 2014 Rocket-Internet
# Luca Bruno <luca.bruno@rocket-internet.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SecurityMonkey AWS role provisioning script
Grab credentials from ~/.boto (or other standard credentials sources).
Optionally accept "profile_name" as CLI parameter.
"""
import sys, json
import urllib
import boto
# FILL THIS IN
# Supervision account that can assume monitoring role
secmonkey_arn = 'arn:aws:iam::<awsaccountnumber>:role/SecurityMonkeyInstanceProfile'
trust_relationship = \
'''
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "%s"
},
"Action": "sts:AssumeRole"
}
]
}
'''
# Role with restricted security policy (list/get only)
role_name = 'SecurityMonkey'
role_policy_name = 'SecurityMonkeyPolicy'
policy = \
'''
{
"Statement": [
{
"Action": [
"cloudwatch:Describe*",
"cloudwatch:Get*",
"cloudwatch:List*",
"ec2:Describe*",
"elasticloadbalancing:Describe*",
"iam:List*",
"iam:Get*",
"route53:Get*",
"route53:List*",
"rds:Describe*",
"s3:GetBucketAcl",
"s3:GetBucketCORS",
"s3:GetBucketLocation",
"s3:GetBucketLogging",
"s3:GetBucketPolicy",
"s3:GetBucketVersioning",
"s3:GetLifecycleConfiguration",
"s3:ListAllMyBuckets",
"sdb:GetAttributes",
"sdb:List*",
"sdb:Select*",
"ses:Get*",
"ses:List*",
"sns:Get*",
"sns:List*",
"sqs:GetQueueAttributes",
"sqs:ListQueues",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
'''
def main(profile = None):
# Sanitize JSON
assume_policy = json.dumps(json.loads(trust_relationship % secmonkey_arn))
security_policy = json.dumps(json.loads(policy))
# Connect to IAM
(role_exist, current_policy) = (False, "")
try:
iam = boto.connect_iam(profile_name = profile)
except boto.exception.NoAuthHandlerFound:
sys.exit("Authentication failed, please check your credentials under ~/.boto")
# Check if role already exists
rlist = iam.list_roles()
for r in rlist['list_roles_response']['list_roles_result']['roles']:
if r['role_name'] == role_name:
role_exist = True
current_policy = json.loads(urllib.unquote(r['assume_role_policy_document']))
for p in current_policy['Statement']:
if p['Action'] == 'sts:AssumeRole':
if secmonkey_arn in p['Principal']['AWS'] :
# Already ok
sys.exit('Role "%s" already configured, not touching it.' % role_name)
else:
# Add another monitoring account
new_policy = [secmonkey_arn]
new_policy.extend(p['Principal']['AWS'])
p['Principal']['AWS'] = new_policy
assume_policy = json.dumps(current_policy)
# Add SecurityMonkey monitoring role and link it to supervisor ARN
if not role_exist:
role = iam.create_role(role_name, assume_policy)
else:
role = iam.update_assume_role_policy(role_name, assume_policy)
# Add our own role policy
iam.put_role_policy(role_name, role_policy_name, security_policy)
print('Added role "%s", linked to ARN "%s".' % (role_name, secmonkey_arn))
if __name__ == "__main__":
profile = None
if len(sys.argv) >= 2:
profile = sys.argv[1]
main(profile)
| apache-2.0 | Python | |
430ca4b6a6f134346efaae430fac2bfaff195fe1 | Add files via upload | DiginessForever/machineLearning | 1stANNrecoded2Python.py | 1stANNrecoded2Python.py | #imports here: numpy, os, whatever I need
n = 1000
e = (1.0 + 1.0/n)^n
#Instantiate a new layer with the number of neurons desired, give the neurons (Q: do neurons have separate values than their weights?) random values.
def layerFactory(numberOfNeurons):
#create weights between layers (essentially, populate the first layer's weight matrix)
def connectLayers(layer1, layer2):
#I don't think I'll have a network factory this time right away - there are too many variables. I will have a network object, but it'll just have
#a list of layers.
#I need the network class here. | mit | Python | |
5b098392cee7f6526947d45bfc620573c631e4cf | Create add-P67-wikidata-url | Xi-Plus/Xiplus-Wikipedia-Bot,Xi-Plus/Xiplus-Wikipedia-Bot | my-ACG/add-P67-wikidata-url/edit.py | my-ACG/add-P67-wikidata-url/edit.py | # -*- coding: utf-8 -*-
import argparse
import csv
import os
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
site = pywikibot.Site()
site.login()
datasite = site.data_repository()
def addWikidataUrl(title, targettitle):
print(title)
if title[0] == 'Q':
myitem = pywikibot.ItemPage(datasite, title)
url = 'https://www.wikidata.org/wiki/{}'.format(targettitle)
elif title[0] == 'P':
url = 'https://www.wikidata.org/wiki/Property:{}'.format(targettitle)
myitem = pywikibot.PropertyPage(datasite, title)
else:
print('\t Not Wikibase page')
return
new_claim = pywikibot.page.Claim(datasite, 'P67')
new_claim.setTarget(url)
print('\t', new_claim)
myitem.addClaim(new_claim, summary='設定維基數據網址')
def main(filename):
with open(filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
addWikidataUrl(row[0], row[1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
main(args.filename)
| mit | Python | |
52fd7e5e6ae5ec6ab7de8a858fd2b132fe0d4081 | Create CGOLprintToScreen.py | marcuskainth/game-of-life | CGOLprintToScreen.py | CGOLprintToScreen.py | import sys
tiles_size = 64
class cell:
def __init__(self, location, alive=False):
self.alive = alive
self.location = location
class Rules:
def rule(self): # if alive
for i in range(tiles_size):
for j in range(tiles_size):
c = self.neighbourscounter(tile[i][j])
if tile[i][j].alive:
if c != 2 or c != 3:
tile[i][j].alive = False
else:
tile[i][j].alive = True
else:
if c == 3:
tile[i][j].alive = True
def neighbourscounter(self, cell_): # Return the number of Neighbours alive#
c = 0
cell_loc = cell_.location
try:
if tile[abs(cell_loc[0] - 1)][abs(cell_loc[1] - 1)].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0] - 1)][abs(cell_loc[1])].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0])][abs(cell_loc[1] - 1)].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0] + 1)][abs(cell_loc[1] - 1)].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0] + 1)][abs(cell_loc[1])].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0] - 1)][abs(cell_loc[1] + 1)].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0])][abs(cell_loc[1] + 1)].alive:
c += 1
except Exception:
pass
try:
if tile[abs(cell_loc[0] + 1)][abs(cell_loc[1] + 1)].alive:
c += 1
except Exception:
pass
return c
tile = []
for i in range(tiles_size):
tile.append([])
for g in range(tiles_size):
tile[i].insert(g, cell((i, g)))
Rules = Rules()
# Hardcode pattern.
tile [1][1].alive = True
tile [1][2].alive = True
tile [2][1].alive = True
tile [3][1].alive = True
tile [3][2].alive = True
tile [2][2].alive = True
# Print too screen implementing rules.
for a in range(0,3):
for i in range(tiles_size):
for g in range(tiles_size):
if tile[i][g].alive == True:
sys.stdout.write("X")
else:
sys.stdout.write("O")
print()
print()
print()
Rules.rule()
| mit | Python | |
9b5f070705de9896c8c6f8347dc0f733ae748793 | Add harvesting blog data example | fabriciojoc/redes-sociais-web,fabriciojoc/redes-sociais-web | harvesting_blog_data.py | harvesting_blog_data.py | import os
import sys
import json
import feedparser
from bs4 import BeautifulSoup
FEED_URL = 'http://g1.globo.com/dynamo/rss2.xml'
def cleanHtml(html):
return BeautifulSoup(html, 'lxml').get_text()
fp = feedparser.parse(FEED_URL)
print "Fetched %s entries from '%s'" % (len(fp.entries[0].title), fp.feed.title)
blog_posts = []
for e in fp.entries:
blog_posts.append({'title': e.title,
'published': e.published,
'summary': cleanHtml(e.summary),
'link': e.link})
out_file = os.path.join('./', 'feed.json')
f = open(out_file, 'w')
f.write(json.dumps(blog_posts, indent=1))
f.close()
print 'Wrote output file to %s' % (f.name, )
| apache-2.0 | Python | |
630d857c188259d04794e817f83f7c10e9ce9896 | Add OTP testing | kotfic/girder,Xarthisius/girder,manthey/girder,RafaelPalomar/girder,jbeezley/girder,jbeezley/girder,Kitware/girder,kotfic/girder,data-exp-lab/girder,Kitware/girder,manthey/girder,RafaelPalomar/girder,kotfic/girder,Xarthisius/girder,RafaelPalomar/girder,RafaelPalomar/girder,girder/girder,girder/girder,jbeezley/girder,Xarthisius/girder,manthey/girder,girder/girder,girder/girder,data-exp-lab/girder,Kitware/girder,Xarthisius/girder,kotfic/girder,Kitware/girder,data-exp-lab/girder,data-exp-lab/girder,manthey/girder,kotfic/girder,Xarthisius/girder,jbeezley/girder,RafaelPalomar/girder,data-exp-lab/girder | test/test_user_otp.py | test/test_user_otp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import re
import pytest
from girder.exceptions import AccessException
from girder.models.user import User
from pytest_girder.assertions import assertStatus, assertStatusOk
def testInitializeOtp(user):
otpUris = User().initializeOtp(user)
# A URI for TOTP should be returned
assert re.match(r'', otpUris['totpUri'])
# OTP should not be enabled yet, since it's not finalized
assert user['otp']['enabled'] is False
# TOTP parameters should be generated
assert 'totp' in user['otp']
def testHasOtp(user):
assert User().hasOtp(user) is False
User().initializeOtp(user)
# OTP is not yet enabled
assert User().hasOtp(user) is False
user['otp']['enabled'] = True
assert User().hasOtp(user) is True
def _tokenFromTotpUri(totpUri, valid=True):
# Create an external TOTP instance
from passlib.totp import TOTP
totp = TOTP.from_uri(totpUri)
# Generate a valid token
otpToken = totp.generate().token
if not valid:
# Increment the token by 1 to invalidate it
otpToken = str((int(otpToken) + 1) % int(1e6))
return otpToken
def testVerifyOtp(user):
# Enable OTP
otpUris = User().initializeOtp(user)
user['otp']['enabled'] = True
# Generate an invalid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'], False)
with pytest.raises(AccessException):
User().verifyOtp(user, otpToken)
# Generate a valid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'])
# Verify the token, which should succeed without raising an exception
User().verifyOtp(user, otpToken)
# Re-verify the same token, which should fail
with pytest.raises(AccessException):
User().verifyOtp(user, otpToken)
def testAuthenticateWithOtp(user):
# Providing an unnecessary token should fail
with pytest.raises(AccessException):
User().authenticate('user', 'password', '123456')
# Enable OTP and save user
otpUris = User().initializeOtp(user)
user['otp']['enabled'] = True
User().save(user)
# Providing no token should now fail
with pytest.raises(AccessException):
User().authenticate('user', 'password')
# Generate a valid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'])
# Authenticate successfully with the valid token
User().authenticate('user', 'password', otpToken)
def testOtpAPIWorkflow(server, user):
# Try to finalize OTP before it's been initialized
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user,
additionalHeaders=[('Girder-OTP', '123456')])
# This should fail cleanly
assertStatus(resp, 400)
assert 'not initialized' in resp.json['message']
# Initialize OTP
resp = server.request(path='/user/%s/otp' % user['_id'], method='POST', user=user)
assertStatusOk(resp)
# Save the URI
totpUri = resp.json['totpUri']
# Login without an OTP
resp = server.request(path='/user/authentication', method='GET', basicAuth='user:password')
# Since OTP has not been finalized, this should still succeed
assertStatusOk(resp)
# Finalize without an OTP
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user)
assertStatus(resp, 400)
assert 'Girder-OTP' in resp.json['message']
# Finalize with an invalid OTP
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user,
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri, False))])
assertStatus(resp, 403)
assert 'validation failed' in resp.json['message']
# Finalize with a valid OTP
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user,
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri))])
assertStatusOk(resp)
# Login without an OTP
resp = server.request(path='/user/authentication', method='GET', basicAuth='user:password')
assertStatus(resp, 401)
assert 'Girder-OTP' in resp.json['message']
# Login with an invalid OTP
resp = server.request(
path='/user/authentication', method='GET', basicAuth='user:password',
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri, False))])
assertStatus(resp, 401)
assert 'validation failed' in resp.json['message']
# Login with a valid OTP
resp = server.request(
path='/user/authentication', method='GET', basicAuth='user:password',
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri))])
assertStatusOk(resp)
| apache-2.0 | Python | |
f40788bdc60566fc15a7abb46bfca61bb9131823 | Test update | treyh0/pullrequest-reviewer | test.py | test.py | #!/usr/bin/env python
def main():
print "Hello world"
if __name__ == "__main__":
main()
| agpl-3.0 | Python | |
b07ca938d68dff3386007885a6da4f5b2e593941 | Add prototype | matwey/pybeam | test.py | test.py | #!/usr/bin/python
from construct import *
import sys
def align4(n):
return n + ((n+4) % 4)
chunk_atom = Struct("chunk_atom",
UBInt32("len"),
Array(lambda ctx: ctx.len, PascalString("atom"))
)
chunk_expt = Struct("chunk_expt",
UBInt32("len"),
Array(lambda ctx: ctx.len, Struct("entry",
UBInt32("function"),
UBInt32("arity"),
UBInt32("label"),
)
)
)
chunk_impt = Struct("chunk_impt",
UBInt32("len"),
Array(lambda ctx: ctx.len, Struct("entry",
UBInt32("module"),
UBInt32("function"),
UBInt32("arity"),
)
)
)
chunk_loct = Struct("chunk_loct",
UBInt32("len"),
Array(lambda ctx: ctx.len, Struct("entry",
UBInt32("function"),
UBInt32("arity"),
UBInt32("label"),
)
)
)
chunk = Struct("chunk",
String("chunk_name",4),
UBInt32("size"),
Switch("payload", lambda ctx: ctx.chunk_name,
{
"Atom" : chunk_atom,
"ExpT" : chunk_expt,
"ImpT" : chunk_impt,
# "Code" : chunk_code,
# "StrT" : chunk_strt,
# "Attr" : chunk_attr,
# "CInf" : chunk_cinf,
"LocT" : chunk_loct,
# "Trac" : chunk_trac,
},
default = String("skip", lambda ctx: align4(ctx.size))
),
)
beam_construct = Struct("beam",
OneOf(String('for1',4),['FOR1']),
UBInt32("size"),
OneOf(String('beam',4),['BEAM']),
GreedyRange(chunk),
)
filename = sys.argv[1]
beam = file(filename,"r").read()
print beam_construct.parse(beam)
| mit | Python | |
d5aecde4806a130550786f21f8fdd13c27996e16 | add test.py and copyright comments | itmard/persian.py,itmard/Persian | test.py | test.py | # encoding: utf-8
from toPersian import *
print enToPersianNumb('شماره کلاس 312')
print enToPersianNumb(3123123.9012)
print enToPersianNumb(123)
print enToPersianchar('sghl ]i ofv')
print arToPersianNumb('٣٤٥٦')
print arToPersianChar(' ك جمهوري اسلامي ايران')
'''
شماره کلاس ۳۱۲
۳۱۲۳۱۲۳.۹۰۱۲
۱۲۳
سلام چه خبر
۳۴۵۶
ک جمهوری اسلامی ایران
''' | apache-2.0 | Python | |
3a160d3aed9d5eb7cebe2427f9009b4e0e2f07c4 | return doi resolver url instead of doi resolver name | jcherqui/searx,dalf/searx,jcherqui/searx,jcherqui/searx,asciimoo/searx,asciimoo/searx,asciimoo/searx,dalf/searx,dalf/searx,dalf/searx,jcherqui/searx,asciimoo/searx | searx/plugins/oa_doi_rewrite.py | searx/plugins/oa_doi_rewrite.py | from flask_babel import gettext
import re
from searx.url_utils import urlparse, parse_qsl
from searx import settings
regex = re.compile(r'10\.\d{4,9}/[^\s]+')
name = gettext('Open Access DOI rewrite')
description = gettext('Avoid paywalls by redirecting to open-access versions of publications when available')
default_on = False
preference_section = 'privacy'
doi_resolvers = settings['doi_resolvers']
def extract_doi(url):
match = regex.search(url.path)
if match:
return match.group(0)
for _, v in parse_qsl(url.query):
match = regex.search(v)
if match:
return match.group(0)
return None
def get_doi_resolver(args, preference_doi_resolver):
doi_resolvers = settings['doi_resolvers']
doi_resolver = args.get('doi_resolver', preference_doi_resolver)[0]
if doi_resolver not in doi_resolvers:
doi_resolvers = settings['default_doi_resolver']
doi_resolver_url = doi_resolvers[doi_resolver]
return doi_resolver_url
def on_result(request, search, result):
doi = extract_doi(result['parsed_url'])
if doi and len(doi) < 50:
for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'):
if doi.endswith(suffix):
doi = doi[:-len(suffix)]
result['url'] = get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')) + doi
result['parsed_url'] = urlparse(result['url'])
return True
| from flask_babel import gettext
import re
from searx.url_utils import urlparse, parse_qsl
from searx import settings
regex = re.compile(r'10\.\d{4,9}/[^\s]+')
name = gettext('Open Access DOI rewrite')
description = gettext('Avoid paywalls by redirecting to open-access versions of publications when available')
default_on = False
preference_section = 'privacy'
doi_resolvers = settings['doi_resolvers']
def extract_doi(url):
match = regex.search(url.path)
if match:
return match.group(0)
for _, v in parse_qsl(url.query):
match = regex.search(v)
if match:
return match.group(0)
return None
def get_doi_resolver(args, preference_doi_resolver):
doi_resolvers = settings['doi_resolvers']
doi_resolver = args.get('doi_resolver', preference_doi_resolver)[0]
if doi_resolver not in doi_resolvers:
doi_resolvers = settings['default_doi_resolver']
return doi_resolver
def on_result(request, search, result):
doi = extract_doi(result['parsed_url'])
if doi and len(doi) < 50:
for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'):
if doi.endswith(suffix):
doi = doi[:-len(suffix)]
result['url'] = get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')) + doi
result['parsed_url'] = urlparse(result['url'])
return True
| agpl-3.0 | Python |
995e35c2a66fd51f9216ed5acc829bac0ac3ddeb | add i3-debug-console script to examples | acrisci/i3ipc-python | examples/i3-debug-console.py | examples/i3-debug-console.py | #!/usr/bin/env python3
import i3ipc
from curses import wrapper
from threading import Timer
def con_type_to_text(con):
if con.type != 'con':
return con.type
if len(con.nodes):
return 'container'
else:
return 'view'
def layout_txt(con):
if con.layout == 'splith':
return 'HORIZ'
elif con.layout == 'splitv':
return 'VERT'
else:
return ''
def container_to_text(con, indent):
t = con_type_to_text(con)
txt = (' ' * indent) + '('
txt += t + ' ' + layout_txt(con)
if con.focused:
txt += ' focus'
has_children = len(con.nodes)
for c in con.nodes:
txt += '\n'
txt += container_to_text(c, indent + 4)
if has_children:
txt += '\n' + (' ' * indent)
txt += ')'
return txt
last_txt = ''
def main(stdscr):
ipc = i3ipc.Connection()
def on_event(ipc, e):
txt = ''
for ws in ipc.get_tree().workspaces():
txt += container_to_text(ws, 0) + '\n'
global last_txt
if txt == last_txt:
return
stdscr.clear()
for l in txt:
try:
stdscr.addstr(l)
except Exception:
break
stdscr.refresh()
last_txt = txt
on_window(ipc, None)
ipc.on('window', on_event)
ipc.on('binding', on_event)
ipc.on('workspace', on_event)
ipc.main()
wrapper(main)
| bsd-3-clause | Python | |
1524a8fd55c682bd8b77b52b9d2d5e5c030c9d2d | Add first tests | samuell/sciluigi_deprecated | test/sciluigi_test.py | test/sciluigi_test.py | import sciluigi
from nose.tools import with_setup
# Make these variables global
#shell_task = None
def setup():
global shell_task
shell_task = sciluigi.shell("cat <i:input> > <o:output:out.txt>")
return shell_task
def teardown():
global shell_task
shell_task = None
@with_setup(setup, teardown)
def test_inports_nonempty():
assert len(shell_task.inports) == 1
@with_setup(setup, teardown)
def test_outports_nonempty():
assert len(shell_task.outports) == 1
| mit | Python | |
ed63c9c828cc609d82eb5afb21f6e24b358bc3cf | Add DoubleLinkedQueue | xliiauo/leetcode,xiao0720/leetcode,xiao0720/leetcode,xliiauo/leetcode,xliiauo/leetcode | DoubleLinkedQueue.py | DoubleLinkedQueue.py | class _DoubleLinkedList:
    class _Node:
        """Internal doubly linked node; __slots__ avoids a per-instance dict."""
        __slots__ = '_element', '_prev', '_next'
        def __init__(self, element, prev, next):
            self._element = element
            self._prev = prev
            self._next = next
def __init__(self):
self.header = self._Node(None, None, None)
self.tailer = self._Node(None, None, None)
self.header._next = self.tailer
self.tailer._prev = self.header
self._size = 0
    def __len__(self):
        # Number of real (non-sentinel) elements.
        return self._size
    def is_empty(self):
        # True when the list holds no real elements.
        return self._size == 0
def insert_between(self, e, prev, next):
node = self._Node(e, prev, next)
prev._next = node
next._prev = node
self._size += 1
return node
def delete_node(self, node):
node._prev._next = node._next
node._next._prev = node._prev
self._size -= 1
e = node._element
node._prev = node._next = node._element = None
return e
class DoubleLinedQueue(_DoubleLinkedList):
    """Deque-style interface layered over the doubly linked list base class."""

    def first(self):
        """Return the front node, or None when the queue is empty."""
        return None if self.is_empty() else self.header._next

    def last(self):
        """Return the back node, or None when the queue is empty."""
        return None if self.is_empty() else self.tailer._prev

    def insert_first(self, e):
        """Push ``e`` onto the front of the queue."""
        self.insert_between(e, self.header, self.header._next)

    def insert_last(self, e):
        """Append ``e`` to the back of the queue."""
        self.insert_between(e, self.tailer._prev, self.tailer)

    def delete_first(self):
        """Pop and return the front element, or None when empty."""
        return None if self.is_empty() else self.delete_node(self.header._next)

    def delete_last(self):
        """Pop and return the back element, or None when empty."""
        return None if self.is_empty() else self.delete_node(self.tailer._prev)
| mit | Python | |
42f66ea6e1921040d6e3055c41372b02511e6a5a | Add directory for CYK tests | PatrikValkovic/grammpy | tests/CYK/__init__.py | tests/CYK/__init__.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:50
:Licence GNUv3
Part of pyparsers
""" | mit | Python | |
4dc6462a0a8231ba4ffca09d5c9546d8b6d0dd6f | Fix bug in config. | ynvb/DIE,nihilus/DIE,ekse/DIE,kakkojc/DIE,AlexWMF/DIE,isra17/DIE,xujun10110/DIE,HackerTool/DIE,melbcat/DIE | DIE/Lib/DieConfig.py | DIE/Lib/DieConfig.py | import logging
import os
import ConfigParser
import idaapi
import yaml
from attrdict import AttrMap
class DIEConfig(object):
    """DIE configuration: YAML-backed settings exposed via attribute access.

    Unknown attribute reads/writes are proxied to an internal AttrMap, so
    nested YAML sections can be used as ``config.section.key``.
    """
    # Path of the bundled default configuration, next to this module.
    DEFAULT = os.path.join(os.path.dirname(__file__), "config.yml")
    def __init__(self):
        # Load the defaults; AttrMap exposes the nested dicts as attributes.
        with open(self.DEFAULT, "rb") as f:
            default = yaml.safe_load(f)
        self._config = AttrMap(default)
    @property
    def install_path(self):
        # Root of the DIE installation (parent of this Lib directory).
        return os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
    @property
    def icons_path(self):
        return self.install_path + "\\Icons"
    @property
    def parser_path(self):
        return self.install_path + "\\Plugins\\DataParsers"
    def load(self, path):
        """Overlay a user configuration file on top of the defaults."""
        with open(path, "rb") as f:
            custom = yaml.safe_load(f)
        custom = AttrMap(custom)
        # Merge section-by-section so keys missing from the user file keep
        # their default values instead of wiping the whole section.
        for attr in self._config:
            if attr in custom:
                self._config[attr].update(custom[attr])
    def save(self, path):
        """Dump the current configuration back to a YAML file."""
        with open(path, "wb") as f:
            yaml.safe_dump(dict(self._config), f, default_flow_style=False)
    def __getattr__(self, name):
        # Called only for attributes missing on the instance: proxy to AttrMap.
        return getattr(self._config, name)
    def __setattr__(self, name, value):
        # Underscore names (e.g. _config) are real instance attributes; the
        # guard also prevents recursion while _config itself is being set.
        if name.startswith("_"):
            return super(DIEConfig, self).__setattr__(name, value)
        return setattr(self._config, name, value)
#############################################################################
# Singleton
#############################################################################
# Module-level singleton, created by initialize().
_config_parser = None
def get_config():
    """
    Return a singleton instance of the global configuration object
    """
    return _config_parser
def initialize():
    # (Re)create the global configuration singleton.
    global _config_parser
    _config_parser = DIEConfig()
| import logging
import os
import ConfigParser
import idaapi
import yaml
from attrdict import AttrMap
class DIEConfig(object):
    """DIE configuration: YAML-backed settings exposed via attribute access."""
    # Path of the bundled default configuration, next to this module.
    DEFAULT = os.path.join(os.path.dirname(__file__), "config.yml")
    def __init__(self):
        # Load the defaults; AttrMap exposes the nested dicts as attributes.
        with open(self.DEFAULT, "rb") as f:
            default = yaml.safe_load(f)
        self._config = AttrMap(default)
    @property
    def install_path(self):
        # Root of the DIE installation (parent of this Lib directory).
        return os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
    @property
    def icons_path(self):
        return self.install_path + "\\Icons"
    @property
    def parser_path(self):
        return self.install_path + "\\Plugins\\DataParsers"
    def load(self, path):
        """Load a user configuration file over the defaults."""
        with open(path, "rb") as f:
            custom = yaml.safe_load(f)
        # NOTE(review): this wholesale update() replaces entire top-level
        # sections with plain dicts, dropping unspecified default sub-keys.
        self._config.update(custom)
    def save(self, path):
        """Dump the current configuration back to a YAML file."""
        with open(path, "wb") as f:
            yaml.safe_dump(dict(self._config), f, default_flow_style=False)
    def __getattr__(self, name):
        # Called only for attributes missing on the instance: proxy to AttrMap.
        return getattr(self._config, name)
    def __setattr__(self, name, value):
        # Underscore names are real instance attributes; the guard also
        # prevents recursion while _config itself is being assigned.
        if name.startswith("_"):
            return super(DIEConfig, self).__setattr__(name, value)
        return setattr(self._config, name, value)
#############################################################################
# Singleton
#############################################################################
# Module-level singleton, created by initialize().
_config_parser = None
def get_config():
    """
    Return a singleton instance of the global configuration object
    """
    return _config_parser
def initialize():
    # (Re)create the global configuration singleton.
    global _config_parser
    _config_parser = DIEConfig()
| mit | Python |
5ac4f0be3f9f1179a50670989915bae0d3ae157e | Add globals.ffmpeg module to retrieve ffmpeg executable | AntumDeluge/desktop_recorder,AntumDeluge/desktop_recorder | source/globals/ffmpeg.py | source/globals/ffmpeg.py | # -*- coding: utf-8 -*-
## \package globals.ffmpeg
#
# Retrieves the FFmpeg executable
# MIT licensing
# See: LICENSE.txt
import subprocess
from subprocess import PIPE
from subprocess import STDOUT
## Locates an executable on the system PATH using 'which'
#
#  \param cmd
#    Command name to search for
#  \return
#    Path to the executable with the trailing newline stripped, or None
#    when the command is not available
def GetExecutable(cmd):
    sp = subprocess.Popen([u'which', cmd,], stdout=PIPE, stderr=STDOUT)
    # communicate() returns (stdout, stderr); stderr is merged into stdout
    # here, so the second item is always None.
    # BUG FIX: the old code unpacked it as (output, returncode) and tested
    # the always-None stderr slot, so failed lookups were never detected.
    output, _ = sp.communicate()
    returncode = sp.returncode
    print(u'Return code: {}\nSTDOUT: {}'.format(returncode, output))
    if returncode:
        return None
    # Strip the newline emitted by `which` so the value is directly usable
    # as a path.
    return output.strip()
CMD_ffmpeg = GetExecutable(u'ffmpeg')
| mit | Python | |
4ae114dd1da8118cc9d2ee87e30f5e0a1f3324f2 | Add some tests for monitor class | jamesoff/simplemonitor,jamesoff/simplemonitor,jamesoff/simplemonitor,jamesoff/simplemonitor,jamesoff/simplemonitor | tests/test_monitor.py | tests/test_monitor.py | import unittest
import Monitors.monitor
class TestMonitor(unittest.TestCase):
    """Unit tests for the base Monitors.monitor.Monitor class."""

    # Example configuration for disk-space style checks.
    safe_config = {'partition': '/', 'limit': '10G'}

    # Binary size constants for readability in size-related tests.
    one_KB = 1024
    one_MB = one_KB * 1024
    one_GB = one_MB * 1024
    one_TB = one_GB * 1024

    def test_MonitorInit(self):
        """Constructor options should be copied onto the instance."""
        options = {
            'depend': 'a, b',
            'urgent': 0,
            'tolerance': 2,
            'remote_alert': 1,
            'recover_command': 'true'
        }
        monitor = Monitors.monitor.Monitor(config_options=options)
        self.assertEqual(monitor.name, 'unnamed', 'Monitor did not set name')
        self.assertEqual(monitor.urgent, 0, 'Monitor did not set urgent')
        self.assertEqual(monitor.tolerance, 2, 'Monitor did not set tolerance')
        self.assertTrue(monitor.remote_alerting, 'Monitor did not set remote_alerting')
        self.assertEqual(monitor.recover_command, 'true', 'Monitor did not set recover_command')

    def test_MonitorSuccess(self):
        """A recorded success resets errors and remembers the message."""
        monitor = Monitors.monitor.Monitor()
        monitor.record_success('yay')
        self.assertEqual(monitor.get_error_count(), 0, 'Error count is not 0')
        self.assertEqual(monitor.get_success_count(), 1, 'Success count is not 1')
        self.assertEqual(monitor.tests_run, 1, 'Tests run is not 1')
        self.assertFalse(monitor.was_skipped, 'was_skipped is not false')
        self.assertEqual(monitor.last_result, 'yay', 'Last result is not correct')

    def test_MonitorFail(self):
        """A recorded failure bumps the error count and keeps the message."""
        monitor = Monitors.monitor.Monitor()
        monitor.record_fail('boo')
        self.assertEqual(monitor.get_error_count(), 1, 'Error count is not 1')
        self.assertEqual(monitor.get_success_count(), 0, 'Success count is not 0')
        self.assertEqual(monitor.tests_run, 1, 'Tests run is not 1')
        self.assertFalse(monitor.was_skipped, 'was_skipped is not false')
        self.assertEqual(monitor.last_result, 'boo', 'Last result is not correct')

    def test_MonitorWindows(self):
        """The base monitor never reports itself as Windows-specific."""
        monitor = Monitors.monitor.Monitor()
        self.assertFalse(monitor.is_windows())

    def test_MonitorSkip(self):
        """Skipping counts as a success and records the blocking dependency."""
        monitor = Monitors.monitor.Monitor()
        monitor.record_skip('a')
        self.assertEqual(monitor.get_success_count(), 1, 'Success count is not 1')
        self.assertTrue(monitor.was_skipped, 'was_skipped is not true')
        self.assertEqual(monitor.skip_dep, 'a', 'skip_dep is not correct')
        self.assertTrue(monitor.skipped(), 'skipped() is not true')
| bsd-3-clause | Python | |
b0f0ee685ca525de90fdcd5a57a203c8b42b936a | test for the bootstrap | pypa/setuptools,pypa/setuptools,pypa/setuptools | tests/install_test.py | tests/install_test.py | import urllib2
import sys
import os
# NOTE: Python 2 script (print statements, urllib2).
print '**** Starting Test'
print '\n\n'
is_jython = sys.platform.startswith('java')
if is_jython:
    # Jython lacks os.spawnv; subprocess is used as the fallback below.
    import subprocess
# Download the Distribute bootstrap script and save it locally.
print 'Downloading bootstrap'
file = urllib2.urlopen('http://nightly.ziade.org/bootstrap.py')
f = open('bootstrap.py', 'w')
f.write(file.read())
f.close()
# running it
args = [sys.executable] + ['bootstrap.py']
if is_jython:
    # NOTE(review): args already begins with sys.executable, so this
    # prepends the interpreter a second time -- verify on Jython.
    subprocess.Popen([sys.executable] + args).wait()
else:
    # spawnv's argument vector includes argv[0], hence args as built above.
    os.spawnv(os.P_WAIT, sys.executable, args)
# now checking if Distribute is installed
args = [sys.executable] + ['-c', 'import setuptools; import sys; sys.exit(hasattr(setuptools, "_distribute"))']
if is_jython:
    res = subprocess.call([sys.executable] + args)
else:
    res = os.spawnv(os.P_WAIT, sys.executable, args)
print '\n\n'
# The child exits with the truthiness of setuptools._distribute, so a
# non-zero status here means the marker attribute was present.
if res:
    print '**** Test is OK'
else:
    print '**** Test failed, please send me the output at tarek@ziade.org'
| mit | Python | |
f9b2bba394ad6ce31ffae5cf6ccf445dc280ba95 | Solve C Mais ou Menos? in python | deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground | solutions/beecrowd/2486/2486.py | solutions/beecrowd/2486/2486.py | import sys
# Acceptable daily vitamin C range in mg (problem statement).
MIN_VITAMIN_C = 110
MAX_VITAMIN_C = 130

# mg of vitamin C per unit of each food item.
vitamin_c_catalogue = {
    'suco de laranja': 120,
    'morango fresco': 85,
    'mamao': 85,
    'goiaba vermelha': 70,
    'manga': 56,
    'laranja': 50,
    'brocolis': 34,
}

for first_line in sys.stdin:
    item_count = int(first_line)
    if not item_count:
        # A lone 0 terminates the input.
        break
    total_c_vitamin = 0
    for _ in range(item_count):
        amount, food = input().split(' ', 1)
        total_c_vitamin += int(amount) * vitamin_c_catalogue[food]
    if total_c_vitamin < MIN_VITAMIN_C:
        print(f'Mais {MIN_VITAMIN_C - total_c_vitamin} mg')
    elif total_c_vitamin > MAX_VITAMIN_C:
        print(f'Menos {total_c_vitamin - MAX_VITAMIN_C} mg')
    else:
        print(f'{total_c_vitamin} mg')
| mit | Python | |
9db669a311c10b84799084e1d4ba8101137ec234 | Add .ycm_extra_conf.py | krafczyk/KSync,krafczyk/KSync | .ycm_extra_conf.py | .ycm_extra_conf.py | import os
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import ycm_core
import subprocess
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# NOTE(review): the include paths below are hardcoded to one user's home
# directory; they will not resolve on other machines.
flags = [
'-Wall',
'-Wextra',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-I',
'/home/matthew/Software/ksync/ksync/server/inc',
'-I',
'/home/matthew/Software/ksync/ksync/client/inc',
'-I',
'/home/matthew/Software/ksync/ksync/libksync/inc',
'-I',
'/home/matthew/Sources/nanomsg/nanomsg-install/include'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if compilation_database_folder:
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
def DirectoryOfThisScript():
  """Absolute path of the directory holding this .ycm_extra_conf.py."""
  return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative include/sysroot paths anchored
  at *working_directory*. Without a working directory the flags are
  returned unchanged (as a new list)."""
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  next_is_path = False
  for flag in flags:
    rewritten = flag
    if next_is_path:
      next_is_path = False
      if not flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, flag )
    for path_flag in path_flags:
      if flag == path_flag:
        # Separated form ('-I', 'dir'): the *next* flag is the path.
        next_is_path = True
        break
      if flag.startswith( path_flag ):
        # Attached form, e.g. '-Iinclude' or '--sysroot=lib'.
        rewritten = path_flag + os.path.join( working_directory,
                                              flag[ len( path_flag ): ] )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def FlagsForFile( filename ):
  """YCM entry point: return the compiler flags to use for *filename*."""
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = database.GetCompilationInfoForFile( filename )
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    # No compilation database: fall back to the hardcoded 'flags' list,
    # resolved relative to this script's directory.
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| mit | Python | |
96c8d93cf1b6a01e867ca8250fee4dea5e870c79 | Add files via upload | anthonyw12123/4chanwebscraper | 4ChanWebScraper.py | 4ChanWebScraper.py | import requests
import os
import sys
import re
from BeautifulSoup import BeautifulSoup
from PIL import Image
from StringIO import StringIO
# try:
#     opts, args = getopt.getopt(argv, "u:", ["url="])
# except getopt.GetoptError:
#     print('usage: python 4ChanWebScraper.py -u=<url>')
#     sys.exit(2)
# Thread URL is taken from the first CLI argument.
url = sys.argv[1]
print('Attempting to capture ' + url)
response = requests.get(url)
html = response.content
soup = BeautifulSoup(html)
# The page title (with slashes stripped) names the download folder.
folderName = soup.title.string.replace('/','')
print('Capturing ' + folderName)
if not os.path.exists('./' + folderName):
    os.makedirs('./'+folderName)
    print('created folder '+ folderName)
else:
    print('folder already exists')
# Each 'fileThumb' anchor's href points at the full-size image.
for link in soup.findAll('a', 'fileThumb'):
    imageName = link.get('href')
    print('Getting ' + imageName)
    # The on-disk name is the trailing digits + extension of the URL.
    fileName = re.search('\d+\.\w+$',imageName)
    savePath = './'+ folderName +'/' + fileName.group(0)
    print('saving:' + savePath)
    # hrefs here are protocol-relative, hence the explicit 'http:' prefix.
    img = requests.get('http:' + imageName)
    i = Image.open(StringIO(img.content))
    i.save(savePath)
# for thumb in table.findAll('fileThumb'):
#     print row.text
# print row.text | mit | Python | |
27fca35a08278a44bb7ba693f222c6c182061872 | Add the enemy file and start it up. | di1111/mlg-fite | Enemy.py | Enemy.py | import pygame
class Enemy(pygame.sprite.Sprite):
    """Enemy sprite centred at (x, y)."""
    def __init__(self, x, y):
        super().__init__()
        # convert_alpha() preserves the PNG's per-pixel transparency.
        self.image = pygame.image.load("images/enemy.png").convert_alpha()
        self.rect = self.image.get_rect(center=(x, y))
def
| mit | Python | |
8821024705c6500ea998431656b3c604b3066898 | Add prototype dotcode generator | Iniquitatis/mgba,Iniquitatis/mgba,Iniquitatis/mgba,libretro/mgba,mgba-emu/mgba,libretro/mgba,libretro/mgba,mgba-emu/mgba,Iniquitatis/mgba,libretro/mgba,libretro/mgba,mgba-emu/mgba,mgba-emu/mgba | tools/make-dotcode.py | tools/make-dotcode.py | import numpy as np
import PIL.Image
import PIL.ImageChops
import sys
# Read the raw payload to encode; its length determines the strip width.
# NOTE(review): this appears to generate a GBA e-Reader style dotcode
# strip -- confirm against the dotcode format documentation.
with open(sys.argv[1], 'rb') as f:
    data = f.read()
size = len(data)
# Payload bytes carried per dotcode block.
blocksize = 104
blocks = size // blocksize
# Dot-grid dimensions of one block, plus the quiet margin around the strip.
height = 36
width = 35
margin = 2
# NOTE(review): np.bool was removed in NumPy >= 1.24; plain `bool` is the
# modern spelling.
dots = np.zeros((width * blocks + margin * 2 + 1, height + margin * 2), dtype=np.bool)
# 5x5 anchor mark placed at each block boundary (top and bottom edges).
anchor = np.array([[0, 1, 1, 1, 0],
                   [1, 1, 1, 1, 1],
                   [1, 1, 1, 1, 1],
                   [1, 1, 1, 1, 1],
                   [0, 1, 1, 1, 0]], dtype=np.bool)
# Horizontal alignment/clock pattern running along each block's edges.
alignment = np.array([1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0], dtype=np.bool)
# 4-bit value -> 5-dot codeword table (one entry per nybble 0x0..0xF).
nybbles = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [1, 0, 0, 1, 0],
           [0, 0, 1, 0, 0], [0, 0, 1, 0, 1], [0, 0, 1, 1, 0], [1, 0, 1, 1, 0],
           [0, 1, 0, 0, 0], [0, 1, 0, 0, 1], [0, 1, 0, 1, 0], [1, 0, 1, 0, 0],
           [0, 1, 1, 0, 0], [0, 1, 1, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 0]]
# Precompute the 16-bit address codes; each step XORs in a Gray-code-like
# increment of the 0x769 polynomial, with extra terms at power-of-two ranks.
addr = [0x03FF]
for i in range(1, 54):
    addr.append(addr[i - 1] ^ ((i & -i) * 0x769))
    if (i & 0x07) == 0:
        addr[i] ^= 0x769
    if (i & 0x0F) == 0:
        addr[i] ^= 0x769 << 1
    if (i & 0x1F) == 0:
        addr[i] ^= (0x769 << 2) ^ 0x769
# Strip length selects the starting address (18-block "long" strips start
# at 1, otherwise at 25).
base = 1 if blocks == 18 else 25
for i in range(blocks + 1):
    # Anchors at the top and bottom of every block boundary.
    dots[i * width:i * width + 5, 0:5] = anchor
    dots[i * width:i * width + 5, height + margin * 2 - 5:height + margin * 2] = anchor
    dots[i * width + margin, margin + 5] = 1
    # Write the 16 address bits, MSB first, below the top anchor.
    a = addr[base + i]
    for j in range(16):
        dots[i * width + margin, margin + 14 + j] = a & (1 << (15 - j))
for i in range(blocks):
    # Alignment rows along the block's top and bottom data edges.
    dots[i * width:(i + 1) * width, margin] = alignment
    dots[i * width:(i + 1) * width, height + margin - 1] = alignment
    # Expand the block's payload bytes into the 5-dot-per-nybble bitstream.
    block = []
    for byte in data[i * blocksize:(i + 1) * blocksize]:
        block.extend(nybbles[byte >> 4])
        block.extend(nybbles[byte & 0xF])
    # Fill the data area: 3 narrow rows (26 dots), 26 wide rows (34 dots),
    # then 3 narrow rows again.
    j = 0
    for y in range(3):
        dots[i * width + margin + 5:i * width + margin + 31, margin + 2 + y] = block[j:j + 26]
        j += 26
    for y in range(26):
        dots[i * width + margin + 1:i * width + margin + 35, margin + 5 + y] = block[j:j + 34]
        j += 34
    for y in range(3):
        dots[i * width + margin + 5:i * width + margin + 31, margin + 31 + y] = block[j:j + 26]
        j += 26
# Transpose to image orientation and invert so dots are black on white.
im = PIL.Image.fromarray(dots.T)
im = PIL.ImageChops.invert(im)
im.save('dotcode.png')
| mpl-2.0 | Python | |
982cd61d7532365d9de56b308c7a4d8308302c15 | Add a test to demonstrate issue with django 1.11 | kmmbvnr/django-fsm,kmmbvnr/django-fsm | tests/testapp/tests/test_model_create_with_generic.py | tests/testapp/tests/test_model_create_with_generic.py | try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django 1.6
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition
class Ticket(models.Model):
    """Plain model used as the target of Task's generic foreign key."""
    class Meta:
        app_label = 'testapp'
class Task(models.Model):
    """Model combining a GenericForeignKey with an FSM state field."""
    class STATE:
        # Allowed values of the `state` field.
        NEW = 'new'
        DONE = 'done'
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    # Virtual field resolving (content_type, object_id) to the related object.
    causality = GenericForeignKey('content_type', 'object_id')
    state = FSMField(default=STATE.NEW)
    @transition(field=state, source=STATE.NEW, target=STATE.DONE)
    def do(self):
        pass
    class Meta:
        app_label = 'testapp'
class Test(TestCase):
    """Regression test: creating a model with an FSM field must work even
    when another field is virtual (Django 1.11 issue)."""
    def setUp(self):
        self.ticket = Ticket.objects.create()
    def test_model_objects_create(self):
        """Check a model with state field can be created
        if one of the other fields is a property or a virtual field.
        """
        Task.objects.create(causality=self.ticket)
| mit | Python | |
efcda7dad6efb189713b8cebb20b4d8b64a85c71 | Add tools/msgpack2json.py | takumak/tuna,takumak/tuna | tools/msgpack2json.py | tools/msgpack2json.py | import sys, json, umsgpack
json.dump(umsgpack.unpack(sys.stdin.buffer), sys.stdout)
| mit | Python | |
0dc5154daa12ea196bb5fdeb1342f6f7b3e6e62b | Add markov model baseline | ankur-gos/PSL-Bipedal,ankur-gos/PSL-Bipedal | MarkovModel/model.py | MarkovModel/model.py | '''
Markov Model for transportation
Ankur Goswami
'''
def load_inputs(datafiles):
    """Read tab-separated (segment, data) rows from ``datafiles``.

    Each line is split on its first tab: the left part identifies the
    segment, the remainder of the line is appended to that segment's
    accumulated data. Returns the accumulated data strings as a list,
    one entry per distinct segment.
    """
    inputs = {}
    for path in datafiles:
        with open(path, 'r') as rf:
            for line in rf:
                split = line.split('\t', 1)
                segnum = split[0]
                # BUG FIX: `if segnum is in inputs` was a SyntaxError, and the
                # code accumulated segnum[1] (the 2nd character of the key)
                # instead of the data column split[1].
                if segnum in inputs:
                    inputs[segnum] += split[1]
                else:
                    inputs[segnum] = split[1]
    final_inputs = []
    for key in inputs:
        final_inputs.append(inputs[key])
    return final_inputs
def run(inputs):
    """Estimate transition probabilities from observed transitions.

    Counts each distinct transition, drops transitions observed only
    once, and normalises the remaining counts into probabilities.
    Returns a dict mapping transition -> probability.
    """
    counts = {}
    for transition in inputs:
        # BUG FIX: `is in` was a SyntaxError; plain `in` is the membership test.
        if transition in counts:
            counts[transition] += 1
        else:
            counts[transition] = 1
    # `total` replaces the original `sum`, which shadowed the builtin.
    total = 0
    # BUG FIX: iterating counts.values() cannot unpack (key, val) pairs, and
    # deleting entries while iterating the dict raises RuntimeError; take a
    # snapshot of the items first.
    for key, val in list(counts.items()):
        # There must be a minimum of 2 trips to be considered
        if val == 1:
            del counts[key]
        else:
            total += val
    for key, val in counts.items():
        counts[key] = val / total
    return counts
def output(counts, output_file):
    """Write transitions to ``output_file`` sorted by ascending probability,
    one '<transition><TAB><probability>' pair per line."""
    sorted_tuples = sorted(counts.items(), key=lambda x: x[1])
    with open(output_file, 'w+') as wf:
        for tup in sorted_tuples:
            # BUG FIX: the format string was written without applying the
            # (key, value) tuple, so the literal "%s\t%f\n" text was emitted.
            wf.write("%s\t%f\n" % tup)
| mit | Python | |
50bfa56b660d5d39c1dd7b3d426fcd589a9719bb | add univdump.py for extracting password dumps [wip] | The-Orizon/nlputils,The-Orizon/nlputils,The-Orizon/nlputils | univdump.py | univdump.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import sys
import collections
'''
This script tries its best to fvck these various esoteric hard-to-process
user database dump or leak files.
'''
# Matches one '<field>' placeholder inside a format template string.
re_field = re.compile(r'(<\w+>)')
# A compiled record format: its full regex plus the ordered field names.
RecFormat = collections.namedtuple('RecFormat', ('regex', 'fields'))
FORMATS = (
'<password>',
'<email>,<password>',
'<email>\t<password>',
'<email>----<password>',
'<email> <password>',
'<email> <password>',
'<username>\t<password>\t<email>',
'<username>\s+<md5>\s+<email>',
'<username>\t<md5>\t<email>',
'<username>\t<md5>\t<email>\t<password>',
'<username>\t<md5>\t\t\t<email>\t<password>',
'<username>\t\|\t<md5>\t\|\t<email>\t\|\t<password>',
'<email>\t<md5>\t<username>\t<email>\t<password>',
'<username>,<password>,<email>',
'<email>\t<md5>\t<name>\t<username>\t<md5>\t<phone>\t<digits>',
'<digits>\t<username>\t<md5>\t<other>\t<other>\t<digits>\t<email>\t<ignore>',
"\(<digits>,\s+'<email>',\s+'<extuname>',\s+'<md5>',\s+<digits>\),",
)
class FormatDetector:
    """[WIP] Detects which of the known record formats a dump line matches."""
    # Regex fragment for each '<field>' placeholder used in FORMATS.
    TEMPLATES = {
        'password': '[ -~]+',
        'email': '[A-Za-z0-9._-]+@[A-Za-z0-9.-]+',
        'username': '[\w.]+',
        'extuname': '\S+',
        'name': '[\w .]+',
        'md5': '[A-Fa-f0-9]{32}',
        'phone': '[0-9 +-]{5,}',
        'digits': '[0-9]+',
        'other': '.+?',
        'ignore': '.+',
    }
    def __init__(self, formats):
        # NOTE(review): `formats` is accepted but not used yet; this
        # work-in-progress version always starts with an empty list.
        self.formats = []
def makeindex():
    # Placeholder: index construction not implemented yet.
    pass
if __name__ == '__main__':
    # CLI entry point: not implemented yet.
    pass
| mit | Python | |
b2e27f42b3f8de10e11faf128183ca5fa3c0ea3f | Add 0025 | starlightme/My-Solutions-For-Show-Me-the-Code,starlightme/My-Solutions-For-Show-Me-the-Code | Jimmy66/0025/0025.py | Jimmy66/0025/0025.py | #!/usr/bin/env python3
import speech_recognition as sr
import webbrowser

# obtain path to "test.wav" in the same folder as this script
from os import path
WAV_FILE = path.join(path.dirname(path.realpath(__file__)), "test.wav")

# use "test.wav" as the audio source
r = sr.Recognizer()
with sr.WavFile(WAV_FILE) as source:
    audio = r.record(source) # read the entire WAV file

# recognize speech using Google Speech Recognition
try:
    # for testing purposes, we're just using the default API key
    # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
    # instead of `r.recognize_google(audio)`
    # BUG FIX: recognize_google() was called twice, doubling the network
    # round-trip (and potentially yielding different results); call it once
    # and reuse the value.
    result = r.recognize_google(audio)
    print("Google Speech Recognition thinks you said " + result)
    webbrowser.open_new_tab(result)
    print(result)
except sr.UnknownValueError:
    print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
    print("Could not request results from Google Speech Recognition service; {0}".format(e))
| mpl-2.0 | Python | |
07f522bed6a285507aadd66df89b14022e1e2a04 | add new package : openresty (#14169) | iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/openresty/package.py | var/spack/repos/builtin/packages/openresty/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openresty(AutotoolsPackage):
    """
    OpenResty is a full-fledged web application server by bundling the
    standard nginx core, lots of 3rd-party nginx modules, as well as
    most of their external dependencies.
    """
    homepage = "https://github.com/openresty"
    url = "https://github.com/openresty/openresty/releases/download/v1.15.8.2/openresty-1.15.8.2.tar.gz"
    # Known releases with their sha256 checksums.
    version('1.15.8.2', sha256='bf92af41d3ad22880047a8b283fc213d59c7c1b83f8dae82e50d14b64d73ac38')
    version('1.15.8.1', sha256='89a1238ca177692d6903c0adbea5bdf2a0b82c383662a73c03ebf5ef9f570842')
    version('1.13.6.2', sha256='946e1958273032db43833982e2cec0766154a9b5cb8e67868944113208ff2942')
    depends_on('pcre', type='build')
    def configure_args(self):
        """Extra arguments passed to ./configure by the Autotools build."""
        args = ['--without-http_rewrite_module']
        return args
| lgpl-2.1 | Python | |
20fc164862f72527ef7d06bcbfe9dc4329ef9fa7 | add problem, hackerrank 005 plus minus | caoxudong/code_practice,caoxudong/code_practice,caoxudong/code_practice,caoxudong/code_practice | hackerrank/005_plus_minus.py | hackerrank/005_plus_minus.py | #!/bin/python3
"""
https://www.hackerrank.com/challenges/plus-minus?h_r=next-challenge&h_v=zen
Given an array of integers, calculate which fraction of its elements are positive, which fraction of its elements are negative, and which fraction of its elements are zeroes, respectively. Print the decimal value of each fraction on a new line.
Note: This challenge introduces precision problems. The test cases are scaled to six decimal places, though answers with absolute error of up to 10^(-4) are acceptable.
Input Format
The first line contains an integer, N, denoting the size of the array.
The second line contains N space-separated integers describing an array of numbers (a0, a1, a2,...,a(n-1)).
Output Format
You must print the following lines:
A decimal representing of the fraction of positive numbers in the array.
A decimal representing of the fraction of negative numbers in the array.
A decimal representing of the fraction of zeroes in the array.
Sample Input
6
-4 3 -9 0 4 1
Sample Output
0.500000
0.333333
0.166667
Explanation
There are 3 positive numbers, 2 negative numbers, and 1 zero in the array.
The respective fractions of positive numbers, negative numbers and zeroes are 3 / 6 = 0.500000, 2 / 6 = 0.333333 and , 1 / 6 = 0.166667, respectively.
"""
import sys
# Read N and the N space-separated integers from stdin.
n = int(input().strip())
arr = [int(arr_temp) for arr_temp in input().strip().split(' ')]
# NOTE(review): the positive/negative/zero fractions are not computed yet;
# this solution is incomplete.
| mit | Python | |
6e0f585a8f8433d4f6800cb1f093f97f8a1d4ff7 | Update imports for new functions | mhvk/astropy,saimn/astropy,funbaker/astropy,bsipocz/astropy,tbabej/astropy,pllim/astropy,stargaser/astropy,AustereCuriosity/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,funbaker/astropy,kelle/astropy,saimn/astropy,larrybradley/astropy,lpsinger/astropy,pllim/astropy,dhomeier/astropy,bsipocz/astropy,larrybradley/astropy,lpsinger/astropy,pllim/astropy,MSeifert04/astropy,astropy/astropy,mhvk/astropy,aleksandr-bakanov/astropy,MSeifert04/astropy,tbabej/astropy,tbabej/astropy,tbabej/astropy,saimn/astropy,stargaser/astropy,joergdietrich/astropy,astropy/astropy,kelle/astropy,kelle/astropy,dhomeier/astropy,MSeifert04/astropy,mhvk/astropy,StuartLittlefair/astropy,saimn/astropy,StuartLittlefair/astropy,lpsinger/astropy,larrybradley/astropy,tbabej/astropy,DougBurke/astropy,AustereCuriosity/astropy,dhomeier/astropy,lpsinger/astropy,AustereCuriosity/astropy,DougBurke/astropy,lpsinger/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,kelle/astropy,funbaker/astropy,DougBurke/astropy,joergdietrich/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,astropy/astropy,joergdietrich/astropy,StuartLittlefair/astropy,mhvk/astropy,bsipocz/astropy,joergdietrich/astropy,AustereCuriosity/astropy,DougBurke/astropy,dhomeier/astropy,pllim/astropy,AustereCuriosity/astropy,bsipocz/astropy,funbaker/astropy,pllim/astropy,saimn/astropy,astropy/astropy,mhvk/astropy,kelle/astropy,stargaser/astropy,astropy/astropy,larrybradley/astropy,stargaser/astropy,MSeifert04/astropy,StuartLittlefair/astropy,joergdietrich/astropy | imageutils/__init__.py | imageutils/__init__.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Image processing utilities for Astropy.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
    # Re-export the public helpers from the submodules at package level.
    from .scale_img import *
    from .array_utils import *
    from .sampling import *
# Explicit public API; the names come from the starred imports above.
__all__ = ['find_imgcuts', 'img_stats', 'rescale_img', 'scale_linear',
           'scale_sqrt', 'scale_power', 'scale_log', 'scale_asinh',
           'downsample', 'upsample', 'extract_array_2d', 'add_array_2d',
           'subpixel_indices', 'fix_prf_nan']
| bsd-3-clause | Python | |
0573ed88c4de497b2da7088795b0d747bb2bd2ce | Add ICT device | lnls-fac/sirius | pymodels/middlelayer/devices/ict.py | pymodels/middlelayer/devices/ict.py | #!/usr/bin/env python-sirius
from epics import PV
class ICT:
    """EPICS interface for a linac Integrating Current Transformer.

    `name` must be 'ICT-1' or 'ICT-2'; the matching charge and pulse-count
    process variables are connected on construction.
    """

    def __init__(self, name):
        if name in ['ICT-1', 'ICT-2']:
            self._charge = PV('LI-01:DI-' + name + ':Charge-Mon')
            # BUG FIX: the four statistics PVs concatenated a stray 'ICT-1'
            # after `name` (e.g. 'LI-01:DI-ICT-2ICT-1:ChargeAvg-Mon'), which
            # is not a valid PV name for either device.
            self._charge_avg = PV('LI-01:DI-' + name + ':ChargeAvg-Mon')
            self._charge_max = PV('LI-01:DI-' + name + ':ChargeMax-Mon')
            self._charge_min = PV('LI-01:DI-' + name + ':ChargeMin-Mon')
            self._charge_std = PV('LI-01:DI-' + name + ':ChargeStd-Mon')
            self._pulse_cnt = PV('LI-01:DI-' + name + ':PulseCount-Mon')
        else:
            raise Exception('Set device name: ICT-1 or ICT-2')

    @property
    def connected(self):
        """True when every underlying PV connection is established."""
        conn = self._charge.connected
        conn &= self._charge_avg.connected
        conn &= self._charge_max.connected
        conn &= self._charge_min.connected
        conn &= self._charge_std.connected
        conn &= self._pulse_cnt.connected
        return conn

    @property
    def charge(self):
        """Last measured charge."""
        return self._charge.get()

    @property
    def charge_avg(self):
        """Average charge reading."""
        return self._charge_avg.get()

    @property
    def charge_max(self):
        """Maximum charge reading."""
        return self._charge_max.get()

    @property
    def charge_min(self):
        """Minimum charge reading."""
        return self._charge_min.get()

    @property
    def charge_std(self):
        """Standard deviation of the charge readings."""
        return self._charge_std.get()

    @property
    def pulse_count(self):
        """Number of pulses counted by the device."""
        return self._pulse_cnt.get()
class TranspEff:
    """Read-only accessor for the linac transport-efficiency PV."""
    def __init__(self):
        self._eff = PV('LI-Glob:AP-TranspEff:Eff-Mon')
    @property
    def connected(self):
        # True when the channel-access connection is up.
        return self._eff.connected
    @property
    def efficiency(self):
        """Current transport efficiency reading."""
        return self._eff.get()
| mit | Python | |
a080713a1dd0dd0c1b9c487f9c5413f3e4419db9 | Create MQTT2StepperMotor.py | Anton04/RaspPy-StepperMotor-Driver | MQTT2StepperMotor.py | MQTT2StepperMotor.py | # Author: Anton Gustafsson
# Released under MIT license
#!/usr/bin/python
from StepperMotorDriver import MotorControl
class
| mit | Python | |
d26069ddbb35a10f4a368c855d94d1dde1872a82 | Add better solution for etl | always-waiting/exercism-python | etl/etl.better.py | etl/etl.better.py | def transform(d):
    '''Invert the score -> letters mapping: each letter (lower-cased)
    maps to its score.'''
    return {l.lower(): p for p, letters in d.items() for l in letters}
def transform(strs):
    """Invert a score -> letters mapping into letter (lower-cased) -> score.

    Note: this redefinition shadows the dict-comprehension version defined
    earlier in the file.
    """
    result = {}
    for score, letters in strs.items():
        for letter in letters:
            # Assign directly; building a one-item dict per letter just to
            # call update(), and copying via dict(result.items()), was wasteful.
            result[letter.lower()] = score
    return result
| mit | Python | |
61c2ec9efdf72f0ab02ed12c8486bc9ca8f690e6 | Add MLP code | meet-vora/mlp-classifier | neuralnet.py | neuralnet.py | import numpy as np
import random

from scipy.special import expit

from constants import *
class NeuralNetMLP(object):
    def __init__(self, layers, random_state=None):
        """ Initialise the layers as list(input_layer, ...hidden_layers..., output_layer) """
        # NOTE: seeds numpy's *global* RNG, affecting other numpy users too.
        np.random.seed(random_state)
        self.num_layers = len(layers)
        self.layers = layers
        self.initialize_weights()
def initialize_weights(self):
""" Randomly generate biases and weights for hidden layers.
Weights have a Gaussian distribution with mean 0 and
standard deviation 1 over the square root of the number
of weights connecting to the same neuron """
self.biases = [np.random.randn(y, 1) for y in self.layers[1:]]
self.weights = [np.random.randn(y, x)/np.sqrt(x) for x, y in zip(self.layers[:-1], self.layers[1:])]
def fit(self, training_data, l1=0.0, l2=0.0, epochs=500, eta=0.001, minibatches=1, regularization = L2):
""" Fits the parameters according to training data.
l1(2) is the L1(2) regularization coefficient. """
self.l1 = l1
self.l2 = l2
n = len(training_data)
for epoch in xrange(epochs):
random.shuffle(training_data)
mini_batches = [training_data[k:k+mini_batch_size] for k in xrange(0, n, minibatches)]
for mini_batch in mini_batches:
self.batch_update( mini_batch, eta, len(training_data), regularization)
def batch_update(self, mini_batch, eta, n, regularization=L2):
""" Update the network's weights and biases by applying gradient
descent using backpropagation to a single mini batch. """
nabla_b = [np.zeroes(b.shape) for b in self.baises]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]
if regularization == L2:
self.weights = [(1-eta*(self.l2/n))*w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
elif regularization == L1:
self.weights = [w - eta*self.l1*np.sign(w)/n-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
def back_propogation(self, x, y, fn = SIGMOID):
""" Gradient for cost function is calculated from a(L) and
back-propogated to the input layer.
Cross Entropy cost functionis associated with sigmoid neurons, while
Log-Likelihood cost function is associated with softmax neurons."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
if fn == SIGMOID:
activation = sigmoid(z)
else:
activation = softmax(z)
activations.append(activation)
dell = delta(activations[-1], y)
nabla_b[-1] = dell
nabla_w[-1] = np.dot(dell, activations[-2].transpose())
for l in xrange(2, self.num_layers -2, 0, -1):
dell = np.dot(self.weights[l+1].transpose(), dell) * derivative(zs[l], fn)
nabla_b[-l] = dell
nabla_w[-l] = np.dot(dell, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def cross_entropy_loss(a, y):
    """Cross-entropy cost between activations `a` and targets `y`.

    nan_to_num guards the 0 * log(0) corner cases.
    """
    per_element = -y * np.log(a) - (1 - y) * np.log(1 - a)
    return np.sum(np.nan_to_num(per_element))
def log_likelihood_loss(a, y):
    """Log-likelihood cost: negative dot product of targets with softmax(a)."""
    probabilities = softmax(a)
    return -np.dot(y, probabilities.transpose())
def delta(a, y):
    """Output-layer error term; works out to (a - y) for both cost functions."""
    return a - y
def sigmoid(z):
    """Logistic sigmoid; expit computes 1.0/(1.0 + np.exp(-z)) in C."""
    value = expit(z)
    return value
def softmax(z):
    """Numerically stable softmax of `z` (scalar or array).

    The original called float(z), which raises TypeError for any array
    input; subtracting the maximum before exponentiating also prevents
    overflow for large inputs while leaving the result unchanged.
    """
    z = np.asarray(z, dtype=float)
    e = np.exp(z - np.max(z))
    return e / np.sum(e)
def derivative(z, fn):
    """Derivative of the activation: f(z) * (1 - f(z)) for both functions.

    Raises ValueError for an unknown activation id; the original fell
    through with `f` unbound and died with a confusing NameError.
    """
    if fn == SIGMOID:
        f = sigmoid
    elif fn == SOFTMAX:
        f = softmax
    else:
        raise ValueError("unknown activation function id: %r" % (fn,))
    # Evaluate f(z) once instead of twice.
    fz = f(z)
    return fz * (1 - fz)
54b94346d2669347cf2a9a2b24df6b657cf80c5b | Mask computation utilities (from nipy). | abenicho/isvr | nisl/mask.py | nisl/mask.py | import numpy as np
from scipy import ndimage
###############################################################################
# Operating on connect component
###############################################################################
def largest_cc(mask):
    """ Return the largest connected component of a 3D mask array.

        Parameters
        -----------
        mask: 3D boolean array
              3D array indicating a mask.

        Returns
        --------
        mask: 3D boolean array
              3D array indicating a mask, with only one connected component.

        Raises
        ------
        ValueError
            If the mask contains no non-zero values.
    """
    # We use asarray to be able to work with masked arrays.
    mask = np.asarray(mask)
    labels, label_nb = ndimage.label(mask)
    if not label_nb:
        raise ValueError('No non-zero values: no connected components')
    if label_nb == 1:
        # Fixed: the deprecated alias np.bool was removed in NumPy 1.24;
        # the builtin bool is the correct dtype argument.
        return mask.astype(bool)
    label_count = np.bincount(labels.ravel())
    # discard the 0 (background) label
    label_count[0] = 0
    return labels == label_count.argmax()
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask(mean_volume, m=0.2, M=0.9, cc=True,
                 exclude_zeros=False):
    """
    Compute a mask file from fMRI data in 3D or 4D ndarrays.

    Compute and write the mask of an image based on the grey level
    This is based on an heuristic proposed by T.Nichols:
    find the least dense point of the histogram, between fractions
    m and M of the total image histogram.

    In case of failure, it is usually advisable to increase m.

    Parameters
    ----------
    mean_volume : 3D ndarray
        mean EPI image, used to compute the threshold for the mask.
    m : float, optional
        lower fraction of the histogram to be discarded.
    M: float, optional
        upper fraction of the histogram to be discarded.
    cc: boolean, optional
        if cc is True, only the largest connect component is kept.
    exclude_zeros: boolean, optional
        Consider zeros as missing values for the computation of the
        threshold. This option is useful if the images have been
        resliced with a large padding of zeros.

    Returns
    -------
    mask : 3D boolean ndarray
        The brain mask
    """
    sorted_input = np.sort(mean_volume.reshape(-1))
    if exclude_zeros:
        sorted_input = sorted_input[sorted_input != 0]
    # Fixed: np.floor returns floats, and float indices/slice bounds are
    # rejected by modern NumPy — cast to int before indexing.
    limite_inf = int(np.floor(m * len(sorted_input)))
    limite_sup = int(np.floor(M * len(sorted_input)))

    # Gap between consecutive sorted intensities inside the [m, M] band;
    # the threshold is placed in the middle of the largest gap.
    delta = sorted_input[limite_inf + 1:limite_sup + 1] \
        - sorted_input[limite_inf:limite_sup]
    ia = delta.argmax()
    threshold = 0.5 * (sorted_input[ia + limite_inf]
                       + sorted_input[ia + limite_inf + 1])

    mask = (mean_volume >= threshold)

    if cc:
        mask = largest_cc(mask)
    return mask.astype(bool)
| bsd-3-clause | Python | |
8e6c1a296be39c5cd1e75d5ff9974f80449690e3 | Add VVT tool class | sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,sosy-lab/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec,IljaZakharov/benchexec,IljaZakharov/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,martin-neuhaeusser/benchexec,dbeyer/benchexec,IljaZakharov/benchexec,IljaZakharov/benchexec,dbeyer/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec | benchexec/tools/vvt.py | benchexec/tools/vvt.py | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.tools.template
import benchexec.util as util
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
    """
    Tool wrapper for the Vienna Verification Toolkit (VVT).
    """

    def executable(self):
        """Locate the benchmark wrapper script on the PATH."""
        return util.find_executable('vvt-svcomp-bench.sh')

    def version(self, executable):
        """No version query is available yet; report a fixed placeholder."""
        return 'prerelease'

    def name(self):
        """Human-readable tool name."""
        return 'VVT'

    def cmdline(self, executable, options, tasks, propertyfile, rlimits):
        """Build the command line: the wrapper script followed by the tasks.

        NOTE(review): `options` and `propertyfile` are intentionally ignored,
        matching the original behavior.
        """
        command = [executable]
        command.extend(tasks)
        return command

    def determine_result(self, returncode, returnsignal, output, isTimeOut):
        """Map the tool's textual output onto a BenchExec result category."""
        try:
            if "No bug found.\n" in output:
                verdict = result.RESULT_TRUE_PROP
            elif "Bug found:\n" in output:
                verdict = result.RESULT_FALSE_REACH
            else:
                verdict = result.RESULT_UNKNOWN
            return verdict
        except Exception:
            return result.RESULT_UNKNOWN
| apache-2.0 | Python | |
f333b9c5741a7ffbf49caa0a6130831a834b944f | Add unit tests for recent bugfix and move operation | Bklyn/dotfiles,aparente/Dotfiles,nilehmann/dotfiles-1,aparente/Dotfiles,aparente/Dotfiles,aparente/Dotfiles | test_dotfiles.py | test_dotfiles.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import tempfile
import unittest
from dotfiles import core
def touch(fname, times=None):
    """Create `fname` if missing and set its access/modification times.

    `times` is passed straight to os.utime (None means "now").
    Fixed: the original used the Python-2-only `file()` builtin, which
    does not exist on Python 3 — `open()` is the portable spelling.
    """
    with open(fname, 'a'):
        os.utime(fname, times)
class DotfilesTestCase(unittest.TestCase):

    def setUp(self):
        """Create a temporary home directory holding a fresh repository."""
        self.home = tempfile.mkdtemp()
        # The tests operate on a dotfiles repository located inside $HOME.
        self.repo = os.path.join(self.home, 'Dotfiles')
        os.mkdir(self.repo)

    def tearDown(self):
        """Delete the temporary home directory and its contents."""
        shutil.rmtree(self.home)

    def test_force_sync_directory(self):
        """Test forced sync when the dotfile is a directory.

        I installed the lastpass chrome extension which stores a socket in
        ~/.lastpass. So I added that directory as an external to /tmp and
        attempted a forced sync. An error occurred because sync() calls
        os.remove() as it mistakenly assumes the dotfile is a file and not
        a directory.
        """
        os.mkdir(os.path.join(self.home, '.lastpass'))
        dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
                                 ignore=[], externals={'.lastpass': '/tmp'})
        dotfiles.sync(force=True)
        resolved = os.path.realpath(os.path.join(self.home, '.lastpass'))
        self.assertEqual(resolved, '/tmp')

    def test_move_repository(self):
        """Test the move() method for a Dotfiles repository."""
        touch(os.path.join(self.repo, 'bashrc'))
        dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
                                 ignore=[], force=True, externals={})
        dotfiles.sync()
        # make sure sync() linked ~/.bashrc into the repository
        self.assertEqual(
            os.path.realpath(os.path.join(self.home, '.bashrc')),
            os.path.join(self.repo, 'bashrc'))
        destination = os.path.join(self.home, 'MyDotfiles')
        dotfiles.move(destination)
        self.assertTrue(os.path.exists(os.path.join(destination, 'bashrc')))
        self.assertEqual(
            os.path.realpath(os.path.join(self.home, '.bashrc')),
            os.path.join(destination, 'bashrc'))
def suite():
    """Build the unittest suite containing all DotfilesTestCase tests."""
    return unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)
# Run the suite with a plain text runner when executed as a script.
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
| isc | Python | |
2b0e13039dad8d116a5719540004bed317bb6960 | Add tests and fixtures for the Organizations API wrapper | jbogarin/ciscosparkapi | tests/api/test_organizations.py | tests/api/test_organizations.py | # -*- coding: utf-8 -*-
"""pytest Licenses functions, fixtures and tests."""
import pytest
import ciscosparkapi
# Helper Functions
def list_organizations(api, max=None):
    """Return the organizations visible to `api` as a list.

    `max` is forwarded to the API's paging parameter (the name mirrors
    the remote API, hence the shadowed builtin).
    """
    organizations = api.organizations.list(max=max)
    return list(organizations)
def get_organization_by_id(api, orgId):
    """Fetch a single organization by its id through the API wrapper."""
    organization = api.organizations.get(orgId)
    return organization
def is_valid_organization(obj):
    """An object is valid if it is an Organization with a non-None id."""
    if not isinstance(obj, ciscosparkapi.Organization):
        return False
    return obj.id is not None
def are_valid_organizations(iterable):
    """True when every element of `iterable` is a valid Organization."""
    return all(is_valid_organization(item) for item in iterable)
# pytest Fixtures
@pytest.fixture(scope="session")
def organizations_list(api):
    """Session-scoped fixture: the full organization list visible to `api`."""
    return list_organizations(api)
# Tests
class TestOrganizationsAPI(object):
    """Test OrganizationsAPI methods."""

    def test_list_organizations(self, organizations_list):
        """Listing returns only valid Organization objects."""
        assert are_valid_organizations(organizations_list)

    def test_get_organization_by_id(self, api, organizations_list):
        """Fetching the first listed organization by id yields a valid object."""
        assert len(organizations_list) >= 1
        first_org_id = organizations_list[0].id
        fetched = get_organization_by_id(api, orgId=first_org_id)
        assert is_valid_organization(fetched)
| mit | Python | |
feea11952ceab35523052a93a8ca6ff822d1357c | add 141 | zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler | vol3/141.py | vol3/141.py | import math
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm (iterative)."""
    while a % b != 0:
        a, b = b, a % b
    return b
def is_square(n):
    """Return True if the non-negative integer n is a perfect square.

    int(math.sqrt(n)) alone can be off by one for n beyond 2**53 because
    of floating-point rounding, so the candidate root is adjusted with
    exact integer arithmetic before the final comparison.
    """
    root = int(math.sqrt(n))
    while root * root > n:
        root -= 1
    while (root + 1) * (root + 1) <= n:
        root += 1
    return root * root == n
if __name__ == "__main__":
L = 10 ** 12
s = set()
for a in xrange(2, 10000):
for b in xrange(1, a):
if a * a * a * b + b * b >= L:
break
if gcd(a, b) > 1:
continue
c = 1
while True:
n = a * a * a * b * c * c + b * b * c
if n >= L:
break
if is_square(n):
s.add(n)
c += 1
print sum(s)
| mit | Python | |
284c29d257b7c6902b5973ca05278ee5b05571e9 | test subclassing! | SexualHealthInnovations/callisto-core,SexualHealthInnovations/callisto-core,project-callisto/callisto-core,project-callisto/callisto-core | tests/delivery/test_frontend.py | tests/delivery/test_frontend.py | from wizard_builder.tests import test_frontend as wizard_builder_tests
class EncryptedFrontendTest(wizard_builder_tests.FrontendTest):
    """Frontend test that additionally drives the report-encryption key form."""

    secret_key = 'soooooo seekrit'

    def setUp(self):
        """After the base setup, fill in both key fields and submit the form."""
        super().setUp()
        find = self.browser.find_element_by_css_selector
        for selector in ('[name="key"]', '[name="key_confirmation"]'):
            find(selector).send_keys(self.secret_key)
        find('[type="submit"]').click()
| agpl-3.0 | Python | |
bd9f509bbd97f3a28eb24740dc08bc153cf82613 | add voronoi cell class | ipudu/order | order/avc.py | order/avc.py | ###############################################################################
# -*- coding: utf-8 -*-
# Order: A tool to characterize the local structure of liquid water
# by geometric order parameters
#
# Authors: Pu Du
#
# Released under the MIT License
###############################################################################
from __future__ import division, print_function
from six.moves import range
import numpy as np
class VoronoiCell(object):
    """Asphericity of the Voronoi cell.

    eta = S**3 / (36 * pi * V**2), where S is the total surface area and
    V the volume of the polyhedron; eta equals 1 for a sphere and grows
    for less spherical cells.
    """

    def __init__(self):
        pass

    def compute_vc(self, planes):
        """Compute the asphericity eta of a Voronoi polyhedron.

        `planes` is an iterable of faces; each face is a sequence of 3-D
        vertex coordinates given in consecutive order around the face.
        The volume formula assumes the origin lies inside the polyhedron
        (true for a Voronoi cell centred on its seed).
        """
        # total area of all faces
        S = 0.0
        # total volume of the Voronoi polyhedron
        V = 0.0
        for plane in planes:
            # Fan-triangulate the face: |(v_i - v_0) x (v_{i+1} - v_0)| is
            # twice each triangle's area.  Fixed: np.outter does not exist;
            # the intended operation is the cross product (np.cross).
            area = 0.0
            for i in range(1, len(plane) - 1):
                area += np.linalg.norm(np.cross(plane[i] - plane[0],
                                                plane[i + 1] - plane[0]))
            # |v_0 . (v_i x v_{i+1})| / 6 is the volume of the tetrahedron
            # each triangle forms with the origin (scalar triple product).
            vol = 0.0
            for i in range(1, len(plane) - 1):
                vol += abs(np.dot(np.cross(plane[0], plane[i]),
                                  plane[i + 1]))
            area *= 0.5
            vol *= 1.0 / 6.0
            S += area
            V += vol
        eta = S ** 3 / (36 * np.pi * V ** 2)
        return eta
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.